Diffstat (limited to '.venv/lib/python3.12/site-packages/alembic')
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/__init__.py | 4
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/__main__.py | 4
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/autogenerate/__init__.py | 10
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/autogenerate/api.py | 650
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/autogenerate/compare.py | 1317
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/autogenerate/render.py | 1125
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/autogenerate/rewriter.py | 240
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/command.py | 760
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/config.py | 640
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/context.py | 5
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/context.pyi | 856
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/ddl/__init__.py | 6
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/ddl/_autogen.py | 329
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/ddl/base.py | 336
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/ddl/impl.py | 885
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/ddl/mssql.py | 419
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/ddl/mysql.py | 491
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/ddl/oracle.py | 202
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/ddl/postgresql.py | 850
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/ddl/sqlite.py | 237
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/environment.py | 1
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/migration.py | 1
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/op.py | 5
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/op.pyi | 1337
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/operations/__init__.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/operations/base.py | 1906
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/operations/batch.py | 718
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/operations/ops.py | 2799
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/operations/schemaobj.py | 290
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/operations/toimpl.py | 225
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/py.typed | 0
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/runtime/__init__.py | 0
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/runtime/environment.py | 1051
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/runtime/migration.py | 1391
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/script/__init__.py | 4
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/script/base.py | 1066
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/script/revision.py | 1728
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/script/write_hooks.py | 179
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/async/README | 1
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/async/alembic.ini.mako | 117
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/async/env.py | 89
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/async/script.py.mako | 28
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/generic/README | 1
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/generic/alembic.ini.mako | 119
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/generic/env.py | 78
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/generic/script.py.mako | 28
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/multidb/README | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/multidb/alembic.ini.mako | 124
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/multidb/env.py | 140
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/templates/multidb/script.py.mako | 51
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/assertions.py | 175
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/env.py | 502
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/fixtures.py | 306
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/plugin/__init__.py | 0
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/plugin/bootstrap.py | 4
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/requirements.py | 176
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/schemacompare.py | 169
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/suite/__init__.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/suite/_autogen_fixtures.py | 335
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_comments.py | 242
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_computed.py | 144
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_diffs.py | 273
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_fks.py | 1190
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_identity.py | 226
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/suite/test_environment.py | 364
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/suite/test_op.py | 42
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/util.py | 126
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/testing/warnings.py | 31
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/util/__init__.py | 28
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/util/compat.py | 90
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/util/editor.py | 81
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/util/exc.py | 25
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/util/langhelpers.py | 332
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/util/messaging.py | 118
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/util/pyfiles.py | 114
-rw-r--r--  .venv/lib/python3.12/site-packages/alembic/util/sqla_compat.py | 497
77 files changed, 28496 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/alembic/__init__.py b/.venv/lib/python3.12/site-packages/alembic/__init__.py
new file mode 100644
index 00000000..70a5916e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/__init__.py
@@ -0,0 +1,4 @@
+from . import context
+from . import op
+
+__version__ = "1.15.1"
diff --git a/.venv/lib/python3.12/site-packages/alembic/__main__.py b/.venv/lib/python3.12/site-packages/alembic/__main__.py
new file mode 100644
index 00000000..af1b8e87
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/__main__.py
@@ -0,0 +1,4 @@
+from .config import main
+
+if __name__ == "__main__":
+    main(prog="alembic")
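
``__main__.py`` makes the package executable, so ``python -m alembic ...`` behaves like the ``alembic`` console script; ``prog="alembic"`` only fixes the program name shown in ``--help`` output. A sketch of the equivalent programmatic call, assuming an ``alembic.ini`` in the working directory (as the CLI itself assumes):

    from alembic.config import main

    # same as running: python -m alembic current
    main(argv=["current"], prog="alembic")
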
diff --git a/.venv/lib/python3.12/site-packages/alembic/autogenerate/__init__.py b/.venv/lib/python3.12/site-packages/alembic/autogenerate/__init__.py
new file mode 100644
index 00000000..445ddb25
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/autogenerate/__init__.py
@@ -0,0 +1,10 @@
+from .api import _render_migration_diffs as _render_migration_diffs
+from .api import compare_metadata as compare_metadata
+from .api import produce_migrations as produce_migrations
+from .api import render_python_code as render_python_code
+from .api import RevisionContext as RevisionContext
+from .compare import _produce_net_changes as _produce_net_changes
+from .compare import comparators as comparators
+from .render import render_op_text as render_op_text
+from .render import renderers as renderers
+from .rewriter import Rewriter as Rewriter
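
The redundant ``import X as X`` aliases are the PEP 484 convention for explicit re-exports, so strict type checkers (e.g. mypy with ``--no-implicit-reexport``) treat these names as the public surface of ``alembic.autogenerate``. A sketch of the imports this enables:

    # public API re-exported above; underscore-prefixed names remain internal
    from alembic.autogenerate import RevisionContext, Rewriter
    from alembic.autogenerate import compare_metadata, produce_migrations
    from alembic.autogenerate import render_python_code
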
diff --git a/.venv/lib/python3.12/site-packages/alembic/autogenerate/api.py b/.venv/lib/python3.12/site-packages/alembic/autogenerate/api.py
new file mode 100644
index 00000000..811462e8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/autogenerate/api.py
@@ -0,0 +1,650 @@
+from __future__ import annotations
+
+import contextlib
+from typing import Any
+from typing import Dict
+from typing import Iterator
+from typing import List
+from typing import Optional
+from typing import Sequence
+from typing import Set
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import inspect
+
+from . import compare
+from . import render
+from .. import util
+from ..operations import ops
+from ..util import sqla_compat
+
+"""Provide the 'autogenerate' feature which can produce migration operations
+automatically."""
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine import Connection
+    from sqlalchemy.engine import Dialect
+    from sqlalchemy.engine import Inspector
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.sql.schema import Table
+
+    from ..config import Config
+    from ..operations.ops import DowngradeOps
+    from ..operations.ops import MigrationScript
+    from ..operations.ops import UpgradeOps
+    from ..runtime.environment import NameFilterParentNames
+    from ..runtime.environment import NameFilterType
+    from ..runtime.environment import ProcessRevisionDirectiveFn
+    from ..runtime.environment import RenderItemFn
+    from ..runtime.migration import MigrationContext
+    from ..script.base import Script
+    from ..script.base import ScriptDirectory
+    from ..script.revision import _GetRevArg
+
+
+def compare_metadata(context: MigrationContext, metadata: MetaData) -> Any:
+    """Compare a database schema to that given in a
+    :class:`~sqlalchemy.schema.MetaData` instance.
+
+    The database connection is presented in the context
+    of a :class:`.MigrationContext` object, which
+    provides database connectivity as well as optional
+    comparison functions to use for datatypes and
+    server defaults - see the "autogenerate" arguments
+    at :meth:`.EnvironmentContext.configure`
+    for details on these.
+
+    The return format is a list of "diff" directives,
+    each representing individual differences::
+
+        from alembic.migration import MigrationContext
+        from alembic.autogenerate import compare_metadata
+        from sqlalchemy import (
+            create_engine,
+            MetaData,
+            Column,
+            Integer,
+            String,
+            Table,
+            text,
+        )
+        import pprint
+
+        engine = create_engine("sqlite://")
+
+        with engine.begin() as conn:
+            conn.execute(
+                text(
+                    '''
+                        create table foo (
+                            id integer not null primary key,
+                            old_data varchar,
+                            x integer
+                        )
+                    '''
+                )
+            )
+            conn.execute(text("create table bar (data varchar)"))
+
+        metadata = MetaData()
+        Table(
+            "foo",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("data", Integer),
+            Column("x", Integer, nullable=False),
+        )
+        Table("bat", metadata, Column("info", String))
+
+        mc = MigrationContext.configure(engine.connect())
+
+        diff = compare_metadata(mc, metadata)
+        pprint.pprint(diff, indent=2, width=20)
+
+    Output::
+
+        [
+            (
+                "add_table",
+                Table(
+                    "bat",
+                    MetaData(),
+                    Column("info", String(), table=<bat>),
+                    schema=None,
+                ),
+            ),
+            (
+                "remove_table",
+                Table(
+                    "bar",
+                    MetaData(),
+                    Column("data", VARCHAR(), table=<bar>),
+                    schema=None,
+                ),
+            ),
+            (
+                "add_column",
+                None,
+                "foo",
+                Column("data", Integer(), table=<foo>),
+            ),
+            [
+                (
+                    "modify_nullable",
+                    None,
+                    "foo",
+                    "x",
+                    {
+                        "existing_comment": None,
+                        "existing_server_default": False,
+                        "existing_type": INTEGER(),
+                    },
+                    True,
+                    False,
+                )
+            ],
+            (
+                "remove_column",
+                None,
+                "foo",
+                Column("old_data", VARCHAR(), table=<foo>),
+            ),
+        ]
+
+    :param context: a :class:`.MigrationContext`
+     instance.
+    :param metadata: a :class:`~sqlalchemy.schema.MetaData`
+     instance.
+
+    .. seealso::
+
+        :func:`.produce_migrations` - produces a :class:`.MigrationScript`
+        structure based on metadata comparison.
+
+    """
+
+    migration_script = produce_migrations(context, metadata)
+    assert migration_script.upgrade_ops is not None
+    return migration_script.upgrade_ops.as_diffs()
+
+
+def produce_migrations(
+    context: MigrationContext, metadata: MetaData
+) -> MigrationScript:
+    """Produce a :class:`.MigrationScript` structure based on schema
+    comparison.
+
+    This function does essentially what :func:`.compare_metadata` does,
+    but then runs the resulting list of diffs to produce the full
+    :class:`.MigrationScript` object.   For an example of what this looks like,
+    see the example in :ref:`customizing_revision`.
+
+    .. seealso::
+
+        :func:`.compare_metadata` - returns more fundamental "diff"
+        data from comparing a schema.
+
+    """
+
+    autogen_context = AutogenContext(context, metadata=metadata)
+
+    migration_script = ops.MigrationScript(
+        rev_id=None,
+        upgrade_ops=ops.UpgradeOps([]),
+        downgrade_ops=ops.DowngradeOps([]),
+    )
+
+    compare._populate_migration_script(autogen_context, migration_script)
+
+    return migration_script
+
+
+def render_python_code(
+    up_or_down_op: Union[UpgradeOps, DowngradeOps],
+    sqlalchemy_module_prefix: str = "sa.",
+    alembic_module_prefix: str = "op.",
+    render_as_batch: bool = False,
+    imports: Sequence[str] = (),
+    render_item: Optional[RenderItemFn] = None,
+    migration_context: Optional[MigrationContext] = None,
+    user_module_prefix: Optional[str] = None,
+) -> str:
+    """Render Python code given an :class:`.UpgradeOps` or
+    :class:`.DowngradeOps` object.
+
+    This is a convenience function that can be used to test the
+    autogenerate output of a user-defined :class:`.MigrationScript` structure.
+
+    :param up_or_down_op: :class:`.UpgradeOps` or :class:`.DowngradeOps` object
+    :param sqlalchemy_module_prefix: module prefix for SQLAlchemy objects
+    :param alembic_module_prefix: module prefix for Alembic constructs
+    :param render_as_batch: use "batch operations" style for rendering
+    :param imports: sequence of import symbols to add
+    :param render_item: callable to render items
+    :param migration_context: optional :class:`.MigrationContext`
+    :param user_module_prefix: optional string prefix for user-defined types
+
+     .. versionadded:: 1.11.0
+
+    """
+    opts = {
+        "sqlalchemy_module_prefix": sqlalchemy_module_prefix,
+        "alembic_module_prefix": alembic_module_prefix,
+        "render_item": render_item,
+        "render_as_batch": render_as_batch,
+        "user_module_prefix": user_module_prefix,
+    }
+
+    if migration_context is None:
+        from ..runtime.migration import MigrationContext
+        from sqlalchemy.engine.default import DefaultDialect
+
+        migration_context = MigrationContext.configure(
+            dialect=DefaultDialect()
+        )
+
+    autogen_context = AutogenContext(migration_context, opts=opts)
+    autogen_context.imports = set(imports)
+    return render._indent(
+        render._render_cmd_body(up_or_down_op, autogen_context)
+    )
+
+
+def _render_migration_diffs(
+    context: MigrationContext, template_args: Dict[Any, Any]
+) -> None:
+    """legacy, used by test_autogen_composition at the moment"""
+
+    autogen_context = AutogenContext(context)
+
+    upgrade_ops = ops.UpgradeOps([])
+    compare._produce_net_changes(autogen_context, upgrade_ops)
+
+    migration_script = ops.MigrationScript(
+        rev_id=None,
+        upgrade_ops=upgrade_ops,
+        downgrade_ops=upgrade_ops.reverse(),
+    )
+
+    render._render_python_into_templatevars(
+        autogen_context, migration_script, template_args
+    )
+
+
+class AutogenContext:
+    """Maintains configuration and state that's specific to an
+    autogenerate operation."""
+
+    metadata: Union[MetaData, Sequence[MetaData], None] = None
+    """The :class:`~sqlalchemy.schema.MetaData` object
+    representing the destination.
+
+    This object is the one that is passed within ``env.py``
+    to the :paramref:`.EnvironmentContext.configure.target_metadata`
+    parameter.  It represents the structure of :class:`.Table` and other
+    objects as stated in the current database model, and represents the
+    destination structure for the database being examined.
+
+    While the :class:`~sqlalchemy.schema.MetaData` object is primarily
+    known as a collection of :class:`~sqlalchemy.schema.Table` objects,
+    it also has an :attr:`~sqlalchemy.schema.MetaData.info` dictionary
+    that may be used by end-user schemes to store additional schema-level
+    objects that are to be compared in custom autogeneration schemes.
+
+    """
+
+    connection: Optional[Connection] = None
+    """The :class:`~sqlalchemy.engine.base.Connection` object currently
+    connected to the database backend being compared.
+
+    This is obtained from the :attr:`.MigrationContext.bind` and is
+    ultimately set up in the ``env.py`` script.
+
+    """
+
+    dialect: Optional[Dialect] = None
+    """The :class:`~sqlalchemy.engine.Dialect` object currently in use.
+
+    This is normally obtained from the
+    :attr:`~sqlalchemy.engine.base.Connection.dialect` attribute.
+
+    """
+
+    imports: Set[str] = None  # type: ignore[assignment]
+    """A ``set()`` which contains string Python import directives.
+
+    The directives are to be rendered into the ``${imports}`` section
+    of a script template.  The set is normally empty and can be modified
+    within hooks such as the
+    :paramref:`.EnvironmentContext.configure.render_item` hook.
+
+    .. seealso::
+
+        :ref:`autogen_render_types`
+
+    """
+
+    migration_context: MigrationContext = None  # type: ignore[assignment]
+    """The :class:`.MigrationContext` established by the ``env.py`` script."""
+
+    def __init__(
+        self,
+        migration_context: MigrationContext,
+        metadata: Union[MetaData, Sequence[MetaData], None] = None,
+        opts: Optional[Dict[str, Any]] = None,
+        autogenerate: bool = True,
+    ) -> None:
+        if (
+            autogenerate
+            and migration_context is not None
+            and migration_context.as_sql
+        ):
+            raise util.CommandError(
+                "autogenerate can't use as_sql=True as it prevents querying "
+                "the database for schema information"
+            )
+
+        if opts is None:
+            opts = migration_context.opts
+
+        self.metadata = metadata = (
+            opts.get("target_metadata", None) if metadata is None else metadata
+        )
+
+        if (
+            autogenerate
+            and metadata is None
+            and migration_context is not None
+            and migration_context.script is not None
+        ):
+            raise util.CommandError(
+                "Can't proceed with --autogenerate option; environment "
+                "script %s does not provide "
+                "a MetaData object or sequence of objects to the context."
+                % (migration_context.script.env_py_location)
+            )
+
+        include_object = opts.get("include_object", None)
+        include_name = opts.get("include_name", None)
+
+        object_filters = []
+        name_filters = []
+        if include_object:
+            object_filters.append(include_object)
+        if include_name:
+            name_filters.append(include_name)
+
+        self._object_filters = object_filters
+        self._name_filters = name_filters
+
+        self.migration_context = migration_context
+        if self.migration_context is not None:
+            self.connection = self.migration_context.bind
+            self.dialect = self.migration_context.dialect
+
+        self.imports = set()
+        self.opts: Dict[str, Any] = opts
+        self._has_batch: bool = False
+
+    @util.memoized_property
+    def inspector(self) -> Inspector:
+        if self.connection is None:
+            raise TypeError(
+                "can't return inspector as this "
+                "AutogenContext has no database connection"
+            )
+        return inspect(self.connection)
+
+    @contextlib.contextmanager
+    def _within_batch(self) -> Iterator[None]:
+        self._has_batch = True
+        yield
+        self._has_batch = False
+
+    def run_name_filters(
+        self,
+        name: Optional[str],
+        type_: NameFilterType,
+        parent_names: NameFilterParentNames,
+    ) -> bool:
+        """Run the context's name filters and return True if the targets
+        should be part of the autogenerate operation.
+
+        This method should be run for every kind of name encountered within the
+        reflection side of an autogenerate operation, giving the environment
+        the chance to filter what names should be reflected as database
+        objects.  The filters here are produced directly via the
+        :paramref:`.EnvironmentContext.configure.include_name` parameter.
+
+        """
+        if "schema_name" in parent_names:
+            if type_ == "table":
+                table_name = name
+            else:
+                table_name = parent_names.get("table_name", None)
+            if table_name:
+                schema_name = parent_names["schema_name"]
+                if schema_name:
+                    parent_names["schema_qualified_table_name"] = "%s.%s" % (
+                        schema_name,
+                        table_name,
+                    )
+                else:
+                    parent_names["schema_qualified_table_name"] = table_name
+
+        for fn in self._name_filters:
+            if not fn(name, type_, parent_names):
+                return False
+        else:
+            return True
+
+    def run_object_filters(
+        self,
+        object_: SchemaItem,
+        name: sqla_compat._ConstraintName,
+        type_: NameFilterType,
+        reflected: bool,
+        compare_to: Optional[SchemaItem],
+    ) -> bool:
+        """Run the context's object filters and return True if the targets
+        should be part of the autogenerate operation.
+
+        This method should be run for every kind of object encountered within
+        an autogenerate operation, giving the environment the chance
+        to filter what objects should be included in the comparison.
+        The filters here are produced directly via the
+        :paramref:`.EnvironmentContext.configure.include_object` parameter.
+
+        """
+        for fn in self._object_filters:
+            if not fn(object_, name, type_, reflected, compare_to):
+                return False
+        else:
+            return True
+
+    run_filters = run_object_filters
+
+    @util.memoized_property
+    def sorted_tables(self) -> List[Table]:
+        """Return an aggregate of the :attr:`.MetaData.sorted_tables`
+        collection(s).
+
+        For a sequence of :class:`.MetaData` objects, this
+        concatenates the :attr:`.MetaData.sorted_tables` collection
+        for each individual :class:`.MetaData`  in the order of the
+        sequence.  It does **not** collate the sorted tables collections.
+
+        """
+        result = []
+        for m in util.to_list(self.metadata):
+            result.extend(m.sorted_tables)
+        return result
+
+    @util.memoized_property
+    def table_key_to_table(self) -> Dict[str, Table]:
+        """Return an aggregate  of the :attr:`.MetaData.tables` dictionaries.
+
+        The :attr:`.MetaData.tables` collection is a dictionary of table key
+        to :class:`.Table`; this method aggregates the dictionary across
+        multiple :class:`.MetaData` objects into one dictionary.
+
+        Duplicate table keys are **not** supported; if two :class:`.MetaData`
+        objects contain the same table key, an exception is raised.
+
+        """
+        result: Dict[str, Table] = {}
+        for m in util.to_list(self.metadata):
+            intersect = set(result).intersection(set(m.tables))
+            if intersect:
+                raise ValueError(
+                    "Duplicate table keys across multiple "
+                    "MetaData objects: %s"
+                    % (", ".join('"%s"' % key for key in sorted(intersect)))
+                )
+
+            result.update(m.tables)
+        return result
+
+
+class RevisionContext:
+    """Maintains configuration and state that's specific to a revision
+    file generation operation."""
+
+    generated_revisions: List[MigrationScript]
+    process_revision_directives: Optional[ProcessRevisionDirectiveFn]
+
+    def __init__(
+        self,
+        config: Config,
+        script_directory: ScriptDirectory,
+        command_args: Dict[str, Any],
+        process_revision_directives: Optional[
+            ProcessRevisionDirectiveFn
+        ] = None,
+    ) -> None:
+        self.config = config
+        self.script_directory = script_directory
+        self.command_args = command_args
+        self.process_revision_directives = process_revision_directives
+        self.template_args = {
+            "config": config  # Let templates use config for
+            # e.g. multiple databases
+        }
+        self.generated_revisions = [self._default_revision()]
+
+    def _to_script(
+        self, migration_script: MigrationScript
+    ) -> Optional[Script]:
+        template_args: Dict[str, Any] = self.template_args.copy()
+
+        if getattr(migration_script, "_needs_render", False):
+            autogen_context = self._last_autogen_context
+
+            # clear out existing imports if we are doing multiple
+            # renders
+            autogen_context.imports = set()
+            if migration_script.imports:
+                autogen_context.imports.update(migration_script.imports)
+            render._render_python_into_templatevars(
+                autogen_context, migration_script, template_args
+            )
+
+        assert migration_script.rev_id is not None
+        return self.script_directory.generate_revision(
+            migration_script.rev_id,
+            migration_script.message,
+            refresh=True,
+            head=migration_script.head,
+            splice=migration_script.splice,
+            branch_labels=migration_script.branch_label,
+            version_path=migration_script.version_path,
+            depends_on=migration_script.depends_on,
+            **template_args,
+        )
+
+    def run_autogenerate(
+        self, rev: _GetRevArg, migration_context: MigrationContext
+    ) -> None:
+        self._run_environment(rev, migration_context, True)
+
+    def run_no_autogenerate(
+        self, rev: _GetRevArg, migration_context: MigrationContext
+    ) -> None:
+        self._run_environment(rev, migration_context, False)
+
+    def _run_environment(
+        self,
+        rev: _GetRevArg,
+        migration_context: MigrationContext,
+        autogenerate: bool,
+    ) -> None:
+        if autogenerate:
+            if self.command_args["sql"]:
+                raise util.CommandError(
+                    "Using --sql with --autogenerate does not make any sense"
+                )
+            if set(self.script_directory.get_revisions(rev)) != set(
+                self.script_directory.get_revisions("heads")
+            ):
+                raise util.CommandError("Target database is not up to date.")
+
+        upgrade_token = migration_context.opts["upgrade_token"]
+        downgrade_token = migration_context.opts["downgrade_token"]
+
+        migration_script = self.generated_revisions[-1]
+        if not getattr(migration_script, "_needs_render", False):
+            migration_script.upgrade_ops_list[-1].upgrade_token = upgrade_token
+            migration_script.downgrade_ops_list[-1].downgrade_token = (
+                downgrade_token
+            )
+            migration_script._needs_render = True
+        else:
+            migration_script._upgrade_ops.append(
+                ops.UpgradeOps([], upgrade_token=upgrade_token)
+            )
+            migration_script._downgrade_ops.append(
+                ops.DowngradeOps([], downgrade_token=downgrade_token)
+            )
+
+        autogen_context = AutogenContext(
+            migration_context, autogenerate=autogenerate
+        )
+        self._last_autogen_context: AutogenContext = autogen_context
+
+        if autogenerate:
+            compare._populate_migration_script(
+                autogen_context, migration_script
+            )
+
+        if self.process_revision_directives:
+            self.process_revision_directives(
+                migration_context, rev, self.generated_revisions
+            )
+
+        hook = migration_context.opts["process_revision_directives"]
+        if hook:
+            hook(migration_context, rev, self.generated_revisions)
+
+        for migration_script in self.generated_revisions:
+            migration_script._needs_render = True
+
+    def _default_revision(self) -> MigrationScript:
+        command_args: Dict[str, Any] = self.command_args
+        op = ops.MigrationScript(
+            rev_id=command_args["rev_id"] or util.rev_id(),
+            message=command_args["message"],
+            upgrade_ops=ops.UpgradeOps([]),
+            downgrade_ops=ops.DowngradeOps([]),
+            head=command_args["head"],
+            splice=command_args["splice"],
+            branch_label=command_args["branch_label"],
+            version_path=command_args["version_path"],
+            depends_on=command_args["depends_on"],
+        )
+        return op
+
+    def generate_scripts(self) -> Iterator[Optional[Script]]:
+        for generated_revision in self.generated_revisions:
+            yield self._to_script(generated_revision)
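
Together, ``produce_migrations`` and ``render_python_code`` allow autogenerate output to be previewed without a scripted Alembic environment. A minimal sketch, assuming a reachable database URL and a populated ``MetaData`` (both placeholders here):

    from sqlalchemy import MetaData, create_engine

    from alembic.autogenerate import produce_migrations, render_python_code
    from alembic.migration import MigrationContext

    engine = create_engine("sqlite://")  # placeholder URL
    target_metadata = MetaData()  # placeholder: your model metadata

    with engine.connect() as conn:
        mc = MigrationContext.configure(conn)
        script = produce_migrations(mc, target_metadata)

    # produce_migrations always populates upgrade_ops
    print(render_python_code(script.upgrade_ops))

Passing ``migration_context=mc`` to ``render_python_code`` enables dialect-aware rendering; without it, a default dialect is used, as the fallback branch in ``render_python_code`` above shows.
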
diff --git a/.venv/lib/python3.12/site-packages/alembic/autogenerate/compare.py b/.venv/lib/python3.12/site-packages/alembic/autogenerate/compare.py
new file mode 100644
index 00000000..8d6d8f1b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/autogenerate/compare.py
@@ -0,0 +1,1317 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import contextlib
+import logging
+import re
+from typing import Any
+from typing import cast
+from typing import Dict
+from typing import Iterator
+from typing import Mapping
+from typing import Optional
+from typing import Set
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy import event
+from sqlalchemy import inspect
+from sqlalchemy import schema as sa_schema
+from sqlalchemy import text
+from sqlalchemy import types as sqltypes
+from sqlalchemy.sql import expression
+from sqlalchemy.sql.schema import ForeignKeyConstraint
+from sqlalchemy.sql.schema import Index
+from sqlalchemy.sql.schema import UniqueConstraint
+from sqlalchemy.util import OrderedSet
+
+from .. import util
+from ..ddl._autogen import is_index_sig
+from ..ddl._autogen import is_uq_sig
+from ..operations import ops
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy.engine.reflection import Inspector
+    from sqlalchemy.sql.elements import quoted_name
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Table
+
+    from alembic.autogenerate.api import AutogenContext
+    from alembic.ddl.impl import DefaultImpl
+    from alembic.operations.ops import AlterColumnOp
+    from alembic.operations.ops import MigrationScript
+    from alembic.operations.ops import ModifyTableOps
+    from alembic.operations.ops import UpgradeOps
+    from ..ddl._autogen import _constraint_sig
+
+
+log = logging.getLogger(__name__)
+
+
+def _populate_migration_script(
+    autogen_context: AutogenContext, migration_script: MigrationScript
+) -> None:
+    upgrade_ops = migration_script.upgrade_ops_list[-1]
+    downgrade_ops = migration_script.downgrade_ops_list[-1]
+
+    _produce_net_changes(autogen_context, upgrade_ops)
+    upgrade_ops.reverse_into(downgrade_ops)
+
+
+comparators = util.Dispatcher(uselist=True)
+
+
+def _produce_net_changes(
+    autogen_context: AutogenContext, upgrade_ops: UpgradeOps
+) -> None:
+    connection = autogen_context.connection
+    assert connection is not None
+    include_schemas = autogen_context.opts.get("include_schemas", False)
+
+    inspector: Inspector = inspect(connection)
+
+    default_schema = connection.dialect.default_schema_name
+    schemas: Set[Optional[str]]
+    if include_schemas:
+        schemas = set(inspector.get_schema_names())
+        # exclude the "information_schema" schema
+        schemas.discard("information_schema")
+        # replace the "default" schema with None
+        schemas.discard(default_schema)
+        schemas.add(None)
+    else:
+        schemas = {None}
+
+    schemas = {
+        s for s in schemas if autogen_context.run_name_filters(s, "schema", {})
+    }
+
+    assert autogen_context.dialect is not None
+    comparators.dispatch("schema", autogen_context.dialect.name)(
+        autogen_context, upgrade_ops, schemas
+    )
+
+
+@comparators.dispatch_for("schema")
+def _autogen_for_tables(
+    autogen_context: AutogenContext,
+    upgrade_ops: UpgradeOps,
+    schemas: Union[Set[None], Set[Optional[str]]],
+) -> None:
+    inspector = autogen_context.inspector
+
+    conn_table_names: Set[Tuple[Optional[str], str]] = set()
+
+    version_table_schema = (
+        autogen_context.migration_context.version_table_schema
+    )
+    version_table = autogen_context.migration_context.version_table
+
+    for schema_name in schemas:
+        tables = set(inspector.get_table_names(schema=schema_name))
+        if schema_name == version_table_schema:
+            tables = tables.difference(
+                [autogen_context.migration_context.version_table]
+            )
+
+        conn_table_names.update(
+            (schema_name, tname)
+            for tname in tables
+            if autogen_context.run_name_filters(
+                tname, "table", {"schema_name": schema_name}
+            )
+        )
+
+    metadata_table_names = OrderedSet(
+        [(table.schema, table.name) for table in autogen_context.sorted_tables]
+    ).difference([(version_table_schema, version_table)])
+
+    _compare_tables(
+        conn_table_names,
+        metadata_table_names,
+        inspector,
+        upgrade_ops,
+        autogen_context,
+    )
+
+
+def _compare_tables(
+    conn_table_names: set,
+    metadata_table_names: set,
+    inspector: Inspector,
+    upgrade_ops: UpgradeOps,
+    autogen_context: AutogenContext,
+) -> None:
+    default_schema = inspector.bind.dialect.default_schema_name
+
+    # tables coming from the connection will not have "schema"
+    # set if it matches default_schema_name; so we need a list
+    # of table names from local metadata that also have "None" if schema
+    # == default_schema_name.  Most setups will be like this anyway but
+    # some are not (see #170)
+    metadata_table_names_no_dflt_schema = OrderedSet(
+        [
+            (schema if schema != default_schema else None, tname)
+            for schema, tname in metadata_table_names
+        ]
+    )
+
+    # to adjust for the MetaData collection storing the tables either
+    # as "schemaname.tablename" or just "tablename", create a new lookup
+    # which will match the "non-default-schema" keys to the Table object.
+    tname_to_table = {
+        no_dflt_schema: autogen_context.table_key_to_table[
+            sa_schema._get_table_key(tname, schema)
+        ]
+        for no_dflt_schema, (schema, tname) in zip(
+            metadata_table_names_no_dflt_schema, metadata_table_names
+        )
+    }
+    metadata_table_names = metadata_table_names_no_dflt_schema
+
+    for s, tname in metadata_table_names.difference(conn_table_names):
+        name = "%s.%s" % (s, tname) if s else tname
+        metadata_table = tname_to_table[(s, tname)]
+        if autogen_context.run_object_filters(
+            metadata_table, tname, "table", False, None
+        ):
+            upgrade_ops.ops.append(
+                ops.CreateTableOp.from_table(metadata_table)
+            )
+            log.info("Detected added table %r", name)
+            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
+
+            comparators.dispatch("table")(
+                autogen_context,
+                modify_table_ops,
+                s,
+                tname,
+                None,
+                metadata_table,
+            )
+            if not modify_table_ops.is_empty():
+                upgrade_ops.ops.append(modify_table_ops)
+
+    removal_metadata = sa_schema.MetaData()
+    for s, tname in conn_table_names.difference(metadata_table_names):
+        name = sa_schema._get_table_key(tname, s)
+        exists = name in removal_metadata.tables
+        t = sa_schema.Table(tname, removal_metadata, schema=s)
+
+        if not exists:
+            event.listen(
+                t,
+                "column_reflect",
+                # fmt: off
+                autogen_context.migration_context.impl.
+                _compat_autogen_column_reflect
+                (inspector),
+                # fmt: on
+            )
+            inspector.reflect_table(t, include_columns=None)
+        if autogen_context.run_object_filters(t, tname, "table", True, None):
+            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
+
+            comparators.dispatch("table")(
+                autogen_context, modify_table_ops, s, tname, t, None
+            )
+            if not modify_table_ops.is_empty():
+                upgrade_ops.ops.append(modify_table_ops)
+
+            upgrade_ops.ops.append(ops.DropTableOp.from_table(t))
+            log.info("Detected removed table %r", name)
+
+    existing_tables = conn_table_names.intersection(metadata_table_names)
+
+    existing_metadata = sa_schema.MetaData()
+    conn_column_info = {}
+    for s, tname in existing_tables:
+        name = sa_schema._get_table_key(tname, s)
+        exists = name in existing_metadata.tables
+        t = sa_schema.Table(tname, existing_metadata, schema=s)
+        if not exists:
+            event.listen(
+                t,
+                "column_reflect",
+                # fmt: off
+                autogen_context.migration_context.impl.
+                _compat_autogen_column_reflect(inspector),
+                # fmt: on
+            )
+            inspector.reflect_table(t, include_columns=None)
+        conn_column_info[(s, tname)] = t
+
+    for s, tname in sorted(existing_tables, key=lambda x: (x[0] or "", x[1])):
+        s = s or None
+        name = "%s.%s" % (s, tname) if s else tname
+        metadata_table = tname_to_table[(s, tname)]
+        conn_table = existing_metadata.tables[name]
+
+        if autogen_context.run_object_filters(
+            metadata_table, tname, "table", False, conn_table
+        ):
+            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
+            with _compare_columns(
+                s,
+                tname,
+                conn_table,
+                metadata_table,
+                modify_table_ops,
+                autogen_context,
+                inspector,
+            ):
+                comparators.dispatch("table")(
+                    autogen_context,
+                    modify_table_ops,
+                    s,
+                    tname,
+                    conn_table,
+                    metadata_table,
+                )
+
+            if not modify_table_ops.is_empty():
+                upgrade_ops.ops.append(modify_table_ops)
+
+
+_IndexColumnSortingOps: Mapping[str, Any] = util.immutabledict(
+    {
+        "asc": expression.asc,
+        "desc": expression.desc,
+        "nulls_first": expression.nullsfirst,
+        "nulls_last": expression.nullslast,
+        "nullsfirst": expression.nullsfirst,  # 1_3 name
+        "nullslast": expression.nullslast,  # 1_3 name
+    }
+)
+
+
+def _make_index(
+    impl: DefaultImpl, params: Dict[str, Any], conn_table: Table
+) -> Optional[Index]:
+    exprs: list[Union[Column[Any], TextClause]] = []
+    sorting = params.get("column_sorting")
+
+    for num, col_name in enumerate(params["column_names"]):
+        item: Union[Column[Any], TextClause]
+        if col_name is None:
+            assert "expressions" in params
+            name = params["expressions"][num]
+            item = text(name)
+        else:
+            name = col_name
+            item = conn_table.c[col_name]
+        if sorting and name in sorting:
+            for operator in sorting[name]:
+                if operator in _IndexColumnSortingOps:
+                    item = _IndexColumnSortingOps[operator](item)
+        exprs.append(item)
+    ix = sa_schema.Index(
+        params["name"],
+        *exprs,
+        unique=params["unique"],
+        _table=conn_table,
+        **impl.adjust_reflected_dialect_options(params, "index"),
+    )
+    if "duplicates_constraint" in params:
+        ix.info["duplicates_constraint"] = params["duplicates_constraint"]
+    return ix
+
+
+def _make_unique_constraint(
+    impl: DefaultImpl, params: Dict[str, Any], conn_table: Table
+) -> UniqueConstraint:
+    uq = sa_schema.UniqueConstraint(
+        *[conn_table.c[cname] for cname in params["column_names"]],
+        name=params["name"],
+        **impl.adjust_reflected_dialect_options(params, "unique_constraint"),
+    )
+    if "duplicates_index" in params:
+        uq.info["duplicates_index"] = params["duplicates_index"]
+
+    return uq
+
+
+def _make_foreign_key(
+    params: Dict[str, Any], conn_table: Table
+) -> ForeignKeyConstraint:
+    tname = params["referred_table"]
+    if params["referred_schema"]:
+        tname = "%s.%s" % (params["referred_schema"], tname)
+
+    options = params.get("options", {})
+
+    const = sa_schema.ForeignKeyConstraint(
+        [conn_table.c[cname] for cname in params["constrained_columns"]],
+        ["%s.%s" % (tname, n) for n in params["referred_columns"]],
+        onupdate=options.get("onupdate"),
+        ondelete=options.get("ondelete"),
+        deferrable=options.get("deferrable"),
+        initially=options.get("initially"),
+        name=params["name"],
+    )
+    # needed by SQLAlchemy 0.7
+    conn_table.append_constraint(const)
+    return const
+
+
+@contextlib.contextmanager
+def _compare_columns(
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    conn_table: Table,
+    metadata_table: Table,
+    modify_table_ops: ModifyTableOps,
+    autogen_context: AutogenContext,
+    inspector: Inspector,
+) -> Iterator[None]:
+    name = "%s.%s" % (schema, tname) if schema else tname
+    metadata_col_names = OrderedSet(
+        c.name for c in metadata_table.c if not c.system
+    )
+    metadata_cols_by_name = {
+        c.name: c for c in metadata_table.c if not c.system
+    }
+
+    conn_col_names = {
+        c.name: c
+        for c in conn_table.c
+        if autogen_context.run_name_filters(
+            c.name, "column", {"table_name": tname, "schema_name": schema}
+        )
+    }
+
+    for cname in metadata_col_names.difference(conn_col_names):
+        if autogen_context.run_object_filters(
+            metadata_cols_by_name[cname], cname, "column", False, None
+        ):
+            modify_table_ops.ops.append(
+                ops.AddColumnOp.from_column_and_tablename(
+                    schema, tname, metadata_cols_by_name[cname]
+                )
+            )
+            log.info("Detected added column '%s.%s'", name, cname)
+
+    for colname in metadata_col_names.intersection(conn_col_names):
+        metadata_col = metadata_cols_by_name[colname]
+        conn_col = conn_table.c[colname]
+        if not autogen_context.run_object_filters(
+            metadata_col, colname, "column", False, conn_col
+        ):
+            continue
+        alter_column_op = ops.AlterColumnOp(tname, colname, schema=schema)
+
+        comparators.dispatch("column")(
+            autogen_context,
+            alter_column_op,
+            schema,
+            tname,
+            colname,
+            conn_col,
+            metadata_col,
+        )
+
+        if alter_column_op.has_changes():
+            modify_table_ops.ops.append(alter_column_op)
+
+    yield
+
+    for cname in set(conn_col_names).difference(metadata_col_names):
+        if autogen_context.run_object_filters(
+            conn_table.c[cname], cname, "column", True, None
+        ):
+            modify_table_ops.ops.append(
+                ops.DropColumnOp.from_column_and_tablename(
+                    schema, tname, conn_table.c[cname]
+                )
+            )
+            log.info("Detected removed column '%s.%s'", name, cname)
+
+
+_C = TypeVar("_C", bound=Union[UniqueConstraint, ForeignKeyConstraint, Index])
+
+
+@comparators.dispatch_for("table")
+def _compare_indexes_and_uniques(
+    autogen_context: AutogenContext,
+    modify_ops: ModifyTableOps,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    conn_table: Optional[Table],
+    metadata_table: Optional[Table],
+) -> None:
+    inspector = autogen_context.inspector
+    is_create_table = conn_table is None
+    is_drop_table = metadata_table is None
+    impl = autogen_context.migration_context.impl
+
+    # 1a. get raw indexes and unique constraints from metadata ...
+    if metadata_table is not None:
+        metadata_unique_constraints = {
+            uq
+            for uq in metadata_table.constraints
+            if isinstance(uq, sa_schema.UniqueConstraint)
+        }
+        metadata_indexes = set(metadata_table.indexes)
+    else:
+        metadata_unique_constraints = set()
+        metadata_indexes = set()
+
+    conn_uniques = conn_indexes = frozenset()  # type:ignore[var-annotated]
+
+    supports_unique_constraints = False
+
+    unique_constraints_duplicate_unique_indexes = False
+
+    if conn_table is not None:
+        # 1b. ... and from connection, if the table exists
+        try:
+            conn_uniques = inspector.get_unique_constraints(  # type:ignore[assignment] # noqa
+                tname, schema=schema
+            )
+            supports_unique_constraints = True
+        except NotImplementedError:
+            pass
+        except TypeError:
+            # number of arguments is off for the base
+            # method in SQLAlchemy due to the cache decorator
+            # not being present
+            pass
+        else:
+            conn_uniques = [  # type:ignore[assignment]
+                uq
+                for uq in conn_uniques
+                if autogen_context.run_name_filters(
+                    uq["name"],
+                    "unique_constraint",
+                    {"table_name": tname, "schema_name": schema},
+                )
+            ]
+            for uq in conn_uniques:
+                if uq.get("duplicates_index"):
+                    unique_constraints_duplicate_unique_indexes = True
+        try:
+            conn_indexes = inspector.get_indexes(  # type:ignore[assignment]
+                tname, schema=schema
+            )
+        except NotImplementedError:
+            pass
+        else:
+            conn_indexes = [  # type:ignore[assignment]
+                ix
+                for ix in conn_indexes
+                if autogen_context.run_name_filters(
+                    ix["name"],
+                    "index",
+                    {"table_name": tname, "schema_name": schema},
+                )
+            ]
+
+        # 2. convert conn-level objects from raw inspector records
+        # into schema objects
+        if is_drop_table:
+            # for DROP TABLE uniques are inline, don't need them
+            conn_uniques = set()  # type:ignore[assignment]
+        else:
+            conn_uniques = {  # type:ignore[assignment]
+                _make_unique_constraint(impl, uq_def, conn_table)
+                for uq_def in conn_uniques
+            }
+
+        conn_indexes = {  # type:ignore[assignment]
+            index
+            for index in (
+                _make_index(impl, ix, conn_table) for ix in conn_indexes
+            )
+            if index is not None
+        }
+
+    # 2a. if the dialect dupes unique indexes as unique constraints
+    # (mysql and oracle), correct for that
+
+    if unique_constraints_duplicate_unique_indexes:
+        _correct_for_uq_duplicates_uix(
+            conn_uniques,
+            conn_indexes,
+            metadata_unique_constraints,
+            metadata_indexes,
+            autogen_context.dialect,
+            impl,
+        )
+
+    # 3. give the dialect a chance to omit indexes and constraints that
+    # we know are either added implicitly by the DB or that the DB
+    # can't accurately report on
+    impl.correct_for_autogen_constraints(
+        conn_uniques,  # type: ignore[arg-type]
+        conn_indexes,  # type: ignore[arg-type]
+        metadata_unique_constraints,
+        metadata_indexes,
+    )
+
+    # 4. organize the constraints into "signature" collections, the
+    # _constraint_sig() objects provide a consistent facade over both
+    # Index and UniqueConstraint so we can easily work with them
+    # interchangeably
+    metadata_unique_constraints_sig = {
+        impl._create_metadata_constraint_sig(uq)
+        for uq in metadata_unique_constraints
+    }
+
+    metadata_indexes_sig = {
+        impl._create_metadata_constraint_sig(ix) for ix in metadata_indexes
+    }
+
+    conn_unique_constraints = {
+        impl._create_reflected_constraint_sig(uq) for uq in conn_uniques
+    }
+
+    conn_indexes_sig = {
+        impl._create_reflected_constraint_sig(ix) for ix in conn_indexes
+    }
+
+    # 5. index things by name, for those objects that have names
+    metadata_names = {
+        cast(str, c.md_name_to_sql_name(autogen_context)): c
+        for c in metadata_unique_constraints_sig.union(metadata_indexes_sig)
+        if c.is_named
+    }
+
+    conn_uniques_by_name: Dict[sqla_compat._ConstraintName, _constraint_sig]
+    conn_indexes_by_name: Dict[sqla_compat._ConstraintName, _constraint_sig]
+
+    conn_uniques_by_name = {c.name: c for c in conn_unique_constraints}
+    conn_indexes_by_name = {c.name: c for c in conn_indexes_sig}
+    conn_names = {
+        c.name: c
+        for c in conn_unique_constraints.union(conn_indexes_sig)
+        if sqla_compat.constraint_name_string(c.name)
+    }
+
+    doubled_constraints = {
+        name: (conn_uniques_by_name[name], conn_indexes_by_name[name])
+        for name in set(conn_uniques_by_name).intersection(
+            conn_indexes_by_name
+        )
+    }
+
+    # 6. index things by "column signature", to help with unnamed unique
+    # constraints.
+    conn_uniques_by_sig = {uq.unnamed: uq for uq in conn_unique_constraints}
+    metadata_uniques_by_sig = {
+        uq.unnamed: uq for uq in metadata_unique_constraints_sig
+    }
+    unnamed_metadata_uniques = {
+        uq.unnamed: uq
+        for uq in metadata_unique_constraints_sig
+        if not sqla_compat._constraint_is_named(
+            uq.const, autogen_context.dialect
+        )
+    }
+
+    # assumptions:
+    # 1. a unique constraint or an index from the connection *always*
+    #    has a name.
+    # 2. an index on the metadata side *always* has a name.
+    # 3. a unique constraint on the metadata side *might* have a name.
+    # 4. The backend may double up indexes as unique constraints and
+    #    vice versa (e.g. MySQL, PostgreSQL)
+
+    def obj_added(obj: _constraint_sig):
+        if is_index_sig(obj):
+            if autogen_context.run_object_filters(
+                obj.const, obj.name, "index", False, None
+            ):
+                modify_ops.ops.append(ops.CreateIndexOp.from_index(obj.const))
+                log.info(
+                    "Detected added index '%r' on '%s'",
+                    obj.name,
+                    obj.column_names,
+                )
+        elif is_uq_sig(obj):
+            if not supports_unique_constraints:
+                # can't report unique indexes as added if we don't
+                # detect them
+                return
+            if is_create_table or is_drop_table:
+                # unique constraints are created inline with table defs
+                return
+            if autogen_context.run_object_filters(
+                obj.const, obj.name, "unique_constraint", False, None
+            ):
+                modify_ops.ops.append(
+                    ops.AddConstraintOp.from_constraint(obj.const)
+                )
+                log.info(
+                    "Detected added unique constraint %r on '%s'",
+                    obj.name,
+                    obj.column_names,
+                )
+        else:
+            assert False
+
+    def obj_removed(obj: _constraint_sig):
+        if is_index_sig(obj):
+            if obj.is_unique and not supports_unique_constraints:
+                # many databases double up unique constraints
+                # as unique indexes.  without that list we can't
+                # be sure what we're doing here
+                return
+
+            if autogen_context.run_object_filters(
+                obj.const, obj.name, "index", True, None
+            ):
+                modify_ops.ops.append(ops.DropIndexOp.from_index(obj.const))
+                log.info("Detected removed index %r on %r", obj.name, tname)
+        elif is_uq_sig(obj):
+            if is_create_table or is_drop_table:
+                # if the whole table is being dropped, we don't need to
+                # consider unique constraints separately
+                return
+            if autogen_context.run_object_filters(
+                obj.const, obj.name, "unique_constraint", True, None
+            ):
+                modify_ops.ops.append(
+                    ops.DropConstraintOp.from_constraint(obj.const)
+                )
+                log.info(
+                    "Detected removed unique constraint %r on %r",
+                    obj.name,
+                    tname,
+                )
+        else:
+            assert False
+
+    def obj_changed(
+        old: _constraint_sig,
+        new: _constraint_sig,
+        msg: str,
+    ):
+        if is_index_sig(old):
+            assert is_index_sig(new)
+
+            if autogen_context.run_object_filters(
+                new.const, new.name, "index", False, old.const
+            ):
+                log.info(
+                    "Detected changed index %r on %r: %s", old.name, tname, msg
+                )
+                modify_ops.ops.append(ops.DropIndexOp.from_index(old.const))
+                modify_ops.ops.append(ops.CreateIndexOp.from_index(new.const))
+        elif is_uq_sig(old):
+            assert is_uq_sig(new)
+
+            if autogen_context.run_object_filters(
+                new.const, new.name, "unique_constraint", False, old.const
+            ):
+                log.info(
+                    "Detected changed unique constraint %r on %r: %s",
+                    old.name,
+                    tname,
+                    msg,
+                )
+                modify_ops.ops.append(
+                    ops.DropConstraintOp.from_constraint(old.const)
+                )
+                modify_ops.ops.append(
+                    ops.AddConstraintOp.from_constraint(new.const)
+                )
+        else:
+            assert False
+
+    for removed_name in sorted(set(conn_names).difference(metadata_names)):
+        conn_obj = conn_names[removed_name]
+        if (
+            is_uq_sig(conn_obj)
+            and conn_obj.unnamed in unnamed_metadata_uniques
+        ):
+            continue
+        elif removed_name in doubled_constraints:
+            conn_uq, conn_idx = doubled_constraints[removed_name]
+            if (
+                all(
+                    conn_idx.unnamed != meta_idx.unnamed
+                    for meta_idx in metadata_indexes_sig
+                )
+                and conn_uq.unnamed not in metadata_uniques_by_sig
+            ):
+                obj_removed(conn_uq)
+                obj_removed(conn_idx)
+        else:
+            obj_removed(conn_obj)
+
+    for existing_name in sorted(set(metadata_names).intersection(conn_names)):
+        metadata_obj = metadata_names[existing_name]
+
+        if existing_name in doubled_constraints:
+            conn_uq, conn_idx = doubled_constraints[existing_name]
+            if is_index_sig(metadata_obj):
+                conn_obj = conn_idx
+            else:
+                conn_obj = conn_uq
+        else:
+            conn_obj = conn_names[existing_name]
+
+        if type(conn_obj) is not type(metadata_obj):
+            obj_removed(conn_obj)
+            obj_added(metadata_obj)
+        else:
+            comparison = metadata_obj.compare_to_reflected(conn_obj)
+
+            if comparison.is_different:
+                # constraints are different
+                obj_changed(conn_obj, metadata_obj, comparison.message)
+            elif comparison.is_skip:
+                # constraints cannot be compared; skip them
+                thing = (
+                    "index" if is_index_sig(conn_obj) else "unique constraint"
+                )
+                log.info(
+                    "Cannot compare %s %r, assuming equal and skipping. %s",
+                    thing,
+                    conn_obj.name,
+                    comparison.message,
+                )
+            else:
+                # constraints are equal
+                assert comparison.is_equal
+
+    for added_name in sorted(set(metadata_names).difference(conn_names)):
+        obj = metadata_names[added_name]
+        obj_added(obj)
+
+    for uq_sig in unnamed_metadata_uniques:
+        if uq_sig not in conn_uniques_by_sig:
+            obj_added(unnamed_metadata_uniques[uq_sig])
+
+
+def _correct_for_uq_duplicates_uix(
+    conn_unique_constraints,
+    conn_indexes,
+    metadata_unique_constraints,
+    metadata_indexes,
+    dialect,
+    impl,
+):
+    # dedupe unique indexes vs. constraints, since MySQL / Oracle
+    # don't really have unique constraints as a separate construct.
+    # but look in the metadata and try to maintain constructs
+    # that already seem to be defined one way or the other
+    # on that side.  This logic was formerly local to the MySQL dialect,
+    # generalized to Oracle and others. See #276
+
+    # resolve the final rendered name for unique constraints defined in
+    # the metadata.  this includes truncation of long names.  naming-
+    # convention names should already be set as cons.name at this point,
+    # but we leave that determination to sqla_compat.
+    metadata_cons_names = [
+        (sqla_compat._get_constraint_final_name(cons, dialect), cons)
+        for cons in metadata_unique_constraints
+    ]
+
+    metadata_uq_names = {
+        name for name, cons in metadata_cons_names if name is not None
+    }
+
+    unnamed_metadata_uqs = {
+        impl._create_metadata_constraint_sig(cons).unnamed
+        for name, cons in metadata_cons_names
+        if name is None
+    }
+
+    metadata_ix_names = {
+        sqla_compat._get_constraint_final_name(cons, dialect)
+        for cons in metadata_indexes
+        if cons.unique
+    }
+
+    # for reflection side, names are in their final database form
+    # already since they're from the database
+    conn_ix_names = {cons.name: cons for cons in conn_indexes if cons.unique}
+
+    uqs_dupe_indexes = {
+        cons.name: cons
+        for cons in conn_unique_constraints
+        if cons.info["duplicates_index"]
+    }
+
+    for overlap in uqs_dupe_indexes:
+        if overlap not in metadata_uq_names:
+            if (
+                impl._create_reflected_constraint_sig(
+                    uqs_dupe_indexes[overlap]
+                ).unnamed
+                not in unnamed_metadata_uqs
+            ):
+                conn_unique_constraints.discard(uqs_dupe_indexes[overlap])
+        elif overlap not in metadata_ix_names:
+            conn_indexes.discard(conn_ix_names[overlap])
+
+
+@comparators.dispatch_for("column")
+def _compare_nullable(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    cname: Union[quoted_name, str],
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> None:
+    metadata_col_nullable = metadata_col.nullable
+    conn_col_nullable = conn_col.nullable
+    alter_column_op.existing_nullable = conn_col_nullable
+
+    if conn_col_nullable is not metadata_col_nullable:
+        if (
+            (
+                sqla_compat._server_default_is_computed(
+                    metadata_col.server_default, conn_col.server_default
+                )
+                and sqla_compat._nullability_might_be_unset(metadata_col)
+            )
+            or sqla_compat._server_default_is_identity(
+                metadata_col.server_default, conn_col.server_default
+            )
+        ):
+            log.info(
+                "Ignoring nullable change on identity column '%s.%s'",
+                tname,
+                cname,
+            )
+        else:
+            alter_column_op.modify_nullable = metadata_col_nullable
+            log.info(
+                "Detected %s on column '%s.%s'",
+                "NULL" if metadata_col_nullable else "NOT NULL",
+                tname,
+                cname,
+            )
+
+
+@comparators.dispatch_for("column")
+def _setup_autoincrement(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    cname: quoted_name,
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> None:
+    if metadata_col.table._autoincrement_column is metadata_col:
+        alter_column_op.kw["autoincrement"] = True
+    elif metadata_col.autoincrement is True:
+        alter_column_op.kw["autoincrement"] = True
+    elif metadata_col.autoincrement is False:
+        alter_column_op.kw["autoincrement"] = False
+
+
+@comparators.dispatch_for("column")
+def _compare_type(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    cname: Union[quoted_name, str],
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> None:
+    conn_type = conn_col.type
+    alter_column_op.existing_type = conn_type
+    metadata_type = metadata_col.type
+    if conn_type._type_affinity is sqltypes.NullType:
+        log.info(
+            "Couldn't determine database type for column '%s.%s'",
+            tname,
+            cname,
+        )
+        return
+    if metadata_type._type_affinity is sqltypes.NullType:
+        log.info(
+            "Column '%s.%s' has no type within the model; can't compare",
+            tname,
+            cname,
+        )
+        return
+
+    isdiff = autogen_context.migration_context._compare_type(
+        conn_col, metadata_col
+    )
+
+    if isdiff:
+        alter_column_op.modify_type = metadata_type
+        log.info(
+            "Detected type change from %r to %r on '%s.%s'",
+            conn_type,
+            metadata_type,
+            tname,
+            cname,
+        )
+
+
+def _render_server_default_for_compare(
+    metadata_default: Optional[Any], autogen_context: AutogenContext
+) -> Optional[str]:
+    if isinstance(metadata_default, sa_schema.DefaultClause):
+        if isinstance(metadata_default.arg, str):
+            metadata_default = metadata_default.arg
+        else:
+            metadata_default = str(
+                metadata_default.arg.compile(
+                    dialect=autogen_context.dialect,
+                    compile_kwargs={"literal_binds": True},
+                )
+            )
+    if isinstance(metadata_default, str):
+        return metadata_default
+    else:
+        return None
+
+
+def _normalize_computed_default(sqltext: str) -> str:
+    """We want to warn if a computed SQL expression has changed; however,
+    we don't want false positives and the warning is not that critical.
+    So filter out most forms of variability from the SQL text.
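+
+    For example (illustrative only)::
+
+        >>> _normalize_computed_default("(`price` * 2)")
+        'price*2'
+        >>> _normalize_computed_default("PRICE * 2")
+        'price*2'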
+
+    """
+
+    return re.sub(r"[ \(\)'\"`\[\]\t\r\n]", "", sqltext).lower()
+
+
+def _compare_computed_default(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: str,
+    cname: str,
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> None:
+    rendered_metadata_default = str(
+        cast(sa_schema.Computed, metadata_col.server_default).sqltext.compile(
+            dialect=autogen_context.dialect,
+            compile_kwargs={"literal_binds": True},
+        )
+    )
+
+    # since we cannot change computed columns, we do only a crude comparison
+    # here where we try to eliminate syntactical differences in order to
+    # get a minimal comparison just to emit a warning.
+
+    rendered_metadata_default = _normalize_computed_default(
+        rendered_metadata_default
+    )
+
+    if isinstance(conn_col.server_default, sa_schema.Computed):
+        rendered_conn_default = str(
+            conn_col.server_default.sqltext.compile(
+                dialect=autogen_context.dialect,
+                compile_kwargs={"literal_binds": True},
+            )
+        )
+        rendered_conn_default = _normalize_computed_default(
+            rendered_conn_default
+        )
+    else:
+        rendered_conn_default = ""
+
+    if rendered_metadata_default != rendered_conn_default:
+        _warn_computed_not_supported(tname, cname)
+
+
+def _warn_computed_not_supported(tname: str, cname: str) -> None:
+    util.warn("Computed default on %s.%s cannot be modified" % (tname, cname))
+
+
+def _compare_identity_default(
+    autogen_context,
+    alter_column_op,
+    schema,
+    tname,
+    cname,
+    conn_col,
+    metadata_col,
+):
+    impl = autogen_context.migration_context.impl
+    diff, ignored_attr, is_alter = impl._compare_identity_default(
+        metadata_col.server_default, conn_col.server_default
+    )
+
+    return diff, is_alter
+
+
+@comparators.dispatch_for("column")
+def _compare_server_default(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    cname: Union[quoted_name, str],
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> Optional[bool]:
+    metadata_default = metadata_col.server_default
+    conn_col_default = conn_col.server_default
+    if conn_col_default is None and metadata_default is None:
+        return False
+
+    if sqla_compat._server_default_is_computed(metadata_default):
+        return _compare_computed_default(  # type:ignore[func-returns-value]
+            autogen_context,
+            alter_column_op,
+            schema,
+            tname,
+            cname,
+            conn_col,
+            metadata_col,
+        )
+    if sqla_compat._server_default_is_computed(conn_col_default):
+        _warn_computed_not_supported(tname, cname)
+        return False
+
+    if sqla_compat._server_default_is_identity(
+        metadata_default, conn_col_default
+    ):
+        alter_column_op.existing_server_default = conn_col_default
+        diff, is_alter = _compare_identity_default(
+            autogen_context,
+            alter_column_op,
+            schema,
+            tname,
+            cname,
+            conn_col,
+            metadata_col,
+        )
+        if is_alter:
+            alter_column_op.modify_server_default = metadata_default
+            if diff:
+                log.info(
+                    "Detected server default on column '%s.%s': "
+                    "identity options attributes %s",
+                    tname,
+                    cname,
+                    sorted(diff),
+                )
+    else:
+        rendered_metadata_default = _render_server_default_for_compare(
+            metadata_default, autogen_context
+        )
+
+        rendered_conn_default = (
+            cast(Any, conn_col_default).arg.text if conn_col_default else None
+        )
+
+        alter_column_op.existing_server_default = conn_col_default
+
+        is_diff = autogen_context.migration_context._compare_server_default(
+            conn_col,
+            metadata_col,
+            rendered_metadata_default,
+            rendered_conn_default,
+        )
+        if is_diff:
+            alter_column_op.modify_server_default = metadata_default
+            log.info("Detected server default on column '%s.%s'", tname, cname)
+
+    return None
+
+
+@comparators.dispatch_for("column")
+def _compare_column_comment(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    cname: quoted_name,
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> Optional[Literal[False]]:
+    assert autogen_context.dialect is not None
+    if not autogen_context.dialect.supports_comments:
+        return None
+
+    metadata_comment = metadata_col.comment
+    conn_col_comment = conn_col.comment
+    if conn_col_comment is None and metadata_comment is None:
+        return False
+
+    alter_column_op.existing_comment = conn_col_comment
+
+    if conn_col_comment != metadata_comment:
+        alter_column_op.modify_comment = metadata_comment
+        log.info("Detected column comment '%s.%s'", tname, cname)
+
+    return None
+
+
+@comparators.dispatch_for("table")
+def _compare_foreign_keys(
+    autogen_context: AutogenContext,
+    modify_table_ops: ModifyTableOps,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    conn_table: Table,
+    metadata_table: Table,
+) -> None:
+    # if we're doing CREATE TABLE, all FKs are created
+    # inline within the table def
+    if conn_table is None or metadata_table is None:
+        return
+
+    inspector = autogen_context.inspector
+    metadata_fks = {
+        fk
+        for fk in metadata_table.constraints
+        if isinstance(fk, sa_schema.ForeignKeyConstraint)
+    }
+
+    conn_fks_list = [
+        fk
+        for fk in inspector.get_foreign_keys(tname, schema=schema)
+        if autogen_context.run_name_filters(
+            fk["name"],
+            "foreign_key_constraint",
+            {"table_name": tname, "schema_name": schema},
+        )
+    ]
+
+    conn_fks = {
+        _make_foreign_key(const, conn_table)  # type: ignore[arg-type]
+        for const in conn_fks_list
+    }
+
+    impl = autogen_context.migration_context.impl
+
+    # give the dialect a chance to correct the FKs to match more
+    # closely
+    autogen_context.migration_context.impl.correct_for_autogen_foreignkeys(
+        conn_fks, metadata_fks
+    )
+
+    metadata_fks_sig = {
+        impl._create_metadata_constraint_sig(fk) for fk in metadata_fks
+    }
+
+    conn_fks_sig = {
+        impl._create_reflected_constraint_sig(fk) for fk in conn_fks
+    }
+
+    # check if reflected FKs include options, indicating the backend
+    # can reflect FK options
+    if conn_fks_list and "options" in conn_fks_list[0]:
+        conn_fks_by_sig = {c.unnamed: c for c in conn_fks_sig}
+        metadata_fks_by_sig = {c.unnamed: c for c in metadata_fks_sig}
+    else:
+        # otherwise compare by sig without options added
+        conn_fks_by_sig = {c.unnamed_no_options: c for c in conn_fks_sig}
+        metadata_fks_by_sig = {
+            c.unnamed_no_options: c for c in metadata_fks_sig
+        }
+
+    metadata_fks_by_name = {
+        c.name: c for c in metadata_fks_sig if c.name is not None
+    }
+    conn_fks_by_name = {c.name: c for c in conn_fks_sig if c.name is not None}
+
+    def _add_fk(obj, compare_to):
+        if autogen_context.run_object_filters(
+            obj.const, obj.name, "foreign_key_constraint", False, compare_to
+        ):
+            modify_table_ops.ops.append(
+                ops.CreateForeignKeyOp.from_constraint(obj.const)
+            )
+
+            log.info(
+                "Detected added foreign key (%s)(%s) on table %s%s",
+                ", ".join(obj.source_columns),
+                ", ".join(obj.target_columns),
+                "%s." % obj.source_schema if obj.source_schema else "",
+                obj.source_table,
+            )
+
+    def _remove_fk(obj, compare_to):
+        if autogen_context.run_object_filters(
+            obj.const, obj.name, "foreign_key_constraint", True, compare_to
+        ):
+            modify_table_ops.ops.append(
+                ops.DropConstraintOp.from_constraint(obj.const)
+            )
+            log.info(
+                "Detected removed foreign key (%s)(%s) on table %s%s",
+                ", ".join(obj.source_columns),
+                ", ".join(obj.target_columns),
+                "%s." % obj.source_schema if obj.source_schema else "",
+                obj.source_table,
+            )
+
+    # so far it appears we don't need to do this by name at all.
+    # SQLite doesn't preserve constraint names anyway
+
+    for removed_sig in set(conn_fks_by_sig).difference(metadata_fks_by_sig):
+        const = conn_fks_by_sig[removed_sig]
+        compare_to = (
+            metadata_fks_by_name[const.name].const
+            if const.name in metadata_fks_by_name
+            else None
+        )
+        _remove_fk(const, compare_to)
+
+    for added_sig in set(metadata_fks_by_sig).difference(conn_fks_by_sig):
+        const = metadata_fks_by_sig[added_sig]
+        compare_to = (
+            conn_fks_by_name[const.name].const
+            if const.name in conn_fks_by_name
+            else None
+        )
+        _add_fk(const, compare_to)
+
+
+@comparators.dispatch_for("table")
+def _compare_table_comment(
+    autogen_context: AutogenContext,
+    modify_table_ops: ModifyTableOps,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    conn_table: Optional[Table],
+    metadata_table: Optional[Table],
+) -> None:
+    assert autogen_context.dialect is not None
+    if not autogen_context.dialect.supports_comments:
+        return
+
+    # if we're doing CREATE TABLE, comments will be created inline
+    # with the create_table op.
+    if conn_table is None or metadata_table is None:
+        return
+
+    if conn_table.comment is None and metadata_table.comment is None:
+        return
+
+    if metadata_table.comment is None and conn_table.comment is not None:
+        modify_table_ops.ops.append(
+            ops.DropTableCommentOp(
+                tname, existing_comment=conn_table.comment, schema=schema
+            )
+        )
+    elif metadata_table.comment != conn_table.comment:
+        modify_table_ops.ops.append(
+            ops.CreateTableCommentOp(
+                tname,
+                metadata_table.comment,
+                existing_comment=conn_table.comment,
+                schema=schema,
+            )
+        )
diff --git a/.venv/lib/python3.12/site-packages/alembic/autogenerate/render.py b/.venv/lib/python3.12/site-packages/alembic/autogenerate/render.py
new file mode 100644
index 00000000..50c51fa9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/autogenerate/render.py
@@ -0,0 +1,1125 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+from io import StringIO
+import re
+from typing import Any
+from typing import cast
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from mako.pygen import PythonPrinter
+from sqlalchemy import schema as sa_schema
+from sqlalchemy import sql
+from sqlalchemy import types as sqltypes
+from sqlalchemy.sql.elements import conv
+from sqlalchemy.sql.elements import Label
+from sqlalchemy.sql.elements import quoted_name
+
+from .. import util
+from ..operations import ops
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy import Computed
+    from sqlalchemy import Identity
+    from sqlalchemy.sql.base import DialectKWArgs
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.schema import CheckConstraint
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.schema import FetchedValue
+    from sqlalchemy.sql.schema import ForeignKey
+    from sqlalchemy.sql.schema import ForeignKeyConstraint
+    from sqlalchemy.sql.schema import Index
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import PrimaryKeyConstraint
+    from sqlalchemy.sql.schema import UniqueConstraint
+    from sqlalchemy.sql.sqltypes import ARRAY
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from alembic.autogenerate.api import AutogenContext
+    from alembic.config import Config
+    from alembic.operations.ops import MigrationScript
+    from alembic.operations.ops import ModifyTableOps
+
+
+MAX_PYTHON_ARGS = 255
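+# (CPython historically rejected calls with more than 255 explicit
+# arguments; _add_table below switches to a single starred list when a
+# table renders to more arguments than this.)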
+
+
+def _render_gen_name(
+    autogen_context: AutogenContext,
+    name: sqla_compat._ConstraintName,
+) -> Optional[Union[quoted_name, str, _f_name]]:
+    if isinstance(name, conv):
+        return _f_name(_alembic_autogenerate_prefix(autogen_context), name)
+    else:
+        return sqla_compat.constraint_name_or_none(name)
+
+
+def _indent(text: str) -> str:
+    text = re.compile(r"^", re.M).sub("    ", text).strip()
+    text = re.compile(r" +$", re.M).sub("", text)
+    return text
+
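+# illustrative: _indent("pass\nfoo") returns "pass\n    foo" -- every line
+# gains a four-space indent, .strip() then consumes the first line's indent
+# (the insertion point in the script template supplies its own), and
+# trailing whitespace is removed from each line.
+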
+
+def _render_python_into_templatevars(
+    autogen_context: AutogenContext,
+    migration_script: MigrationScript,
+    template_args: Dict[str, Union[str, Config]],
+) -> None:
+    imports = autogen_context.imports
+
+    for upgrade_ops, downgrade_ops in zip(
+        migration_script.upgrade_ops_list, migration_script.downgrade_ops_list
+    ):
+        template_args[upgrade_ops.upgrade_token] = _indent(
+            _render_cmd_body(upgrade_ops, autogen_context)
+        )
+        template_args[downgrade_ops.downgrade_token] = _indent(
+            _render_cmd_body(downgrade_ops, autogen_context)
+        )
+    template_args["imports"] = "\n".join(sorted(imports))
+
+
+default_renderers = renderers = util.Dispatcher()
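+# ("renderers" is rebound to default_renderers.branch() at the bottom of
+# this module, so registrations layered on afterward are kept separate
+# from the default set.)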
+
+
+def _render_cmd_body(
+    op_container: ops.OpContainer,
+    autogen_context: AutogenContext,
+) -> str:
+    buf = StringIO()
+    printer = PythonPrinter(buf)
+
+    printer.writeline(
+        "# ### commands auto generated by Alembic - please adjust! ###"
+    )
+
+    has_lines = False
+    for op in op_container.ops:
+        lines = render_op(autogen_context, op)
+        has_lines = has_lines or bool(lines)
+
+        for line in lines:
+            printer.writeline(line)
+
+    if not has_lines:
+        printer.writeline("pass")
+
+    printer.writeline("# ### end Alembic commands ###")
+
+    return buf.getvalue()
+
+
+def render_op(
+    autogen_context: AutogenContext, op: ops.MigrateOperation
+) -> List[str]:
+    renderer = renderers.dispatch(op)
+    lines = util.to_list(renderer(autogen_context, op))
+    return lines
+
+
+def render_op_text(
+    autogen_context: AutogenContext, op: ops.MigrateOperation
+) -> str:
+    return "\n".join(render_op(autogen_context, op))
+
+
+@renderers.dispatch_for(ops.ModifyTableOps)
+def _render_modify_table(
+    autogen_context: AutogenContext, op: ModifyTableOps
+) -> List[str]:
+    opts = autogen_context.opts
+    render_as_batch = opts.get("render_as_batch", False)
+
+    if op.ops:
+        lines = []
+        if render_as_batch:
+            with autogen_context._within_batch():
+                lines.append(
+                    "with op.batch_alter_table(%r, schema=%r) as batch_op:"
+                    % (op.table_name, op.schema)
+                )
+                for t_op in op.ops:
+                    t_lines = render_op(autogen_context, t_op)
+                    lines.extend(t_lines)
+                lines.append("")
+        else:
+            for t_op in op.ops:
+                t_lines = render_op(autogen_context, t_op)
+                lines.extend(t_lines)
+
+        return lines
+    else:
+        return []
+
+
+@renderers.dispatch_for(ops.CreateTableCommentOp)
+def _render_create_table_comment(
+    autogen_context: AutogenContext, op: ops.CreateTableCommentOp
+) -> str:
+    if autogen_context._has_batch:
+        templ = (
+            "{prefix}create_table_comment(\n"
+            "{indent}{comment},\n"
+            "{indent}existing_comment={existing}\n"
+            ")"
+        )
+    else:
+        templ = (
+            "{prefix}create_table_comment(\n"
+            "{indent}'{tname}',\n"
+            "{indent}{comment},\n"
+            "{indent}existing_comment={existing},\n"
+            "{indent}schema={schema}\n"
+            ")"
+        )
+    return templ.format(
+        prefix=_alembic_autogenerate_prefix(autogen_context),
+        tname=op.table_name,
+        comment="%r" % op.comment if op.comment is not None else None,
+        existing=(
+            "%r" % op.existing_comment
+            if op.existing_comment is not None
+            else None
+        ),
+        schema="'%s'" % op.schema if op.schema is not None else None,
+        indent="    ",
+    )
+
+
+@renderers.dispatch_for(ops.DropTableCommentOp)
+def _render_drop_table_comment(
+    autogen_context: AutogenContext, op: ops.DropTableCommentOp
+) -> str:
+    if autogen_context._has_batch:
+        templ = (
+            "{prefix}drop_table_comment(\n"
+            "{indent}existing_comment={existing}\n"
+            ")"
+        )
+    else:
+        templ = (
+            "{prefix}drop_table_comment(\n"
+            "{indent}'{tname}',\n"
+            "{indent}existing_comment={existing},\n"
+            "{indent}schema={schema}\n"
+            ")"
+        )
+    return templ.format(
+        prefix=_alembic_autogenerate_prefix(autogen_context),
+        tname=op.table_name,
+        existing=(
+            "%r" % op.existing_comment
+            if op.existing_comment is not None
+            else None
+        ),
+        schema="'%s'" % op.schema if op.schema is not None else None,
+        indent="    ",
+    )
+
+
+@renderers.dispatch_for(ops.CreateTableOp)
+def _add_table(autogen_context: AutogenContext, op: ops.CreateTableOp) -> str:
+    table = op.to_table()
+
+    args = [
+        col
+        for col in [
+            _render_column(col, autogen_context) for col in table.columns
+        ]
+        if col
+    ] + sorted(
+        [
+            rcons
+            for rcons in [
+                _render_constraint(
+                    cons, autogen_context, op._namespace_metadata
+                )
+                for cons in table.constraints
+            ]
+            if rcons is not None
+        ]
+    )
+
+    if len(args) > MAX_PYTHON_ARGS:
+        args_str = "*[" + ",\n".join(args) + "]"
+    else:
+        args_str = ",\n".join(args)
+
+    text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % {
+        "tablename": _ident(op.table_name),
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "args": args_str,
+    }
+    if op.schema:
+        text += ",\nschema=%r" % _ident(op.schema)
+
+    comment = table.comment
+    if comment:
+        text += ",\ncomment=%r" % _ident(comment)
+
+    info = table.info
+    if info:
+        text += f",\ninfo={info!r}"
+
+    for k in sorted(op.kw):
+        text += ",\n%s=%r" % (k.replace(" ", "_"), op.kw[k])
+
+    if table._prefixes:
+        prefixes = ", ".join("'%s'" % p for p in table._prefixes)
+        text += ",\nprefixes=[%s]" % prefixes
+
+    if op.if_not_exists is not None:
+        text += ",\nif_not_exists=%r" % bool(op.if_not_exists)
+
+    text += "\n)"
+    return text
+
+
+@renderers.dispatch_for(ops.DropTableOp)
+def _drop_table(autogen_context: AutogenContext, op: ops.DropTableOp) -> str:
+    text = "%(prefix)sdrop_table(%(tname)r" % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "tname": _ident(op.table_name),
+    }
+    if op.schema:
+        text += ", schema=%r" % _ident(op.schema)
+
+    if op.if_exists is not None:
+        text += ", if_exists=%r" % bool(op.if_exists)
+
+    text += ")"
+    return text
+
+
+def _render_dialect_kwargs_items(
+    autogen_context: AutogenContext, item: DialectKWArgs
+) -> list[str]:
+    return [
+        f"{key}={_render_potential_expr(val, autogen_context)}"
+        for key, val in item.dialect_kwargs.items()
+    ]
+
+
+@renderers.dispatch_for(ops.CreateIndexOp)
+def _add_index(autogen_context: AutogenContext, op: ops.CreateIndexOp) -> str:
+    index = op.to_index()
+
+    has_batch = autogen_context._has_batch
+
+    if has_batch:
+        tmpl = (
+            "%(prefix)screate_index(%(name)r, [%(columns)s], "
+            "unique=%(unique)r%(kwargs)s)"
+        )
+    else:
+        tmpl = (
+            "%(prefix)screate_index(%(name)r, %(table)r, [%(columns)s], "
+            "unique=%(unique)r%(schema)s%(kwargs)s)"
+        )
+
+    assert index.table is not None
+
+    opts = _render_dialect_kwargs_items(autogen_context, index)
+    if op.if_not_exists is not None:
+        opts.append("if_not_exists=%r" % bool(op.if_not_exists))
+    text = tmpl % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "name": _render_gen_name(autogen_context, index.name),
+        "table": _ident(index.table.name),
+        "columns": ", ".join(
+            _get_index_rendered_expressions(index, autogen_context)
+        ),
+        "unique": index.unique or False,
+        "schema": (
+            (", schema=%r" % _ident(index.table.schema))
+            if index.table.schema
+            else ""
+        ),
+        "kwargs": ", " + ", ".join(opts) if opts else "",
+    }
+    return text
+
+
+@renderers.dispatch_for(ops.DropIndexOp)
+def _drop_index(autogen_context: AutogenContext, op: ops.DropIndexOp) -> str:
+    index = op.to_index()
+
+    has_batch = autogen_context._has_batch
+
+    if has_batch:
+        tmpl = "%(prefix)sdrop_index(%(name)r%(kwargs)s)"
+    else:
+        tmpl = (
+            "%(prefix)sdrop_index(%(name)r, "
+            "table_name=%(table_name)r%(schema)s%(kwargs)s)"
+        )
+    opts = _render_dialect_kwargs_items(autogen_context, index)
+    if op.if_exists is not None:
+        opts.append("if_exists=%r" % bool(op.if_exists))
+    text = tmpl % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "name": _render_gen_name(autogen_context, op.index_name),
+        "table_name": _ident(op.table_name),
+        "schema": ((", schema=%r" % _ident(op.schema)) if op.schema else ""),
+        "kwargs": ", " + ", ".join(opts) if opts else "",
+    }
+    return text
+
+
+@renderers.dispatch_for(ops.CreateUniqueConstraintOp)
+def _add_unique_constraint(
+    autogen_context: AutogenContext, op: ops.CreateUniqueConstraintOp
+) -> List[str]:
+    return [_uq_constraint(op.to_constraint(), autogen_context, True)]
+
+
+@renderers.dispatch_for(ops.CreateForeignKeyOp)
+def _add_fk_constraint(
+    autogen_context: AutogenContext, op: ops.CreateForeignKeyOp
+) -> str:
+    args = [repr(_render_gen_name(autogen_context, op.constraint_name))]
+    if not autogen_context._has_batch:
+        args.append(repr(_ident(op.source_table)))
+
+    args.extend(
+        [
+            repr(_ident(op.referent_table)),
+            repr([_ident(col) for col in op.local_cols]),
+            repr([_ident(col) for col in op.remote_cols]),
+        ]
+    )
+    kwargs = [
+        "referent_schema",
+        "onupdate",
+        "ondelete",
+        "initially",
+        "deferrable",
+        "use_alter",
+        "match",
+    ]
+    if not autogen_context._has_batch:
+        kwargs.insert(0, "source_schema")
+
+    for k in kwargs:
+        if k in op.kw:
+            value = op.kw[k]
+            if value is not None:
+                args.append("%s=%r" % (k, value))
+
+    return "%(prefix)screate_foreign_key(%(args)s)" % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "args": ", ".join(args),
+    }
+
+
+@renderers.dispatch_for(ops.CreatePrimaryKeyOp)
+def _add_pk_constraint(autogen_context, op):
+    raise NotImplementedError()
+
+
+@renderers.dispatch_for(ops.CreateCheckConstraintOp)
+def _add_check_constraint(autogen_context, op):
+    raise NotImplementedError()
+
+
+@renderers.dispatch_for(ops.DropConstraintOp)
+def _drop_constraint(
+    autogen_context: AutogenContext, op: ops.DropConstraintOp
+) -> str:
+    prefix = _alembic_autogenerate_prefix(autogen_context)
+    name = _render_gen_name(autogen_context, op.constraint_name)
+    schema = _ident(op.schema) if op.schema else None
+    type_ = _ident(op.constraint_type) if op.constraint_type else None
+
+    params_strs = []
+    params_strs.append(repr(name))
+    if not autogen_context._has_batch:
+        params_strs.append(repr(_ident(op.table_name)))
+        if schema is not None:
+            params_strs.append(f"schema={schema!r}")
+    if type_ is not None:
+        params_strs.append(f"type_={type_!r}")
+
+    return f"{prefix}drop_constraint({', '.join(params_strs)})"
+
+
+@renderers.dispatch_for(ops.AddColumnOp)
+def _add_column(autogen_context: AutogenContext, op: ops.AddColumnOp) -> str:
+    schema, tname, column = op.schema, op.table_name, op.column
+    if autogen_context._has_batch:
+        template = "%(prefix)sadd_column(%(column)s)"
+    else:
+        template = "%(prefix)sadd_column(%(tname)r, %(column)s"
+        if schema:
+            template += ", schema=%(schema)r"
+        template += ")"
+    text = template % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "tname": tname,
+        "column": _render_column(column, autogen_context),
+        "schema": schema,
+    }
+    return text
+
+
+@renderers.dispatch_for(ops.DropColumnOp)
+def _drop_column(autogen_context: AutogenContext, op: ops.DropColumnOp) -> str:
+    schema, tname, column_name = op.schema, op.table_name, op.column_name
+
+    if autogen_context._has_batch:
+        template = "%(prefix)sdrop_column(%(cname)r)"
+    else:
+        template = "%(prefix)sdrop_column(%(tname)r, %(cname)r"
+        if schema:
+            template += ", schema=%(schema)r"
+        template += ")"
+
+    text = template % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "tname": _ident(tname),
+        "cname": _ident(column_name),
+        "schema": _ident(schema),
+    }
+    return text
+
+
+@renderers.dispatch_for(ops.AlterColumnOp)
+def _alter_column(
+    autogen_context: AutogenContext, op: ops.AlterColumnOp
+) -> str:
+    tname = op.table_name
+    cname = op.column_name
+    server_default = op.modify_server_default
+    type_ = op.modify_type
+    nullable = op.modify_nullable
+    comment = op.modify_comment
+    autoincrement = op.kw.get("autoincrement", None)
+    existing_type = op.existing_type
+    existing_nullable = op.existing_nullable
+    existing_comment = op.existing_comment
+    existing_server_default = op.existing_server_default
+    schema = op.schema
+
+    indent = " " * 11
+
+    if autogen_context._has_batch:
+        template = "%(prefix)salter_column(%(cname)r"
+    else:
+        template = "%(prefix)salter_column(%(tname)r, %(cname)r"
+
+    text = template % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "tname": tname,
+        "cname": cname,
+    }
+    if existing_type is not None:
+        text += ",\n%sexisting_type=%s" % (
+            indent,
+            _repr_type(existing_type, autogen_context),
+        )
+    if server_default is not False:
+        rendered = _render_server_default(server_default, autogen_context)
+        text += ",\n%sserver_default=%s" % (indent, rendered)
+
+    if type_ is not None:
+        text += ",\n%stype_=%s" % (indent, _repr_type(type_, autogen_context))
+    if nullable is not None:
+        text += ",\n%snullable=%r" % (indent, nullable)
+    if comment is not False:
+        text += ",\n%scomment=%r" % (indent, comment)
+    if existing_comment is not None:
+        text += ",\n%sexisting_comment=%r" % (indent, existing_comment)
+    if nullable is None and existing_nullable is not None:
+        text += ",\n%sexisting_nullable=%r" % (indent, existing_nullable)
+    if autoincrement is not None:
+        text += ",\n%sautoincrement=%r" % (indent, autoincrement)
+    if server_default is False and existing_server_default:
+        rendered = _render_server_default(
+            existing_server_default, autogen_context
+        )
+        text += ",\n%sexisting_server_default=%s" % (indent, rendered)
+    if schema and not autogen_context._has_batch:
+        text += ",\n%sschema=%r" % (indent, schema)
+    text += ")"
+    return text
+
+
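+# When a constraint name is a naming-convention conv(), _render_gen_name()
+# wraps it in _f_name below; illustratively, with the default "op." prefix
+# its repr emits op.f('ix_user_email'), marking the name as already final.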
+class _f_name:
+    def __init__(self, prefix: str, name: conv) -> None:
+        self.prefix = prefix
+        self.name = name
+
+    def __repr__(self) -> str:
+        return "%sf(%r)" % (self.prefix, _ident(self.name))
+
+
+def _ident(name: Optional[Union[quoted_name, str]]) -> Optional[str]:
+    """produce a plain string from an identifier that may be a
+    quoted_name() in SQLAlchemy 0.9 and greater, for use in repr().
+
+    The issue worked around here is that quoted_name() doesn't have
+    very good repr() behavior by itself when unicode is involved.
+
+    """
+    if name is None:
+        return name
+    elif isinstance(name, quoted_name):
+        return str(name)
+    elif isinstance(name, str):
+        return name
+
+
+def _render_potential_expr(
+    value: Any,
+    autogen_context: AutogenContext,
+    *,
+    wrap_in_element: bool = True,
+    is_server_default: bool = False,
+    is_index: bool = False,
+) -> str:
+    if isinstance(value, sql.ClauseElement):
+        sql_text = autogen_context.migration_context.impl.render_ddl_sql_expr(
+            value, is_server_default=is_server_default, is_index=is_index
+        )
+        if wrap_in_element:
+            prefix = _sqlalchemy_autogenerate_prefix(autogen_context)
+            element = "literal_column" if is_index else "text"
+            value_str = f"{prefix}{element}({sql_text!r})"
+            if (
+                is_index
+                and isinstance(value, Label)
+                and type(value.name) is str
+            ):
+                return value_str + f".label({value.name!r})"
+            else:
+                return value_str
+        else:
+            return repr(sql_text)
+    else:
+        return repr(value)
+
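+# illustrative: with the default "sa." prefix, a textual server default
+# renders as sa.text('now()'), while an index expression is wrapped as
+# sa.literal_column(...) so it stays a column expression when rendered.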
+
+def _get_index_rendered_expressions(
+    idx: Index, autogen_context: AutogenContext
+) -> List[str]:
+    return [
+        (
+            repr(_ident(getattr(exp, "name", None)))
+            if isinstance(exp, sa_schema.Column)
+            else _render_potential_expr(exp, autogen_context, is_index=True)
+        )
+        for exp in idx.expressions
+    ]
+
+
+def _uq_constraint(
+    constraint: UniqueConstraint,
+    autogen_context: AutogenContext,
+    alter: bool,
+) -> str:
+    opts: List[Tuple[str, Any]] = []
+
+    has_batch = autogen_context._has_batch
+
+    if constraint.deferrable:
+        opts.append(("deferrable", constraint.deferrable))
+    if constraint.initially:
+        opts.append(("initially", constraint.initially))
+    if not has_batch and alter and constraint.table.schema:
+        opts.append(("schema", _ident(constraint.table.schema)))
+    if not alter and constraint.name:
+        opts.append(
+            ("name", _render_gen_name(autogen_context, constraint.name))
+        )
+    dialect_options = _render_dialect_kwargs_items(autogen_context, constraint)
+
+    if alter:
+        args = [repr(_render_gen_name(autogen_context, constraint.name))]
+        if not has_batch:
+            args += [repr(_ident(constraint.table.name))]
+        args.append(repr([_ident(col.name) for col in constraint.columns]))
+        args.extend(["%s=%r" % (k, v) for k, v in opts])
+        args.extend(dialect_options)
+        return "%(prefix)screate_unique_constraint(%(args)s)" % {
+            "prefix": _alembic_autogenerate_prefix(autogen_context),
+            "args": ", ".join(args),
+        }
+    else:
+        args = [repr(_ident(col.name)) for col in constraint.columns]
+        args.extend(["%s=%r" % (k, v) for k, v in opts])
+        args.extend(dialect_options)
+        return "%(prefix)sUniqueConstraint(%(args)s)" % {
+            "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+            "args": ", ".join(args),
+        }
+
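+# illustrative renderings of _uq_constraint, assuming default prefixes:
+#   alter=True  -> op.create_unique_constraint('uq_user_email', 'user', ['email'])
+#   alter=False -> sa.UniqueConstraint('email', name='uq_user_email')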
+
+def _user_autogenerate_prefix(autogen_context, target):
+    prefix = autogen_context.opts["user_module_prefix"]
+    if prefix is None:
+        return "%s." % target.__module__
+    else:
+        return prefix
+
+
+def _sqlalchemy_autogenerate_prefix(autogen_context: AutogenContext) -> str:
+    return autogen_context.opts["sqlalchemy_module_prefix"] or ""
+
+
+def _alembic_autogenerate_prefix(autogen_context: AutogenContext) -> str:
+    if autogen_context._has_batch:
+        return "batch_op."
+    else:
+        return autogen_context.opts["alembic_module_prefix"] or ""
+
+
+def _user_defined_render(
+    type_: str, object_: Any, autogen_context: AutogenContext
+) -> Union[str, Literal[False]]:
+    if "render_item" in autogen_context.opts:
+        render = autogen_context.opts["render_item"]
+        if render:
+            rendered = render(type_, object_, autogen_context)
+            if rendered is not False:
+                return rendered
+    return False
+
+
+def _render_column(
+    column: Column[Any], autogen_context: AutogenContext
+) -> str:
+    rendered = _user_defined_render("column", column, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    args: List[str] = []
+    opts: List[Tuple[str, Any]] = []
+
+    if column.server_default:
+        rendered = _render_server_default(  # type:ignore[assignment]
+            column.server_default, autogen_context
+        )
+        if rendered:
+            if _should_render_server_default_positionally(
+                column.server_default
+            ):
+                args.append(rendered)
+            else:
+                opts.append(("server_default", rendered))
+
+    if (
+        column.autoincrement is not None
+        and column.autoincrement != sqla_compat.AUTOINCREMENT_DEFAULT
+    ):
+        opts.append(("autoincrement", column.autoincrement))
+
+    if column.nullable is not None:
+        opts.append(("nullable", column.nullable))
+
+    if column.system:
+        opts.append(("system", column.system))
+
+    comment = column.comment
+    if comment:
+        opts.append(("comment", "%r" % comment))
+
+    # TODO: for non-ascii colname, assign a "key"
+    return "%(prefix)sColumn(%(name)r, %(type)s, %(args)s%(kwargs)s)" % {
+        "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+        "name": _ident(column.name),
+        "type": _repr_type(column.type, autogen_context),
+        "args": ", ".join([str(arg) for arg in args]) + ", " if args else "",
+        "kwargs": (
+            ", ".join(
+                ["%s=%s" % (kwname, val) for kwname, val in opts]
+                + [
+                    "%s=%s"
+                    % (key, _render_potential_expr(val, autogen_context))
+                    for key, val in column.kwargs.items()
+                ]
+            )
+        ),
+    }
+
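+# illustrative _render_column() output, assuming the default "sa." prefix:
+#   sa.Column('email', sa.String(length=50), nullable=True)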
+
+def _should_render_server_default_positionally(server_default: Any) -> bool:
+    return sqla_compat._server_default_is_computed(
+        server_default
+    ) or sqla_compat._server_default_is_identity(server_default)
+
+
+def _render_server_default(
+    default: Optional[
+        Union[FetchedValue, str, TextClause, ColumnElement[Any]]
+    ],
+    autogen_context: AutogenContext,
+    repr_: bool = True,
+) -> Optional[str]:
+    rendered = _user_defined_render("server_default", default, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    if sqla_compat._server_default_is_computed(default):
+        return _render_computed(cast("Computed", default), autogen_context)
+    elif sqla_compat._server_default_is_identity(default):
+        return _render_identity(cast("Identity", default), autogen_context)
+    elif isinstance(default, sa_schema.DefaultClause):
+        if isinstance(default.arg, str):
+            default = default.arg
+        else:
+            return _render_potential_expr(
+                default.arg, autogen_context, is_server_default=True
+            )
+
+    if isinstance(default, str) and repr_:
+        default = repr(re.sub(r"^'|'$", "", default))
+
+    return cast(str, default)
+
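+# illustrative: a plain string default of "0" comes back repr-quoted as '0',
+# while textual expressions are delegated to _render_potential_expr and
+# emerge as sa.text(...) calls.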
+
+def _render_computed(
+    computed: Computed, autogen_context: AutogenContext
+) -> str:
+    text = _render_potential_expr(
+        computed.sqltext, autogen_context, wrap_in_element=False
+    )
+
+    kwargs = {}
+    if computed.persisted is not None:
+        kwargs["persisted"] = computed.persisted
+    return "%(prefix)sComputed(%(text)s%(kwargs)s)" % {
+        "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+        "text": text,
+        "kwargs": (
+            ", " + ", ".join("%s=%s" % pair for pair in kwargs.items())
+            if kwargs
+            else ""
+        ),
+    }
+
+
+def _render_identity(
+    identity: Identity, autogen_context: AutogenContext
+) -> str:
+    kwargs = sqla_compat._get_identity_options_dict(
+        identity, dialect_kwargs=True
+    )
+
+    return "%(prefix)sIdentity(%(kwargs)s)" % {
+        "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+        "kwargs": (", ".join("%s=%s" % pair for pair in kwargs.items())),
+    }
+
+
+def _repr_type(
+    type_: TypeEngine,
+    autogen_context: AutogenContext,
+    _skip_variants: bool = False,
+) -> str:
+    rendered = _user_defined_render("type", type_, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    if hasattr(autogen_context.migration_context, "impl"):
+        impl_rt = autogen_context.migration_context.impl.render_type(
+            type_, autogen_context
+        )
+    else:
+        impl_rt = None
+
+    mod = type(type_).__module__
+    imports = autogen_context.imports
+
+    if not _skip_variants and sqla_compat._type_has_variants(type_):
+        return _render_Variant_type(type_, autogen_context)
+    elif mod.startswith("sqlalchemy.dialects"):
+        match = re.match(r"sqlalchemy\.dialects\.(\w+)", mod)
+        assert match is not None
+        dname = match.group(1)
+        if imports is not None:
+            imports.add("from sqlalchemy.dialects import %s" % dname)
+        if impl_rt:
+            return impl_rt
+        else:
+            return "%s.%r" % (dname, type_)
+    elif impl_rt:
+        return impl_rt
+    elif mod.startswith("sqlalchemy."):
+        if "_render_%s_type" % type_.__visit_name__ in globals():
+            fn = globals()["_render_%s_type" % type_.__visit_name__]
+            return fn(type_, autogen_context)
+        else:
+            prefix = _sqlalchemy_autogenerate_prefix(autogen_context)
+            return "%s%r" % (prefix, type_)
+    else:
+        prefix = _user_autogenerate_prefix(autogen_context, type_)
+        return "%s%r" % (prefix, type_)
+
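+# illustrative _repr_type() results, assuming default prefixes: a core type
+# renders as e.g. sa.String(length=50); a dialect-specific type as e.g.
+# postgresql.UUID(), which also adds "from sqlalchemy.dialects import
+# postgresql" to the imports set.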
+
+def _render_ARRAY_type(type_: ARRAY, autogen_context: AutogenContext) -> str:
+    return cast(
+        str,
+        _render_type_w_subtype(
+            type_, autogen_context, "item_type", r"(.+?\()"
+        ),
+    )
+
+
+def _render_Variant_type(
+    type_: TypeEngine, autogen_context: AutogenContext
+) -> str:
+    base_type, variant_mapping = sqla_compat._get_variant_mapping(type_)
+    base = _repr_type(base_type, autogen_context, _skip_variants=True)
+    assert base is not None and base is not False  # type: ignore[comparison-overlap]  # noqa:E501
+    for dialect in sorted(variant_mapping):
+        typ = variant_mapping[dialect]
+        base += ".with_variant(%s, %r)" % (
+            _repr_type(typ, autogen_context, _skip_variants=True),
+            dialect,
+        )
+    return base
+
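+# illustrative: String(50).with_variant(VARCHAR(50), "mysql") would render
+# roughly as sa.String(length=50).with_variant(mysql.VARCHAR(length=50),
+# 'mysql'), assuming default prefixes.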
+
+def _render_type_w_subtype(
+    type_: TypeEngine,
+    autogen_context: AutogenContext,
+    attrname: str,
+    regexp: str,
+    prefix: Optional[str] = None,
+) -> Union[Optional[str], Literal[False]]:
+    outer_repr = repr(type_)
+    inner_type = getattr(type_, attrname, None)
+    if inner_type is None:
+        return False
+
+    inner_repr = repr(inner_type)
+
+    inner_repr = re.sub(r"([\(\)])", r"\\\1", inner_repr)
+    sub_type = _repr_type(getattr(type_, attrname), autogen_context)
+    outer_type = re.sub(regexp + inner_repr, r"\1%s" % sub_type, outer_repr)
+
+    if prefix:
+        return "%s%s" % (prefix, outer_type)
+
+    mod = type(type_).__module__
+    if mod.startswith("sqlalchemy.dialects"):
+        match = re.match(r"sqlalchemy\.dialects\.(\w+)", mod)
+        assert match is not None
+        dname = match.group(1)
+        return "%s.%s" % (dname, outer_type)
+    elif mod.startswith("sqlalchemy"):
+        prefix = _sqlalchemy_autogenerate_prefix(autogen_context)
+        return "%s%s" % (prefix, outer_type)
+    else:
+        return None
+
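+# illustrative: for ARRAY(Integer()), _render_type_w_subtype swaps the inner
+# repr "Integer()" for its rendered form so the result reads
+# sa.ARRAY(sa.Integer()) rather than sa.ARRAY(Integer()).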
+
+_constraint_renderers = util.Dispatcher()
+
+
+def _render_constraint(
+    constraint: Constraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: Optional[MetaData],
+) -> Optional[str]:
+    try:
+        renderer = _constraint_renderers.dispatch(constraint)
+    except ValueError:
+        util.warn("No renderer is established for object %r" % constraint)
+        return "[Unknown Python object %r]" % constraint
+    else:
+        return renderer(constraint, autogen_context, namespace_metadata)
+
+
+@_constraint_renderers.dispatch_for(sa_schema.PrimaryKeyConstraint)
+def _render_primary_key(
+    constraint: PrimaryKeyConstraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: Optional[MetaData],
+) -> Optional[str]:
+    rendered = _user_defined_render("primary_key", constraint, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    if not constraint.columns:
+        return None
+
+    opts = []
+    if constraint.name:
+        opts.append(
+            ("name", repr(_render_gen_name(autogen_context, constraint.name)))
+        )
+    return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % {
+        "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+        "args": ", ".join(
+            [repr(c.name) for c in constraint.columns]
+            + ["%s=%s" % (kwname, val) for kwname, val in opts]
+        ),
+    }
+
+
+def _fk_colspec(
+    fk: ForeignKey,
+    metadata_schema: Optional[str],
+    namespace_metadata: MetaData,
+) -> str:
+    """Implement a 'safe' version of ForeignKey._get_colspec() that
+    won't fail if the remote table can't be resolved.
+
+    """
+    colspec = fk._get_colspec()
+    tokens = colspec.split(".")
+    tname, colname = tokens[-2:]
+
+    if metadata_schema is not None and len(tokens) == 2:
+        table_fullname = "%s.%s" % (metadata_schema, tname)
+    else:
+        table_fullname = ".".join(tokens[0:-1])
+
+    if (
+        not fk.link_to_name
+        and fk.parent is not None
+        and fk.parent.table is not None
+    ):
+        # try to resolve the remote table in order to adjust for column.key.
+        # the FK constraint needs to be rendered in terms of the column
+        # name.
+
+        if table_fullname in namespace_metadata.tables:
+            col = namespace_metadata.tables[table_fullname].c.get(colname)
+            if col is not None:
+                colname = _ident(col.name)  # type: ignore[assignment]
+
+    colspec = "%s.%s" % (table_fullname, colname)
+
+    return colspec
+
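+# illustrative: ForeignKey("user.id") with a MetaData schema of "app" yields
+# the colspec "app.user.id"; an already-qualified "other.user.id" passes
+# through with its own schema token.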
+
+def _populate_render_fk_opts(
+    constraint: ForeignKeyConstraint, opts: List[Tuple[str, str]]
+) -> None:
+    if constraint.onupdate:
+        opts.append(("onupdate", repr(constraint.onupdate)))
+    if constraint.ondelete:
+        opts.append(("ondelete", repr(constraint.ondelete)))
+    if constraint.initially:
+        opts.append(("initially", repr(constraint.initially)))
+    if constraint.deferrable:
+        opts.append(("deferrable", repr(constraint.deferrable)))
+    if constraint.use_alter:
+        opts.append(("use_alter", repr(constraint.use_alter)))
+    if constraint.match:
+        opts.append(("match", repr(constraint.match)))
+
+
+@_constraint_renderers.dispatch_for(sa_schema.ForeignKeyConstraint)
+def _render_foreign_key(
+    constraint: ForeignKeyConstraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: MetaData,
+) -> Optional[str]:
+    rendered = _user_defined_render("foreign_key", constraint, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    opts = []
+    if constraint.name:
+        opts.append(
+            ("name", repr(_render_gen_name(autogen_context, constraint.name)))
+        )
+
+    _populate_render_fk_opts(constraint, opts)
+
+    apply_metadata_schema = namespace_metadata.schema
+    return (
+        "%(prefix)sForeignKeyConstraint([%(cols)s], "
+        "[%(refcols)s], %(args)s)"
+        % {
+            "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+            "cols": ", ".join(
+                repr(_ident(f.parent.name)) for f in constraint.elements
+            ),
+            "refcols": ", ".join(
+                repr(_fk_colspec(f, apply_metadata_schema, namespace_metadata))
+                for f in constraint.elements
+            ),
+            "args": ", ".join(
+                ["%s=%s" % (kwname, val) for kwname, val in opts]
+            ),
+        }
+    )
+
+
+@_constraint_renderers.dispatch_for(sa_schema.UniqueConstraint)
+def _render_unique_constraint(
+    constraint: UniqueConstraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: Optional[MetaData],
+) -> str:
+    rendered = _user_defined_render("unique", constraint, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    return _uq_constraint(constraint, autogen_context, False)
+
+
+@_constraint_renderers.dispatch_for(sa_schema.CheckConstraint)
+def _render_check_constraint(
+    constraint: CheckConstraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: Optional[MetaData],
+) -> Optional[str]:
+    rendered = _user_defined_render("check", constraint, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    # detect the constraint being part of
+    # a parent type which is probably in the Table already.
+    # ideally SQLAlchemy would give us more of a first class
+    # way to detect this.
+    if (
+        constraint._create_rule
+        and hasattr(constraint._create_rule, "target")
+        and isinstance(
+            constraint._create_rule.target,
+            sqltypes.TypeEngine,
+        )
+    ):
+        return None
+    opts = []
+    if constraint.name:
+        opts.append(
+            ("name", repr(_render_gen_name(autogen_context, constraint.name)))
+        )
+    return "%(prefix)sCheckConstraint(%(sqltext)s%(opts)s)" % {
+        "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+        "opts": (
+            ", " + (", ".join("%s=%s" % (k, v) for k, v in opts))
+            if opts
+            else ""
+        ),
+        "sqltext": _render_potential_expr(
+            constraint.sqltext, autogen_context, wrap_in_element=False
+        ),
+    }
+
+
+@renderers.dispatch_for(ops.ExecuteSQLOp)
+def _execute_sql(autogen_context: AutogenContext, op: ops.ExecuteSQLOp) -> str:
+    if not isinstance(op.sqltext, str):
+        raise NotImplementedError(
+            "Autogenerate rendering of SQL Expression language constructs "
+            "not supported here; please use a plain SQL string"
+        )
+    return "op.execute(%r)" % op.sqltext
+
+
+renderers = default_renderers.branch()
diff --git a/.venv/lib/python3.12/site-packages/alembic/autogenerate/rewriter.py b/.venv/lib/python3.12/site-packages/alembic/autogenerate/rewriter.py
new file mode 100644
index 00000000..8994dcf8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/autogenerate/rewriter.py
@@ -0,0 +1,240 @@
+from __future__ import annotations
+
+from typing import Any
+from typing import Callable
+from typing import Iterator
+from typing import List
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import Union
+
+from .. import util
+from ..operations import ops
+
+if TYPE_CHECKING:
+    from ..operations.ops import AddColumnOp
+    from ..operations.ops import AlterColumnOp
+    from ..operations.ops import CreateTableOp
+    from ..operations.ops import DowngradeOps
+    from ..operations.ops import MigrateOperation
+    from ..operations.ops import MigrationScript
+    from ..operations.ops import ModifyTableOps
+    from ..operations.ops import OpContainer
+    from ..operations.ops import UpgradeOps
+    from ..runtime.migration import MigrationContext
+    from ..script.revision import _GetRevArg
+
+ProcessRevisionDirectiveFn = Callable[
+    ["MigrationContext", "_GetRevArg", List["MigrationScript"]], None
+]
+
+
+class Rewriter:
+    """A helper object that allows easy 'rewriting' of ops streams.
+
+    The :class:`.Rewriter` object is intended to be passed to the
+    :paramref:`.EnvironmentContext.configure.process_revision_directives`
+    parameter in an ``env.py`` script.  Once constructed, any number of
+    "rewrites" functions can be registered with it; each is given the
+    opportunity to modify individual directives without needing explicit
+    knowledge of the overall structure.
+
+    Each function is passed the :class:`.MigrationContext` object and
+    ``revision`` tuple that are normally passed to the
+    :paramref:`.EnvironmentContext.configure.process_revision_directives`
+    function, and the third argument is an individual directive of the
+    type noted in the decorator.  The function may return the directive
+    that was passed in, a new directive to replace it, or a list of zero
+    or more directives to replace it.
+
+    .. seealso::
+
+        :ref:`autogen_rewriter` - usage example
+
+    """
+
+    _traverse = util.Dispatcher()
+
+    _chained: Tuple[Union[ProcessRevisionDirectiveFn, Rewriter], ...] = ()
+
+    def __init__(self) -> None:
+        self.dispatch = util.Dispatcher()
+
+    def chain(
+        self,
+        other: Union[
+            ProcessRevisionDirectiveFn,
+            Rewriter,
+        ],
+    ) -> Rewriter:
+        """Produce a "chain" of this :class:`.Rewriter` to another.
+
+        This allows two or more rewriters to operate serially on a stream,
+        e.g.::
+
+            writer1 = autogenerate.Rewriter()
+            writer2 = autogenerate.Rewriter()
+
+
+            @writer1.rewrites(ops.AddColumnOp)
+            def add_column_nullable(context, revision, op):
+                op.column.nullable = True
+                return op
+
+
+            @writer2.rewrites(ops.AddColumnOp)
+            def add_column_idx(context, revision, op):
+                idx_op = ops.CreateIndexOp(
+                    "ixc", op.table_name, [op.column.name]
+                )
+                return [op, idx_op]
+
+            writer = writer1.chain(writer2)
+
+        :param other: a :class:`.Rewriter` instance
+        :return: a new :class:`.Rewriter` that will run the operations
+         of this writer, then the "other" writer, in succession.
+
+        """
+        wr = self.__class__.__new__(self.__class__)
+        wr.__dict__.update(self.__dict__)
+        wr._chained += (other,)
+        return wr
+
+    def rewrites(
+        self,
+        operator: Union[
+            Type[AddColumnOp],
+            Type[MigrateOperation],
+            Type[AlterColumnOp],
+            Type[CreateTableOp],
+            Type[ModifyTableOps],
+        ],
+    ) -> Callable[..., Any]:
+        """Register a function as rewriter for a given type.
+
+        The function should receive three arguments, which are
+        the :class:`.MigrationContext`, a ``revision`` tuple, and
+        an op directive of the type indicated.  E.g.::
+
+            @writer1.rewrites(ops.AddColumnOp)
+            def add_column_nullable(context, revision, op):
+                op.column.nullable = True
+                return op
+
+        """
+        return self.dispatch.dispatch_for(operator)
+
+    def _rewrite(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directive: MigrateOperation,
+    ) -> Iterator[MigrateOperation]:
+        try:
+            _rewriter = self.dispatch.dispatch(directive)
+        except ValueError:
+            _rewriter = None
+            yield directive
+        else:
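+            # directives already mutated by this rewriter pass through
+            # unchanged; the _mutations set prevents re-processing when
+            # a rewrite returns a directive of the same type it is
+            # registered for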
+            if self in directive._mutations:
+                yield directive
+            else:
+                for r_directive in util.to_list(
+                    _rewriter(context, revision, directive), []
+                ):
+                    r_directive._mutations = r_directive._mutations.union(
+                        [self]
+                    )
+                    yield r_directive
+
+    def __call__(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directives: List[MigrationScript],
+    ) -> None:
+        self.process_revision_directives(context, revision, directives)
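+        # writers attached via chain() then run in order against the
+        # already-rewritten directive stream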
+        for process_revision_directives in self._chained:
+            process_revision_directives(context, revision, directives)
+
+    @_traverse.dispatch_for(ops.MigrationScript)
+    def _traverse_script(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directive: MigrationScript,
+    ) -> None:
+        upgrade_ops_list: List[UpgradeOps] = []
+        for upgrade_ops in directive.upgrade_ops_list:
+            ret = self._traverse_for(context, revision, upgrade_ops)
+            if len(ret) != 1:
+                raise ValueError(
+                    "Can only return single object for UpgradeOps traverse"
+                )
+            upgrade_ops_list.append(ret[0])
+
+        directive.upgrade_ops = upgrade_ops_list  # type: ignore
+
+        downgrade_ops_list: List[DowngradeOps] = []
+        for downgrade_ops in directive.downgrade_ops_list:
+            ret = self._traverse_for(context, revision, downgrade_ops)
+            if len(ret) != 1:
+                raise ValueError(
+                    "Can only return single object for DowngradeOps traverse"
+                )
+            downgrade_ops_list.append(ret[0])
+        directive.downgrade_ops = downgrade_ops_list  # type: ignore
+
+    @_traverse.dispatch_for(ops.OpContainer)
+    def _traverse_op_container(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directive: OpContainer,
+    ) -> None:
+        self._traverse_list(context, revision, directive.ops)
+
+    @_traverse.dispatch_for(ops.MigrateOperation)
+    def _traverse_any_directive(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directive: MigrateOperation,
+    ) -> None:
+        pass
+
+    def _traverse_for(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directive: MigrateOperation,
+    ) -> Any:
+        directives = list(self._rewrite(context, revision, directive))
+        for directive in directives:
+            traverser = self._traverse.dispatch(directive)
+            traverser(self, context, revision, directive)
+        return directives
+
+    def _traverse_list(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directives: Any,
+    ) -> None:
+        dest = []
+        for directive in directives:
+            dest.extend(self._traverse_for(context, revision, directive))
+
+        directives[:] = dest
+
+    def process_revision_directives(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directives: List[MigrationScript],
+    ) -> None:
+        self._traverse_list(context, revision, directives)
diff --git a/.venv/lib/python3.12/site-packages/alembic/command.py b/.venv/lib/python3.12/site-packages/alembic/command.py
new file mode 100644
index 00000000..0ae1d9a8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/command.py
@@ -0,0 +1,760 @@
+# mypy: allow-untyped-defs, allow-untyped-calls
+
+from __future__ import annotations
+
+import os
+from typing import List
+from typing import Optional
+from typing import TYPE_CHECKING
+from typing import Union
+
+from . import autogenerate as autogen
+from . import util
+from .runtime.environment import EnvironmentContext
+from .script import ScriptDirectory
+
+if TYPE_CHECKING:
+    from alembic.config import Config
+    from alembic.script.base import Script
+    from alembic.script.revision import _RevIdType
+    from .runtime.environment import ProcessRevisionDirectiveFn
+
+
+def list_templates(config: Config) -> None:
+    """List available templates.
+
+    :param config: a :class:`.Config` object.
+
+    """
+
+    config.print_stdout("Available templates:\n")
+    for tempname in os.listdir(config.get_template_directory()):
+        with open(
+            os.path.join(config.get_template_directory(), tempname, "README")
+        ) as readme:
+            synopsis = next(readme).rstrip()
+        config.print_stdout("%s - %s", tempname, synopsis)
+
+    config.print_stdout("\nTemplates are used via the 'init' command, e.g.:")
+    config.print_stdout("\n  alembic init --template generic ./scripts")
+
+
+def init(
+    config: Config,
+    directory: str,
+    template: str = "generic",
+    package: bool = False,
+) -> None:
+    """Initialize a new scripts directory.
+
+    :param config: a :class:`.Config` object.
+
+    :param directory: string path of the target directory.
+
+    :param template: string name of the migration environment template to
+     use.
+
+    :param package: when True, write ``__init__.py`` files into the
+     environment location as well as the versions/ location.
+
+    """
+
+    if os.access(directory, os.F_OK) and os.listdir(directory):
+        raise util.CommandError(
+            "Directory %s already exists and is not empty" % directory
+        )
+
+    template_dir = os.path.join(config.get_template_directory(), template)
+    if not os.access(template_dir, os.F_OK):
+        raise util.CommandError("No such template %r" % template)
+
+    if not os.access(directory, os.F_OK):
+        with util.status(
+            f"Creating directory {os.path.abspath(directory)!r}",
+            **config.messaging_opts,
+        ):
+            os.makedirs(directory)
+
+    versions = os.path.join(directory, "versions")
+    with util.status(
+        f"Creating directory {os.path.abspath(versions)!r}",
+        **config.messaging_opts,
+    ):
+        os.makedirs(versions)
+
+    script = ScriptDirectory(directory)
+
+    config_file: str | None = None
+    for file_ in os.listdir(template_dir):
+        file_path = os.path.join(template_dir, file_)
+        if file_ == "alembic.ini.mako":
+            assert config.config_file_name is not None
+            config_file = os.path.abspath(config.config_file_name)
+            if os.access(config_file, os.F_OK):
+                util.msg(
+                    f"File {config_file!r} already exists, skipping",
+                    **config.messaging_opts,
+                )
+            else:
+                script._generate_template(
+                    file_path, config_file, script_location=directory
+                )
+        elif os.path.isfile(file_path):
+            output_file = os.path.join(directory, file_)
+            script._copy_file(file_path, output_file)
+
+    if package:
+        for path in [
+            os.path.join(os.path.abspath(directory), "__init__.py"),
+            os.path.join(os.path.abspath(versions), "__init__.py"),
+        ]:
+            with util.status(f"Adding {path!r}", **config.messaging_opts):
+                with open(path, "w"):
+                    pass
+
+    assert config_file is not None
+    util.msg(
+        "Please edit configuration/connection/logging "
+        f"settings in {config_file!r} before proceeding.",
+        **config.messaging_opts,
+    )
+
+
+def revision(
+    config: Config,
+    message: Optional[str] = None,
+    autogenerate: bool = False,
+    sql: bool = False,
+    head: str = "head",
+    splice: bool = False,
+    branch_label: Optional[_RevIdType] = None,
+    version_path: Optional[str] = None,
+    rev_id: Optional[str] = None,
+    depends_on: Optional[str] = None,
+    process_revision_directives: Optional[ProcessRevisionDirectiveFn] = None,
+) -> Union[Optional[Script], List[Optional[Script]]]:
+    """Create a new revision file.
+
+    :param config: a :class:`.Config` object.
+
+    :param message: string message to apply to the revision; this is the
+     ``-m`` option to ``alembic revision``.
+
+    :param autogenerate: whether or not to autogenerate the script from
+     the database; this is the ``--autogenerate`` option to
+     ``alembic revision``.
+
+    :param sql: whether to dump the script out as a SQL string; when specified,
+     the script is dumped to stdout.  This is the ``--sql`` option to
+     ``alembic revision``.
+
+    :param head: head revision to build the new revision upon as a parent;
+     this is the ``--head`` option to ``alembic revision``.
+
+    :param splice: whether or not the new revision should be made into a
+     new head of its own; is required when the given ``head`` is not itself
+     a head.  This is the ``--splice`` option to ``alembic revision``.
+
+    :param branch_label: string label to apply to the branch; this is the
+     ``--branch-label`` option to ``alembic revision``.
+
+    :param version_path: string symbol identifying a specific version path
+     from the configuration; this is the ``--version-path`` option to
+     ``alembic revision``.
+
+    :param rev_id: optional revision identifier to use instead of having
+     one generated; this is the ``--rev-id`` option to ``alembic revision``.
+
+    :param depends_on: optional list of "depends on" identifiers; this is the
+     ``--depends-on`` option to ``alembic revision``.
+
+    :param process_revision_directives: this is a callable that takes the
+     same form as the callable described at
+     :paramref:`.EnvironmentContext.configure.process_revision_directives`;
+     will be applied to the structure generated by the revision process
+     where it can be altered programmatically.   Note that unlike all
+     the other parameters, this option is only available via programmatic
+     use of :func:`.command.revision`.
+
+    """
+
+    script_directory = ScriptDirectory.from_config(config)
+
+    command_args = dict(
+        message=message,
+        autogenerate=autogenerate,
+        sql=sql,
+        head=head,
+        splice=splice,
+        branch_label=branch_label,
+        version_path=version_path,
+        rev_id=rev_id,
+        depends_on=depends_on,
+    )
+    revision_context = autogen.RevisionContext(
+        config,
+        script_directory,
+        command_args,
+        process_revision_directives=process_revision_directives,
+    )
+
+    environment = util.asbool(config.get_main_option("revision_environment"))
+
+    if autogenerate:
+        environment = True
+
+        if sql:
+            raise util.CommandError(
+                "Using --sql with --autogenerate does not make any sense"
+            )
+
+        def retrieve_migrations(rev, context):
+            revision_context.run_autogenerate(rev, context)
+            return []
+
+    elif environment:
+
+        def retrieve_migrations(rev, context):
+            revision_context.run_no_autogenerate(rev, context)
+            return []
+
+    elif sql:
+        raise util.CommandError(
+            "Using --sql with the revision command when "
+            "revision_environment is not configured does not make any sense"
+        )
+
+    if environment:
+        with EnvironmentContext(
+            config,
+            script_directory,
+            fn=retrieve_migrations,
+            as_sql=sql,
+            template_args=revision_context.template_args,
+            revision_context=revision_context,
+        ):
+            script_directory.run_env()
+
+        # the revision_context now has MigrationScript structure(s) present.
+        # these could theoretically be further processed / rewritten *here*,
+        # in addition to the hooks present within each run_migrations() call,
+        # or at the end of env.py run_migrations_online().
+
+    scripts = [script for script in revision_context.generate_scripts()]
+    if len(scripts) == 1:
+        return scripts[0]
+    else:
+        return scripts
+
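+# A hedged sketch of calling revision() programmatically (the .ini path
+# and message are placeholders):
+#
+#     from alembic import command
+#     from alembic.config import Config
+#
+#     cfg = Config("alembic.ini")
+#     script = command.revision(
+#         cfg, message="add users table", autogenerate=True
+#     )
+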
+
+def check(config: "Config") -> None:
+    """Check if revision command with autogenerate has pending upgrade ops.
+
+    :param config: a :class:`.Config` object.
+
+    .. versionadded:: 1.9.0
+
+    """
+
+    script_directory = ScriptDirectory.from_config(config)
+
+    command_args = dict(
+        message=None,
+        autogenerate=True,
+        sql=False,
+        head="head",
+        splice=False,
+        branch_label=None,
+        version_path=None,
+        rev_id=None,
+        depends_on=None,
+    )
+    revision_context = autogen.RevisionContext(
+        config,
+        script_directory,
+        command_args,
+    )
+
+    def retrieve_migrations(rev, context):
+        revision_context.run_autogenerate(rev, context)
+        return []
+
+    with EnvironmentContext(
+        config,
+        script_directory,
+        fn=retrieve_migrations,
+        as_sql=False,
+        template_args=revision_context.template_args,
+        revision_context=revision_context,
+    ):
+        script_directory.run_env()
+
+    # the revision_context now has MigrationScript structure(s) present.
+
+    migration_script = revision_context.generated_revisions[-1]
+    diffs = []
+    for upgrade_ops in migration_script.upgrade_ops_list:
+        diffs.extend(upgrade_ops.as_diffs())
+
+    if diffs:
+        raise util.AutogenerateDiffsDetected(
+            f"New upgrade operations detected: {diffs}",
+            revision_context=revision_context,
+            diffs=diffs,
+        )
+    else:
+        config.print_stdout("No new upgrade operations detected.")
+
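+# e.g. as a CI gate (sketch; ``cfg`` is a Config as shown above):
+#
+#     try:
+#         command.check(cfg)
+#     except util.AutogenerateDiffsDetected as exc:
+#         print(f"schema drift detected: {exc}")
+#         raise SystemExit(1)
+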
+
+def merge(
+    config: Config,
+    revisions: _RevIdType,
+    message: Optional[str] = None,
+    branch_label: Optional[_RevIdType] = None,
+    rev_id: Optional[str] = None,
+) -> Optional[Script]:
+    """Merge two revisions together.  Creates a new migration file.
+
+    :param config: a :class:`.Config` instance
+
+    :param revisions: The revisions to merge.
+
+    :param message: string message to apply to the revision.
+
+    :param branch_label: string label name to apply to the new revision.
+
+    :param rev_id: hardcoded revision identifier instead of generating a new
+     one.
+
+    .. seealso::
+
+        :ref:`branches`
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+    template_args = {
+        "config": config  # Let templates use config for
+        # e.g. multiple databases
+    }
+
+    environment = util.asbool(config.get_main_option("revision_environment"))
+
+    if environment:
+
+        def nothing(rev, context):
+            return []
+
+        with EnvironmentContext(
+            config,
+            script,
+            fn=nothing,
+            as_sql=False,
+            template_args=template_args,
+        ):
+            script.run_env()
+
+    return script.generate_revision(
+        rev_id or util.rev_id(),
+        message,
+        refresh=True,
+        head=revisions,
+        branch_labels=branch_label,
+        **template_args,  # type:ignore[arg-type]
+    )
+
+
+def upgrade(
+    config: Config,
+    revision: str,
+    sql: bool = False,
+    tag: Optional[str] = None,
+) -> None:
+    """Upgrade to a later version.
+
+    :param config: a :class:`.Config` instance.
+
+    :param revision: string revision target or range for --sql mode. May be
+     ``"heads"`` to target the most recent revision(s).
+
+    :param sql: if True, use ``--sql`` mode.
+
+    :param tag: an arbitrary "tag" that can be intercepted by custom
+     ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
+     method.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    starting_rev = None
+    if ":" in revision:
+        if not sql:
+            raise util.CommandError("Range revision not allowed")
+        starting_rev, revision = revision.split(":", 2)
+
+    def upgrade(rev, context):
+        return script._upgrade_revs(revision, rev)
+
+    with EnvironmentContext(
+        config,
+        script,
+        fn=upgrade,
+        as_sql=sql,
+        starting_rev=starting_rev,
+        destination_rev=revision,
+        tag=tag,
+    ):
+        script.run_env()
+
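+# In --sql (offline) mode a <fromrev>:<torev> range may be given
+# (sketch; the revision id is a placeholder):
+#
+#     command.upgrade(cfg, "ae1027a6acf:head", sql=True)
+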
+
+def downgrade(
+    config: Config,
+    revision: str,
+    sql: bool = False,
+    tag: Optional[str] = None,
+) -> None:
+    """Revert to a previous version.
+
+    :param config: a :class:`.Config` instance.
+
+    :param revision: string revision target or range for --sql mode. May
+     be ``"base"`` to target the first revision.
+
+    :param sql: if True, use ``--sql`` mode.
+
+    :param tag: an arbitrary "tag" that can be intercepted by custom
+     ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
+     method.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+    starting_rev = None
+    if ":" in revision:
+        if not sql:
+            raise util.CommandError("Range revision not allowed")
+        starting_rev, revision = revision.split(":", 2)
+    elif sql:
+        raise util.CommandError(
+            "downgrade with --sql requires <fromrev>:<torev>"
+        )
+
+    def downgrade(rev, context):
+        return script._downgrade_revs(revision, rev)
+
+    with EnvironmentContext(
+        config,
+        script,
+        fn=downgrade,
+        as_sql=sql,
+        starting_rev=starting_rev,
+        destination_rev=revision,
+        tag=tag,
+    ):
+        script.run_env()
+
+
+def show(config: Config, rev: str) -> None:
+    """Show the revision(s) denoted by the given symbol.
+
+    :param config: a :class:`.Config` instance.
+
+    :param rev: string revision target. May be ``"current"`` to show the
+     revision(s) currently applied in the database.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    if rev == "current":
+
+        def show_current(rev, context):
+            for sc in script.get_revisions(rev):
+                config.print_stdout(sc.log_entry)
+            return []
+
+        with EnvironmentContext(config, script, fn=show_current):
+            script.run_env()
+    else:
+        for sc in script.get_revisions(rev):
+            config.print_stdout(sc.log_entry)
+
+
+def history(
+    config: Config,
+    rev_range: Optional[str] = None,
+    verbose: bool = False,
+    indicate_current: bool = False,
+) -> None:
+    """List changeset scripts in chronological order.
+
+    :param config: a :class:`.Config` instance.
+
+    :param rev_range: string revision range.
+
+    :param verbose: output in verbose mode.
+
+    :param indicate_current: indicate current revision.
+
+    """
+    base: Optional[str]
+    head: Optional[str]
+    script = ScriptDirectory.from_config(config)
+    if rev_range is not None:
+        if ":" not in rev_range:
+            raise util.CommandError(
+                "History range requires [start]:[end], " "[start]:, or :[end]"
+            )
+        base, head = rev_range.strip().split(":")
+    else:
+        base = head = None
+
+    environment = (
+        util.asbool(config.get_main_option("revision_environment"))
+        or indicate_current
+    )
+
+    def _display_history(config, script, base, head, currents=()):
+        for sc in script.walk_revisions(
+            base=base or "base", head=head or "heads"
+        ):
+            if indicate_current:
+                sc._db_current_indicator = sc.revision in currents
+
+            config.print_stdout(
+                sc.cmd_format(
+                    verbose=verbose,
+                    include_branches=True,
+                    include_doc=True,
+                    include_parents=True,
+                )
+            )
+
+    def _display_history_w_current(config, script, base, head):
+        def _display_current_history(rev, context):
+            if head == "current":
+                _display_history(config, script, base, rev, rev)
+            elif base == "current":
+                _display_history(config, script, rev, head, rev)
+            else:
+                _display_history(config, script, base, head, rev)
+            return []
+
+        with EnvironmentContext(config, script, fn=_display_current_history):
+            script.run_env()
+
+    if base == "current" or head == "current" or environment:
+        _display_history_w_current(config, script, base, head)
+    else:
+        _display_history(config, script, base, head)
+
+
+def heads(
+    config: Config, verbose: bool = False, resolve_dependencies: bool = False
+) -> None:
+    """Show current available heads in the script directory.
+
+    :param config: a :class:`.Config` instance.
+
+    :param verbose: output in verbose mode.
+
+    :param resolve_dependencies: treat dependency versions as down revisions.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+    if resolve_dependencies:
+        heads = script.get_revisions("heads")
+    else:
+        heads = script.get_revisions(script.get_heads())
+
+    for rev in heads:
+        config.print_stdout(
+            rev.cmd_format(
+                verbose, include_branches=True, tree_indicators=False
+            )
+        )
+
+
+def branches(config: Config, verbose: bool = False) -> None:
+    """Show current branch points.
+
+    :param config: a :class:`.Config` instance.
+
+    :param verbose: output in verbose mode.
+
+    """
+    script = ScriptDirectory.from_config(config)
+    for sc in script.walk_revisions():
+        if sc.is_branch_point:
+            config.print_stdout(
+                "%s\n%s\n",
+                sc.cmd_format(verbose, include_branches=True),
+                "\n".join(
+                    "%s -> %s"
+                    % (
+                        " " * len(str(sc.revision)),
+                        rev_obj.cmd_format(
+                            False, include_branches=True, include_doc=verbose
+                        ),
+                    )
+                    for rev_obj in (
+                        script.get_revision(rev) for rev in sc.nextrev
+                    )
+                ),
+            )
+
+
+def current(config: Config, verbose: bool = False) -> None:
+    """Display the current revision for a database.
+
+    :param config: a :class:`.Config` instance.
+
+    :param verbose: output in verbose mode.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    def display_version(rev, context):
+        if verbose:
+            config.print_stdout(
+                "Current revision(s) for %s:",
+                util.obfuscate_url_pw(context.connection.engine.url),
+            )
+        for rev in script.get_all_current(rev):
+            config.print_stdout(rev.cmd_format(verbose))
+
+        return []
+
+    with EnvironmentContext(
+        config, script, fn=display_version, dont_mutate=True
+    ):
+        script.run_env()
+
+
+def stamp(
+    config: Config,
+    revision: _RevIdType,
+    sql: bool = False,
+    tag: Optional[str] = None,
+    purge: bool = False,
+) -> None:
+    """'stamp' the revision table with the given revision; don't
+    run any migrations.
+
+    :param config: a :class:`.Config` instance.
+
+    :param revision: target revision or list of revisions.   May be a list
+     to indicate stamping of multiple branch heads; may be ``"base"``
+     to remove all revisions from the table or ``"heads"`` to stamp the
+     most recent revision(s).
+
+     .. note:: this parameter is called "revisions" in the command line
+        interface.
+
+    :param sql: use ``--sql`` mode
+
+    :param tag: an arbitrary "tag" that can be intercepted by custom
+     ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
+     method.
+
+    :param purge: delete all entries in the version table before stamping.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    if sql:
+        destination_revs = []
+        starting_rev = None
+        for _revision in util.to_list(revision):
+            if ":" in _revision:
+                srev, _revision = _revision.split(":", 2)
+
+                if starting_rev != srev:
+                    if starting_rev is None:
+                        starting_rev = srev
+                    else:
+                        raise util.CommandError(
+                            "Stamp operation with --sql only supports a "
+                            "single starting revision at a time"
+                        )
+            destination_revs.append(_revision)
+    else:
+        destination_revs = util.to_list(revision)
+
+    def do_stamp(rev, context):
+        return script._stamp_revs(util.to_tuple(destination_revs), rev)
+
+    with EnvironmentContext(
+        config,
+        script,
+        fn=do_stamp,
+        as_sql=sql,
+        starting_rev=starting_rev if sql else None,
+        destination_rev=util.to_tuple(destination_revs),
+        tag=tag,
+        purge=purge,
+    ):
+        script.run_env()
+
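+# e.g. marking a freshly created, already-current database without
+# running migrations (sketch):
+#
+#     command.stamp(cfg, "head")
+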
+
+def edit(config: Config, rev: str) -> None:
+    """Edit revision script(s) using $EDITOR.
+
+    :param config: a :class:`.Config` instance.
+
+    :param rev: target revision.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    if rev == "current":
+
+        def edit_current(rev, context):
+            if not rev:
+                raise util.CommandError("No current revisions")
+            for sc in script.get_revisions(rev):
+                util.open_in_editor(sc.path)
+            return []
+
+        with EnvironmentContext(config, script, fn=edit_current):
+            script.run_env()
+    else:
+        revs = script.get_revisions(rev)
+        if not revs:
+            raise util.CommandError(
+                "No revision files indicated by symbol '%s'" % rev
+            )
+        for sc in revs:
+            assert sc
+            util.open_in_editor(sc.path)
+
+
+def ensure_version(config: Config, sql: bool = False) -> None:
+    """Create the alembic version table if it doesn't exist already .
+
+    :param config: a :class:`.Config` instance.
+
+    :param sql: use ``--sql`` mode.
+
+     .. versionadded:: 1.7.6
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    def do_ensure_version(rev, context):
+        context._ensure_version_table()
+        return []
+
+    with EnvironmentContext(
+        config,
+        script,
+        fn=do_ensure_version,
+        as_sql=sql,
+    ):
+        script.run_env()
diff --git a/.venv/lib/python3.12/site-packages/alembic/config.py b/.venv/lib/python3.12/site-packages/alembic/config.py
new file mode 100644
index 00000000..2c52e7cd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/config.py
@@ -0,0 +1,640 @@
+from __future__ import annotations
+
+from argparse import ArgumentParser
+from argparse import Namespace
+from configparser import ConfigParser
+import inspect
+import os
+import sys
+from typing import Any
+from typing import cast
+from typing import Dict
+from typing import Mapping
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import TextIO
+from typing import Union
+
+from typing_extensions import TypedDict
+
+from . import __version__
+from . import command
+from . import util
+from .util import compat
+
+
+class Config:
+    r"""Represent an Alembic configuration.
+
+    Within an ``env.py`` script, this is available
+    via the :attr:`.EnvironmentContext.config` attribute,
+    which in turn is available at ``alembic.context``::
+
+        from alembic import context
+
+        some_param = context.config.get_main_option("my option")
+
+    When invoking Alembic programmatically, a new
+    :class:`.Config` can be created by passing
+    the name of an .ini file to the constructor::
+
+        from alembic.config import Config
+        alembic_cfg = Config("/path/to/yourapp/alembic.ini")
+
+    With a :class:`.Config` object, you can then
+    run Alembic commands programmatically using the directives
+    in :mod:`alembic.command`.
+
+    The :class:`.Config` object can also be constructed without
+    a filename.   Values can be set programmatically, and
+    new sections will be created as needed::
+
+        from alembic.config import Config
+        alembic_cfg = Config()
+        alembic_cfg.set_main_option("script_location", "myapp:migrations")
+        alembic_cfg.set_main_option("sqlalchemy.url", "postgresql://foo/bar")
+        alembic_cfg.set_section_option("mysection", "foo", "bar")
+
+    .. warning::
+
+       When using programmatic configuration, make sure the
+       ``env.py`` file in use is compatible with the target configuration;
+       including that the call to Python ``logging.fileConfig()`` is
+       omitted if the programmatic configuration doesn't actually include
+       logging directives.
+
+    For passing non-string values to environments, such as connections and
+    engines, use the :attr:`.Config.attributes` dictionary::
+
+        with engine.begin() as connection:
+            alembic_cfg.attributes['connection'] = connection
+            command.upgrade(alembic_cfg, "head")
+
+    :param file\_: name of the .ini file to open.
+    :param ini_section: name of the main Alembic section within the
+     .ini file
+    :param output_buffer: optional file-like input buffer which
+     will be passed to the :class:`.MigrationContext` - used to redirect
+     the output of "offline generation" when using Alembic programmatically.
+    :param stdout: buffer where the "print" output of commands will be sent.
+     Defaults to ``sys.stdout``.
+
+    :param config_args: A dictionary of keys and values that will be used
+     for substitution in the alembic config file.  The dictionary as given
+     is **copied** to a new one, stored locally as the attribute
+     ``.config_args``. When the :attr:`.Config.file_config` attribute is
+     first invoked, the replacement variable ``here`` will be added to this
+     dictionary before the dictionary is passed to ``ConfigParser()``
+     to parse the .ini file.
+
+    :param attributes: optional dictionary of arbitrary Python keys/values,
+     which will be populated into the :attr:`.Config.attributes` dictionary.
+
+     .. seealso::
+
+        :ref:`connection_sharing`
+
+    """
+
+    def __init__(
+        self,
+        file_: Union[str, os.PathLike[str], None] = None,
+        ini_section: str = "alembic",
+        output_buffer: Optional[TextIO] = None,
+        stdout: TextIO = sys.stdout,
+        cmd_opts: Optional[Namespace] = None,
+        config_args: Mapping[str, Any] = util.immutabledict(),
+        attributes: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """Construct a new :class:`.Config`"""
+        self.config_file_name = file_
+        self.config_ini_section = ini_section
+        self.output_buffer = output_buffer
+        self.stdout = stdout
+        self.cmd_opts = cmd_opts
+        self.config_args = dict(config_args)
+        if attributes:
+            self.attributes.update(attributes)
+
+    cmd_opts: Optional[Namespace] = None
+    """The command-line options passed to the ``alembic`` script.
+
+    Within an ``env.py`` script this can be accessed via the
+    :attr:`.EnvironmentContext.config` attribute.
+
+    .. seealso::
+
+        :meth:`.EnvironmentContext.get_x_argument`
+
+    """
+
+    config_file_name: Union[str, os.PathLike[str], None] = None
+    """Filesystem path to the .ini file in use."""
+
+    config_ini_section: str = None  # type:ignore[assignment]
+    """Name of the config file section to read basic configuration
+    from.  Defaults to ``alembic``, that is the ``[alembic]`` section
+    of the .ini file.  This value is modified using the ``-n/--name``
+    option to the Alembic runner.
+
+    """
+
+    @util.memoized_property
+    def attributes(self) -> Dict[str, Any]:
+        """A Python dictionary for storage of additional state.
+
+
+        This is a utility dictionary which can include not just strings but
+        engines, connections, schema objects, or anything else.
+        Use this to pass objects into an env.py script, such as passing
+        a :class:`sqlalchemy.engine.base.Connection` when calling
+        commands from :mod:`alembic.command` programmatically.
+
+        .. seealso::
+
+            :ref:`connection_sharing`
+
+            :paramref:`.Config.attributes`
+
+        """
+        return {}
+
+    def print_stdout(self, text: str, *arg: Any) -> None:
+        """Render a message to standard out.
+
+        When :meth:`.Config.print_stdout` is called with additional args,
+        those arguments will be formatted against the provided text;
+        otherwise the provided text is output verbatim.
+
+        This is a no-op when the ``quiet`` messaging option is enabled.
+
+        e.g.::
+
+            >>> config.print_stdout('Some text %s', 'arg')
+            Some text arg
+
+        """
+
+        if arg:
+            output = str(text) % arg
+        else:
+            output = str(text)
+
+        util.write_outstream(self.stdout, output, "\n", **self.messaging_opts)
+
+    @util.memoized_property
+    def file_config(self) -> ConfigParser:
+        """Return the underlying ``ConfigParser`` object.
+
+        Direct access to the .ini file is available here,
+        though the :meth:`.Config.get_section` and
+        :meth:`.Config.get_main_option`
+        methods provide a possibly simpler interface.
+
+        """
+
+        if self.config_file_name:
+            here = os.path.abspath(os.path.dirname(self.config_file_name))
+        else:
+            here = ""
+        self.config_args["here"] = here
+        file_config = ConfigParser(self.config_args)
+        if self.config_file_name:
+            compat.read_config_parser(file_config, [self.config_file_name])
+        else:
+            file_config.add_section(self.config_ini_section)
+        return file_config
+
+    def get_template_directory(self) -> str:
+        """Return the directory where Alembic setup templates are found.
+
+        This method is used by the alembic ``init`` and ``list_templates``
+        commands.
+
+        """
+        import alembic
+
+        package_dir = os.path.abspath(os.path.dirname(alembic.__file__))
+        return os.path.join(package_dir, "templates")
+
+    @overload
+    def get_section(
+        self, name: str, default: None = ...
+    ) -> Optional[Dict[str, str]]: ...
+
+    # "default" here could also be a TypeVar
+    # _MT = TypeVar("_MT", bound=Mapping[str, str]),
+    # however mypy wasn't handling that correctly (pyright was)
+    @overload
+    def get_section(
+        self, name: str, default: Dict[str, str]
+    ) -> Dict[str, str]: ...
+
+    @overload
+    def get_section(
+        self, name: str, default: Mapping[str, str]
+    ) -> Union[Dict[str, str], Mapping[str, str]]: ...
+
+    def get_section(
+        self, name: str, default: Optional[Mapping[str, str]] = None
+    ) -> Optional[Mapping[str, str]]:
+        """Return all the configuration options from a given .ini file section
+        as a dictionary.
+
+        If the given section does not exist, the value of ``default``
+        is returned, which is expected to be a dictionary or other mapping.
+
+        """
+        if not self.file_config.has_section(name):
+            return default
+
+        return dict(self.file_config.items(name))
+
+    def set_main_option(self, name: str, value: str) -> None:
+        """Set an option programmatically within the 'main' section.
+
+        This overrides whatever was in the .ini file.
+
+        :param name: name of the value
+
+        :param value: the value.  Note that this value is passed to
+         ``ConfigParser.set``, which supports variable interpolation using
+         pyformat (e.g. ``%(some_value)s``).   A raw percent sign not part of
+         an interpolation symbol must therefore be escaped, e.g. ``%%``.
+         The given value may refer to another value already in the file
+         using the interpolation format.
+
+        """
+        self.set_section_option(self.config_ini_section, name, value)
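+
+    # e.g. a literal percent sign must be doubled to survive
+    # ConfigParser interpolation (sketch; the URL is a placeholder):
+    #
+    #     cfg.set_main_option(
+    #         "sqlalchemy.url",
+    #         "postgresql://user:p%%40ss@localhost/mydb",
+    #     )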
+
+    def remove_main_option(self, name: str) -> None:
+        self.file_config.remove_option(self.config_ini_section, name)
+
+    def set_section_option(self, section: str, name: str, value: str) -> None:
+        """Set an option programmatically within the given section.
+
+        The section is created if it doesn't exist already.
+        The value here will override whatever was in the .ini
+        file.
+
+        :param section: name of the section
+
+        :param name: name of the value
+
+        :param value: the value.  Note that this value is passed to
+         ``ConfigParser.set``, which supports variable interpolation using
+         pyformat (e.g. ``%(some_value)s``).   A raw percent sign not part of
+         an interpolation symbol must therefore be escaped, e.g. ``%%``.
+         The given value may refer to another value already in the file
+         using the interpolation format.
+
+        """
+
+        if not self.file_config.has_section(section):
+            self.file_config.add_section(section)
+        self.file_config.set(section, name, value)
+
+    def get_section_option(
+        self, section: str, name: str, default: Optional[str] = None
+    ) -> Optional[str]:
+        """Return an option from the given section of the .ini file."""
+        if not self.file_config.has_section(section):
+            raise util.CommandError(
+                "No config file %r found, or file has no "
+                "'[%s]' section" % (self.config_file_name, section)
+            )
+        if self.file_config.has_option(section, name):
+            return self.file_config.get(section, name)
+        else:
+            return default
+
+    @overload
+    def get_main_option(self, name: str, default: str) -> str: ...
+
+    @overload
+    def get_main_option(
+        self, name: str, default: Optional[str] = None
+    ) -> Optional[str]: ...
+
+    def get_main_option(
+        self, name: str, default: Optional[str] = None
+    ) -> Optional[str]:
+        """Return an option from the 'main' section of the .ini file.
+
+        This defaults to being a key from the ``[alembic]``
+        section, unless the ``-n/--name`` flag was used to
+        indicate a different section.
+
+        """
+        return self.get_section_option(self.config_ini_section, name, default)
+
+    @util.memoized_property
+    def messaging_opts(self) -> MessagingOptions:
+        """The messaging options."""
+        return cast(
+            MessagingOptions,
+            util.immutabledict(
+                {"quiet": getattr(self.cmd_opts, "quiet", False)}
+            ),
+        )
+
+
+class MessagingOptions(TypedDict, total=False):
+    quiet: bool
+
+
+class CommandLine:
+    def __init__(self, prog: Optional[str] = None) -> None:
+        self._generate_args(prog)
+
+    def _generate_args(self, prog: Optional[str]) -> None:
+        def add_options(
+            fn: Any, parser: Any, positional: Any, kwargs: Any
+        ) -> None:
+            kwargs_opts = {
+                "template": (
+                    "-t",
+                    "--template",
+                    dict(
+                        default="generic",
+                        type=str,
+                        help="Setup template for use with 'init'",
+                    ),
+                ),
+                "message": (
+                    "-m",
+                    "--message",
+                    dict(
+                        type=str, help="Message string to use with 'revision'"
+                    ),
+                ),
+                "sql": (
+                    "--sql",
+                    dict(
+                        action="store_true",
+                        help="Don't emit SQL to database - dump to "
+                        "standard output/file instead. See docs on "
+                        "offline mode.",
+                    ),
+                ),
+                "tag": (
+                    "--tag",
+                    dict(
+                        type=str,
+                        help="Arbitrary 'tag' name - can be used by "
+                        "custom env.py scripts.",
+                    ),
+                ),
+                "head": (
+                    "--head",
+                    dict(
+                        type=str,
+                        help="Specify head revision or <branchname>@head "
+                        "to base new revision on.",
+                    ),
+                ),
+                "splice": (
+                    "--splice",
+                    dict(
+                        action="store_true",
+                        help="Allow a non-head revision as the "
+                        "'head' to splice onto",
+                    ),
+                ),
+                "depends_on": (
+                    "--depends-on",
+                    dict(
+                        action="append",
+                        help="Specify one or more revision identifiers "
+                        "which this revision should depend on.",
+                    ),
+                ),
+                "rev_id": (
+                    "--rev-id",
+                    dict(
+                        type=str,
+                        help="Specify a hardcoded revision id instead of "
+                        "generating one",
+                    ),
+                ),
+                "version_path": (
+                    "--version-path",
+                    dict(
+                        type=str,
+                        help="Specify specific path from config for "
+                        "version file",
+                    ),
+                ),
+                "branch_label": (
+                    "--branch-label",
+                    dict(
+                        type=str,
+                        help="Specify a branch label to apply to the "
+                        "new revision",
+                    ),
+                ),
+                "verbose": (
+                    "-v",
+                    "--verbose",
+                    dict(action="store_true", help="Use more verbose output"),
+                ),
+                "resolve_dependencies": (
+                    "--resolve-dependencies",
+                    dict(
+                        action="store_true",
+                        help="Treat dependency versions as down revisions",
+                    ),
+                ),
+                "autogenerate": (
+                    "--autogenerate",
+                    dict(
+                        action="store_true",
+                        help="Populate revision script with candidate "
+                        "migration operations, based on comparison "
+                        "of database to model.",
+                    ),
+                ),
+                "rev_range": (
+                    "-r",
+                    "--rev-range",
+                    dict(
+                        action="store",
+                        help="Specify a revision range; "
+                        "format is [start]:[end]",
+                    ),
+                ),
+                "indicate_current": (
+                    "-i",
+                    "--indicate-current",
+                    dict(
+                        action="store_true",
+                        help="Indicate the current revision",
+                    ),
+                ),
+                "purge": (
+                    "--purge",
+                    dict(
+                        action="store_true",
+                        help="Unconditionally erase the version table "
+                        "before stamping",
+                    ),
+                ),
+                "package": (
+                    "--package",
+                    dict(
+                        action="store_true",
+                        help="Write empty __init__.py files to the "
+                        "environment and version locations",
+                    ),
+                ),
+            }
+            positional_help = {
+                "directory": "location of scripts directory",
+                "revision": "revision identifier",
+                "revisions": "one or more revisions, or 'heads' for all heads",
+            }
+            for arg in kwargs:
+                if arg in kwargs_opts:
+                    args = kwargs_opts[arg]
+                    args, kw = args[0:-1], args[-1]
+                    parser.add_argument(*args, **kw)
+
+            for arg in positional:
+                if (
+                    arg == "revisions"
+                    or fn in positional_translations
+                    and positional_translations[fn][arg] == "revisions"
+                ):
+                    subparser.add_argument(
+                        "revisions",
+                        nargs="+",
+                        help=positional_help.get("revisions"),
+                    )
+                else:
+                    subparser.add_argument(arg, help=positional_help.get(arg))
+
+        parser = ArgumentParser(prog=prog)
+
+        parser.add_argument(
+            "--version", action="version", version="%%(prog)s %s" % __version__
+        )
+        parser.add_argument(
+            "-c",
+            "--config",
+            type=str,
+            default=os.environ.get("ALEMBIC_CONFIG", "alembic.ini"),
+            help="Alternate config file; defaults to value of "
+            'ALEMBIC_CONFIG environment variable, or "alembic.ini"',
+        )
+        parser.add_argument(
+            "-n",
+            "--name",
+            type=str,
+            default="alembic",
+            help="Name of section in .ini file to " "use for Alembic config",
+        )
+        parser.add_argument(
+            "-x",
+            action="append",
+            help="Additional arguments consumed by "
+            "custom env.py scripts, e.g. -x "
+            "setting1=somesetting -x setting2=somesetting",
+        )
+        parser.add_argument(
+            "--raiseerr",
+            action="store_true",
+            help="Raise a full stack trace on error",
+        )
+        parser.add_argument(
+            "-q",
+            "--quiet",
+            action="store_true",
+            help="Do not log to std output.",
+        )
+        subparsers = parser.add_subparsers()
+
+        positional_translations: Dict[Any, Any] = {
+            command.stamp: {"revision": "revisions"}
+        }
+
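+        # build one subparser per public function in alembic.command:
+        # positional and keyword parameters are discovered via signature
+        # introspection and mapped to the option definitions above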
+        for fn in [getattr(command, n) for n in dir(command)]:
+            if (
+                inspect.isfunction(fn)
+                and fn.__name__[0] != "_"
+                and fn.__module__ == "alembic.command"
+            ):
+                spec = compat.inspect_getfullargspec(fn)
+                if spec[3] is not None:
+                    positional = spec[0][1 : -len(spec[3])]
+                    kwarg = spec[0][-len(spec[3]) :]
+                else:
+                    positional = spec[0][1:]
+                    kwarg = []
+
+                if fn in positional_translations:
+                    positional = [
+                        positional_translations[fn].get(name, name)
+                        for name in positional
+                    ]
+
+                # parse first line(s) of helptext without a line break
+                help_ = fn.__doc__
+                if help_:
+                    help_text = []
+                    for line in help_.split("\n"):
+                        if not line.strip():
+                            break
+                        else:
+                            help_text.append(line.strip())
+                else:
+                    help_text = []
+                subparser = subparsers.add_parser(
+                    fn.__name__, help=" ".join(help_text)
+                )
+                add_options(fn, subparser, positional, kwarg)
+                subparser.set_defaults(cmd=(fn, positional, kwarg))
+        self.parser = parser
+
+    def run_cmd(self, config: Config, options: Namespace) -> None:
+        fn, positional, kwarg = options.cmd
+
+        try:
+            fn(
+                config,
+                *[getattr(options, k, None) for k in positional],
+                **{k: getattr(options, k, None) for k in kwarg},
+            )
+        except util.CommandError as e:
+            if options.raiseerr:
+                raise
+            else:
+                util.err(str(e), **config.messaging_opts)
+
+    def main(self, argv: Optional[Sequence[str]] = None) -> None:
+        options = self.parser.parse_args(argv)
+        if not hasattr(options, "cmd"):
+            # see http://bugs.python.org/issue9253, argparse
+            # behavior changed incompatibly in py3.3
+            self.parser.error("too few arguments")
+        else:
+            cfg = Config(
+                file_=options.config,
+                ini_section=options.name,
+                cmd_opts=options,
+            )
+            self.run_cmd(cfg, options)
+
+
+def main(
+    argv: Optional[Sequence[str]] = None,
+    prog: Optional[str] = None,
+    **kwargs: Any,
+) -> None:
+    """The console runner function for Alembic."""
+
+    CommandLine(prog=prog).main(argv=argv)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.venv/lib/python3.12/site-packages/alembic/context.py b/.venv/lib/python3.12/site-packages/alembic/context.py
new file mode 100644
index 00000000..758fca87
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/context.py
@@ -0,0 +1,5 @@
+from .runtime.environment import EnvironmentContext
+
+# create proxy functions for
+# each method on the EnvironmentContext class.
+EnvironmentContext.create_module_class_proxy(globals(), locals())
diff --git a/.venv/lib/python3.12/site-packages/alembic/context.pyi b/.venv/lib/python3.12/site-packages/alembic/context.pyi
new file mode 100644
index 00000000..9117c31e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/context.pyi
@@ -0,0 +1,856 @@
+# ### this file stubs are generated by tools/write_pyi.py - do not edit ###
+# ### imports are manually managed
+from __future__ import annotations
+
+from typing import Any
+from typing import Callable
+from typing import Collection
+from typing import Dict
+from typing import Iterable
+from typing import List
+from typing import Literal
+from typing import Mapping
+from typing import MutableMapping
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import TextIO
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from typing_extensions import ContextManager
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine.base import Connection
+    from sqlalchemy.engine.url import URL
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import FetchedValue
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .autogenerate.api import AutogenContext
+    from .config import Config
+    from .operations.ops import MigrationScript
+    from .runtime.migration import _ProxyTransaction
+    from .runtime.migration import MigrationContext
+    from .runtime.migration import MigrationInfo
+    from .script import ScriptDirectory
+
+### end imports ###
+
+def begin_transaction() -> (
+    Union[_ProxyTransaction, ContextManager[None, Optional[bool]]]
+):
+    """Return a context manager that will
+    enclose an operation within a "transaction",
+    as defined by the environment's offline
+    and transactional DDL settings.
+
+    e.g.::
+
+        with context.begin_transaction():
+            context.run_migrations()
+
+    :meth:`.begin_transaction` is intended to
+    "do the right thing" regardless of
+    calling context:
+
+    * If :meth:`.is_transactional_ddl` is ``False``,
+      returns a "do nothing" context manager
+      which otherwise produces no transactional
+      state or directives.
+    * If :meth:`.is_offline_mode` is ``True``,
+      returns a context manager that will
+      invoke the :meth:`.DefaultImpl.emit_begin`
+      and :meth:`.DefaultImpl.emit_commit`
+      methods, which will produce the string
+      directives ``BEGIN`` and ``COMMIT`` on
+      the output stream, as rendered by the
+      target backend (e.g. SQL Server would
+      emit ``BEGIN TRANSACTION``).
+    * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin`
+      on the current online connection, which
+      returns a :class:`sqlalchemy.engine.Transaction`
+      object.  This object demarcates a real
+      transaction and is itself a context manager,
+      which will roll back if an exception
+      is raised.
+
+    Note that a custom ``env.py`` script which
+    has more specific transactional needs can of course
+    manipulate the :class:`~sqlalchemy.engine.Connection`
+    directly to produce transactional state in "online"
+    mode.
+
+    """
+
+config: Config
+
+def configure(
+    connection: Optional[Connection] = None,
+    url: Union[str, URL, None] = None,
+    dialect_name: Optional[str] = None,
+    dialect_opts: Optional[Dict[str, Any]] = None,
+    transactional_ddl: Optional[bool] = None,
+    transaction_per_migration: bool = False,
+    output_buffer: Optional[TextIO] = None,
+    starting_rev: Optional[str] = None,
+    tag: Optional[str] = None,
+    template_args: Optional[Dict[str, Any]] = None,
+    render_as_batch: bool = False,
+    target_metadata: Union[MetaData, Sequence[MetaData], None] = None,
+    include_name: Optional[
+        Callable[
+            [
+                Optional[str],
+                Literal[
+                    "schema",
+                    "table",
+                    "column",
+                    "index",
+                    "unique_constraint",
+                    "foreign_key_constraint",
+                ],
+                MutableMapping[
+                    Literal[
+                        "schema_name",
+                        "table_name",
+                        "schema_qualified_table_name",
+                    ],
+                    Optional[str],
+                ],
+            ],
+            bool,
+        ]
+    ] = None,
+    include_object: Optional[
+        Callable[
+            [
+                SchemaItem,
+                Optional[str],
+                Literal[
+                    "schema",
+                    "table",
+                    "column",
+                    "index",
+                    "unique_constraint",
+                    "foreign_key_constraint",
+                ],
+                bool,
+                Optional[SchemaItem],
+            ],
+            bool,
+        ]
+    ] = None,
+    include_schemas: bool = False,
+    process_revision_directives: Optional[
+        Callable[
+            [
+                MigrationContext,
+                Union[str, Iterable[Optional[str]], Iterable[str]],
+                List[MigrationScript],
+            ],
+            None,
+        ]
+    ] = None,
+    compare_type: Union[
+        bool,
+        Callable[
+            [
+                MigrationContext,
+                Column[Any],
+                Column[Any],
+                TypeEngine[Any],
+                TypeEngine[Any],
+            ],
+            Optional[bool],
+        ],
+    ] = True,
+    compare_server_default: Union[
+        bool,
+        Callable[
+            [
+                MigrationContext,
+                Column[Any],
+                Column[Any],
+                Optional[str],
+                Optional[FetchedValue],
+                Optional[str],
+            ],
+            Optional[bool],
+        ],
+    ] = False,
+    render_item: Optional[
+        Callable[[str, Any, AutogenContext], Union[str, Literal[False]]]
+    ] = None,
+    literal_binds: bool = False,
+    upgrade_token: str = "upgrades",
+    downgrade_token: str = "downgrades",
+    alembic_module_prefix: str = "op.",
+    sqlalchemy_module_prefix: str = "sa.",
+    user_module_prefix: Optional[str] = None,
+    on_version_apply: Optional[
+        Callable[
+            [
+                MigrationContext,
+                MigrationInfo,
+                Collection[Any],
+                Mapping[str, Any],
+            ],
+            None,
+        ]
+    ] = None,
+    **kw: Any,
+) -> None:
+    """Configure a :class:`.MigrationContext` within this
+    :class:`.EnvironmentContext` which will provide database
+    connectivity and other configuration to a series of
+    migration scripts.
+
+    Many methods on :class:`.EnvironmentContext` require that
+    this method has been called in order to function, as they
+    ultimately need to have database access or at least access
+    to the dialect in use.  Those which do are documented as such.
+
+    The important thing needed by :meth:`.configure` is a
+    means to determine what kind of database dialect is in use.
+    An actual connection to that database is needed only if
+    the :class:`.MigrationContext` is to be used in
+    "online" mode.
+
+    If the :meth:`.is_offline_mode` function returns ``True``,
+    then no connection is needed here.  Otherwise, the
+    ``connection`` parameter should be present as an
+    instance of :class:`sqlalchemy.engine.Connection`.
+
+    This function is typically called from the ``env.py``
+    script within a migration environment.  It can be called
+    multiple times for an invocation.  The most recent
+    :class:`~sqlalchemy.engine.Connection`
+    for which it was called is the one that will be operated upon
+    by the next call to :meth:`.run_migrations`.
+
+    General parameters:
+
+    :param connection: a :class:`~sqlalchemy.engine.Connection`
+     to use for SQL execution in "online" mode.  When present, it is
+     also used to determine the type of dialect in use.
+    :param url: a string database url, or a
+     :class:`sqlalchemy.engine.url.URL` object.
+     The type of dialect to be used will be derived from this if
+     ``connection`` is not passed.
+    :param dialect_name: string name of a dialect, such as
+     "postgresql", "mssql", etc.
+     The type of dialect to be used will be derived from this if
+     ``connection`` and ``url`` are not passed.
+    :param dialect_opts: dictionary of options to be passed to dialect
+     constructor.
+    :param transactional_ddl: Force the usage of "transactional"
+     DDL on or off;
+     this otherwise defaults to whether or not the dialect in
+     use supports it.
+    :param transaction_per_migration: if True, nest each migration script
+     in a transaction rather than the full series of migrations to
+     run.
+    :param output_buffer: a file-like object that will be used for
+     textual output when the ``--sql`` option is used to generate SQL
+     scripts.  Defaults to ``sys.stdout`` if not passed here and also
+     not present on the :class:`.Config` object.  The value here
+     overrides that of the :class:`.Config` object.
+    :param output_encoding: when using ``--sql`` to generate SQL
+     scripts, apply this encoding to the string output.
+    :param literal_binds: when using ``--sql`` to generate SQL
+     scripts, pass through the ``literal_binds`` flag to the compiler
+     so that any literal values that would ordinarily be bound
+     parameters are converted to plain strings.
+
+     .. warning:: Dialects can typically only handle simple datatypes
+        like strings and numbers for auto-literal generation.  Datatypes
+        like dates, intervals, and others may still require manual
+        formatting, typically using :meth:`.Operations.inline_literal`.
+
+     .. note:: the ``literal_binds`` flag is ignored on SQLAlchemy
+        versions prior to 0.8 where this feature is not supported.
+
+     .. seealso::
+
+        :meth:`.Operations.inline_literal`
+
+    :param starting_rev: Override the "starting revision" argument
+     when using ``--sql`` mode.
+    :param tag: a string tag for usage by custom ``env.py`` scripts.
+     Set via the ``--tag`` option, can be overridden here.
+    :param template_args: dictionary of template arguments which
+     will be added to the template argument environment when
+     running the "revision" command.   Note that the script environment
+     is only run within the "revision" command if the ``--autogenerate``
+     option is used, or if the option ``revision_environment=true``
+     is present in the alembic.ini file.
+
+    :param version_table: The name of the Alembic version table.
+     The default is ``'alembic_version'``.
+    :param version_table_schema: Optional schema to place version
+     table within.
+    :param version_table_pk: boolean, whether the Alembic version table
+     should use a primary key constraint for the "value" column; this
+     only takes effect when the table is first created.
+     Defaults to True; setting to False should not be necessary and is
+     here for backwards compatibility reasons.
+    :param on_version_apply: a callable or collection of callables to be
+        run for each migration step.
+        The callables will be run in the order they are given, once for
+        each migration step, after the respective operation has been
+        applied but before its transaction is finalized.
+        Each callable accepts no positional arguments and the following
+        keyword arguments:
+
+        * ``ctx``: the :class:`.MigrationContext` running the migration,
+        * ``step``: a :class:`.MigrationInfo` representing the
+          step currently being applied,
+        * ``heads``: a collection of version strings representing the
+          current heads,
+        * ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`.
+
+    Parameters specific to the autogenerate feature, when
+    ``alembic revision`` is run with the ``--autogenerate`` feature:
+
+    :param target_metadata: a :class:`sqlalchemy.schema.MetaData`
+     object, or a sequence of :class:`~sqlalchemy.schema.MetaData`
+     objects, that will be consulted during autogeneration.
+     The tables present in each :class:`~sqlalchemy.schema.MetaData`
+     will be compared against
+     what is locally available on the target
+     :class:`~sqlalchemy.engine.Connection`
+     to produce candidate upgrade/downgrade operations.
+    :param compare_type: Indicates type comparison behavior during an
+     autogenerate operation.  Defaults to ``True``, turning on type
+     comparison, which has good accuracy on most backends.  See
+     :ref:`compare_types` for an example as well as information on other
+     type comparison options.  Set to ``False`` to disable type
+     comparison.  A callable can also be passed to provide custom type
+     comparison; see :ref:`compare_types` for additional details.
+
+     .. versionchanged:: 1.12.0 The default value of
+        :paramref:`.EnvironmentContext.configure.compare_type` has been
+        changed to ``True``.
+
+     .. seealso::
+
+        :ref:`compare_types`
+
+        :paramref:`.EnvironmentContext.configure.compare_server_default`
+
+    :param compare_server_default: Indicates server default comparison
+     behavior during an autogenerate operation.  Defaults to ``False``,
+     which disables server default comparison.  Set to ``True`` to turn
+     on server default comparison, which has varied accuracy depending
+     on the backend.
+
+     To customize server default comparison behavior, a callable may
+     be specified which can filter server default comparisons during an
+     autogenerate operation.  The format of this callable is::
+
+        def my_compare_server_default(context, inspected_column,
+                    metadata_column, inspected_default, metadata_default,
+                    rendered_metadata_default):
+            # return True if the defaults are different,
+            # False if not, or None to allow the default implementation
+            # to compare these defaults
+            return None
+
+        context.configure(
+            # ...
+            compare_server_default = my_compare_server_default
+        )
+
+     ``inspected_column`` is a dictionary structure as returned by
+     :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
+     ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
+     the local model environment.
+
+     A return value of ``None`` indicates to allow default server default
+     comparison to proceed.  Note that some backends such as PostgreSQL
+     actually execute the two defaults on the database side to compare
+     for equivalence.
+
+     .. seealso::
+
+        :paramref:`.EnvironmentContext.configure.compare_type`
+
+    :param include_name: A callable function which is given
+     the chance to return ``True`` or ``False`` for any database reflected
+     object based on its name, including database schema names when
+     the :paramref:`.EnvironmentContext.configure.include_schemas` flag
+     is set to ``True``.
+
+     The function accepts the following positional arguments:
+
+     * ``name``: the name of the object, such as schema name or table name.
+       Will be ``None`` when indicating the default schema name of the
+       database connection.
+     * ``type``: a string describing the type of object; currently
+       ``"schema"``, ``"table"``, ``"column"``, ``"index"``,
+       ``"unique_constraint"``, or ``"foreign_key_constraint"``
+     * ``parent_names``: a dictionary of "parent" object names, that are
+       relative to the name being given.  Keys in this dictionary may
+       include:  ``"schema_name"``, ``"table_name"`` or
+       ``"schema_qualified_table_name"``.
+
+     E.g.::
+
+        def include_name(name, type_, parent_names):
+            if type_ == "schema":
+                return name in ["schema_one", "schema_two"]
+            else:
+                return True
+
+        context.configure(
+            # ...
+            include_schemas = True,
+            include_name = include_name
+        )
+
+     .. seealso::
+
+        :ref:`autogenerate_include_hooks`
+
+        :paramref:`.EnvironmentContext.configure.include_object`
+
+        :paramref:`.EnvironmentContext.configure.include_schemas`
+
+
+    :param include_object: A callable function which is given
+     the chance to return ``True`` or ``False`` for any object,
+     indicating if the given object should be considered in the
+     autogenerate sweep.
+
+     The function accepts the following positional arguments:
+
+     * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such
+       as a :class:`~sqlalchemy.schema.Table`,
+       :class:`~sqlalchemy.schema.Column`,
+       :class:`~sqlalchemy.schema.Index`,
+       :class:`~sqlalchemy.schema.UniqueConstraint`,
+       or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object
+     * ``name``: the name of the object. This is typically available
+       via ``object.name``.
+     * ``type``: a string describing the type of object; currently
+       ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``,
+       or ``"foreign_key_constraint"``
+     * ``reflected``: ``True`` if the given object was produced based on
+       table reflection, ``False`` if it's from a local :class:`.MetaData`
+       object.
+     * ``compare_to``: the object being compared against, if available,
+       else ``None``.
+
+     E.g.::
+
+        def include_object(object, name, type_, reflected, compare_to):
+            if (type_ == "column" and
+                not reflected and
+                object.info.get("skip_autogenerate", False)):
+                return False
+            else:
+                return True
+
+        context.configure(
+            # ...
+            include_object = include_object
+        )
+
+     For the use case of omitting specific schemas from a target database
+     when :paramref:`.EnvironmentContext.configure.include_schemas` is
+     set to ``True``, the :attr:`~sqlalchemy.schema.Table.schema`
+     attribute can be checked for each :class:`~sqlalchemy.schema.Table`
+     object passed to the hook, however it is much more efficient
+     to filter on schemas before reflection of objects takes place
+     using the :paramref:`.EnvironmentContext.configure.include_name`
+     hook.
+
+     .. seealso::
+
+        :ref:`autogenerate_include_hooks`
+
+        :paramref:`.EnvironmentContext.configure.include_name`
+
+        :paramref:`.EnvironmentContext.configure.include_schemas`
+
+    :param render_as_batch: if True, commands which alter elements
+     within a table will be placed under a ``with batch_alter_table():``
+     directive, so that batch migrations will take place.
+
+     .. seealso::
+
+        :ref:`batch_migrations`
+
+    :param include_schemas: If True, autogenerate will scan across
+     all schemas located by the SQLAlchemy
+     :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names`
+     method, and include all differences in tables found across all
+     those schemas.  When using this option, you may want to also
+     use the :paramref:`.EnvironmentContext.configure.include_name`
+     parameter to specify a callable which
+     can filter the tables/schemas that get included.
+
+     .. seealso::
+
+        :ref:`autogenerate_include_hooks`
+
+        :paramref:`.EnvironmentContext.configure.include_name`
+
+        :paramref:`.EnvironmentContext.configure.include_object`
+
+    :param render_item: Callable that can be used to override how
+     any schema item, i.e. column, constraint, type,
+     etc., is rendered for autogenerate.  The callable receives a
+     string describing the type of object, the object, and
+     the autogen context.  If it returns False, the
+     default rendering method will be used.  If it returns None,
+     the item will not be rendered in the context of a Table
+     construct; that is, it can be used to skip columns or constraints
+     within op.create_table()::
+
+        def my_render_column(type_, col, autogen_context):
+            if type_ == "column" and isinstance(col, MySpecialCol):
+                return repr(col)
+            else:
+                return False
+
+        context.configure(
+            # ...
+            render_item = my_render_column
+        )
+
+     Available values for the type string include: ``"column"``,
+     ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``,
+     ``"type"``, ``"server_default"``.
+
+     .. seealso::
+
+        :ref:`autogen_render_types`
+
+    :param upgrade_token: When autogenerate completes, the text of the
+     candidate upgrade operations will be present in this template
+     variable when ``script.py.mako`` is rendered.  Defaults to
+     ``upgrades``.
+    :param downgrade_token: When autogenerate completes, the text of the
+     candidate downgrade operations will be present in this
+     template variable when ``script.py.mako`` is rendered.  Defaults to
+     ``downgrades``.
+
+    :param alembic_module_prefix: When autogenerate refers to Alembic
+     :mod:`alembic.operations` constructs, this prefix will be used
+     (e.g. ``op.create_table``).  Defaults to "``op.``".
+     Can be ``None`` to indicate no prefix.
+
+    :param sqlalchemy_module_prefix: When autogenerate refers to
+     SQLAlchemy :class:`~sqlalchemy.schema.Column` or type classes,
+     this prefix will be used
+     (e.g. ``sa.Column("somename", sa.Integer)``).  Defaults to "``sa.``".
+     Can be ``None`` to indicate no prefix.
+     Note that when dialect-specific types are rendered, autogenerate
+     will render them using the dialect module name, e.g. ``mssql.BIT()``,
+     ``postgresql.UUID()``.
+
+    :param user_module_prefix: When autogenerate refers to a SQLAlchemy
+     type (e.g. :class:`.TypeEngine`) where the module name is not
+     under the ``sqlalchemy`` namespace, this prefix will be used
+     within autogenerate.  If left at its default of
+     ``None``, the ``__module__`` attribute of the type is used to
+     render the import module.   It's a good practice to set this
+     and to have all custom types be available from a fixed module space,
+     in order to future-proof migration files against reorganizations
+     in modules.
+
+     .. seealso::
+
+        :ref:`autogen_module_prefix`
+
+    :param process_revision_directives: a callable function that will
+     be passed a structure representing the end result of an autogenerate
+     or plain "revision" operation, which can be manipulated to affect
+     how the ``alembic revision`` command ultimately outputs new
+     revision scripts.   The structure of the callable is::
+
+        def process_revision_directives(context, revision, directives):
+            pass
+
+     The ``directives`` parameter is a Python list containing
+     a single :class:`.MigrationScript` directive, which represents
+     the revision file to be generated.    This list as well as its
+     contents may be freely modified to produce any set of commands.
+     The section :ref:`customizing_revision` shows an example of
+     doing this.  The ``context`` parameter is the
+     :class:`.MigrationContext` in use,
+     and ``revision`` is a tuple of revision identifiers representing the
+     current revision of the database.
+
+     The callable is invoked at all times when the ``--autogenerate``
+     option is passed to ``alembic revision``.  If ``--autogenerate``
+     is not passed, the callable is invoked only if the
+     ``revision_environment`` variable is set to True in the Alembic
+     configuration, in which case the given ``directives`` collection
+     will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps`
+     collections for ``.upgrade_ops`` and ``.downgrade_ops``.  The
+     ``--autogenerate`` option itself can be inferred by inspecting
+     ``context.config.cmd_opts.autogenerate``.
+
+     The callable function may optionally be an instance of
+     a :class:`.Rewriter` object.  This is a helper object that
+     assists in the production of autogenerate-stream rewriter functions.
+
+     .. seealso::
+
+         :ref:`customizing_revision`
+
+         :ref:`autogen_rewriter`
+
+         :paramref:`.command.revision.process_revision_directives`
+
+    Parameters specific to individual backends:
+
+    :param mssql_batch_separator: The "batch separator" which will
+     be placed between each statement when generating offline SQL Server
+     migrations.  Defaults to ``GO``.  Note this is in addition to the
+     customary semicolon ``;`` at the end of each statement; SQL Server
+     considers the "batch separator" to denote the end of an
+     individual statement execution, and cannot group certain
+     dependent operations in one step.
+    :param oracle_batch_separator: The "batch separator" which will
+     be placed between each statement when generating offline
+     Oracle migrations.  Defaults to ``/``.  Oracle doesn't add a
+     semicolon between statements like most other backends.
+
+    """
+
+def execute(
+    sql: Union[Executable, str],
+    execution_options: Optional[Dict[str, Any]] = None,
+) -> None:
+    """Execute the given SQL using the current change context.
+
+    The behavior of :meth:`.execute` is the same
+    as that of :meth:`.Operations.execute`.  Please see that
+    function's documentation for full detail including
+    caveats and limitations.
+
+    This function requires that a :class:`.MigrationContext` has
+    first been made available via :meth:`.configure`.
+
+    """
+
+def get_bind() -> Connection:
+    """Return the current 'bind'.
+
+    In "online" mode, this is the
+    :class:`sqlalchemy.engine.Connection` currently being used
+    to emit SQL to the database.
+
+    This function requires that a :class:`.MigrationContext`
+    has first been made available via :meth:`.configure`.
+
+    """
+
+def get_context() -> MigrationContext:
+    """Return the current :class:`.MigrationContext` object.
+
+    If :meth:`.EnvironmentContext.configure` has not been
+    called yet, raises an exception.
+
+    """
+
+def get_head_revision() -> Union[str, Tuple[str, ...], None]:
+    """Return the hex identifier of the 'head' script revision.
+
+    If the script directory has multiple heads, this
+    method raises a :class:`.CommandError`;
+    :meth:`.EnvironmentContext.get_head_revisions` should be preferred.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    .. seealso:: :meth:`.EnvironmentContext.get_head_revisions`
+
+    """
+
+def get_head_revisions() -> Union[str, Tuple[str, ...], None]:
+    """Return the hex identifier of the 'heads' script revision(s).
+
+    This returns a tuple containing the version numbers of all
+    heads in the script directory.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    """
+
+def get_revision_argument() -> Union[str, Tuple[str, ...], None]:
+    """Get the 'destination' revision argument.
+
+    This is typically the argument passed to the
+    ``upgrade`` or ``downgrade`` command.
+
+    If it was specified as ``head``, the actual
+    version number is returned; if specified
+    as ``base``, ``None`` is returned.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    """
+
+def get_starting_revision_argument() -> Union[str, Tuple[str, ...], None]:
+    """Return the 'starting revision' argument,
+    if the revision was passed using ``start:end``.
+
+    This is only meaningful in "offline" mode.
+    Returns ``None`` if no value is available
+    or was configured.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    """
+
+def get_tag_argument() -> Optional[str]:
+    """Return the value passed for the ``--tag`` argument, if any.
+
+    The ``--tag`` argument is not used directly by Alembic,
+    but is available for custom ``env.py`` configurations that
+    wish to use it; particularly for offline generation scripts
+    that wish to generate tagged filenames.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    .. seealso::
+
+        :meth:`.EnvironmentContext.get_x_argument` - a newer and more
+        open ended system of extending ``env.py`` scripts via the command
+        line.
+
+    """
+
+@overload
+def get_x_argument(as_dictionary: Literal[False]) -> List[str]: ...
+@overload
+def get_x_argument(as_dictionary: Literal[True]) -> Dict[str, str]: ...
+@overload
+def get_x_argument(
+    as_dictionary: bool = ...,
+) -> Union[List[str], Dict[str, str]]:
+    """Return the value(s) passed for the ``-x`` argument, if any.
+
+    The ``-x`` argument is an open ended flag that allows any user-defined
+    value or values to be passed on the command line, then available
+    here for consumption by a custom ``env.py`` script.
+
+    The return value is a list, returned directly from the ``argparse``
+    structure.  If ``as_dictionary=True`` is passed, the ``x`` arguments
+    are parsed using ``key=value`` format into a dictionary that is
+    then returned. If there is no ``=`` in the argument, the value is an
+    empty string.
+
+    .. versionchanged:: 1.13.1 Support ``as_dictionary=True`` when
+       arguments are passed without the ``=`` symbol.
+
+    For example, to support passing a database URL on the command line,
+    the standard ``env.py`` script can be modified like this::
+
+        cmd_line_url = context.get_x_argument(
+            as_dictionary=True).get('dbname')
+        if cmd_line_url:
+            engine = create_engine(cmd_line_url)
+        else:
+            engine = engine_from_config(
+                    config.get_section(config.config_ini_section),
+                    prefix='sqlalchemy.',
+                    poolclass=pool.NullPool)
+
+    This then takes effect by running the ``alembic`` script as::
+
+        alembic -x dbname=postgresql://user:pass@host/dbname upgrade head
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    .. seealso::
+
+        :meth:`.EnvironmentContext.get_tag_argument`
+
+        :attr:`.Config.cmd_opts`
+
+    """
+
+def is_offline_mode() -> bool:
+    """Return True if the current migrations environment
+    is running in "offline mode".
+
+    This is ``True`` or ``False`` depending
+    on the ``--sql`` flag passed.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    """
+
+def is_transactional_ddl() -> bool:
+    """Return True if the context is configured to expect a
+    transactional DDL capable backend.
+
+    This defaults to the type of database in use, and
+    can be overridden by the ``transactional_ddl`` argument
+    to :meth:`.configure`.
+
+    This function requires that a :class:`.MigrationContext`
+    has first been made available via :meth:`.configure`.
+
+    """
+
+def run_migrations(**kw: Any) -> None:
+    """Run migrations as determined by the current command line
+    configuration
+    as well as versioning information present (or not) in the current
+    database connection (if one is present).
+
+    The function accepts optional ``**kw`` arguments.   If these are
+    passed, they are sent directly to the ``upgrade()`` and
+    ``downgrade()``
+    functions within each target revision file.   By modifying the
+    ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()``
+    functions accept arguments, parameters can be passed here so that
+    contextual information, usually information to identify a particular
+    database in use, can be passed from a custom ``env.py`` script
+    to the migration functions.
+
+    This function requires that a :class:`.MigrationContext` has
+    first been made available via :meth:`.configure`.
+
+    """
+
+script: ScriptDirectory
+
+def static_output(text: str) -> None:
+    """Emit text directly to the "offline" SQL stream.
+
+    Typically this is for emitting comments that
+    start with --.  The statement is not treated
+    as a SQL execution, no ; or batch separator
+    is added, etc.
+
+    """
diff --git a/.venv/lib/python3.12/site-packages/alembic/ddl/__init__.py b/.venv/lib/python3.12/site-packages/alembic/ddl/__init__.py
new file mode 100644
index 00000000..f2f72b3d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/ddl/__init__.py
@@ -0,0 +1,6 @@
+from . import mssql
+from . import mysql
+from . import oracle
+from . import postgresql
+from . import sqlite
+from .impl import DefaultImpl as DefaultImpl
diff --git a/.venv/lib/python3.12/site-packages/alembic/ddl/_autogen.py b/.venv/lib/python3.12/site-packages/alembic/ddl/_autogen.py
new file mode 100644
index 00000000..74715b18
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/ddl/_autogen.py
@@ -0,0 +1,329 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+from typing import Any
+from typing import ClassVar
+from typing import Dict
+from typing import Generic
+from typing import NamedTuple
+from typing import Optional
+from typing import Sequence
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy.sql.schema import Constraint
+from sqlalchemy.sql.schema import ForeignKeyConstraint
+from sqlalchemy.sql.schema import Index
+from sqlalchemy.sql.schema import UniqueConstraint
+from typing_extensions import TypeGuard
+
+from .. import util
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from alembic.autogenerate.api import AutogenContext
+    from alembic.ddl.impl import DefaultImpl
+
+CompareConstraintType = Union[Constraint, Index]
+
+_C = TypeVar("_C", bound=CompareConstraintType)
+
+_clsreg: Dict[str, Type[_constraint_sig]] = {}
+
+
+class ComparisonResult(NamedTuple):
+    status: Literal["equal", "different", "skip"]
+    message: str
+
+    @property
+    def is_equal(self) -> bool:
+        return self.status == "equal"
+
+    @property
+    def is_different(self) -> bool:
+        return self.status == "different"
+
+    @property
+    def is_skip(self) -> bool:
+        return self.status == "skip"
+
+    @classmethod
+    def Equal(cls) -> ComparisonResult:
+        """the constraints are equal."""
+        return cls("equal", "The two constraints are equal")
+
+    @classmethod
+    def Different(cls, reason: Union[str, Sequence[str]]) -> ComparisonResult:
+        """the constraints are different for the provided reason(s)."""
+        return cls("different", ", ".join(util.to_list(reason)))
+
+    @classmethod
+    def Skip(cls, reason: Union[str, Sequence[str]]) -> ComparisonResult:
+        """the constraint cannot be compared for the provided reason(s).
+
+        The message is logged, but the constraints will be otherwise
+        considered equal, meaning that no migration command will be
+        generated.
+        """
+        return cls("skip", ", ".join(util.to_list(reason)))
+
+
+class _constraint_sig(Generic[_C]):
+    const: _C
+
+    _sig: Tuple[Any, ...]
+    name: Optional[sqla_compat._ConstraintNameDefined]
+
+    impl: DefaultImpl
+
+    _is_index: ClassVar[bool] = False
+    _is_fk: ClassVar[bool] = False
+    _is_uq: ClassVar[bool] = False
+
+    _is_metadata: bool
+
+    def __init_subclass__(cls) -> None:
+        cls._register()
+
+    @classmethod
+    def _register(cls):
+        raise NotImplementedError()
+
+    def __init__(
+        self, is_metadata: bool, impl: DefaultImpl, const: _C
+    ) -> None:
+        raise NotImplementedError()
+
+    def compare_to_reflected(
+        self, other: _constraint_sig[Any]
+    ) -> ComparisonResult:
+        assert self.impl is other.impl
+        assert self._is_metadata
+        assert not other._is_metadata
+
+        return self._compare_to_reflected(other)
+
+    def _compare_to_reflected(
+        self, other: _constraint_sig[_C]
+    ) -> ComparisonResult:
+        raise NotImplementedError()
+
+    @classmethod
+    def from_constraint(
+        cls, is_metadata: bool, impl: DefaultImpl, constraint: _C
+    ) -> _constraint_sig[_C]:
+        # these could be cached by constraint/impl, however, if the
+        # constraint is modified in place, then the sig is wrong.  the mysql
+        # impl currently does this, and even if we fixed that, we can't be
+        # sure someone else might not do it too, so play it safe.
+        sig = _clsreg[constraint.__visit_name__](is_metadata, impl, constraint)
+        return sig
+
+    def md_name_to_sql_name(self, context: AutogenContext) -> Optional[str]:
+        return sqla_compat._get_constraint_final_name(
+            self.const, context.dialect
+        )
+
+    @util.memoized_property
+    def is_named(self):
+        return sqla_compat._constraint_is_named(self.const, self.impl.dialect)
+
+    @util.memoized_property
+    def unnamed(self) -> Tuple[Any, ...]:
+        return self._sig
+
+    @util.memoized_property
+    def unnamed_no_options(self) -> Tuple[Any, ...]:
+        raise NotImplementedError()
+
+    @util.memoized_property
+    def _full_sig(self) -> Tuple[Any, ...]:
+        return (self.name,) + self.unnamed
+
+    def __eq__(self, other) -> bool:
+        return self._full_sig == other._full_sig
+
+    def __ne__(self, other) -> bool:
+        return self._full_sig != other._full_sig
+
+    def __hash__(self) -> int:
+        return hash(self._full_sig)
+
+
+class _uq_constraint_sig(_constraint_sig[UniqueConstraint]):
+    _is_uq = True
+
+    @classmethod
+    def _register(cls) -> None:
+        _clsreg["unique_constraint"] = cls
+
+    is_unique = True
+
+    def __init__(
+        self,
+        is_metadata: bool,
+        impl: DefaultImpl,
+        const: UniqueConstraint,
+    ) -> None:
+        self.impl = impl
+        self.const = const
+        self.name = sqla_compat.constraint_name_or_none(const.name)
+        self._sig = tuple(sorted([col.name for col in const.columns]))
+        self._is_metadata = is_metadata
+
+    @property
+    def column_names(self) -> Tuple[str, ...]:
+        return tuple([col.name for col in self.const.columns])
+
+    def _compare_to_reflected(
+        self, other: _constraint_sig[_C]
+    ) -> ComparisonResult:
+        assert self._is_metadata
+        metadata_obj = self
+        conn_obj = other
+
+        assert is_uq_sig(conn_obj)
+        return self.impl.compare_unique_constraint(
+            metadata_obj.const, conn_obj.const
+        )
+
+
+class _ix_constraint_sig(_constraint_sig[Index]):
+    _is_index = True
+
+    name: sqla_compat._ConstraintName
+
+    @classmethod
+    def _register(cls) -> None:
+        _clsreg["index"] = cls
+
+    def __init__(
+        self, is_metadata: bool, impl: DefaultImpl, const: Index
+    ) -> None:
+        self.impl = impl
+        self.const = const
+        self.name = const.name
+        self.is_unique = bool(const.unique)
+        self._is_metadata = is_metadata
+
+    def _compare_to_reflected(
+        self, other: _constraint_sig[_C]
+    ) -> ComparisonResult:
+        assert self._is_metadata
+        metadata_obj = self
+        conn_obj = other
+
+        assert is_index_sig(conn_obj)
+        return self.impl.compare_indexes(metadata_obj.const, conn_obj.const)
+
+    @util.memoized_property
+    def has_expressions(self):
+        return sqla_compat.is_expression_index(self.const)
+
+    @util.memoized_property
+    def column_names(self) -> Tuple[str, ...]:
+        return tuple([col.name for col in self.const.columns])
+
+    @util.memoized_property
+    def column_names_optional(self) -> Tuple[Optional[str], ...]:
+        return tuple(
+            [getattr(col, "name", None) for col in self.const.expressions]
+        )
+
+    @util.memoized_property
+    def is_named(self):
+        return True
+
+    @util.memoized_property
+    def unnamed(self):
+        return (self.is_unique,) + self.column_names_optional
+
+
+class _fk_constraint_sig(_constraint_sig[ForeignKeyConstraint]):
+    _is_fk = True
+
+    @classmethod
+    def _register(cls) -> None:
+        _clsreg["foreign_key_constraint"] = cls
+
+    def __init__(
+        self,
+        is_metadata: bool,
+        impl: DefaultImpl,
+        const: ForeignKeyConstraint,
+    ) -> None:
+        self._is_metadata = is_metadata
+
+        self.impl = impl
+        self.const = const
+
+        self.name = sqla_compat.constraint_name_or_none(const.name)
+
+        (
+            self.source_schema,
+            self.source_table,
+            self.source_columns,
+            self.target_schema,
+            self.target_table,
+            self.target_columns,
+            onupdate,
+            ondelete,
+            deferrable,
+            initially,
+        ) = sqla_compat._fk_spec(const)
+
+        self._sig: Tuple[Any, ...] = (
+            self.source_schema,
+            self.source_table,
+            tuple(self.source_columns),
+            self.target_schema,
+            self.target_table,
+            tuple(self.target_columns),
+        ) + (
+            (
+                (None if onupdate.lower() == "no action" else onupdate.lower())
+                if onupdate
+                else None
+            ),
+            (
+                (None if ondelete.lower() == "no action" else ondelete.lower())
+                if ondelete
+                else None
+            ),
+            # convert initially + deferrable into one three-state value
+            (
+                "initially_deferrable"
+                if initially and initially.lower() == "deferred"
+                else "deferrable" if deferrable else "not deferrable"
+            ),
+        )
+
+    @util.memoized_property
+    def unnamed_no_options(self):
+        return (
+            self.source_schema,
+            self.source_table,
+            tuple(self.source_columns),
+            self.target_schema,
+            self.target_table,
+            tuple(self.target_columns),
+        )
+
+
+def is_index_sig(sig: _constraint_sig) -> TypeGuard[_ix_constraint_sig]:
+    return sig._is_index
+
+
+def is_uq_sig(sig: _constraint_sig) -> TypeGuard[_uq_constraint_sig]:
+    return sig._is_uq
+
+
+def is_fk_sig(sig: _constraint_sig) -> TypeGuard[_fk_constraint_sig]:
+    return sig._is_fk
diff --git a/.venv/lib/python3.12/site-packages/alembic/ddl/base.py b/.venv/lib/python3.12/site-packages/alembic/ddl/base.py
new file mode 100644
index 00000000..bd55c56d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/ddl/base.py
@@ -0,0 +1,336 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import functools
+from typing import Optional
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import exc
+from sqlalchemy import Integer
+from sqlalchemy import types as sqltypes
+from sqlalchemy.ext.compiler import compiles
+from sqlalchemy.schema import Column
+from sqlalchemy.schema import DDLElement
+from sqlalchemy.sql.elements import quoted_name
+
+from ..util.sqla_compat import _columns_for_constraint  # noqa
+from ..util.sqla_compat import _find_columns  # noqa
+from ..util.sqla_compat import _fk_spec  # noqa
+from ..util.sqla_compat import _is_type_bound  # noqa
+from ..util.sqla_compat import _table_for_constraint  # noqa
+
+if TYPE_CHECKING:
+    from typing import Any
+
+    from sqlalchemy import Computed
+    from sqlalchemy import Identity
+    from sqlalchemy.sql.compiler import Compiled
+    from sqlalchemy.sql.compiler import DDLCompiler
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.functions import Function
+    from sqlalchemy.sql.schema import FetchedValue
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .impl import DefaultImpl
+
+_ServerDefault = Union["TextClause", "FetchedValue", "Function[Any]", str]
+
+
+class AlterTable(DDLElement):
+    """Represent an ALTER TABLE statement.
+
+    Only the string name and optional schema name of the table
+    are required, not a full Table object.
+
+    """
+
+    def __init__(
+        self,
+        table_name: str,
+        schema: Optional[Union[quoted_name, str]] = None,
+    ) -> None:
+        self.table_name = table_name
+        self.schema = schema
+
+
+class RenameTable(AlterTable):
+    def __init__(
+        self,
+        old_table_name: str,
+        new_table_name: Union[quoted_name, str],
+        schema: Optional[Union[quoted_name, str]] = None,
+    ) -> None:
+        super().__init__(old_table_name, schema=schema)
+        self.new_table_name = new_table_name
+
+
+class AlterColumn(AlterTable):
+    def __init__(
+        self,
+        name: str,
+        column_name: str,
+        schema: Optional[str] = None,
+        existing_type: Optional[TypeEngine] = None,
+        existing_nullable: Optional[bool] = None,
+        existing_server_default: Optional[_ServerDefault] = None,
+        existing_comment: Optional[str] = None,
+    ) -> None:
+        super().__init__(name, schema=schema)
+        self.column_name = column_name
+        self.existing_type = (
+            sqltypes.to_instance(existing_type)
+            if existing_type is not None
+            else None
+        )
+        self.existing_nullable = existing_nullable
+        self.existing_server_default = existing_server_default
+        self.existing_comment = existing_comment
+
+
+class ColumnNullable(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, nullable: bool, **kw
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.nullable = nullable
+
+
+class ColumnType(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, type_: TypeEngine, **kw
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.type_ = sqltypes.to_instance(type_)
+
+
+class ColumnName(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, newname: str, **kw
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.newname = newname
+
+
+class ColumnDefault(AlterColumn):
+    def __init__(
+        self,
+        name: str,
+        column_name: str,
+        default: Optional[_ServerDefault],
+        **kw,
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.default = default
+
+
+class ComputedColumnDefault(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, default: Optional[Computed], **kw
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.default = default
+
+
+class IdentityColumnDefault(AlterColumn):
+    def __init__(
+        self,
+        name: str,
+        column_name: str,
+        default: Optional[Identity],
+        impl: DefaultImpl,
+        **kw,
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.default = default
+        self.impl = impl
+
+
+class AddColumn(AlterTable):
+    def __init__(
+        self,
+        name: str,
+        column: Column[Any],
+        schema: Optional[Union[quoted_name, str]] = None,
+    ) -> None:
+        super().__init__(name, schema=schema)
+        self.column = column
+
+
+class DropColumn(AlterTable):
+    def __init__(
+        self, name: str, column: Column[Any], schema: Optional[str] = None
+    ) -> None:
+        super().__init__(name, schema=schema)
+        self.column = column
+
+
+class ColumnComment(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, comment: Optional[str], **kw
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.comment = comment
+
+
+@compiles(RenameTable)
+def visit_rename_table(
+    element: RenameTable, compiler: DDLCompiler, **kw
+) -> str:
+    return "%s RENAME TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_table_name(compiler, element.new_table_name, element.schema),
+    )
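+
+# Dialect modules override these default compilations by registering a
+# dialect-specific variant; a sketch with a hypothetical dialect name:
+#
+#     @compiles(RenameTable, "mydialect")
+#     def _mydialect_rename_table(element, compiler, **kw):
+#         return "RENAME TABLE %s TO %s" % (
+#             format_table_name(compiler, element.table_name, element.schema),
+#             format_table_name(
+#                 compiler, element.new_table_name, element.schema
+#             ),
+#         )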
+
+
+@compiles(AddColumn)
+def visit_add_column(element: AddColumn, compiler: DDLCompiler, **kw) -> str:
+    return "%s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        add_column(compiler, element.column, **kw),
+    )
+
+
+@compiles(DropColumn)
+def visit_drop_column(element: DropColumn, compiler: DDLCompiler, **kw) -> str:
+    return "%s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        drop_column(compiler, element.column.name, **kw),
+    )
+
+
+@compiles(ColumnNullable)
+def visit_column_nullable(
+    element: ColumnNullable, compiler: DDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "DROP NOT NULL" if element.nullable else "SET NOT NULL",
+    )
+
+
+@compiles(ColumnType)
+def visit_column_type(element: ColumnType, compiler: DDLCompiler, **kw) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "TYPE %s" % format_type(compiler, element.type_),
+    )
+
+
+@compiles(ColumnName)
+def visit_column_name(element: ColumnName, compiler: DDLCompiler, **kw) -> str:
+    return "%s RENAME %s TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        format_column_name(compiler, element.newname),
+    )
+
+
+@compiles(ColumnDefault)
+def visit_column_default(
+    element: ColumnDefault, compiler: DDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        (
+            "SET DEFAULT %s" % format_server_default(compiler, element.default)
+            if element.default is not None
+            else "DROP DEFAULT"
+        ),
+    )
+
+
+@compiles(ComputedColumnDefault)
+def visit_computed_column(
+    element: ComputedColumnDefault, compiler: DDLCompiler, **kw
+):
+    raise exc.CompileError(
+        'Adding or removing a "computed" construct, e.g. GENERATED '
+        "ALWAYS AS, to or from an existing column is not supported."
+    )
+
+
+@compiles(IdentityColumnDefault)
+def visit_identity_column(
+    element: IdentityColumnDefault, compiler: DDLCompiler, **kw
+):
+    raise exc.CompileError(
+        'Adding, removing or modifying an "identity" construct, '
+        "e.g. GENERATED AS IDENTITY, to or from an existing "
+        "column is not supported in this dialect."
+    )
+
+
+def quote_dotted(
+    name: Union[quoted_name, str], quote: functools.partial
+) -> Union[quoted_name, str]:
+    """quote the elements of a dotted name"""
+
+    if isinstance(name, quoted_name):
+        return quote(name)
+    result = ".".join([quote(x) for x in name.split(".")])
+    return result
+
+
+def format_table_name(
+    compiler: Compiled,
+    name: Union[quoted_name, str],
+    schema: Optional[Union[quoted_name, str]],
+) -> Union[quoted_name, str]:
+    quote = functools.partial(compiler.preparer.quote)
+    if schema:
+        return quote_dotted(schema, quote) + "." + quote(name)
+    else:
+        return quote(name)
+
+
+def format_column_name(
+    compiler: DDLCompiler, name: Optional[Union[quoted_name, str]]
+) -> Union[quoted_name, str]:
+    return compiler.preparer.quote(name)  # type: ignore[arg-type]
+
+
+def format_server_default(
+    compiler: DDLCompiler,
+    default: Optional[_ServerDefault],
+) -> str:
+    return compiler.get_column_default_string(
+        Column("x", Integer, server_default=default)
+    )
+
+
+def format_type(compiler: DDLCompiler, type_: TypeEngine) -> str:
+    return compiler.dialect.type_compiler.process(type_)
+
+
+def alter_table(
+    compiler: DDLCompiler,
+    name: str,
+    schema: Optional[str],
+) -> str:
+    return "ALTER TABLE %s" % format_table_name(compiler, name, schema)
+
+
+def drop_column(compiler: DDLCompiler, name: str, **kw) -> str:
+    return "DROP COLUMN %s" % format_column_name(compiler, name)
+
+
+def alter_column(compiler: DDLCompiler, name: str) -> str:
+    return "ALTER COLUMN %s" % format_column_name(compiler, name)
+
+
+def add_column(compiler: DDLCompiler, column: Column[Any], **kw) -> str:
+    text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw)
+
+    const = " ".join(
+        compiler.process(constraint) for constraint in column.constraints
+    )
+    if const:
+        text += " " + const
+
+    return text
diff --git a/.venv/lib/python3.12/site-packages/alembic/ddl/impl.py b/.venv/lib/python3.12/site-packages/alembic/ddl/impl.py
new file mode 100644
index 00000000..c116fcfa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/ddl/impl.py
@@ -0,0 +1,885 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import logging
+import re
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import Iterable
+from typing import List
+from typing import Mapping
+from typing import NamedTuple
+from typing import Optional
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import cast
+from sqlalchemy import Column
+from sqlalchemy import MetaData
+from sqlalchemy import PrimaryKeyConstraint
+from sqlalchemy import schema
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy import text
+
+from . import _autogen
+from . import base
+from ._autogen import _constraint_sig as _constraint_sig
+from ._autogen import ComparisonResult as ComparisonResult
+from .. import util
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from typing import Literal
+    from typing import TextIO
+
+    from sqlalchemy.engine import Connection
+    from sqlalchemy.engine import Dialect
+    from sqlalchemy.engine.cursor import CursorResult
+    from sqlalchemy.engine.reflection import Inspector
+    from sqlalchemy.sql import ClauseElement
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import quoted_name
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.schema import ForeignKeyConstraint
+    from sqlalchemy.sql.schema import Index
+    from sqlalchemy.sql.schema import UniqueConstraint
+    from sqlalchemy.sql.selectable import TableClause
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .base import _ServerDefault
+    from ..autogenerate.api import AutogenContext
+    from ..operations.batch import ApplyBatchImpl
+    from ..operations.batch import BatchOperationsImpl
+
+log = logging.getLogger(__name__)
+
+
+class ImplMeta(type):
+    def __init__(
+        cls,
+        classname: str,
+        bases: Tuple[Type[DefaultImpl]],
+        dict_: Dict[str, Any],
+    ):
+        newtype = type.__init__(cls, classname, bases, dict_)
+        if "__dialect__" in dict_:
+            _impls[dict_["__dialect__"]] = cls  # type: ignore[assignment]
+        return newtype
+
+
+_impls: Dict[str, Type[DefaultImpl]] = {}
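+
+# Third-party dialects hook into this registry by subclassing DefaultImpl
+# (below) with a ``__dialect__`` attribute; ImplMeta records the class
+# here.  A sketch with a hypothetical dialect name:
+#
+#     class MyDBImpl(DefaultImpl):
+#         __dialect__ = "mydb"
+#         transactional_ddl = True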
+
+
+class DefaultImpl(metaclass=ImplMeta):
+    """Provide the entrypoint for major migration operations,
+    including database-specific behavioral variances.
+
+    While individual SQL/DDL constructs already provide
+    for database-specific implementations, variances here
+    allow for entirely different sequences of operations
+    to take place for a particular migration, such as
+    SQL Server's special 'IDENTITY INSERT' step for
+    bulk inserts.
+
+    """
+
+    __dialect__ = "default"
+
+    transactional_ddl = False
+    command_terminator = ";"
+    type_synonyms: Tuple[Set[str], ...] = ({"NUMERIC", "DECIMAL"},)
+    type_arg_extract: Sequence[str] = ()
+    # These attributes are deprecated in SQLAlchemy via #10247. They need to
+    # be ignored to support older versions that did not use dialect kwargs.
+    # They only apply to Oracle and are replaced by oracle_order,
+    # oracle_on_null.
+    identity_attrs_ignore: Tuple[str, ...] = ("order", "on_null")
+
+    def __init__(
+        self,
+        dialect: Dialect,
+        connection: Optional[Connection],
+        as_sql: bool,
+        transactional_ddl: Optional[bool],
+        output_buffer: Optional[TextIO],
+        context_opts: Dict[str, Any],
+    ) -> None:
+        self.dialect = dialect
+        self.connection = connection
+        self.as_sql = as_sql
+        self.literal_binds = context_opts.get("literal_binds", False)
+
+        self.output_buffer = output_buffer
+        self.memo: dict = {}
+        self.context_opts = context_opts
+        if transactional_ddl is not None:
+            self.transactional_ddl = transactional_ddl
+
+        if self.literal_binds:
+            if not self.as_sql:
+                raise util.CommandError(
+                    "Can't use literal_binds setting without as_sql mode"
+                )
+
+    @classmethod
+    def get_by_dialect(cls, dialect: Dialect) -> Type[DefaultImpl]:
+        return _impls[dialect.name]
+
+    def static_output(self, text: str) -> None:
+        assert self.output_buffer is not None
+        self.output_buffer.write(text + "\n\n")
+        self.output_buffer.flush()
+
+    def version_table_impl(
+        self,
+        *,
+        version_table: str,
+        version_table_schema: Optional[str],
+        version_table_pk: bool,
+        **kw: Any,
+    ) -> Table:
+        """Generate a :class:`.Table` object which will be used as the
+        structure for the Alembic version table.
+
+        Third party dialects may override this hook to provide an alternate
+        structure for this :class:`.Table`; requirements are only that it
+        be named based on the ``version_table`` parameter and contain
+        at least a single string-holding column named ``version_num``.
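+
+        As an illustration only (a sketch; ``applied_at`` is an invented
+        column, not part of Alembic), an override might extend the
+        table::
+
+            def version_table_impl(self, **kw):
+                vt = super().version_table_impl(**kw)
+                vt.append_column(Column("applied_at", String(64)))
+                return vt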
+
+        .. versionadded:: 1.14
+
+        """
+        vt = Table(
+            version_table,
+            MetaData(),
+            Column("version_num", String(32), nullable=False),
+            schema=version_table_schema,
+        )
+        if version_table_pk:
+            vt.append_constraint(
+                PrimaryKeyConstraint(
+                    "version_num", name=f"{version_table}_pkc"
+                )
+            )
+
+        return vt
+
+    def requires_recreate_in_batch(
+        self, batch_op: BatchOperationsImpl
+    ) -> bool:
+        """Return True if the given :class:`.BatchOperationsImpl`
+        would need the table to be recreated and copied in order to
+        proceed.
+
+        Normally, only returns True on SQLite when operations other
+        than add_column are present.
+
+        """
+        return False
+
+    def prep_table_for_batch(
+        self, batch_impl: ApplyBatchImpl, table: Table
+    ) -> None:
+        """perform any operations needed on a table before a new
+        one is created to replace it in batch mode.
+
+        the PG dialect uses this to drop constraints on the table
+        before the new one uses those same names.
+
+        """
+
+    @property
+    def bind(self) -> Optional[Connection]:
+        return self.connection
+
+    def _exec(
+        self,
+        construct: Union[Executable, str],
+        execution_options: Optional[Mapping[str, Any]] = None,
+        multiparams: Optional[Sequence[Mapping[str, Any]]] = None,
+        params: Mapping[str, Any] = util.immutabledict(),
+    ) -> Optional[CursorResult]:
+        if isinstance(construct, str):
+            construct = text(construct)
+        if self.as_sql:
+            if multiparams is not None or params:
+                raise TypeError("SQL parameters not allowed with as_sql")
+
+            compile_kw: dict[str, Any]
+            if self.literal_binds and not isinstance(
+                construct, schema.DDLElement
+            ):
+                compile_kw = dict(compile_kwargs={"literal_binds": True})
+            else:
+                compile_kw = {}
+
+            if TYPE_CHECKING:
+                assert isinstance(construct, ClauseElement)
+            compiled = construct.compile(dialect=self.dialect, **compile_kw)
+            self.static_output(
+                str(compiled).replace("\t", "    ").strip()
+                + self.command_terminator
+            )
+            return None
+        else:
+            conn = self.connection
+            assert conn is not None
+            if execution_options:
+                conn = conn.execution_options(**execution_options)
+
+            if params and multiparams is not None:
+                raise TypeError(
+                    "Can't send params and multiparams at the same time"
+                )
+
+            if multiparams:
+                return conn.execute(construct, multiparams)
+            else:
+                return conn.execute(construct, params)
+
+    def execute(
+        self,
+        sql: Union[Executable, str],
+        execution_options: Optional[dict[str, Any]] = None,
+    ) -> None:
+        self._exec(sql, execution_options)
+
+    def alter_column(
+        self,
+        table_name: str,
+        column_name: str,
+        nullable: Optional[bool] = None,
+        server_default: Union[_ServerDefault, Literal[False]] = False,
+        name: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        schema: Optional[str] = None,
+        autoincrement: Optional[bool] = None,
+        comment: Optional[Union[str, Literal[False]]] = False,
+        existing_comment: Optional[str] = None,
+        existing_type: Optional[TypeEngine] = None,
+        existing_server_default: Optional[_ServerDefault] = None,
+        existing_nullable: Optional[bool] = None,
+        existing_autoincrement: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        if autoincrement is not None or existing_autoincrement is not None:
+            util.warn(
+                "autoincrement and existing_autoincrement "
+                "only make sense for MySQL",
+                stacklevel=3,
+            )
+        if nullable is not None:
+            self._exec(
+                base.ColumnNullable(
+                    table_name,
+                    column_name,
+                    nullable,
+                    schema=schema,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                    existing_comment=existing_comment,
+                )
+            )
+        if server_default is not False:
+            kw = {}
+            cls_: Type[
+                Union[
+                    base.ComputedColumnDefault,
+                    base.IdentityColumnDefault,
+                    base.ColumnDefault,
+                ]
+            ]
+            if sqla_compat._server_default_is_computed(
+                server_default, existing_server_default
+            ):
+                cls_ = base.ComputedColumnDefault
+            elif sqla_compat._server_default_is_identity(
+                server_default, existing_server_default
+            ):
+                cls_ = base.IdentityColumnDefault
+                kw["impl"] = self
+            else:
+                cls_ = base.ColumnDefault
+            self._exec(
+                cls_(
+                    table_name,
+                    column_name,
+                    server_default,  # type:ignore[arg-type]
+                    schema=schema,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                    existing_comment=existing_comment,
+                    **kw,
+                )
+            )
+        if type_ is not None:
+            self._exec(
+                base.ColumnType(
+                    table_name,
+                    column_name,
+                    type_,
+                    schema=schema,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                    existing_comment=existing_comment,
+                )
+            )
+
+        if comment is not False:
+            self._exec(
+                base.ColumnComment(
+                    table_name,
+                    column_name,
+                    comment,
+                    schema=schema,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                    existing_comment=existing_comment,
+                )
+            )
+
+        # do the new name last ;)
+        if name is not None:
+            self._exec(
+                base.ColumnName(
+                    table_name,
+                    column_name,
+                    name,
+                    schema=schema,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                )
+            )
+
+    def add_column(
+        self,
+        table_name: str,
+        column: Column[Any],
+        schema: Optional[Union[str, quoted_name]] = None,
+    ) -> None:
+        self._exec(base.AddColumn(table_name, column, schema=schema))
+
+    def drop_column(
+        self,
+        table_name: str,
+        column: Column[Any],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> None:
+        self._exec(base.DropColumn(table_name, column, schema=schema))
+
+    def add_constraint(self, const: Any) -> None:
+        if const._create_rule is None or const._create_rule(self):
+            self._exec(schema.AddConstraint(const))
+
+    def drop_constraint(self, const: Constraint) -> None:
+        self._exec(schema.DropConstraint(const))
+
+    def rename_table(
+        self,
+        old_table_name: str,
+        new_table_name: Union[str, quoted_name],
+        schema: Optional[Union[str, quoted_name]] = None,
+    ) -> None:
+        self._exec(
+            base.RenameTable(old_table_name, new_table_name, schema=schema)
+        )
+
+    def create_table(self, table: Table, **kw: Any) -> None:
+        table.dispatch.before_create(
+            table, self.connection, checkfirst=False, _ddl_runner=self
+        )
+        self._exec(schema.CreateTable(table, **kw))
+        table.dispatch.after_create(
+            table, self.connection, checkfirst=False, _ddl_runner=self
+        )
+        for index in table.indexes:
+            self._exec(schema.CreateIndex(index))
+
+        with_comment = (
+            self.dialect.supports_comments and not self.dialect.inline_comments
+        )
+        comment = table.comment
+        if comment and with_comment:
+            self.create_table_comment(table)
+
+        for column in table.columns:
+            comment = column.comment
+            if comment and with_comment:
+                self.create_column_comment(column)
+
+    def drop_table(self, table: Table, **kw: Any) -> None:
+        table.dispatch.before_drop(
+            table, self.connection, checkfirst=False, _ddl_runner=self
+        )
+        self._exec(schema.DropTable(table, **kw))
+        table.dispatch.after_drop(
+            table, self.connection, checkfirst=False, _ddl_runner=self
+        )
+
+    def create_index(self, index: Index, **kw: Any) -> None:
+        self._exec(schema.CreateIndex(index, **kw))
+
+    def create_table_comment(self, table: Table) -> None:
+        self._exec(schema.SetTableComment(table))
+
+    def drop_table_comment(self, table: Table) -> None:
+        self._exec(schema.DropTableComment(table))
+
+    def create_column_comment(self, column: ColumnElement[Any]) -> None:
+        self._exec(schema.SetColumnComment(column))
+
+    def drop_index(self, index: Index, **kw: Any) -> None:
+        self._exec(schema.DropIndex(index, **kw))
+
+    def bulk_insert(
+        self,
+        table: Union[TableClause, Table],
+        rows: List[dict],
+        multiinsert: bool = True,
+    ) -> None:
+        if not isinstance(rows, list):
+            raise TypeError("List expected")
+        elif rows and not isinstance(rows[0], dict):
+            raise TypeError("List of dictionaries expected")
+        if self.as_sql:
+            for row in rows:
+                self._exec(
+                    table.insert()
+                    .inline()
+                    .values(
+                        **{
+                            k: (
+                                sqla_compat._literal_bindparam(
+                                    k, v, type_=table.c[k].type
+                                )
+                                if not isinstance(
+                                    v, sqla_compat._literal_bindparam
+                                )
+                                else v
+                            )
+                            for k, v in row.items()
+                        }
+                    )
+                )
+        else:
+            if rows:
+                if multiinsert:
+                    self._exec(table.insert().inline(), multiparams=rows)
+                else:
+                    for row in rows:
+                        self._exec(table.insert().inline().values(**row))
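+
+    # Illustrative use from a migration script ("accounts" is an invented
+    # table): op.bulk_insert(accounts, [{"id": 1, "name": "spongebob"}]).
+    # In --sql mode each row renders as its own INSERT with literal values;
+    # online, multiinsert=True sends all rows in one executemany() call.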
+
+    def _tokenize_column_type(self, column: Column) -> Params:
+        definition: str
+        definition = self.dialect.type_compiler.process(column.type).lower()
+
+        # tokenize the SQLAlchemy-generated version of a type, so that
+        # the two can be compared.
+        #
+        # examples:
+        # NUMERIC(10, 5)
+        # TIMESTAMP WITH TIMEZONE
+        # INTEGER UNSIGNED
+        # INTEGER (10) UNSIGNED
+        # INTEGER(10) UNSIGNED
+        # varchar character set utf8
+        #
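+        # For instance (illustrative, per the logic below):
+        # "INTEGER(10) UNSIGNED" tokenizes to Params(token0="integer",
+        # tokens=["unsigned"], args=["10"], kwargs={}).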
+
+        tokens: List[str] = re.findall(r"[\w\-_]+|\(.+?\)", definition)
+
+        term_tokens: List[str] = []
+        paren_term = None
+
+        for token in tokens:
+            if re.match(r"^\(.*\)$", token):
+                paren_term = token
+            else:
+                term_tokens.append(token)
+
+        params = Params(term_tokens[0], term_tokens[1:], [], {})
+
+        if paren_term:
+            term: str
+            for term in re.findall("[^(),]+", paren_term):
+                if "=" in term:
+                    key, val = term.split("=")
+                    params.kwargs[key.strip()] = val.strip()
+                else:
+                    params.args.append(term.strip())
+
+        return params
+
+    def _column_types_match(
+        self, inspector_params: Params, metadata_params: Params
+    ) -> bool:
+        if inspector_params.token0 == metadata_params.token0:
+            return True
+
+        synonyms = [{t.lower() for t in batch} for batch in self.type_synonyms]
+        inspector_all_terms = " ".join(
+            [inspector_params.token0] + inspector_params.tokens
+        )
+        metadata_all_terms = " ".join(
+            [metadata_params.token0] + metadata_params.tokens
+        )
+
+        for batch in synonyms:
+            if {inspector_all_terms, metadata_all_terms}.issubset(batch) or {
+                inspector_params.token0,
+                metadata_params.token0,
+            }.issubset(batch):
+                return True
+        return False
+
+    def _column_args_match(
+        self, inspected_params: Params, meta_params: Params
+    ) -> bool:
+        """We want to compare column parameters. However, we only want
+        to compare parameters that are set. If they both have `collation`,
+        we want to make sure they are the same. However, if only one
+        specifies it, don't flag it for being less specific.
+        """
+
+        if (
+            len(meta_params.tokens) == len(inspected_params.tokens)
+            and meta_params.tokens != inspected_params.tokens
+        ):
+            return False
+
+        if (
+            len(meta_params.args) == len(inspected_params.args)
+            and meta_params.args != inspected_params.args
+        ):
+            return False
+
+        insp = " ".join(inspected_params.tokens).lower()
+        meta = " ".join(meta_params.tokens).lower()
+
+        for reg in self.type_arg_extract:
+            mi = re.search(reg, insp)
+            mm = re.search(reg, meta)
+
+            if mi and mm and mi.group(1) != mm.group(1):
+                return False
+
+        return True
+
+    def compare_type(
+        self, inspector_column: Column[Any], metadata_column: Column
+    ) -> bool:
+        """Returns True if there ARE differences between the types of the two
+        columns. Takes impl.type_synonyms into account between reflected
+        and metadata types.
+        """
+        inspector_params = self._tokenize_column_type(inspector_column)
+        metadata_params = self._tokenize_column_type(metadata_column)
+
+        if not self._column_types_match(inspector_params, metadata_params):
+            return True
+        if not self._column_args_match(inspector_params, metadata_params):
+            return True
+        return False
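+
+    # Illustrative (not part of Alembic): a reflected NUMERIC(10, 2) against
+    # a metadata DECIMAL(10, 2) matches through the {"NUMERIC", "DECIMAL"}
+    # synonym set, so compare_type() reports no difference (returns False).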
+
+    def compare_server_default(
+        self,
+        inspector_column,
+        metadata_column,
+        rendered_metadata_default,
+        rendered_inspector_default,
+    ):
+        return rendered_inspector_default != rendered_metadata_default
+
+    def correct_for_autogen_constraints(
+        self,
+        conn_uniques: Set[UniqueConstraint],
+        conn_indexes: Set[Index],
+        metadata_unique_constraints: Set[UniqueConstraint],
+        metadata_indexes: Set[Index],
+    ) -> None:
+        pass
+
+    def cast_for_batch_migrate(self, existing, existing_transfer, new_type):
+        if existing.type._type_affinity is not new_type._type_affinity:
+            existing_transfer["expr"] = cast(
+                existing_transfer["expr"], new_type
+            )
+
+    def render_ddl_sql_expr(
+        self, expr: ClauseElement, is_server_default: bool = False, **kw: Any
+    ) -> str:
+        """Render a SQL expression that is typically a server default,
+        index expression, etc.
+
+        """
+
+        compile_kw = {"literal_binds": True, "include_table": False}
+
+        return str(
+            expr.compile(dialect=self.dialect, compile_kwargs=compile_kw)
+        )
+
+    def _compat_autogen_column_reflect(self, inspector: Inspector) -> Callable:
+        return self.autogen_column_reflect
+
+    def correct_for_autogen_foreignkeys(
+        self,
+        conn_fks: Set[ForeignKeyConstraint],
+        metadata_fks: Set[ForeignKeyConstraint],
+    ) -> None:
+        pass
+
+    def autogen_column_reflect(self, inspector, table, column_info):
+        """A hook that is attached to the 'column_reflect' event for when
+        a Table is reflected from the database during the autogenerate
+        process.
+
+        Dialects can elect to modify the information gathered here.
+
+        """
+
+    def start_migrations(self) -> None:
+        """A hook called when :meth:`.EnvironmentContext.run_migrations`
+        is called.
+
+        Implementations can set up per-migration-run state here.
+
+        """
+
+    def emit_begin(self) -> None:
+        """Emit the string ``BEGIN``, or the backend-specific
+        equivalent, on the current connection context.
+
+        This is used in offline mode and typically
+        via :meth:`.EnvironmentContext.begin_transaction`.
+
+        """
+        self.static_output("BEGIN" + self.command_terminator)
+
+    def emit_commit(self) -> None:
+        """Emit the string ``COMMIT``, or the backend-specific
+        equivalent, on the current connection context.
+
+        This is used in offline mode and typically
+        via :meth:`.EnvironmentContext.begin_transaction`.
+
+        """
+        self.static_output("COMMIT" + self.command_terminator)
+
+    def render_type(
+        self, type_obj: TypeEngine, autogen_context: AutogenContext
+    ) -> Union[str, Literal[False]]:
+        return False
+
+    def _compare_identity_default(self, metadata_identity, inspector_identity):
+        # "ignored" contains the attributes that were not considered
+        # because they are assumed to be at their default values in the db.
+        diff, ignored = _compare_identity_options(
+            metadata_identity,
+            inspector_identity,
+            schema.Identity(),
+            skip={"always"},
+        )
+
+        meta_always = getattr(metadata_identity, "always", None)
+        inspector_always = getattr(inspector_identity, "always", None)
+        # None and False are the same in this comparison
+        if bool(meta_always) != bool(inspector_always):
+            diff.add("always")
+
+        diff.difference_update(self.identity_attrs_ignore)
+
+        # returns 3 values:
+        return (
+            # different identity attributes
+            diff,
+            # ignored identity attributes
+            ignored,
+            # whether the two identities should be considered different
+            bool(diff) or bool(metadata_identity) != bool(inspector_identity),
+        )
+
+    def _compare_index_unique(
+        self, metadata_index: Index, reflected_index: Index
+    ) -> Optional[str]:
+        conn_unique = bool(reflected_index.unique)
+        meta_unique = bool(metadata_index.unique)
+        if conn_unique != meta_unique:
+            return f"unique={conn_unique} to unique={meta_unique}"
+        else:
+            return None
+
+    def _create_metadata_constraint_sig(
+        self, constraint: _autogen._C, **opts: Any
+    ) -> _constraint_sig[_autogen._C]:
+        return _constraint_sig.from_constraint(True, self, constraint, **opts)
+
+    def _create_reflected_constraint_sig(
+        self, constraint: _autogen._C, **opts: Any
+    ) -> _constraint_sig[_autogen._C]:
+        return _constraint_sig.from_constraint(False, self, constraint, **opts)
+
+    def compare_indexes(
+        self,
+        metadata_index: Index,
+        reflected_index: Index,
+    ) -> ComparisonResult:
+        """Compare two indexes by comparing the signature generated by
+        ``create_index_sig``.
+
+        This method returns a ``ComparisonResult``.
+        """
+        msg: List[str] = []
+        unique_msg = self._compare_index_unique(
+            metadata_index, reflected_index
+        )
+        if unique_msg:
+            msg.append(unique_msg)
+        m_sig = self._create_metadata_constraint_sig(metadata_index)
+        r_sig = self._create_reflected_constraint_sig(reflected_index)
+
+        assert _autogen.is_index_sig(m_sig)
+        assert _autogen.is_index_sig(r_sig)
+
+        # The assumption is that the indexes have no expressions
+        for sig in m_sig, r_sig:
+            if sig.has_expressions:
+                log.warning(
+                    "Generating approximate signature for index %s. "
+                    "The dialect "
+                    "implementation should either skip expression indexes "
+                    "or provide a custom implementation.",
+                    sig.const,
+                )
+
+        if m_sig.column_names != r_sig.column_names:
+            msg.append(
+                f"expression {r_sig.column_names} to {m_sig.column_names}"
+            )
+
+        if msg:
+            return ComparisonResult.Different(msg)
+        else:
+            return ComparisonResult.Equal()
+
+    def compare_unique_constraint(
+        self,
+        metadata_constraint: UniqueConstraint,
+        reflected_constraint: UniqueConstraint,
+    ) -> ComparisonResult:
+        """Compare two unique constraints by comparing the two signatures.
+
+        The arguments are the two :class:`.UniqueConstraint` objects;
+        their signatures are generated internally via
+        ``_create_metadata_constraint_sig`` and
+        ``_create_reflected_constraint_sig``.
+
+        This method returns a ``ComparisonResult``.
+        """
+        metadata_tup = self._create_metadata_constraint_sig(
+            metadata_constraint
+        )
+        reflected_tup = self._create_reflected_constraint_sig(
+            reflected_constraint
+        )
+
+        meta_sig = metadata_tup.unnamed
+        conn_sig = reflected_tup.unnamed
+        if conn_sig != meta_sig:
+            return ComparisonResult.Different(
+                f"expression {conn_sig} to {meta_sig}"
+            )
+        else:
+            return ComparisonResult.Equal()
+
+    def _skip_functional_indexes(self, metadata_indexes, conn_indexes):
+        conn_indexes_by_name = {c.name: c for c in conn_indexes}
+
+        for idx in list(metadata_indexes):
+            if idx.name in conn_indexes_by_name:
+                continue
+            iex = sqla_compat.is_expression_index(idx)
+            if iex:
+                util.warn(
+                    "autogenerate skipping metadata-specified "
+                    "expression-based index "
+                    f"{idx.name!r}; dialect {self.__dialect__!r} under "
+                    f"SQLAlchemy {sqla_compat.sqlalchemy_version} can't "
+                    "reflect these indexes so they can't be compared"
+                )
+                metadata_indexes.discard(idx)
+
+    def adjust_reflected_dialect_options(
+        self, reflected_object: Dict[str, Any], kind: str
+    ) -> Dict[str, Any]:
+        return reflected_object.get("dialect_options", {})
+
+
+class Params(NamedTuple):
+    token0: str
+    tokens: List[str]
+    args: List[str]
+    kwargs: Dict[str, str]
+
+
+def _compare_identity_options(
+    metadata_io: Union[schema.Identity, schema.Sequence, None],
+    inspector_io: Union[schema.Identity, schema.Sequence, None],
+    default_io: Union[schema.Identity, schema.Sequence],
+    skip: Set[str],
+):
+    # this can be used for identity or sequence comparison.
+    # default_io is an instance of IdentityOptions with all attributes set
+    # to their default values.
+    meta_d = sqla_compat._get_identity_options_dict(metadata_io)
+    insp_d = sqla_compat._get_identity_options_dict(inspector_io)
+
+    diff = set()
+    ignored_attr = set()
+
+    def check_dicts(
+        meta_dict: Mapping[str, Any],
+        insp_dict: Mapping[str, Any],
+        default_dict: Mapping[str, Any],
+        attrs: Iterable[str],
+    ):
+        for attr in set(attrs).difference(skip):
+            meta_value = meta_dict.get(attr)
+            insp_value = insp_dict.get(attr)
+            if insp_value != meta_value:
+                default_value = default_dict.get(attr)
+                if meta_value == default_value:
+                    ignored_attr.add(attr)
+                else:
+                    diff.add(attr)
+
+    check_dicts(
+        meta_d,
+        insp_d,
+        sqla_compat._get_identity_options_dict(default_io),
+        set(meta_d).union(insp_d),
+    )
+    if sqla_compat.identity_has_dialect_kwargs:
+        assert hasattr(default_io, "dialect_kwargs")
+        # use only the dialect kwargs in inspector_io since metadata_io
+        # can have options for many backends
+        check_dicts(
+            getattr(metadata_io, "dialect_kwargs", {}),
+            getattr(inspector_io, "dialect_kwargs", {}),
+            default_io.dialect_kwargs,
+            getattr(inspector_io, "dialect_kwargs", {}),
+        )
+
+    return diff, ignored_attr
diff --git a/.venv/lib/python3.12/site-packages/alembic/ddl/mssql.py b/.venv/lib/python3.12/site-packages/alembic/ddl/mssql.py
new file mode 100644
index 00000000..baa43d5e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/ddl/mssql.py
@@ -0,0 +1,419 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import re
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import types as sqltypes
+from sqlalchemy.schema import Column
+from sqlalchemy.schema import CreateIndex
+from sqlalchemy.sql.base import Executable
+from sqlalchemy.sql.elements import ClauseElement
+
+from .base import AddColumn
+from .base import alter_column
+from .base import alter_table
+from .base import ColumnDefault
+from .base import ColumnName
+from .base import ColumnNullable
+from .base import ColumnType
+from .base import format_column_name
+from .base import format_server_default
+from .base import format_table_name
+from .base import format_type
+from .base import RenameTable
+from .impl import DefaultImpl
+from .. import util
+from ..util import sqla_compat
+from ..util.sqla_compat import compiles
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy.dialects.mssql.base import MSDDLCompiler
+    from sqlalchemy.dialects.mssql.base import MSSQLCompiler
+    from sqlalchemy.engine.cursor import CursorResult
+    from sqlalchemy.sql.schema import Index
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.selectable import TableClause
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .base import _ServerDefault
+
+
+class MSSQLImpl(DefaultImpl):
+    __dialect__ = "mssql"
+    transactional_ddl = True
+    batch_separator = "GO"
+
+    type_synonyms = DefaultImpl.type_synonyms + ({"VARCHAR", "NVARCHAR"},)
+    identity_attrs_ignore = DefaultImpl.identity_attrs_ignore + (
+        "minvalue",
+        "maxvalue",
+        "nominvalue",
+        "nomaxvalue",
+        "cycle",
+        "cache",
+    )
+
+    def __init__(self, *arg, **kw) -> None:
+        super().__init__(*arg, **kw)
+        self.batch_separator = self.context_opts.get(
+            "mssql_batch_separator", self.batch_separator
+        )
+
+    def _exec(self, construct: Any, *args, **kw) -> Optional[CursorResult]:
+        result = super()._exec(construct, *args, **kw)
+        if self.as_sql and self.batch_separator:
+            self.static_output(self.batch_separator)
+        return result
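+
+    # Illustrative offline output: in as_sql mode every statement is
+    # followed by the batch separator, e.g.
+    #
+    #   ALTER TABLE t ADD x INTEGER;
+    #   GO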
+
+    def emit_begin(self) -> None:
+        self.static_output("BEGIN TRANSACTION" + self.command_terminator)
+
+    def emit_commit(self) -> None:
+        super().emit_commit()
+        if self.as_sql and self.batch_separator:
+            self.static_output(self.batch_separator)
+
+    def alter_column(  # type:ignore[override]
+        self,
+        table_name: str,
+        column_name: str,
+        nullable: Optional[bool] = None,
+        server_default: Optional[
+            Union[_ServerDefault, Literal[False]]
+        ] = False,
+        name: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        schema: Optional[str] = None,
+        existing_type: Optional[TypeEngine] = None,
+        existing_server_default: Optional[_ServerDefault] = None,
+        existing_nullable: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
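+        # MSSQL's ALTER COLUMN must restate the column's type along with
+        # NULL/NOT NULL, so a nullability change below requires either
+        # existing_type or a new type_ to be present.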
+        if nullable is not None:
+            if type_ is not None:
+                # the NULL/NOT NULL alter will handle
+                # the type alteration
+                existing_type = type_
+                type_ = None
+            elif existing_type is None:
+                raise util.CommandError(
+                    "MS-SQL ALTER COLUMN operations "
+                    "with NULL or NOT NULL require the "
+                    "existing_type or a new type_ be passed."
+                )
+        elif existing_nullable is not None and type_ is not None:
+            nullable = existing_nullable
+
+            # the NULL/NOT NULL alter will handle
+            # the type alteration
+            existing_type = type_
+            type_ = None
+
+        elif type_ is not None:
+            util.warn(
+                "MS-SQL ALTER COLUMN operations that specify type_= "
+                "should also specify a nullable= or "
+                "existing_nullable= argument to avoid implicit conversion "
+                "of NOT NULL columns to NULL."
+            )
+
+        used_default = False
+        if sqla_compat._server_default_is_identity(
+            server_default, existing_server_default
+        ) or sqla_compat._server_default_is_computed(
+            server_default, existing_server_default
+        ):
+            used_default = True
+            kw["server_default"] = server_default
+            kw["existing_server_default"] = existing_server_default
+
+        super().alter_column(
+            table_name,
+            column_name,
+            nullable=nullable,
+            type_=type_,
+            schema=schema,
+            existing_type=existing_type,
+            existing_nullable=existing_nullable,
+            **kw,
+        )
+
+        if server_default is not False and used_default is False:
+            if existing_server_default is not False or server_default is None:
+                self._exec(
+                    _ExecDropConstraint(
+                        table_name,
+                        column_name,
+                        "sys.default_constraints",
+                        schema,
+                    )
+                )
+            if server_default is not None:
+                super().alter_column(
+                    table_name,
+                    column_name,
+                    schema=schema,
+                    server_default=server_default,
+                )
+
+        if name is not None:
+            super().alter_column(
+                table_name, column_name, schema=schema, name=name
+            )
+
+    def create_index(self, index: Index, **kw: Any) -> None:
+        # this likely defaults to None if not present, so get()
+        # should normally not return the default value.  being
+        # defensive in any case
+        mssql_include = index.kwargs.get("mssql_include", None) or ()
+        assert index.table is not None
+        for col in mssql_include:
+            if col not in index.table.c:
+                index.table.append_column(Column(col, sqltypes.NullType))
+        self._exec(CreateIndex(index, **kw))
+
+    def bulk_insert(  # type:ignore[override]
+        self, table: Union[TableClause, Table], rows: List[dict], **kw: Any
+    ) -> None:
+        if self.as_sql:
+            self._exec(
+                "SET IDENTITY_INSERT %s ON"
+                % self.dialect.identifier_preparer.format_table(table)
+            )
+            super().bulk_insert(table, rows, **kw)
+            self._exec(
+                "SET IDENTITY_INSERT %s OFF"
+                % self.dialect.identifier_preparer.format_table(table)
+            )
+        else:
+            super().bulk_insert(table, rows, **kw)
+
+    def drop_column(
+        self,
+        table_name: str,
+        column: Column[Any],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> None:
+        drop_default = kw.pop("mssql_drop_default", False)
+        if drop_default:
+            self._exec(
+                _ExecDropConstraint(
+                    table_name, column, "sys.default_constraints", schema
+                )
+            )
+        drop_check = kw.pop("mssql_drop_check", False)
+        if drop_check:
+            self._exec(
+                _ExecDropConstraint(
+                    table_name, column, "sys.check_constraints", schema
+                )
+            )
+        drop_fks = kw.pop("mssql_drop_foreign_key", False)
+        if drop_fks:
+            self._exec(_ExecDropFKConstraint(table_name, column, schema))
+        super().drop_column(table_name, column, schema=schema, **kw)
+
+    def compare_server_default(
+        self,
+        inspector_column,
+        metadata_column,
+        rendered_metadata_default,
+        rendered_inspector_default,
+    ):
+        if rendered_metadata_default is not None:
+            rendered_metadata_default = re.sub(
+                r"[\(\) \"\']", "", rendered_metadata_default
+            )
+
+        if rendered_inspector_default is not None:
+            # SQL Server collapses whitespace and adds arbitrary parentheses
+            # within expressions; our only option is to collapse all of it
+
+            rendered_inspector_default = re.sub(
+                r"[\(\) \"\']", "", rendered_inspector_default
+            )
+
+        return rendered_inspector_default != rendered_metadata_default
+
+    def _compare_identity_default(self, metadata_identity, inspector_identity):
+        diff, ignored, is_alter = super()._compare_identity_default(
+            metadata_identity, inspector_identity
+        )
+
+        if (
+            metadata_identity is None
+            and inspector_identity is not None
+            and not diff
+            and inspector_identity.column is not None
+            and inspector_identity.column.primary_key
+        ):
+            # MSSQL reflects primary keys with autoincrement as identity
+            # columns; if no differing attributes are present, ignore them
+            is_alter = False
+
+        return diff, ignored, is_alter
+
+    def adjust_reflected_dialect_options(
+        self, reflected_object: Dict[str, Any], kind: str
+    ) -> Dict[str, Any]:
+        options: Dict[str, Any]
+        options = reflected_object.get("dialect_options", {}).copy()
+        if not options.get("mssql_include"):
+            options.pop("mssql_include", None)
+        if not options.get("mssql_clustered"):
+            options.pop("mssql_clustered", None)
+        return options
+
+
+class _ExecDropConstraint(Executable, ClauseElement):
+    inherit_cache = False
+
+    def __init__(
+        self,
+        tname: str,
+        colname: Union[Column[Any], str],
+        type_: str,
+        schema: Optional[str],
+    ) -> None:
+        self.tname = tname
+        self.colname = colname
+        self.type_ = type_
+        self.schema = schema
+
+
+class _ExecDropFKConstraint(Executable, ClauseElement):
+    inherit_cache = False
+
+    def __init__(
+        self, tname: str, colname: Column[Any], schema: Optional[str]
+    ) -> None:
+        self.tname = tname
+        self.colname = colname
+        self.schema = schema
+
+
+@compiles(_ExecDropConstraint, "mssql")
+def _exec_drop_col_constraint(
+    element: _ExecDropConstraint, compiler: MSSQLCompiler, **kw
+) -> str:
+    schema, tname, colname, type_ = (
+        element.schema,
+        element.tname,
+        element.colname,
+        element.type_,
+    )
+    # from http://www.mssqltips.com/sqlservertip/1425/\
+    # working-with-default-constraints-in-sql-server/
+    return """declare @const_name varchar(256)
+select @const_name = QUOTENAME([name]) from %(type)s
+where parent_object_id = object_id('%(schema_dot)s%(tname)s')
+and col_name(parent_object_id, parent_column_id) = '%(colname)s'
+exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % {
+        "type": type_,
+        "tname": tname,
+        "colname": colname,
+        "tname_quoted": format_table_name(compiler, tname, schema),
+        "schema_dot": schema + "." if schema else "",
+    }
+
+
+@compiles(_ExecDropFKConstraint, "mssql")
+def _exec_drop_col_fk_constraint(
+    element: _ExecDropFKConstraint, compiler: MSSQLCompiler, **kw
+) -> str:
+    schema, tname, colname = element.schema, element.tname, element.colname
+
+    return """declare @const_name varchar(256)
+select @const_name = QUOTENAME([name]) from
+sys.foreign_keys fk join sys.foreign_key_columns fkc
+on fk.object_id=fkc.constraint_object_id
+where fkc.parent_object_id = object_id('%(schema_dot)s%(tname)s')
+and col_name(fkc.parent_object_id, fkc.parent_column_id) = '%(colname)s'
+exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % {
+        "tname": tname,
+        "colname": colname,
+        "tname_quoted": format_table_name(compiler, tname, schema),
+        "schema_dot": schema + "." if schema else "",
+    }
+
+
+@compiles(AddColumn, "mssql")
+def visit_add_column(element: AddColumn, compiler: MSDDLCompiler, **kw) -> str:
+    return "%s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        mssql_add_column(compiler, element.column, **kw),
+    )
+
+
+def mssql_add_column(
+    compiler: MSDDLCompiler, column: Column[Any], **kw
+) -> str:
+    return "ADD %s" % compiler.get_column_specification(column, **kw)
+
+
+@compiles(ColumnNullable, "mssql")
+def visit_column_nullable(
+    element: ColumnNullable, compiler: MSDDLCompiler, **kw
+) -> str:
+    return "%s %s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        format_type(compiler, element.existing_type),  # type: ignore[arg-type]
+        "NULL" if element.nullable else "NOT NULL",
+    )
+
+
+@compiles(ColumnDefault, "mssql")
+def visit_column_default(
+    element: ColumnDefault, compiler: MSDDLCompiler, **kw
+) -> str:
+    # TODO: there can also be a named constraint
+    # with ADD CONSTRAINT here
+    return "%s ADD DEFAULT %s FOR %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_server_default(compiler, element.default),
+        format_column_name(compiler, element.column_name),
+    )
+
+
+@compiles(ColumnName, "mssql")
+def visit_rename_column(
+    element: ColumnName, compiler: MSDDLCompiler, **kw
+) -> str:
+    return "EXEC sp_rename '%s.%s', %s, 'COLUMN'" % (
+        format_table_name(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        format_column_name(compiler, element.newname),
+    )
+
+
+@compiles(ColumnType, "mssql")
+def visit_column_type(
+    element: ColumnType, compiler: MSDDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        format_type(compiler, element.type_),
+    )
+
+
+@compiles(RenameTable, "mssql")
+def visit_rename_table(
+    element: RenameTable, compiler: MSDDLCompiler, **kw
+) -> str:
+    return "EXEC sp_rename '%s', %s" % (
+        format_table_name(compiler, element.table_name, element.schema),
+        format_table_name(compiler, element.new_table_name, None),
+    )
diff --git a/.venv/lib/python3.12/site-packages/alembic/ddl/mysql.py b/.venv/lib/python3.12/site-packages/alembic/ddl/mysql.py
new file mode 100644
index 00000000..c7b3905c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/ddl/mysql.py
@@ -0,0 +1,491 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import re
+from typing import Any
+from typing import Optional
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import schema
+from sqlalchemy import types as sqltypes
+
+from .base import alter_table
+from .base import AlterColumn
+from .base import ColumnDefault
+from .base import ColumnName
+from .base import ColumnNullable
+from .base import ColumnType
+from .base import format_column_name
+from .base import format_server_default
+from .impl import DefaultImpl
+from .. import util
+from ..util import sqla_compat
+from ..util.sqla_compat import _is_type_bound
+from ..util.sqla_compat import compiles
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy.dialects.mysql.base import MySQLDDLCompiler
+    from sqlalchemy.sql.ddl import DropConstraint
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .base import _ServerDefault
+
+
+class MySQLImpl(DefaultImpl):
+    __dialect__ = "mysql"
+
+    transactional_ddl = False
+    type_synonyms = DefaultImpl.type_synonyms + (
+        {"BOOL", "TINYINT"},
+        {"JSON", "LONGTEXT"},
+    )
+    type_arg_extract = [r"character set ([\w\-_]+)", r"collate ([\w\-_]+)"]
+
+    def alter_column(  # type:ignore[override]
+        self,
+        table_name: str,
+        column_name: str,
+        nullable: Optional[bool] = None,
+        server_default: Union[_ServerDefault, Literal[False]] = False,
+        name: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        schema: Optional[str] = None,
+        existing_type: Optional[TypeEngine] = None,
+        existing_server_default: Optional[_ServerDefault] = None,
+        existing_nullable: Optional[bool] = None,
+        autoincrement: Optional[bool] = None,
+        existing_autoincrement: Optional[bool] = None,
+        comment: Optional[Union[str, Literal[False]]] = False,
+        existing_comment: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
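+        # Dispatch overview: computed/identity defaults fall through to the
+        # (unsupported) base implementation; renames and permitted
+        # functional DATETIME defaults use CHANGE COLUMN; other attribute
+        # changes use MODIFY COLUMN; a bare default change uses
+        # ALTER COLUMN ... SET/DROP DEFAULT.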
+        if sqla_compat._server_default_is_identity(
+            server_default, existing_server_default
+        ) or sqla_compat._server_default_is_computed(
+            server_default, existing_server_default
+        ):
+            # modifying computed or identity columns is not supported;
+            # the default implementation will raise
+            super().alter_column(
+                table_name,
+                column_name,
+                nullable=nullable,
+                type_=type_,
+                schema=schema,
+                existing_type=existing_type,
+                existing_nullable=existing_nullable,
+                server_default=server_default,
+                existing_server_default=existing_server_default,
+                **kw,
+            )
+        if name is not None or self._is_mysql_allowed_functional_default(
+            type_ if type_ is not None else existing_type, server_default
+        ):
+            self._exec(
+                MySQLChangeColumn(
+                    table_name,
+                    column_name,
+                    schema=schema,
+                    newname=name if name is not None else column_name,
+                    nullable=(
+                        nullable
+                        if nullable is not None
+                        else (
+                            existing_nullable
+                            if existing_nullable is not None
+                            else True
+                        )
+                    ),
+                    type_=type_ if type_ is not None else existing_type,
+                    default=(
+                        server_default
+                        if server_default is not False
+                        else existing_server_default
+                    ),
+                    autoincrement=(
+                        autoincrement
+                        if autoincrement is not None
+                        else existing_autoincrement
+                    ),
+                    comment=(
+                        comment if comment is not False else existing_comment
+                    ),
+                )
+            )
+        elif (
+            nullable is not None
+            or type_ is not None
+            or autoincrement is not None
+            or comment is not False
+        ):
+            self._exec(
+                MySQLModifyColumn(
+                    table_name,
+                    column_name,
+                    schema=schema,
+                    newname=name if name is not None else column_name,
+                    nullable=(
+                        nullable
+                        if nullable is not None
+                        else (
+                            existing_nullable
+                            if existing_nullable is not None
+                            else True
+                        )
+                    ),
+                    type_=type_ if type_ is not None else existing_type,
+                    default=(
+                        server_default
+                        if server_default is not False
+                        else existing_server_default
+                    ),
+                    autoincrement=(
+                        autoincrement
+                        if autoincrement is not None
+                        else existing_autoincrement
+                    ),
+                    comment=(
+                        comment if comment is not False else existing_comment
+                    ),
+                )
+            )
+        elif server_default is not False:
+            self._exec(
+                MySQLAlterDefault(
+                    table_name, column_name, server_default, schema=schema
+                )
+            )
+
+    def drop_constraint(
+        self,
+        const: Constraint,
+    ) -> None:
+        if isinstance(const, schema.CheckConstraint) and _is_type_bound(const):
+            return
+
+        super().drop_constraint(const)
+
+    def _is_mysql_allowed_functional_default(
+        self,
+        type_: Optional[TypeEngine],
+        server_default: Union[_ServerDefault, Literal[False]],
+    ) -> bool:
+        return (
+            type_ is not None
+            and type_._type_affinity is sqltypes.DateTime
+            and server_default is not None
+        )
+
+    def compare_server_default(
+        self,
+        inspector_column,
+        metadata_column,
+        rendered_metadata_default,
+        rendered_inspector_default,
+    ):
+        # partially a workaround for SQLAlchemy issue #3023; if the
+        # column were created without "NOT NULL", MySQL may have added
+        # an implicit default of '0' which we need to skip
+        # TODO: this is not really covered anymore ?
+        if (
+            metadata_column.type._type_affinity is sqltypes.Integer
+            and inspector_column.primary_key
+            and not inspector_column.autoincrement
+            and not rendered_metadata_default
+            and rendered_inspector_default == "'0'"
+        ):
+            return False
+        elif (
+            rendered_inspector_default
+            and inspector_column.type._type_affinity is sqltypes.Integer
+        ):
+            rendered_inspector_default = (
+                re.sub(r"^'|'$", "", rendered_inspector_default)
+                if rendered_inspector_default is not None
+                else None
+            )
+            return rendered_inspector_default != rendered_metadata_default
+        elif (
+            rendered_metadata_default
+            and metadata_column.type._type_affinity is sqltypes.String
+        ):
+            metadata_default = re.sub(r"^'|'$", "", rendered_metadata_default)
+            return rendered_inspector_default != f"'{metadata_default}'"
+        elif rendered_inspector_default and rendered_metadata_default:
+            # adjust for "function()" vs. "FUNCTION" as can occur particularly
+            # for the CURRENT_TIMESTAMP function on newer MariaDB versions
+
+            # SQLAlchemy MySQL dialect bundles ON UPDATE into the server
+            # default; adjust for this possibly being present.
+            onupdate_ins = re.match(
+                r"(.*) (on update.*?)(?:\(\))?$",
+                rendered_inspector_default.lower(),
+            )
+            onupdate_met = re.match(
+                r"(.*) (on update.*?)(?:\(\))?$",
+                rendered_metadata_default.lower(),
+            )
+
+            if onupdate_ins:
+                if not onupdate_met:
+                    return True
+                elif onupdate_ins.group(2) != onupdate_met.group(2):
+                    return True
+
+                rendered_inspector_default = onupdate_ins.group(1)
+                rendered_metadata_default = onupdate_met.group(1)
+
+            return re.sub(
+                r"(.*?)(?:\(\))?$", r"\1", rendered_inspector_default.lower()
+            ) != re.sub(
+                r"(.*?)(?:\(\))?$", r"\1", rendered_metadata_default.lower()
+            )
+        else:
+            return rendered_inspector_default != rendered_metadata_default
+
+    def correct_for_autogen_constraints(
+        self,
+        conn_unique_constraints,
+        conn_indexes,
+        metadata_unique_constraints,
+        metadata_indexes,
+    ):
+        # TODO: if SQLA 1.0, make use of "duplicates_index"
+        # metadata
+        removed = set()
+        for idx in list(conn_indexes):
+            if idx.unique:
+                continue
+            # MySQL puts implicit indexes on FK columns, even if
+            # composite and even if MyISAM, so we can't check this too
+            # easily.  The name of the index may be the column name, or
+            # it may be the name of the FK constraint.
+            for col in idx.columns:
+                if idx.name == col.name:
+                    conn_indexes.remove(idx)
+                    removed.add(idx.name)
+                    break
+                for fk in col.foreign_keys:
+                    if fk.name == idx.name:
+                        conn_indexes.remove(idx)
+                        removed.add(idx.name)
+                        break
+                if idx.name in removed:
+                    break
+
+        # then remove indexes from the "metadata_indexes"
+        # that we've removed from reflected, otherwise they come out
+        # as adds (see #202)
+        for idx in list(metadata_indexes):
+            if idx.name in removed:
+                metadata_indexes.remove(idx)
+
+    def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks):
+        conn_fk_by_sig = {
+            self._create_reflected_constraint_sig(fk).unnamed_no_options: fk
+            for fk in conn_fks
+        }
+        metadata_fk_by_sig = {
+            self._create_metadata_constraint_sig(fk).unnamed_no_options: fk
+            for fk in metadata_fks
+        }
+
+        for sig in set(conn_fk_by_sig).intersection(metadata_fk_by_sig):
+            mdfk = metadata_fk_by_sig[sig]
+            cnfk = conn_fk_by_sig[sig]
+            # MySQL considers RESTRICT to be the default and doesn't
+            # report on it.  if the model has explicit RESTRICT and
+            # the conn FK has None, set it to RESTRICT
+            if (
+                mdfk.ondelete is not None
+                and mdfk.ondelete.lower() == "restrict"
+                and cnfk.ondelete is None
+            ):
+                cnfk.ondelete = "RESTRICT"
+            if (
+                mdfk.onupdate is not None
+                and mdfk.onupdate.lower() == "restrict"
+                and cnfk.onupdate is None
+            ):
+                cnfk.onupdate = "RESTRICT"
+
+
+class MariaDBImpl(MySQLImpl):
+    __dialect__ = "mariadb"
+
+
+class MySQLAlterDefault(AlterColumn):
+    def __init__(
+        self,
+        name: str,
+        column_name: str,
+        default: _ServerDefault,
+        schema: Optional[str] = None,
+    ) -> None:
+        super(AlterColumn, self).__init__(name, schema=schema)
+        self.column_name = column_name
+        self.default = default
+
+
+class MySQLChangeColumn(AlterColumn):
+    def __init__(
+        self,
+        name: str,
+        column_name: str,
+        schema: Optional[str] = None,
+        newname: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        nullable: Optional[bool] = None,
+        default: Optional[Union[_ServerDefault, Literal[False]]] = False,
+        autoincrement: Optional[bool] = None,
+        comment: Optional[Union[str, Literal[False]]] = False,
+    ) -> None:
+        super(AlterColumn, self).__init__(name, schema=schema)
+        self.column_name = column_name
+        self.nullable = nullable
+        self.newname = newname
+        self.default = default
+        self.autoincrement = autoincrement
+        self.comment = comment
+        if type_ is None:
+            raise util.CommandError(
+                "All MySQL CHANGE/MODIFY COLUMN operations "
+                "require the existing type."
+            )
+
+        self.type_ = sqltypes.to_instance(type_)
+
+
+class MySQLModifyColumn(MySQLChangeColumn):
+    pass
+
+
+@compiles(ColumnNullable, "mysql", "mariadb")
+@compiles(ColumnName, "mysql", "mariadb")
+@compiles(ColumnDefault, "mysql", "mariadb")
+@compiles(ColumnType, "mysql", "mariadb")
+def _mysql_doesnt_support_individual(element, compiler, **kw):
+    raise NotImplementedError(
+        "Individual alter column constructs not supported by MySQL"
+    )
+
+
+@compiles(MySQLAlterDefault, "mysql", "mariadb")
+def _mysql_alter_default(
+    element: MySQLAlterDefault, compiler: MySQLDDLCompiler, **kw
+) -> str:
+    return "%s ALTER COLUMN %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        (
+            "SET DEFAULT %s" % format_server_default(compiler, element.default)
+            if element.default is not None
+            else "DROP DEFAULT"
+        ),
+    )
+
+
+@compiles(MySQLModifyColumn, "mysql", "mariadb")
+def _mysql_modify_column(
+    element: MySQLModifyColumn, compiler: MySQLDDLCompiler, **kw
+) -> str:
+    return "%s MODIFY %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        _mysql_colspec(
+            compiler,
+            nullable=element.nullable,
+            server_default=element.default,
+            type_=element.type_,
+            autoincrement=element.autoincrement,
+            comment=element.comment,
+        ),
+    )
+
+
+@compiles(MySQLChangeColumn, "mysql", "mariadb")
+def _mysql_change_column(
+    element: MySQLChangeColumn, compiler: MySQLDDLCompiler, **kw
+) -> str:
+    return "%s CHANGE %s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        format_column_name(compiler, element.newname),
+        _mysql_colspec(
+            compiler,
+            nullable=element.nullable,
+            server_default=element.default,
+            type_=element.type_,
+            autoincrement=element.autoincrement,
+            comment=element.comment,
+        ),
+    )
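+
+# Illustrative sketch (not part of the upstream source): CHANGE is used
+# when a rename accompanies the alteration; in both the CHANGE and
+# MODIFY forms MySQL requires the full remaining column specification,
+# e.g. for hypothetical names:
+#
+#   ALTER TABLE account CHANGE user_name login_name VARCHAR(50) NOT NULL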
+
+
+def _mysql_colspec(
+    compiler: MySQLDDLCompiler,
+    nullable: Optional[bool],
+    server_default: Optional[Union[_ServerDefault, Literal[False]]],
+    type_: TypeEngine,
+    autoincrement: Optional[bool],
+    comment: Optional[Union[str, Literal[False]]],
+) -> str:
+    spec = "%s %s" % (
+        compiler.dialect.type_compiler.process(type_),
+        "NULL" if nullable else "NOT NULL",
+    )
+    if autoincrement:
+        spec += " AUTO_INCREMENT"
+    if server_default is not False and server_default is not None:
+        spec += " DEFAULT %s" % format_server_default(compiler, server_default)
+    if comment:
+        spec += " COMMENT %s" % compiler.sql_compiler.render_literal_value(
+            comment, sqltypes.String()
+        )
+
+    return spec
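+
+# Illustrative note (not part of the upstream source): for an INTEGER
+# column with nullable=False, autoincrement=True and no default or
+# comment, the helper above yields a spec string like:
+#
+#   INTEGER NOT NULL AUTO_INCREMENT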
+
+
+@compiles(schema.DropConstraint, "mysql", "mariadb")
+def _mysql_drop_constraint(
+    element: DropConstraint, compiler: MySQLDDLCompiler, **kw
+) -> str:
+    """Redefine SQLAlchemy's drop constraint to
+    raise errors for invalid constraint type."""
+
+    constraint = element.element
+    if isinstance(
+        constraint,
+        (
+            schema.ForeignKeyConstraint,
+            schema.PrimaryKeyConstraint,
+            schema.UniqueConstraint,
+        ),
+    ):
+        assert not kw
+        return compiler.visit_drop_constraint(element)
+    elif isinstance(constraint, schema.CheckConstraint):
+        # note that SQLAlchemy as of 1.2 does not yet support
+        # DROP CONSTRAINT for MySQL/MariaDB, so we implement it
+        # fully here.
+        if compiler.dialect.is_mariadb:  # type: ignore[attr-defined]
+            return "ALTER TABLE %s DROP CONSTRAINT %s" % (
+                compiler.preparer.format_table(constraint.table),
+                compiler.preparer.format_constraint(constraint),
+            )
+        else:
+            return "ALTER TABLE %s DROP CHECK %s" % (
+                compiler.preparer.format_table(constraint.table),
+                compiler.preparer.format_constraint(constraint),
+            )
+    else:
+        raise NotImplementedError(
+            "No generic 'DROP CONSTRAINT' in MySQL - "
+            "please specify constraint type"
+        )
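+
+# Illustrative sketch (not part of the upstream source): for a CHECK
+# constraint named "ck_positive" on a hypothetical table "account", the
+# compiler above emits dialect-specific DDL:
+#
+#   MariaDB:  ALTER TABLE account DROP CONSTRAINT ck_positive
+#   MySQL:    ALTER TABLE account DROP CHECK ck_positive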
diff --git a/.venv/lib/python3.12/site-packages/alembic/ddl/oracle.py b/.venv/lib/python3.12/site-packages/alembic/ddl/oracle.py
new file mode 100644
index 00000000..eac99124
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/ddl/oracle.py
@@ -0,0 +1,202 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import re
+from typing import Any
+from typing import Optional
+from typing import TYPE_CHECKING
+
+from sqlalchemy.sql import sqltypes
+
+from .base import AddColumn
+from .base import alter_table
+from .base import ColumnComment
+from .base import ColumnDefault
+from .base import ColumnName
+from .base import ColumnNullable
+from .base import ColumnType
+from .base import format_column_name
+from .base import format_server_default
+from .base import format_table_name
+from .base import format_type
+from .base import IdentityColumnDefault
+from .base import RenameTable
+from .impl import DefaultImpl
+from ..util.sqla_compat import compiles
+
+if TYPE_CHECKING:
+    from sqlalchemy.dialects.oracle.base import OracleDDLCompiler
+    from sqlalchemy.engine.cursor import CursorResult
+    from sqlalchemy.sql.schema import Column
+
+
+class OracleImpl(DefaultImpl):
+    __dialect__ = "oracle"
+    transactional_ddl = False
+    batch_separator = "/"
+    command_terminator = ""
+    type_synonyms = DefaultImpl.type_synonyms + (
+        {"VARCHAR", "VARCHAR2"},
+        {"BIGINT", "INTEGER", "SMALLINT", "DECIMAL", "NUMERIC", "NUMBER"},
+        {"DOUBLE", "FLOAT", "DOUBLE_PRECISION"},
+    )
+    identity_attrs_ignore = ()
+
+    def __init__(self, *arg, **kw) -> None:
+        super().__init__(*arg, **kw)
+        self.batch_separator = self.context_opts.get(
+            "oracle_batch_separator", self.batch_separator
+        )
+
+    def _exec(self, construct: Any, *args, **kw) -> Optional[CursorResult]:
+        result = super()._exec(construct, *args, **kw)
+        if self.as_sql and self.batch_separator:
+            self.static_output(self.batch_separator)
+        return result
+
+    def compare_server_default(
+        self,
+        inspector_column,
+        metadata_column,
+        rendered_metadata_default,
+        rendered_inspector_default,
+    ):
+        if rendered_metadata_default is not None:
+            rendered_metadata_default = re.sub(
+                r"^\((.+)\)$", r"\1", rendered_metadata_default
+            )
+
+            rendered_metadata_default = re.sub(
+                r"^\"?'(.+)'\"?$", r"\1", rendered_metadata_default
+            )
+
+        if rendered_inspector_default is not None:
+            rendered_inspector_default = re.sub(
+                r"^\((.+)\)$", r"\1", rendered_inspector_default
+            )
+
+            rendered_inspector_default = re.sub(
+                r"^\"?'(.+)'\"?$", r"\1", rendered_inspector_default
+            )
+
+            rendered_inspector_default = rendered_inspector_default.strip()
+        return rendered_inspector_default != rendered_metadata_default
+
+    def emit_begin(self) -> None:
+        self._exec("SET TRANSACTION READ WRITE")
+
+    def emit_commit(self) -> None:
+        self._exec("COMMIT")
+
+
+@compiles(AddColumn, "oracle")
+def visit_add_column(
+    element: AddColumn, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        add_column(compiler, element.column, **kw),
+    )
+
+
+@compiles(ColumnNullable, "oracle")
+def visit_column_nullable(
+    element: ColumnNullable, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "NULL" if element.nullable else "NOT NULL",
+    )
+
+
+@compiles(ColumnType, "oracle")
+def visit_column_type(
+    element: ColumnType, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "%s" % format_type(compiler, element.type_),
+    )
+
+
+@compiles(ColumnName, "oracle")
+def visit_column_name(
+    element: ColumnName, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s RENAME COLUMN %s TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        format_column_name(compiler, element.newname),
+    )
+
+
+@compiles(ColumnDefault, "oracle")
+def visit_column_default(
+    element: ColumnDefault, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        (
+            "DEFAULT %s" % format_server_default(compiler, element.default)
+            if element.default is not None
+            else "DEFAULT NULL"
+        ),
+    )
+
+
+@compiles(ColumnComment, "oracle")
+def visit_column_comment(
+    element: ColumnComment, compiler: OracleDDLCompiler, **kw
+) -> str:
+    ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}"
+
+    comment = compiler.sql_compiler.render_literal_value(
+        (element.comment if element.comment is not None else ""),
+        sqltypes.String(),
+    )
+
+    return ddl.format(
+        table_name=element.table_name,
+        column_name=element.column_name,
+        comment=comment,
+    )
+
+
+@compiles(RenameTable, "oracle")
+def visit_rename_table(
+    element: RenameTable, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s RENAME TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_table_name(compiler, element.new_table_name, None),
+    )
+
+
+def alter_column(compiler: OracleDDLCompiler, name: str) -> str:
+    return "MODIFY %s" % format_column_name(compiler, name)
+
+
+def add_column(compiler: OracleDDLCompiler, column: Column[Any], **kw) -> str:
+    return "ADD %s" % compiler.get_column_specification(column, **kw)
+
+
+@compiles(IdentityColumnDefault, "oracle")
+def visit_identity_column(
+    element: IdentityColumnDefault, compiler: OracleDDLCompiler, **kw
+):
+    text = "%s %s " % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+    )
+    if element.default is None:
+        # drop identity
+        text += "DROP IDENTITY"
+        return text
+    else:
+        text += compiler.visit_identity_column(element.default)
+        return text
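+
+# Illustrative sketch (not part of the upstream source): for a
+# hypothetical identity column "id" on table "account", the construct
+# above compiles to DDL along the lines of:
+#
+#   ALTER TABLE account MODIFY id DROP IDENTITY
+#   ALTER TABLE account MODIFY id GENERATED BY DEFAULT AS IDENTITY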
diff --git a/.venv/lib/python3.12/site-packages/alembic/ddl/postgresql.py b/.venv/lib/python3.12/site-packages/alembic/ddl/postgresql.py
new file mode 100644
index 00000000..7cd8d35b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/ddl/postgresql.py
@@ -0,0 +1,850 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import logging
+import re
+from typing import Any
+from typing import cast
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import Sequence
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import Column
+from sqlalchemy import Float
+from sqlalchemy import Identity
+from sqlalchemy import literal_column
+from sqlalchemy import Numeric
+from sqlalchemy import select
+from sqlalchemy import text
+from sqlalchemy import types as sqltypes
+from sqlalchemy.dialects.postgresql import BIGINT
+from sqlalchemy.dialects.postgresql import ExcludeConstraint
+from sqlalchemy.dialects.postgresql import INTEGER
+from sqlalchemy.schema import CreateIndex
+from sqlalchemy.sql.elements import ColumnClause
+from sqlalchemy.sql.elements import TextClause
+from sqlalchemy.sql.functions import FunctionElement
+from sqlalchemy.types import NULLTYPE
+
+from .base import alter_column
+from .base import alter_table
+from .base import AlterColumn
+from .base import ColumnComment
+from .base import format_column_name
+from .base import format_table_name
+from .base import format_type
+from .base import IdentityColumnDefault
+from .base import RenameTable
+from .impl import ComparisonResult
+from .impl import DefaultImpl
+from .. import util
+from ..autogenerate import render
+from ..operations import ops
+from ..operations import schemaobj
+from ..operations.base import BatchOperations
+from ..operations.base import Operations
+from ..util import sqla_compat
+from ..util.sqla_compat import compiles
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy import Index
+    from sqlalchemy import UniqueConstraint
+    from sqlalchemy.dialects.postgresql.array import ARRAY
+    from sqlalchemy.dialects.postgresql.base import PGDDLCompiler
+    from sqlalchemy.dialects.postgresql.hstore import HSTORE
+    from sqlalchemy.dialects.postgresql.json import JSON
+    from sqlalchemy.dialects.postgresql.json import JSONB
+    from sqlalchemy.sql.elements import ClauseElement
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import quoted_name
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .base import _ServerDefault
+    from ..autogenerate.api import AutogenContext
+    from ..autogenerate.render import _f_name
+    from ..runtime.migration import MigrationContext
+
+
+log = logging.getLogger(__name__)
+
+
+class PostgresqlImpl(DefaultImpl):
+    __dialect__ = "postgresql"
+    transactional_ddl = True
+    type_synonyms = DefaultImpl.type_synonyms + (
+        {"FLOAT", "DOUBLE PRECISION"},
+    )
+
+    def create_index(self, index: Index, **kw: Any) -> None:
+        # this likely defaults to None if not present, so get()
+        # should normally not return the default value; being
+        # defensive in any case
+        postgresql_include = index.kwargs.get("postgresql_include", None) or ()
+        for col in postgresql_include:
+            if col not in index.table.c:  # type: ignore[union-attr]
+                index.table.append_column(  # type: ignore[union-attr]
+                    Column(col, sqltypes.NullType)
+                )
+        self._exec(CreateIndex(index, **kw))
+
+    def prep_table_for_batch(self, batch_impl, table):
+        for constraint in table.constraints:
+            if (
+                constraint.name is not None
+                and constraint.name in batch_impl.named_constraints
+            ):
+                self.drop_constraint(constraint)
+
+    def compare_server_default(
+        self,
+        inspector_column,
+        metadata_column,
+        rendered_metadata_default,
+        rendered_inspector_default,
+    ):
+        # don't do defaults for SERIAL columns
+        if (
+            metadata_column.primary_key
+            and metadata_column is metadata_column.table._autoincrement_column
+        ):
+            return False
+
+        conn_col_default = rendered_inspector_default
+
+        defaults_equal = conn_col_default == rendered_metadata_default
+        if defaults_equal:
+            return False
+
+        if None in (
+            conn_col_default,
+            rendered_metadata_default,
+            metadata_column.server_default,
+        ):
+            return not defaults_equal
+
+        metadata_default = metadata_column.server_default.arg
+
+        if isinstance(metadata_default, str):
+            if not isinstance(inspector_column.type, (Numeric, Float)):
+                metadata_default = re.sub(r"^'|'$", "", metadata_default)
+                metadata_default = f"'{metadata_default}'"
+
+            metadata_default = literal_column(metadata_default)
+
+        # run a real compare against the server
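+        # (illustrative note, not part of the upstream source: this
+        # conceptually executes "SELECT <reflected default> = <metadata
+        # default>", so textually different but semantically equal
+        # defaults compare as equal)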
+        conn = self.connection
+        assert conn is not None
+        return not conn.scalar(
+            select(literal_column(conn_col_default) == metadata_default)
+        )
+
+    def alter_column(  # type:ignore[override]
+        self,
+        table_name: str,
+        column_name: str,
+        nullable: Optional[bool] = None,
+        server_default: Union[_ServerDefault, Literal[False]] = False,
+        name: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        schema: Optional[str] = None,
+        autoincrement: Optional[bool] = None,
+        existing_type: Optional[TypeEngine] = None,
+        existing_server_default: Optional[_ServerDefault] = None,
+        existing_nullable: Optional[bool] = None,
+        existing_autoincrement: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        using = kw.pop("postgresql_using", None)
+
+        if using is not None and type_ is None:
+            raise util.CommandError(
+                "postgresql_using must be used with the type_ parameter"
+            )
+
+        if type_ is not None:
+            self._exec(
+                PostgresqlColumnType(
+                    table_name,
+                    column_name,
+                    type_,
+                    schema=schema,
+                    using=using,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                )
+            )
+
+        super().alter_column(
+            table_name,
+            column_name,
+            nullable=nullable,
+            server_default=server_default,
+            name=name,
+            schema=schema,
+            autoincrement=autoincrement,
+            existing_type=existing_type,
+            existing_server_default=existing_server_default,
+            existing_nullable=existing_nullable,
+            existing_autoincrement=existing_autoincrement,
+            **kw,
+        )
+
+    def autogen_column_reflect(self, inspector, table, column_info):
+        if column_info.get("default") and isinstance(
+            column_info["type"], (INTEGER, BIGINT)
+        ):
+            seq_match = re.match(
+                r"nextval\('(.+?)'::regclass\)", column_info["default"]
+            )
+            if seq_match:
+                info = sqla_compat._exec_on_inspector(
+                    inspector,
+                    text(
+                        "select c.relname, a.attname "
+                        "from pg_class as c join "
+                        "pg_depend d on d.objid=c.oid and "
+                        "d.classid='pg_class'::regclass and "
+                        "d.refclassid='pg_class'::regclass "
+                        "join pg_class t on t.oid=d.refobjid "
+                        "join pg_attribute a on a.attrelid=t.oid and "
+                        "a.attnum=d.refobjsubid "
+                        "where c.relkind='S' and "
+                        "c.oid=cast(:seqname as regclass)"
+                    ),
+                    seqname=seq_match.group(1),
+                ).first()
+                if info:
+                    seqname, colname = info
+                    if colname == column_info["name"]:
+                        log.info(
+                            "Detected sequence named '%s' as "
+                            "owned by integer column '%s(%s)', "
+                            "assuming SERIAL and omitting",
+                            seqname,
+                            table.name,
+                            colname,
+                        )
+                        # sequence, and the owner is this column,
+                        # it's a SERIAL - whack it!
+                        del column_info["default"]
+
+    def correct_for_autogen_constraints(
+        self,
+        conn_unique_constraints,
+        conn_indexes,
+        metadata_unique_constraints,
+        metadata_indexes,
+    ):
+        doubled_constraints = {
+            index
+            for index in conn_indexes
+            if index.info.get("duplicates_constraint")
+        }
+
+        for ix in doubled_constraints:
+            conn_indexes.remove(ix)
+
+        if not sqla_compat.sqla_2:
+            self._skip_functional_indexes(metadata_indexes, conn_indexes)
+
+    # pg behavior regarding modifiers
+    # | # | compiled sql     | returned sql     | regexp. group is removed |
+    # | - | ---------------- | -----------------| ------------------------ |
+    # | 1 | nulls first      | nulls first      | -                        |
+    # | 2 | nulls last       |                  | (?<! desc)( nulls last)$ |
+    # | 3 | asc              |                  | ( asc)$                  |
+    # | 4 | asc nulls first  | nulls first      | ( asc) nulls first$      |
+    # | 5 | asc nulls last   |                  | ( asc nulls last)$       |
+    # | 6 | desc             | desc             | -                        |
+    # | 7 | desc nulls first | desc             | desc( nulls first)$      |
+    # | 8 | desc nulls last  | desc nulls last  | -                        |
+    _default_modifiers_re = (  # order of case 2 and 5 matters
+        re.compile("( asc nulls last)$"),  # case 5
+        re.compile("(?<! desc)( nulls last)$"),  # case 2
+        re.compile("( asc)$"),  # case 3
+        re.compile("( asc) nulls first$"),  # case 4
+        re.compile(" desc( nulls first)$"),  # case 7
+    )
+
+    def _cleanup_index_expr(self, index: Index, expr: str) -> str:
+        expr = expr.lower().replace('"', "").replace("'", "")
+        if index.table is not None:
+            # should not be needed, since include_table=False is in compile
+            expr = expr.replace(f"{index.table.name.lower()}.", "")
+
+        if "::" in expr:
+            # strip :: cast. types can have spaces in them
+            expr = re.sub(r"(::[\w ]+\w)", "", expr)
+
+        while expr and expr[0] == "(" and expr[-1] == ")":
+            expr = expr[1:-1]
+
+        # NOTE: when parsing the connection expression this cleanup could
+        # be skipped
+        for rs in self._default_modifiers_re:
+            if match := rs.search(expr):
+                start, end = match.span(1)
+                expr = expr[:start] + expr[end:]
+                break
+
+        while expr and expr[0] == "(" and expr[-1] == ")":
+            expr = expr[1:-1]
+
+        # strip casts
+        cast_re = re.compile(r"cast\s*\(")
+        if cast_re.match(expr):
+            expr = cast_re.sub("", expr)
+            # remove the as type
+            expr = re.sub(r"as\s+[^)]+\)", "", expr)
+        # remove spaces
+        expr = expr.replace(" ", "")
+        return expr
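+
+    # illustrative note (not part of the upstream source): examples of
+    # the normalization above, for hypothetical inputs:
+    #
+    #   '"name" asc'              -> 'name'
+    #   'lower(name::text)'       -> 'lower(name)'
+    #   'email desc nulls first'  -> 'emaildesc'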
+
+    def _dialect_options(
+        self, item: Union[Index, UniqueConstraint]
+    ) -> Tuple[Any, ...]:
+        # only the positive case is returned by sqlalchemy reflection so
+        # None and False are treated the same
+        if item.dialect_kwargs.get("postgresql_nulls_not_distinct"):
+            return ("nulls_not_distinct",)
+        return ()
+
+    def compare_indexes(
+        self,
+        metadata_index: Index,
+        reflected_index: Index,
+    ) -> ComparisonResult:
+        msg = []
+        unique_msg = self._compare_index_unique(
+            metadata_index, reflected_index
+        )
+        if unique_msg:
+            msg.append(unique_msg)
+        m_exprs = metadata_index.expressions
+        r_exprs = reflected_index.expressions
+        if len(m_exprs) != len(r_exprs):
+            msg.append(f"expression number {len(r_exprs)} to {len(m_exprs)}")
+        if msg:
+            # no point going further, return early
+            return ComparisonResult.Different(msg)
+        skip = []
+        for pos, (m_e, r_e) in enumerate(zip(m_exprs, r_exprs), 1):
+            m_compile = self._compile_element(m_e)
+            m_text = self._cleanup_index_expr(metadata_index, m_compile)
+            # print(f"META ORIG: {m_compile!r} CLEANUP: {m_text!r}")
+            r_compile = self._compile_element(r_e)
+            r_text = self._cleanup_index_expr(metadata_index, r_compile)
+            # print(f"CONN ORIG: {r_compile!r} CLEANUP: {r_text!r}")
+            if m_text == r_text:
+                continue  # these expressions are equal
+            elif m_compile.strip().endswith("_ops") and (
+                " " in m_compile or ")" in m_compile  # is an expression
+            ):
+                skip.append(
+                    f"expression #{pos} {m_compile!r} detected "
+                    "as including operator clause."
+                )
+                util.warn(
+                    f"Expression #{pos} {m_compile!r} in index "
+                    f"{reflected_index.name!r} detected to include "
+                    "an operator clause. Expression compare cannot proceed. "
+                    "Please move the operator clause to the "
+                    "``postgresql_ops`` dict to enable proper compare "
+                    "of the index expressions: "
+                    "https://docs.sqlalchemy.org/en/latest/dialects/postgresql.html#operator-classes",  # noqa: E501
+                )
+            else:
+                msg.append(f"expression #{pos} {r_compile!r} to {m_compile!r}")
+
+        m_options = self._dialect_options(metadata_index)
+        r_options = self._dialect_options(reflected_index)
+        if m_options != r_options:
+            msg.append(f"options {r_options} to {m_options}")
+
+        if msg:
+            return ComparisonResult.Different(msg)
+        elif skip:
+            # only skip the index when no other changes were detected
+            return ComparisonResult.Skip(skip)
+        else:
+            return ComparisonResult.Equal()
+
+    def compare_unique_constraint(
+        self,
+        metadata_constraint: UniqueConstraint,
+        reflected_constraint: UniqueConstraint,
+    ) -> ComparisonResult:
+        metadata_tup = self._create_metadata_constraint_sig(
+            metadata_constraint
+        )
+        reflected_tup = self._create_reflected_constraint_sig(
+            reflected_constraint
+        )
+
+        meta_sig = metadata_tup.unnamed
+        conn_sig = reflected_tup.unnamed
+        if conn_sig != meta_sig:
+            return ComparisonResult.Different(
+                f"expression {conn_sig} to {meta_sig}"
+            )
+
+        metadata_do = self._dialect_options(metadata_tup.const)
+        conn_do = self._dialect_options(reflected_tup.const)
+        if metadata_do != conn_do:
+            return ComparisonResult.Different(
+                f"expression {conn_do} to {metadata_do}"
+            )
+
+        return ComparisonResult.Equal()
+
+    def adjust_reflected_dialect_options(
+        self, reflected_options: Dict[str, Any], kind: str
+    ) -> Dict[str, Any]:
+        options: Dict[str, Any]
+        options = reflected_options.get("dialect_options", {}).copy()
+        if not options.get("postgresql_include"):
+            options.pop("postgresql_include", None)
+        return options
+
+    def _compile_element(self, element: Union[ClauseElement, str]) -> str:
+        if isinstance(element, str):
+            return element
+        return element.compile(
+            dialect=self.dialect,
+            compile_kwargs={"literal_binds": True, "include_table": False},
+        ).string
+
+    def render_ddl_sql_expr(
+        self,
+        expr: ClauseElement,
+        is_server_default: bool = False,
+        is_index: bool = False,
+        **kw: Any,
+    ) -> str:
+        """Render a SQL expression that is typically a server default,
+        index expression, etc.
+
+        """
+
+        # apply self_group to index expressions;
+        # see https://github.com/sqlalchemy/sqlalchemy/blob/
+        # 82fa95cfce070fab401d020c6e6e4a6a96cc2578/
+        # lib/sqlalchemy/dialects/postgresql/base.py#L2261
+        if is_index and not isinstance(expr, ColumnClause):
+            expr = expr.self_group()
+
+        return super().render_ddl_sql_expr(
+            expr, is_server_default=is_server_default, is_index=is_index, **kw
+        )
+
+    def render_type(
+        self, type_: TypeEngine, autogen_context: AutogenContext
+    ) -> Union[str, Literal[False]]:
+        mod = type(type_).__module__
+        if not mod.startswith("sqlalchemy.dialects.postgresql"):
+            return False
+
+        if hasattr(self, "_render_%s_type" % type_.__visit_name__):
+            meth = getattr(self, "_render_%s_type" % type_.__visit_name__)
+            return meth(type_, autogen_context)
+
+        return False
+
+    def _render_HSTORE_type(
+        self, type_: HSTORE, autogen_context: AutogenContext
+    ) -> str:
+        return cast(
+            str,
+            render._render_type_w_subtype(
+                type_, autogen_context, "text_type", r"(.+?\(.*text_type=)"
+            ),
+        )
+
+    def _render_ARRAY_type(
+        self, type_: ARRAY, autogen_context: AutogenContext
+    ) -> str:
+        return cast(
+            str,
+            render._render_type_w_subtype(
+                type_, autogen_context, "item_type", r"(.+?\()"
+            ),
+        )
+
+    def _render_JSON_type(
+        self, type_: JSON, autogen_context: AutogenContext
+    ) -> str:
+        return cast(
+            str,
+            render._render_type_w_subtype(
+                type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)"
+            ),
+        )
+
+    def _render_JSONB_type(
+        self, type_: JSONB, autogen_context: AutogenContext
+    ) -> str:
+        return cast(
+            str,
+            render._render_type_w_subtype(
+                type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)"
+            ),
+        )
+
+
+class PostgresqlColumnType(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, type_: TypeEngine, **kw
+    ) -> None:
+        using = kw.pop("using", None)
+        super().__init__(name, column_name, **kw)
+        self.type_ = sqltypes.to_instance(type_)
+        self.using = using
+
+
+@compiles(RenameTable, "postgresql")
+def visit_rename_table(
+    element: RenameTable, compiler: PGDDLCompiler, **kw
+) -> str:
+    return "%s RENAME TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_table_name(compiler, element.new_table_name, None),
+    )
+
+
+@compiles(PostgresqlColumnType, "postgresql")
+def visit_column_type(
+    element: PostgresqlColumnType, compiler: PGDDLCompiler, **kw
+) -> str:
+    return "%s %s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "TYPE %s" % format_type(compiler, element.type_),
+        "USING %s" % element.using if element.using else "",
+    )
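+
+# Illustrative sketch (not part of the upstream source): with a USING
+# clause, e.g. alter_column("account", "id", type_=sa.Integer(),
+# postgresql_using="id::integer"), the construct above compiles to:
+#
+#   ALTER TABLE account ALTER COLUMN id TYPE INTEGER USING id::integer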
+
+
+@compiles(ColumnComment, "postgresql")
+def visit_column_comment(
+    element: ColumnComment, compiler: PGDDLCompiler, **kw
+) -> str:
+    ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}"
+    comment = (
+        compiler.sql_compiler.render_literal_value(
+            element.comment, sqltypes.String()
+        )
+        if element.comment is not None
+        else "NULL"
+    )
+
+    return ddl.format(
+        table_name=format_table_name(
+            compiler, element.table_name, element.schema
+        ),
+        column_name=format_column_name(compiler, element.column_name),
+        comment=comment,
+    )
+
+
+@compiles(IdentityColumnDefault, "postgresql")
+def visit_identity_column(
+    element: IdentityColumnDefault, compiler: PGDDLCompiler, **kw
+):
+    text = "%s %s " % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+    )
+    if element.default is None:
+        # drop identity
+        text += "DROP IDENTITY"
+        return text
+    elif element.existing_server_default is None:
+        # add identity options
+        text += "ADD "
+        text += compiler.visit_identity_column(element.default)
+        return text
+    else:
+        # alter identity
+        diff, _, _ = element.impl._compare_identity_default(
+            element.default, element.existing_server_default
+        )
+        identity = element.default
+        for attr in sorted(diff):
+            if attr == "always":
+                text += "SET GENERATED %s " % (
+                    "ALWAYS" if identity.always else "BY DEFAULT"
+                )
+            else:
+                text += "SET %s " % compiler.get_identity_options(
+                    Identity(**{attr: getattr(identity, attr)})
+                )
+        return text
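+
+# Illustrative sketch (not part of the upstream source): altering only
+# the "start" option of an existing identity on a hypothetical column
+# "id" emits something like:
+#
+#   ALTER TABLE account ALTER COLUMN id SET START WITH 10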
+
+
+@Operations.register_operation("create_exclude_constraint")
+@BatchOperations.register_operation(
+    "create_exclude_constraint", "batch_create_exclude_constraint"
+)
+@ops.AddConstraintOp.register_add_constraint("exclude_constraint")
+class CreateExcludeConstraintOp(ops.AddConstraintOp):
+    """Represent a create exclude constraint operation."""
+
+    constraint_type = "exclude"
+
+    def __init__(
+        self,
+        constraint_name: sqla_compat._ConstraintName,
+        table_name: Union[str, quoted_name],
+        elements: Union[
+            Sequence[Tuple[str, str]],
+            Sequence[Tuple[ColumnClause[Any], str]],
+        ],
+        where: Optional[Union[ColumnElement[bool], str]] = None,
+        schema: Optional[str] = None,
+        _orig_constraint: Optional[ExcludeConstraint] = None,
+        **kw,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.table_name = table_name
+        self.elements = elements
+        self.where = where
+        self.schema = schema
+        self._orig_constraint = _orig_constraint
+        self.kw = kw
+
+    @classmethod
+    def from_constraint(  # type:ignore[override]
+        cls, constraint: ExcludeConstraint
+    ) -> CreateExcludeConstraintOp:
+        constraint_table = sqla_compat._table_for_constraint(constraint)
+        return cls(
+            constraint.name,
+            constraint_table.name,
+            [  # type: ignore
+                (expr, op) for expr, name, op in constraint._render_exprs
+            ],
+            where=cast("ColumnElement[bool] | None", constraint.where),
+            schema=constraint_table.schema,
+            _orig_constraint=constraint,
+            deferrable=constraint.deferrable,
+            initially=constraint.initially,
+            using=constraint.using,
+        )
+
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> ExcludeConstraint:
+        if self._orig_constraint is not None:
+            return self._orig_constraint
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        t = schema_obj.table(self.table_name, schema=self.schema)
+        excl = ExcludeConstraint(
+            *self.elements,
+            name=self.constraint_name,
+            where=self.where,
+            **self.kw,
+        )
+        for (
+            expr,
+            name,
+            oper,
+        ) in excl._render_exprs:
+            t.append_column(Column(name, NULLTYPE))
+        t.append_constraint(excl)
+        return excl
+
+    @classmethod
+    def create_exclude_constraint(
+        cls,
+        operations: Operations,
+        constraint_name: str,
+        table_name: str,
+        *elements: Any,
+        **kw: Any,
+    ) -> Optional[Table]:
+        """Issue an alter to create an EXCLUDE constraint using the
+        current migration context.
+
+        .. note::  This method is Postgresql specific, and additionally
+           requires at least SQLAlchemy 1.0.
+
+        e.g.::
+
+            from alembic import op
+
+            op.create_exclude_constraint(
+                "user_excl",
+                "user",
+                ("period", "&&"),
+                ("group", "="),
+                where=("group != 'some group'"),
+            )
+
+        Note that the expressions work the same way as that of
+        the ``ExcludeConstraint`` object itself; if plain strings are
+        passed, quoting rules must be applied manually.
+
+        :param name: Name of the constraint.
+        :param table_name: String name of the source table.
+        :param elements: exclude conditions.
+        :param where: SQL expression or SQL string with optional WHERE
+         clause.
+        :param deferrable: optional bool. If set, emit DEFERRABLE or
+         NOT DEFERRABLE when issuing DDL for this constraint.
+        :param initially: optional string. If set, emit INITIALLY <value>
+         when issuing DDL for this constraint.
+        :param schema: Optional schema name to operate within.
+
+        """
+        op = cls(constraint_name, table_name, elements, **kw)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_exclude_constraint(
+        cls,
+        operations: BatchOperations,
+        constraint_name: str,
+        *elements: Any,
+        **kw: Any,
+    ) -> Optional[Table]:
+        """Issue a "create exclude constraint" instruction using the
+        current batch migration context.
+
+        .. note::  This method is Postgresql specific, and additionally
+           requires at least SQLAlchemy 1.0.
+
+        .. seealso::
+
+            :meth:`.Operations.create_exclude_constraint`
+
+        """
+        kw["schema"] = operations.impl.schema
+        op = cls(constraint_name, operations.impl.table_name, elements, **kw)
+        return operations.invoke(op)
+
+
+@render.renderers.dispatch_for(CreateExcludeConstraintOp)
+def _add_exclude_constraint(
+    autogen_context: AutogenContext, op: CreateExcludeConstraintOp
+) -> str:
+    return _exclude_constraint(op.to_constraint(), autogen_context, alter=True)
+
+
+@render._constraint_renderers.dispatch_for(ExcludeConstraint)
+def _render_inline_exclude_constraint(
+    constraint: ExcludeConstraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: MetaData,
+) -> str:
+    rendered = render._user_defined_render(
+        "exclude", constraint, autogen_context
+    )
+    if rendered is not False:
+        return rendered
+
+    return _exclude_constraint(constraint, autogen_context, False)
+
+
+def _postgresql_autogenerate_prefix(autogen_context: AutogenContext) -> str:
+    imports = autogen_context.imports
+    if imports is not None:
+        imports.add("from sqlalchemy.dialects import postgresql")
+    return "postgresql."
+
+
+def _exclude_constraint(
+    constraint: ExcludeConstraint,
+    autogen_context: AutogenContext,
+    alter: bool,
+) -> str:
+    opts: List[Tuple[str, Union[quoted_name, str, _f_name, None]]] = []
+
+    has_batch = autogen_context._has_batch
+
+    if constraint.deferrable:
+        opts.append(("deferrable", str(constraint.deferrable)))
+    if constraint.initially:
+        opts.append(("initially", str(constraint.initially)))
+    if constraint.using:
+        opts.append(("using", str(constraint.using)))
+    if not has_batch and alter and constraint.table.schema:
+        opts.append(("schema", render._ident(constraint.table.schema)))
+    if not alter and constraint.name:
+        opts.append(
+            ("name", render._render_gen_name(autogen_context, constraint.name))
+        )
+
+    def do_expr_where_opts():
+        args = [
+            "(%s, %r)"
+            % (
+                _render_potential_column(
+                    sqltext,  # type:ignore[arg-type]
+                    autogen_context,
+                ),
+                opstring,
+            )
+            for sqltext, name, opstring in constraint._render_exprs
+        ]
+        if constraint.where is not None:
+            args.append(
+                "where=%s"
+                % render._render_potential_expr(
+                    constraint.where, autogen_context
+                )
+            )
+        args.extend(["%s=%r" % (k, v) for k, v in opts])
+        return args
+
+    if alter:
+        args = [
+            repr(render._render_gen_name(autogen_context, constraint.name))
+        ]
+        if not has_batch:
+            args += [repr(render._ident(constraint.table.name))]
+        args.extend(do_expr_where_opts())
+        return "%(prefix)screate_exclude_constraint(%(args)s)" % {
+            "prefix": render._alembic_autogenerate_prefix(autogen_context),
+            "args": ", ".join(args),
+        }
+    else:
+        args = do_expr_where_opts()
+        return "%(prefix)sExcludeConstraint(%(args)s)" % {
+            "prefix": _postgresql_autogenerate_prefix(autogen_context),
+            "args": ", ".join(args),
+        }
+
+
+def _render_potential_column(
+    value: Union[
+        ColumnClause[Any], Column[Any], TextClause, FunctionElement[Any]
+    ],
+    autogen_context: AutogenContext,
+) -> str:
+    if isinstance(value, ColumnClause):
+        if value.is_literal:
+            # like literal_column("int8range(from, to)") in ExcludeConstraint
+            template = "%(prefix)sliteral_column(%(name)r)"
+        else:
+            template = "%(prefix)scolumn(%(name)r)"
+
+        return template % {
+            "prefix": render._sqlalchemy_autogenerate_prefix(autogen_context),
+            "name": value.name,
+        }
+    else:
+        return render._render_potential_expr(
+            value,
+            autogen_context,
+            wrap_in_element=isinstance(value, (TextClause, FunctionElement)),
+        )
diff --git a/.venv/lib/python3.12/site-packages/alembic/ddl/sqlite.py b/.venv/lib/python3.12/site-packages/alembic/ddl/sqlite.py
new file mode 100644
index 00000000..7c6fb20c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/ddl/sqlite.py
@@ -0,0 +1,237 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import re
+from typing import Any
+from typing import Dict
+from typing import Optional
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import cast
+from sqlalchemy import Computed
+from sqlalchemy import JSON
+from sqlalchemy import schema
+from sqlalchemy import sql
+
+from .base import alter_table
+from .base import ColumnName
+from .base import format_column_name
+from .base import format_table_name
+from .base import RenameTable
+from .impl import DefaultImpl
+from .. import util
+from ..util.sqla_compat import compiles
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine.reflection import Inspector
+    from sqlalchemy.sql.compiler import DDLCompiler
+    from sqlalchemy.sql.elements import Cast
+    from sqlalchemy.sql.elements import ClauseElement
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from ..operations.batch import BatchOperationsImpl
+
+
+class SQLiteImpl(DefaultImpl):
+    __dialect__ = "sqlite"
+
+    transactional_ddl = False
+    """SQLite supports transactional DDL, but pysqlite does not:
+    see: http://bugs.python.org/issue10740
+    """
+
+    def requires_recreate_in_batch(
+        self, batch_op: BatchOperationsImpl
+    ) -> bool:
+        """Return True if the given :class:`.BatchOperationsImpl`
+        would need the table to be recreated and copied in order to
+        proceed.
+
+        Normally, only returns True on SQLite when operations other
+        than add_column are present.
+
+        """
+        for op in batch_op.batch:
+            if op[0] == "add_column":
+                col = op[1][1]
+                if isinstance(
+                    col.server_default, schema.DefaultClause
+                ) and isinstance(col.server_default.arg, sql.ClauseElement):
+                    return True
+                elif (
+                    isinstance(col.server_default, Computed)
+                    and col.server_default.persisted
+                ):
+                    return True
+            elif op[0] not in ("create_index", "drop_index"):
+                return True
+        else:
+            return False
+
+    def add_constraint(self, const: Constraint):
+        # attempt to distinguish between an
+        # auto-gen constraint and an explicit one
+        if const._create_rule is None:
+            raise NotImplementedError(
+                "No support for ALTER of constraints in SQLite dialect. "
+                "Please refer to the batch mode feature which allows for "
+                "SQLite migrations using a copy-and-move strategy."
+            )
+        elif const._create_rule(self):
+            util.warn(
+                "Skipping unsupported ALTER for "
+                "creation of implicit constraint. "
+                "Please refer to the batch mode feature which allows for "
+                "SQLite migrations using a copy-and-move strategy."
+            )
+
+    def drop_constraint(self, const: Constraint):
+        if const._create_rule is None:
+            raise NotImplementedError(
+                "No support for ALTER of constraints in SQLite dialect. "
+                "Please refer to the batch mode feature which allows for "
+                "SQLite migrations using a copy-and-move strategy."
+            )
+
+    def compare_server_default(
+        self,
+        inspector_column: Column[Any],
+        metadata_column: Column[Any],
+        rendered_metadata_default: Optional[str],
+        rendered_inspector_default: Optional[str],
+    ) -> bool:
+        if rendered_metadata_default is not None:
+            rendered_metadata_default = re.sub(
+                r"^\((.+)\)$", r"\1", rendered_metadata_default
+            )
+
+            rendered_metadata_default = re.sub(
+                r"^\"?'(.+)'\"?$", r"\1", rendered_metadata_default
+            )
+
+        if rendered_inspector_default is not None:
+            rendered_inspector_default = re.sub(
+                r"^\((.+)\)$", r"\1", rendered_inspector_default
+            )
+
+            rendered_inspector_default = re.sub(
+                r"^\"?'(.+)'\"?$", r"\1", rendered_inspector_default
+            )
+
+        return rendered_inspector_default != rendered_metadata_default
+
+    def _guess_if_default_is_unparenthesized_sql_expr(
+        self, expr: Optional[str]
+    ) -> bool:
+        """Determine if a server default is a SQL expression or a constant.
+
+        There are too many assertions that expect server defaults to
+        round-trip identically without parentheses added, so we add
+        parens only in very specific cases.
+
+        """
+        if not expr:
+            return False
+        elif re.match(r"^[0-9\.]$", expr):
+            return False
+        elif re.match(r"^'.+'$", expr):
+            return False
+        elif re.match(r"^\(.+\)$", expr):
+            return False
+        else:
+            return True
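+
+    # illustrative note (not part of the upstream source): under the
+    # heuristic above, "5", "'abc'" and "(1 + 2)" are classified as
+    # constants, while "CURRENT_TIMESTAMP" or "1 + 2" are classified
+    # as SQL expressions that require parenthesizing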
+
+    def autogen_column_reflect(
+        self,
+        inspector: Inspector,
+        table: Table,
+        column_info: Dict[str, Any],
+    ) -> None:
+        # SQLite expression defaults require parenthesis when sent
+        # as DDL
+        if self._guess_if_default_is_unparenthesized_sql_expr(
+            column_info.get("default", None)
+        ):
+            column_info["default"] = "(%s)" % (column_info["default"],)
+
+    def render_ddl_sql_expr(
+        self, expr: ClauseElement, is_server_default: bool = False, **kw
+    ) -> str:
+        # SQLite expression defaults require parenthesis when sent
+        # as DDL
+        str_expr = super().render_ddl_sql_expr(
+            expr, is_server_default=is_server_default, **kw
+        )
+
+        if (
+            is_server_default
+            and self._guess_if_default_is_unparenthesized_sql_expr(str_expr)
+        ):
+            str_expr = "(%s)" % (str_expr,)
+        return str_expr
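+
+    # illustrative note (not part of the upstream source): a server
+    # default rendered as CURRENT_TIMESTAMP therefore becomes
+    # (CURRENT_TIMESTAMP) in the emitted DDL, matching SQLite's
+    # requirement that expression defaults be parenthesized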
+
+    def cast_for_batch_migrate(
+        self,
+        existing: Column[Any],
+        existing_transfer: Dict[str, Union[TypeEngine, Cast]],
+        new_type: TypeEngine,
+    ) -> None:
+        if (
+            existing.type._type_affinity is not new_type._type_affinity
+            and not isinstance(new_type, JSON)
+        ):
+            existing_transfer["expr"] = cast(
+                existing_transfer["expr"], new_type
+            )
+
+    def correct_for_autogen_constraints(
+        self,
+        conn_unique_constraints,
+        conn_indexes,
+        metadata_unique_constraints,
+        metadata_indexes,
+    ):
+        self._skip_functional_indexes(metadata_indexes, conn_indexes)
+
+
+@compiles(RenameTable, "sqlite")
+def visit_rename_table(
+    element: RenameTable, compiler: DDLCompiler, **kw
+) -> str:
+    return "%s RENAME TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_table_name(compiler, element.new_table_name, None),
+    )
+
+
+@compiles(ColumnName, "sqlite")
+def visit_column_name(element: ColumnName, compiler: DDLCompiler, **kw) -> str:
+    return "%s RENAME COLUMN %s TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        format_column_name(compiler, element.newname),
+    )
+
+
+# @compiles(AddColumn, 'sqlite')
+# def visit_add_column(element, compiler, **kw):
+#    return "%s %s" % (
+#        alter_table(compiler, element.table_name, element.schema),
+#        add_column(compiler, element.column, **kw)
+#    )
+
+
+# def add_column(compiler, column, **kw):
+#    text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw)
+# need to modify SQLAlchemy so that the CHECK associated with a Boolean
+# or Enum gets placed as part of the column constraints, not the Table
+# see ticket 98
+#    for const in column.constraints:
+#        text += compiler.process(AddConstraint(const))
+#    return text
diff --git a/.venv/lib/python3.12/site-packages/alembic/environment.py b/.venv/lib/python3.12/site-packages/alembic/environment.py
new file mode 100644
index 00000000..adfc93eb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/environment.py
@@ -0,0 +1 @@
+from .runtime.environment import *  # noqa
diff --git a/.venv/lib/python3.12/site-packages/alembic/migration.py b/.venv/lib/python3.12/site-packages/alembic/migration.py
new file mode 100644
index 00000000..02626e2c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/migration.py
@@ -0,0 +1 @@
+from .runtime.migration import *  # noqa
diff --git a/.venv/lib/python3.12/site-packages/alembic/op.py b/.venv/lib/python3.12/site-packages/alembic/op.py
new file mode 100644
index 00000000..f3f5fac0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/op.py
@@ -0,0 +1,5 @@
+from .operations.base import Operations
+
+# create proxy functions for
+# each method on the Operations class.
+Operations.create_module_class_proxy(globals(), locals())
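+
+# Illustrative sketch (not part of the upstream source): the proxying
+# above is what allows module-level calls inside migration scripts; a
+# typical (hypothetical) usage is:
+#
+#   from alembic import op
+#   import sqlalchemy as sa
+#
+#   def upgrade():
+#       op.add_column("account", sa.Column("name", sa.String(50)))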
diff --git a/.venv/lib/python3.12/site-packages/alembic/op.pyi b/.venv/lib/python3.12/site-packages/alembic/op.pyi
new file mode 100644
index 00000000..d86bef46
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/op.pyi
@@ -0,0 +1,1337 @@
+# ### the stubs in this file are generated by tools/write_pyi.py - do not edit ###
+# ### imports are manually managed
+from __future__ import annotations
+
+from contextlib import contextmanager
+from typing import Any
+from typing import Awaitable
+from typing import Callable
+from typing import Dict
+from typing import Iterator
+from typing import List
+from typing import Literal
+from typing import Mapping
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine import Connection
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import conv
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.expression import TableClause
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Computed
+    from sqlalchemy.sql.schema import Identity
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.type_api import TypeEngine
+    from sqlalchemy.util import immutabledict
+
+    from .operations.base import BatchOperations
+    from .operations.ops import AddColumnOp
+    from .operations.ops import AddConstraintOp
+    from .operations.ops import AlterColumnOp
+    from .operations.ops import AlterTableOp
+    from .operations.ops import BulkInsertOp
+    from .operations.ops import CreateIndexOp
+    from .operations.ops import CreateTableCommentOp
+    from .operations.ops import CreateTableOp
+    from .operations.ops import DropColumnOp
+    from .operations.ops import DropConstraintOp
+    from .operations.ops import DropIndexOp
+    from .operations.ops import DropTableCommentOp
+    from .operations.ops import DropTableOp
+    from .operations.ops import ExecuteSQLOp
+    from .operations.ops import MigrateOperation
+    from .runtime.migration import MigrationContext
+    from .util.sqla_compat import _literal_bindparam
+
+_T = TypeVar("_T")
+_C = TypeVar("_C", bound=Callable[..., Any])
+
+### end imports ###
+
+def add_column(
+    table_name: str, column: Column[Any], *, schema: Optional[str] = None
+) -> None:
+    """Issue an "add column" instruction using the current
+    migration context.
+
+    e.g.::
+
+        from alembic import op
+        from sqlalchemy import Column, String
+
+        op.add_column("organization", Column("name", String()))
+
+    The :meth:`.Operations.add_column` method typically corresponds
+    to the SQL command "ALTER TABLE... ADD COLUMN".    Within the scope
+    of this command, the column's name, datatype, nullability,
+    and optional server-generated defaults may be indicated.
+
+    .. note::
+
+        With the exception of NOT NULL constraints or single-column FOREIGN
+        KEY constraints, other kinds of constraints such as PRIMARY KEY,
+        UNIQUE or CHECK constraints **cannot** be generated using this
+        method; for these constraints, refer to operations such as
+        :meth:`.Operations.create_primary_key` and
+        :meth:`.Operations.create_check_constraint`. In particular, the
+        following :class:`~sqlalchemy.schema.Column` parameters are
+        **ignored**:
+
+        * :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases
+          typically do not support an ALTER operation that can add
+          individual columns one at a time to an existing primary key
+          constraint, therefore it's less ambiguous to use the
+          :meth:`.Operations.create_primary_key` method, which assumes no
+          existing primary key constraint is present.
+        * :paramref:`~sqlalchemy.schema.Column.unique` - use the
+          :meth:`.Operations.create_unique_constraint` method
+        * :paramref:`~sqlalchemy.schema.Column.index` - use the
+          :meth:`.Operations.create_index` method
+
+
+    The provided :class:`~sqlalchemy.schema.Column` object may include a
+    :class:`~sqlalchemy.schema.ForeignKey` constraint directive,
+    referencing a remote table name. For this specific type of constraint,
+    Alembic will automatically emit a second ALTER statement in order to
+    add the single-column FOREIGN KEY constraint separately::
+
+        from alembic import op
+        from sqlalchemy import Column, INTEGER, ForeignKey
+
+        op.add_column(
+            "organization",
+            Column("account_id", INTEGER, ForeignKey("accounts.id")),
+        )
+
+    The column argument passed to :meth:`.Operations.add_column` is a
+    :class:`~sqlalchemy.schema.Column` construct, used in the same way it's
+    used in SQLAlchemy. In particular, values or functions to be indicated
+    as producing the column's default value on the database side are
+    specified using the ``server_default`` parameter, and not ``default``
+    which only specifies Python-side defaults::
+
+        from alembic import op
+        from sqlalchemy import Column, TIMESTAMP, func
+
+        # specify "DEFAULT NOW" along with the column add
+        op.add_column(
+            "account",
+            Column("timestamp", TIMESTAMP, server_default=func.now()),
+        )
+
+    :param table_name: String name of the parent table.
+    :param column: a :class:`sqlalchemy.schema.Column` object
+     representing the new column.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def alter_column(
+    table_name: str,
+    column_name: str,
+    *,
+    nullable: Optional[bool] = None,
+    comment: Union[str, Literal[False], None] = False,
+    server_default: Any = False,
+    new_column_name: Optional[str] = None,
+    type_: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None,
+    existing_type: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None,
+    existing_server_default: Union[
+        str, bool, Identity, Computed, None
+    ] = False,
+    existing_nullable: Optional[bool] = None,
+    existing_comment: Optional[str] = None,
+    schema: Optional[str] = None,
+    **kw: Any,
+) -> None:
+    r"""Issue an "alter column" instruction using the
+    current migration context.
+
+    Generally, only that aspect of the column which
+    is being changed, i.e. name, type, nullability,
+    default, needs to be specified.  Multiple changes
+    can also be specified at once and the backend should
+    "do the right thing", emitting each change either
+    separately or together as the backend allows.
+
+    MySQL has special requirements here, since MySQL
+    cannot ALTER a column without a full specification.
+    When producing MySQL-compatible migration files,
+    it is recommended that the ``existing_type``,
+    ``existing_server_default``, and ``existing_nullable``
+    parameters be present, if not being altered.
+
+    Type changes which are against the SQLAlchemy
+    "schema" types :class:`~sqlalchemy.types.Boolean`
+    and  :class:`~sqlalchemy.types.Enum` may also
+    add or drop constraints which accompany those
+    types on backends that don't support them natively.
+    The ``existing_type`` argument is
+    used in this case to identify and remove a previous
+    constraint that was bound to the type object.
+
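+    For example, a brief sketch that widens a column's type while
+    restating its existing nullability for MySQL compatibility (the
+    table and column names are illustrative)::
+
+        from alembic import op
+        import sqlalchemy as sa
+
+        op.alter_column(
+            "account",
+            "name",
+            type_=sa.String(100),
+            existing_type=sa.String(50),
+            existing_nullable=False,
+        )
+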
+    :param table_name: string name of the target table.
+    :param column_name: string name of the target column,
+     as it exists before the operation begins.
+    :param nullable: Optional; specify ``True`` or ``False``
+     to alter the column's nullability.
+    :param server_default: Optional; specify a string
+     SQL expression, :func:`~sqlalchemy.sql.expression.text`,
+     or :class:`~sqlalchemy.schema.DefaultClause` to indicate
+     an alteration to the column's default value.
+     Set to ``None`` to have the default removed.
+    :param comment: optional string text of a new comment to add to the
+     column.
+    :param new_column_name: Optional; specify a string name here to
+     indicate the new name within a column rename operation.
+    :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine`
+     type object to specify a change to the column's type.
+     For SQLAlchemy types that also indicate a constraint (i.e.
+     :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
+     the constraint is also generated.
+    :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
+     currently understood by the MySQL dialect.
+    :param existing_type: Optional; a
+     :class:`~sqlalchemy.types.TypeEngine`
+     type object to specify the previous type.   This
+     is required for all MySQL column alter operations that
+     don't otherwise specify a new type, as well as for
+     when nullability is being changed on a SQL Server
+     column.  It is also used if the type is a so-called
+     SQLAlchemy "schema" type which may define a constraint (i.e.
+     :class:`~sqlalchemy.types.Boolean`,
+     :class:`~sqlalchemy.types.Enum`),
+     so that the constraint can be dropped.
+    :param existing_server_default: Optional; The existing
+     default value of the column.   Required on MySQL if
+     an existing default is not being changed; else MySQL
+     removes the default.
+    :param existing_nullable: Optional; the existing nullability
+     of the column.  Required on MySQL if the existing nullability
+     is not being changed; else MySQL sets this to NULL.
+    :param existing_autoincrement: Optional; the existing autoincrement
+     of the column.  Used for MySQL's system of altering a column
+     that specifies ``AUTO_INCREMENT``.
+    :param existing_comment: string text of the existing comment on the
+     column to be maintained.  Required on MySQL if the existing comment
+     on the column is not being changed.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+    :param postgresql_using: String argument which will indicate a
+     SQL expression to render within the Postgresql-specific USING clause
+     within ALTER COLUMN.    This string is taken directly as raw SQL which
+     must explicitly include any necessary quoting or escaping of tokens
+     within the expression.
+
+    """
+
+@contextmanager
+def batch_alter_table(
+    table_name: str,
+    schema: Optional[str] = None,
+    recreate: Literal["auto", "always", "never"] = "auto",
+    partial_reordering: Optional[Tuple[Any, ...]] = None,
+    copy_from: Optional[Table] = None,
+    table_args: Tuple[Any, ...] = (),
+    table_kwargs: Mapping[str, Any] = immutabledict({}),
+    reflect_args: Tuple[Any, ...] = (),
+    reflect_kwargs: Mapping[str, Any] = immutabledict({}),
+    naming_convention: Optional[Dict[str, str]] = None,
+) -> Iterator[BatchOperations]:
+    """Invoke a series of per-table migrations in batch.
+
+    Batch mode allows a series of operations specific to a table
+    to be syntactically grouped together, and allows for alternate
+    modes of table migration, in particular the "recreate" style of
+    migration required by SQLite.
+
+    "recreate" style is as follows:
+
+    1. A new table is created with the new specification, based on the
+       migration directives within the batch, using a temporary name.
+
+    2. The data is copied from the existing table to the new table.
+
+    3. The existing table is dropped.
+
+    4. The new table is renamed to the existing table name.
+
+    The directive by default will only use "recreate" style on the
+    SQLite backend, and only if directives are present which require
+    this form, e.g. anything other than ``add_column()``.   The batch
+    operation on other backends will proceed using standard ALTER TABLE
+    operations.
+
+    The method is used as a context manager, which returns an instance
+    of :class:`.BatchOperations`; this object is the same as
+    :class:`.Operations` except that table names and schema names
+    are omitted.  E.g.::
+
+        with op.batch_alter_table("some_table") as batch_op:
+            batch_op.add_column(Column("foo", Integer))
+            batch_op.drop_column("bar")
+
+    The operations within the context manager are invoked at once
+    when the context is ended.   When run against SQLite, if the
+    migrations include operations not supported by SQLite's ALTER TABLE,
+    the entire table will be copied to a new one with the new
+    specification, moving all data across as well.
+
+    The copy operation by default uses reflection to retrieve the current
+    structure of the table, and therefore :meth:`.batch_alter_table`
+    in this mode requires that the migration is run in "online" mode.
+    The ``copy_from`` parameter may be passed which refers to an existing
+    :class:`.Table` object, which will bypass this reflection step.
+
+    .. note::  The table copy operation will currently not copy
+       CHECK constraints, and may not copy UNIQUE constraints that are
+       unnamed, as is possible on SQLite.   See the section
+       :ref:`sqlite_batch_constraints` for workarounds.
+
+    :param table_name: name of table
+    :param schema: optional schema name.
+    :param recreate: under what circumstances the table should be
+     recreated. At its default of ``"auto"``, the SQLite dialect will
+     recreate the table if any operations other than ``add_column()``,
+     ``create_index()``, or ``drop_index()`` are
+     present. Other options include ``"always"`` and ``"never"``.
+    :param copy_from: optional :class:`~sqlalchemy.schema.Table` object
+     that will act as the structure of the table being copied.  If omitted,
+     table reflection is used to retrieve the structure of the table.
+
+     .. seealso::
+
+        :ref:`batch_offline_mode`
+
+        :paramref:`~.Operations.batch_alter_table.reflect_args`
+
+        :paramref:`~.Operations.batch_alter_table.reflect_kwargs`
+
+    :param reflect_args: a sequence of additional positional arguments that
+     will be applied to the table structure being reflected / copied;
+     this may be used to pass column and constraint overrides to the
+     table that will be reflected, in lieu of passing the whole
+     :class:`~sqlalchemy.schema.Table` using
+     :paramref:`~.Operations.batch_alter_table.copy_from`.
+    :param reflect_kwargs: a dictionary of additional keyword arguments
+     that will be applied to the table structure being copied; this may be
+     used to pass additional table and reflection options to the table that
+     will be reflected, in lieu of passing the whole
+     :class:`~sqlalchemy.schema.Table` using
+     :paramref:`~.Operations.batch_alter_table.copy_from`.
+    :param table_args: a sequence of additional positional arguments that
+     will be applied to the new :class:`~sqlalchemy.schema.Table` when
+     created, in addition to those copied from the source table.
+     This may be used to provide additional constraints such as CHECK
+     constraints that may not be reflected.
+    :param table_kwargs: a dictionary of additional keyword arguments
+     that will be applied to the new :class:`~sqlalchemy.schema.Table`
+     when created, in addition to those copied from the source table.
+     This may be used to provide for additional table options that may
+     not be reflected.
+    :param naming_convention: a naming convention dictionary of the form
+     described at :ref:`autogen_naming_conventions` which will be applied
+     to the :class:`~sqlalchemy.schema.MetaData` during the reflection
+     process.  This is typically required if one wants to drop SQLite
+     constraints, as these constraints will not have names when
+     reflected on this backend.  Requires SQLAlchemy **0.9.4** or greater.
+
+     .. seealso::
+
+        :ref:`dropping_sqlite_foreign_keys`
+
+    :param partial_reordering: a list of tuples, each suggesting a desired
+     ordering of two or more columns in the newly created table.  Requires
+     that :paramref:`.batch_alter_table.recreate` is set to ``"always"``.
+     Examples, given a table with columns "a", "b", "c", and "d":
+
+     Specify the order of all columns::
+
+        with op.batch_alter_table(
+            "some_table",
+            recreate="always",
+            partial_reordering=[("c", "d", "a", "b")],
+        ) as batch_op:
+            pass
+
+     Ensure "d" appears before "c", and "b", appears before "a"::
+
+        with op.batch_alter_table(
+            "some_table",
+            recreate="always",
+            partial_reordering=[("d", "c"), ("b", "a")],
+        ) as batch_op:
+            pass
+
+     The ordering of columns not included in the partial_reordering
+     set is undefined.   Therefore it is best to specify the complete
+     ordering of all columns.
+
+    .. note:: batch mode requires SQLAlchemy 0.8 or above.
+
+    .. seealso::
+
+        :ref:`batch_migrations`
+
+    """
+
+def bulk_insert(
+    table: Union[Table, TableClause],
+    rows: List[Dict[str, Any]],
+    *,
+    multiinsert: bool = True,
+) -> None:
+    """Issue a "bulk insert" operation using the current
+    migration context.
+
+    This provides a means of representing an INSERT of multiple rows
+    which works equally well in the context of executing on a live
+    connection as well as that of generating a SQL script.   In the
+    case of a SQL script, the values are rendered inline into the
+    statement.
+
+    e.g.::
+
+        from alembic import op
+        from datetime import date
+        from sqlalchemy.sql import table, column
+        from sqlalchemy import String, Integer, Date
+
+        # Create an ad-hoc table to use for the insert statement.
+        accounts_table = table(
+            "account",
+            column("id", Integer),
+            column("name", String),
+            column("create_date", Date),
+        )
+
+        op.bulk_insert(
+            accounts_table,
+            [
+                {
+                    "id": 1,
+                    "name": "John Smith",
+                    "create_date": date(2010, 10, 5),
+                },
+                {
+                    "id": 2,
+                    "name": "Ed Williams",
+                    "create_date": date(2007, 5, 27),
+                },
+                {
+                    "id": 3,
+                    "name": "Wendy Jones",
+                    "create_date": date(2008, 8, 15),
+                },
+            ],
+        )
+
+    When using --sql mode, some datatypes may not render inline
+    automatically, such as dates and other special types.   When this
+    issue is present, :meth:`.Operations.inline_literal` may be used::
+
+        op.bulk_insert(
+            accounts_table,
+            [
+                {
+                    "id": 1,
+                    "name": "John Smith",
+                    "create_date": op.inline_literal("2010-10-05"),
+                },
+                {
+                    "id": 2,
+                    "name": "Ed Williams",
+                    "create_date": op.inline_literal("2007-05-27"),
+                },
+                {
+                    "id": 3,
+                    "name": "Wendy Jones",
+                    "create_date": op.inline_literal("2008-08-15"),
+                },
+            ],
+            multiinsert=False,
+        )
+
+    When using :meth:`.Operations.inline_literal` in conjunction with
+    :meth:`.Operations.bulk_insert`, in order for the statement to work
+    in "online" (e.g. non --sql) mode, the
+    :paramref:`~.Operations.bulk_insert.multiinsert`
+    flag should be set to ``False``, which will have the effect of
+    individual INSERT statements being emitted to the database, each
+    with a distinct VALUES clause, so that the "inline" values can
+    still be rendered, rather than attempting to pass the values
+    as bound parameters.
+
+    :param table: a table object which represents the target of the INSERT.
+
+    :param rows: a list of dictionaries indicating rows.
+
+    :param multiinsert: when at its default of True and --sql mode is not
+       enabled, the INSERT statement will be executed using
+       "executemany()" style, where all elements in the list of
+       dictionaries are passed as bound parameters in a single
+       list.   Setting this to False results in individual INSERT
+       statements being emitted per parameter set, and is needed
+       in those cases where non-literal values are present in the
+       parameter sets.
+
+    """
+
+def create_check_constraint(
+    constraint_name: Optional[str],
+    table_name: str,
+    condition: Union[str, ColumnElement[bool], TextClause],
+    *,
+    schema: Optional[str] = None,
+    **kw: Any,
+) -> None:
+    """Issue a "create check constraint" instruction using the
+    current migration context.
+
+    e.g.::
+
+        from alembic import op
+        from sqlalchemy.sql import column, func
+
+        op.create_check_constraint(
+            "ck_user_name_len",
+            "user",
+            func.len(column("name")) > 5,
+        )
+
+    CHECK constraints are usually against a SQL expression, so ad-hoc
+    table metadata is usually needed.   The function will convert the given
+    arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
+    to an anonymous table in order to emit the CREATE statement.
+
+    :param constraint_name: Name of the check constraint.  The name is
+     necessary so that an ALTER statement can be emitted.  For setups that
+     use an automated naming scheme such as that described at
+     :ref:`sqla:constraint_naming_conventions`,
+     ``name`` here can be ``None``, as the event listener will
+     apply the name to the constraint object when it is associated
+     with the table.
+    :param table_name: String name of the source table.
+    :param condition: SQL expression that's the condition of the
+     constraint. Can be a string or SQLAlchemy expression language
+     structure.
+    :param deferrable: optional bool. If set, emit DEFERRABLE or
+     NOT DEFERRABLE when issuing DDL for this constraint.
+    :param initially: optional string. If set, emit INITIALLY <value>
+     when issuing DDL for this constraint.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def create_exclude_constraint(
+    constraint_name: str, table_name: str, *elements: Any, **kw: Any
+) -> Optional[Table]:
+    """Issue an alter to create an EXCLUDE constraint using the
+    current migration context.
+
+    .. note::  This method is Postgresql specific, and additionally
+       requires at least SQLAlchemy 1.0.
+
+    e.g.::
+
+        from alembic import op
+
+        op.create_exclude_constraint(
+            "user_excl",
+            "user",
+            ("period", "&&"),
+            ("group", "="),
+            where=("group != 'some group'"),
+        )
+
+    Note that the expressions work the same way as that of
+    the ``ExcludeConstraint`` object itself; if plain strings are
+    passed, quoting rules must be applied manually.
+
+    :param constraint_name: Name of the constraint.
+    :param table_name: String name of the source table.
+    :param elements: exclude conditions.
+    :param where: SQL expression or SQL string with optional WHERE
+     clause.
+    :param deferrable: optional bool. If set, emit DEFERRABLE or
+     NOT DEFERRABLE when issuing DDL for this constraint.
+    :param initially: optional string. If set, emit INITIALLY <value>
+     when issuing DDL for this constraint.
+    :param schema: Optional schema name to operate within.
+
+    """
+
+def create_foreign_key(
+    constraint_name: Optional[str],
+    source_table: str,
+    referent_table: str,
+    local_cols: List[str],
+    remote_cols: List[str],
+    *,
+    onupdate: Optional[str] = None,
+    ondelete: Optional[str] = None,
+    deferrable: Optional[bool] = None,
+    initially: Optional[str] = None,
+    match: Optional[str] = None,
+    source_schema: Optional[str] = None,
+    referent_schema: Optional[str] = None,
+    **dialect_kw: Any,
+) -> None:
+    """Issue a "create foreign key" instruction using the
+    current migration context.
+
+    e.g.::
+
+        from alembic import op
+
+        op.create_foreign_key(
+            "fk_user_address",
+            "address",
+            "user",
+            ["user_id"],
+            ["id"],
+        )
+
+    This internally generates a :class:`~sqlalchemy.schema.Table` object
+    containing the necessary columns, then generates a new
+    :class:`~sqlalchemy.schema.ForeignKeyConstraint`
+    object which it then associates with the
+    :class:`~sqlalchemy.schema.Table`.
+    Any event listeners associated with this action will be fired
+    off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+    construct is ultimately used to generate the ALTER statement.
+
+    :param constraint_name: Name of the foreign key constraint.  The name
+     is necessary so that an ALTER statement can be emitted.  For setups
+     that use an automated naming scheme such as that described at
+     :ref:`sqla:constraint_naming_conventions`,
+     ``name`` here can be ``None``, as the event listener will
+     apply the name to the constraint object when it is associated
+     with the table.
+    :param source_table: String name of the source table.
+    :param referent_table: String name of the destination table.
+    :param local_cols: a list of string column names in the
+     source table.
+    :param remote_cols: a list of string column names in the
+     remote table.
+    :param onupdate: Optional string. If set, emit ON UPDATE <value> when
+     issuing DDL for this constraint. Typical values include CASCADE,
+     SET NULL and RESTRICT.
+    :param ondelete: Optional string. If set, emit ON DELETE <value> when
+     issuing DDL for this constraint. Typical values include CASCADE,
+     SET NULL and RESTRICT.
+    :param deferrable: optional bool. If set, emit DEFERRABLE or NOT
+     DEFERRABLE when issuing DDL for this constraint.
+    :param source_schema: Optional schema name of the source table.
+    :param referent_schema: Optional schema name of the destination table.
+
+    """
+
+def create_index(
+    index_name: Optional[str],
+    table_name: str,
+    columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
+    *,
+    schema: Optional[str] = None,
+    unique: bool = False,
+    if_not_exists: Optional[bool] = None,
+    **kw: Any,
+) -> None:
+    r"""Issue a "create index" instruction using the current
+    migration context.
+
+    e.g.::
+
+        from alembic import op
+
+        op.create_index("ik_test", "t1", ["foo", "bar"])
+
+    Functional indexes can be produced by using the
+    :func:`sqlalchemy.sql.expression.text` construct::
+
+        from alembic import op
+        from sqlalchemy import text
+
+        op.create_index("ik_test", "t1", [text("lower(foo)")])
+
+    :param index_name: name of the index.
+    :param table_name: name of the owning table.
+    :param columns: a list consisting of string column names and/or
+     :func:`~sqlalchemy.sql.expression.text` constructs.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+    :param unique: If True, create a unique index.
+
+    :param quote: Force quoting of this column's name on or off,
+     corresponding to ``True`` or ``False``. When left at its default
+     of ``None``, the column identifier will be quoted according to
+     whether the name is case sensitive (identifiers with at least one
+     upper case character are treated as case sensitive), or if it's a
+     reserved word. This flag is only needed to force quoting of a
+     reserved word which is not known by the SQLAlchemy dialect.
+
+    :param if_not_exists: If True, adds the IF NOT EXISTS clause when
+     creating the new index.
+
+     .. versionadded:: 1.12.0
+
+    :param \**kw: Additional keyword arguments not mentioned above are
+     dialect specific, and passed in the form
+     ``<dialectname>_<argname>``.
+     See the documentation regarding an individual dialect at
+     :ref:`dialect_toplevel` for detail on documented arguments.
+
+    """
+
+def create_primary_key(
+    constraint_name: Optional[str],
+    table_name: str,
+    columns: List[str],
+    *,
+    schema: Optional[str] = None,
+) -> None:
+    """Issue a "create primary key" instruction using the current
+    migration context.
+
+    e.g.::
+
+        from alembic import op
+
+        op.create_primary_key("pk_my_table", "my_table", ["id", "version"])
+
+    This internally generates a :class:`~sqlalchemy.schema.Table` object
+    containing the necessary columns, then generates a new
+    :class:`~sqlalchemy.schema.PrimaryKeyConstraint`
+    object which it then associates with the
+    :class:`~sqlalchemy.schema.Table`.
+    Any event listeners associated with this action will be fired
+    off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+    construct is ultimately used to generate the ALTER statement.
+
+    :param constraint_name: Name of the primary key constraint.  The name
+     is necessary so that an ALTER statement can be emitted.  For setups
+     that use an automated naming scheme such as that described at
+     :ref:`sqla:constraint_naming_conventions`,
+     ``name`` here can be ``None``, as the event listener will
+     apply the name to the constraint object when it is associated
+     with the table.
+    :param table_name: String name of the target table.
+    :param columns: a list of string column names to be applied to the
+     primary key constraint.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def create_table(
+    table_name: str,
+    *columns: SchemaItem,
+    if_not_exists: Optional[bool] = None,
+    **kw: Any,
+) -> Table:
+    r"""Issue a "create table" instruction using the current migration
+    context.
+
+    This directive receives an argument list similar to that of the
+    traditional :class:`sqlalchemy.schema.Table` construct, but without the
+    metadata::
+
+        from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, TIMESTAMP
+        from sqlalchemy import Column, func
+        from alembic import op
+
+        op.create_table(
+            "account",
+            Column("id", INTEGER, primary_key=True),
+            Column("name", VARCHAR(50), nullable=False),
+            Column("description", NVARCHAR(200)),
+            Column("timestamp", TIMESTAMP, server_default=func.now()),
+        )
+
+    Note that :meth:`.create_table` accepts
+    :class:`~sqlalchemy.schema.Column`
+    constructs directly from the SQLAlchemy library.  In particular,
+    default values to be created on the database side are
+    specified using the ``server_default`` parameter, and not
+    ``default`` which only specifies Python-side defaults::
+
+        from alembic import op
+        from sqlalchemy import Column, TIMESTAMP, func
+
+        # specify "DEFAULT NOW" along with the "timestamp" column
+        op.create_table(
+            "account",
+            Column("id", INTEGER, primary_key=True),
+            Column("timestamp", TIMESTAMP, server_default=func.now()),
+        )
+
+    The function also returns a newly created
+    :class:`~sqlalchemy.schema.Table` object, corresponding to the table
+    specification given, which is suitable for
+    immediate SQL operations, in particular
+    :meth:`.Operations.bulk_insert`::
+
+        from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, TIMESTAMP
+        from sqlalchemy import Column, func
+        from alembic import op
+
+        account_table = op.create_table(
+            "account",
+            Column("id", INTEGER, primary_key=True),
+            Column("name", VARCHAR(50), nullable=False),
+            Column("description", NVARCHAR(200)),
+            Column("timestamp", TIMESTAMP, server_default=func.now()),
+        )
+
+        op.bulk_insert(
+            account_table,
+            [
+                {"name": "A1", "description": "account 1"},
+                {"name": "A2", "description": "account 2"},
+            ],
+        )
+
+    :param table_name: Name of the table
+    :param \*columns: collection of :class:`~sqlalchemy.schema.Column`
+     objects within
+     the table, as well as optional :class:`~sqlalchemy.schema.Constraint`
+     objects
+     and :class:`~sqlalchemy.schema.Index` objects.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+    :param if_not_exists: If True, adds the IF NOT EXISTS clause when
+     creating the new table.
+
+     .. versionadded:: 1.13.3
+    :param \**kw: Other keyword arguments are passed to the underlying
+     :class:`sqlalchemy.schema.Table` object created for the command.
+
+    :return: the :class:`~sqlalchemy.schema.Table` object corresponding
+     to the parameters given.
+
+    """
+
+def create_table_comment(
+    table_name: str,
+    comment: Optional[str],
+    *,
+    existing_comment: Optional[str] = None,
+    schema: Optional[str] = None,
+) -> None:
+    """Emit a COMMENT ON operation to set the comment for a table.
+
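+    E.g., a brief sketch (the table name and comment are illustrative)::
+
+        from alembic import op
+
+        op.create_table_comment("account", "user account records")
+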
+    :param table_name: string name of the target table.
+    :param comment: string value of the comment being registered against
+     the specified table.
+    :param existing_comment: String value of a comment
+     already registered on the specified table, used within autogenerate
+     so that the operation is reversible, but not required for direct
+     use.
+
+    .. seealso::
+
+        :meth:`.Operations.drop_table_comment`
+
+        :paramref:`.Operations.alter_column.comment`
+
+    """
+
+def create_unique_constraint(
+    constraint_name: Optional[str],
+    table_name: str,
+    columns: Sequence[str],
+    *,
+    schema: Optional[str] = None,
+    **kw: Any,
+) -> Any:
+    """Issue a "create unique constraint" instruction using the
+    current migration context.
+
+    e.g.::
+
+        from alembic import op
+        op.create_unique_constraint("uq_user_name", "user", ["name"])
+
+    This internally generates a :class:`~sqlalchemy.schema.Table` object
+    containing the necessary columns, then generates a new
+    :class:`~sqlalchemy.schema.UniqueConstraint`
+    object which it then associates with the
+    :class:`~sqlalchemy.schema.Table`.
+    Any event listeners associated with this action will be fired
+    off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+    construct is ultimately used to generate the ALTER statement.
+
+    :param constraint_name: Name of the unique constraint.  The name is
+     necessary so that an ALTER statement can be emitted.  For setups that
+     use an automated naming scheme such as that described at
+     :ref:`sqla:constraint_naming_conventions`,
+     ``name`` here can be ``None``, as the event listener will
+     apply the name to the constraint object when it is associated
+     with the table.
+    :param table_name: String name of the source table.
+    :param columns: a list of string column names in the
+     source table.
+    :param deferrable: optional bool. If set, emit DEFERRABLE or
+     NOT DEFERRABLE when issuing DDL for this constraint.
+    :param initially: optional string. If set, emit INITIALLY <value>
+     when issuing DDL for this constraint.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def drop_column(
+    table_name: str,
+    column_name: str,
+    *,
+    schema: Optional[str] = None,
+    **kw: Any,
+) -> None:
+    """Issue a "drop column" instruction using the current
+    migration context.
+
+    e.g.::
+
+        op.drop_column("organization", "account_id")
+
+    :param table_name: name of table
+    :param column_name: name of column
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+    :param mssql_drop_check: Optional boolean.  When ``True``, on
+     Microsoft SQL Server only, first
+     drop the CHECK constraint on the column using a
+     SQL-script-compatible
+     block that selects into a @variable from sys.check_constraints,
+     then exec's a separate DROP CONSTRAINT for that constraint.
+    :param mssql_drop_default: Optional boolean.  When ``True``, on
+     Microsoft SQL Server only, first
+     drop the DEFAULT constraint on the column using a
+     SQL-script-compatible
+     block that selects into a @variable from sys.default_constraints,
+     then exec's a separate DROP CONSTRAINT for that default.
+    :param mssql_drop_foreign_key: Optional boolean.  When ``True``, on
+     Microsoft SQL Server only, first
+     drop a single FOREIGN KEY constraint on the column using a
+     SQL-script-compatible
+     block that selects into a @variable from
+     sys.foreign_keys/sys.foreign_key_columns,
+     then exec's a separate DROP CONSTRAINT for that constraint.  This
+     currently only works if the column has exactly one FK constraint
+     which refers to it.
+
+    """
+
+def drop_constraint(
+    constraint_name: str,
+    table_name: str,
+    type_: Optional[str] = None,
+    *,
+    schema: Optional[str] = None,
+) -> None:
+    r"""Drop a constraint of the given name, typically via DROP CONSTRAINT.
+
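+    E.g., a brief sketch dropping a unique constraint, with the explicit
+    ``type_`` that MySQL requires (names illustrative)::
+
+        from alembic import op
+
+        op.drop_constraint("uq_user_name", "user", type_="unique")
+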
+    :param constraint_name: name of the constraint.
+    :param table_name: table name.
+    :param type\_: optional, required on MySQL; can be
+     'foreignkey', 'primary', 'unique', or 'check'.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def drop_index(
+    index_name: str,
+    table_name: Optional[str] = None,
+    *,
+    schema: Optional[str] = None,
+    if_exists: Optional[bool] = None,
+    **kw: Any,
+) -> None:
+    r"""Issue a "drop index" instruction using the current
+    migration context.
+
+    e.g.::
+
+        op.drop_index("accounts")
+
+    :param index_name: name of the index.
+    :param table_name: name of the owning table.  Some
+     backends such as Microsoft SQL Server require this.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    :param if_exists: If True, adds the IF EXISTS clause when
+     dropping the index.
+
+     .. versionadded:: 1.12.0
+
+    :param \**kw: Additional keyword arguments not mentioned above are
+     dialect specific, and passed in the form
+     ``<dialectname>_<argname>``.
+     See the documentation regarding an individual dialect at
+     :ref:`dialect_toplevel` for detail on documented arguments.
+
+    """
+
+def drop_table(
+    table_name: str,
+    *,
+    schema: Optional[str] = None,
+    if_exists: Optional[bool] = None,
+    **kw: Any,
+) -> None:
+    r"""Issue a "drop table" instruction using the current
+    migration context.
+
+
+    e.g.::
+
+        op.drop_table("accounts")
+
+    :param table_name: Name of the table
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+    :param if_exists: If True, adds the IF EXISTS clause when
+     dropping the table.
+
+     .. versionadded:: 1.13.3
+    :param \**kw: Other keyword arguments are passed to the underlying
+     :class:`sqlalchemy.schema.Table` object created for the command.
+
+    """
+
+def drop_table_comment(
+    table_name: str,
+    *,
+    existing_comment: Optional[str] = None,
+    schema: Optional[str] = None,
+) -> None:
+    """Issue a "drop table comment" operation to
+    remove an existing comment set on a table.
+
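+    E.g., a brief sketch (the table name is illustrative)::
+
+        from alembic import op
+
+        op.drop_table_comment("account")
+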
+    :param table_name: string name of the target table.
+    :param existing_comment: An optional string value of a comment already
+     registered on the specified table.
+
+    .. seealso::
+
+        :meth:`.Operations.create_table_comment`
+
+        :paramref:`.Operations.alter_column.comment`
+
+    """
+
+def execute(
+    sqltext: Union[Executable, str],
+    *,
+    execution_options: Optional[dict[str, Any]] = None,
+) -> None:
+    r"""Execute the given SQL using the current migration context.
+
+    The given SQL can be a plain string, e.g.::
+
+        op.execute("INSERT INTO table (foo) VALUES ('some value')")
+
+    Or it can be any kind of Core SQL Expression construct, such as
+    below where we use an update construct::
+
+        from sqlalchemy.sql import table, column
+        from sqlalchemy import String
+        from alembic import op
+
+        account = table("account", column("name", String))
+        op.execute(
+            account.update()
+            .where(account.c.name == op.inline_literal("account 1"))
+            .values({"name": op.inline_literal("account 2")})
+        )
+
+    Above, we made use of the SQLAlchemy
+    :func:`sqlalchemy.sql.expression.table` and
+    :func:`sqlalchemy.sql.expression.column` constructs to make a brief,
+    ad-hoc table construct just for our UPDATE statement.  A full
+    :class:`~sqlalchemy.schema.Table` construct of course works perfectly
+    fine as well, though note it's a recommended practice to at least
+    ensure the definition of a table is self-contained within the migration
+    script, rather than imported from a module that may break compatibility
+    with older migrations.
+
+    In a SQL script context, the statement is emitted directly to the
+    output stream.   There is *no* return result, however, as this
+    function is oriented towards generating a change script
+    that can run in "offline" mode.     Additionally, parameterized
+    statements are discouraged here, as they *will not work* in offline
+    mode.  Above, we use :meth:`.inline_literal` where parameters are
+    to be used.
+
+    For full interaction with a connected database where parameters can
+    also be used normally, use the "bind" available from the context::
+
+        from alembic import op
+
+        connection = op.get_bind()
+
+        connection.execute(
+            account.update()
+            .where(account.c.name == "account 1")
+            .values({"name": "account 2"})
+        )
+
+    Additionally, when passing the statement as a plain string, it is first
+    coerced into a :func:`sqlalchemy.sql.expression.text` construct
+    before being passed along.  In the less likely case that the
+    literal SQL string contains a colon, it must be escaped with a
+    backslash, as::
+
+       op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')")
+
+
+    :param sqltext: Any legal SQLAlchemy expression, including:
+
+    * a string
+    * a :func:`sqlalchemy.sql.expression.text` construct.
+    * a :func:`sqlalchemy.sql.expression.insert` construct.
+    * a :func:`sqlalchemy.sql.expression.update` construct.
+    * a :func:`sqlalchemy.sql.expression.delete` construct.
+    * Any "executable" described in SQLAlchemy Core documentation,
+      noting that no result set is returned.
+
+    .. note::  when passing a plain string, the statement is coerced into
+       a :func:`sqlalchemy.sql.expression.text` construct. This construct
+       considers symbols with colons, e.g. ``:foo`` to be bound parameters.
+       To avoid this, ensure that colon symbols are escaped, e.g.
+       ``\:foo``.
+
+    :param execution_options: Optional dictionary of
+     execution options, will be passed to
+     :meth:`sqlalchemy.engine.Connection.execution_options`.
+    """
+
+def f(name: str) -> conv:
+    """Indicate a string name that has already had a naming convention
+    applied to it.
+
+    This feature combines with the SQLAlchemy ``naming_convention`` feature
+    to disambiguate constraint names that have already had naming
+    conventions applied to them, versus those that have not.  This is
+    necessary in the case that the ``"%(constraint_name)s"`` token
+    is used within a naming convention, so that it can be identified
+    that this particular name should remain fixed.
+
+    If :meth:`.Operations.f` is used on a constraint, the naming
+    convention will not take effect::
+
+        op.add_column("t", Column("x", Boolean(name=op.f("ck_bool_t_x"))))
+
+    Above, the CHECK constraint generated will have the name
+    ``ck_bool_t_x`` regardless of whether or not a naming convention is
+    in use.
+
+    Alternatively, if a naming convention is in use, and 'f' is not used,
+    names will be converted along conventions.  If the ``target_metadata``
+    contains the naming convention
+    ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the
+    output of the following::
+
+        op.add_column("t", Column("x", Boolean(name="x")))
+
+    will be::
+
+        CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))
+
+    The function is rendered in the output of autogenerate when
+    a particular constraint name is already converted.
+
+    """
+
+def get_bind() -> Connection:
+    """Return the current 'bind'.
+
+    Under normal circumstances, this is the
+    :class:`~sqlalchemy.engine.Connection` currently being used
+    to emit SQL to the database.
+
+    In a SQL script (offline) context, this value is ``None``, as no
+    database connection is in use.
+
+    """
+
+def get_context() -> MigrationContext:
+    """Return the :class:`.MigrationContext` object that's
+    currently in use.
+
+    """
+
+def implementation_for(op_cls: Any) -> Callable[[_C], _C]:
+    """Register an implementation for a given :class:`.MigrateOperation`.
+
+    This is part of the operation extensibility API.
+
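+    A brief sketch, pairing with a hypothetical ``CreateSequenceOp``
+    operation (see :meth:`.Operations.register_operation` for the
+    registration side of this pattern)::
+
+        from alembic.operations import Operations
+
+        @Operations.implementation_for(CreateSequenceOp)
+        def create_sequence(operations, operation):
+            # receives the Operations instance plus the op object itself
+            operations.execute(
+                "CREATE SEQUENCE %s" % operation.sequence_name
+            )
+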
+    .. seealso::
+
+        :ref:`operation_plugins` - example of use
+
+    """
+
+def inline_literal(
+    value: Union[str, int], type_: Optional[TypeEngine[Any]] = None
+) -> _literal_bindparam:
+    r"""Produce an 'inline literal' expression, suitable for
+    using in an INSERT, UPDATE, or DELETE statement.
+
+    When using Alembic in "offline" mode, CRUD operations
+    aren't compatible with SQLAlchemy's default behavior surrounding
+    literal values,
+    which is that they are converted into bound values and passed
+    separately into the ``execute()`` method of the DBAPI cursor.
+    An offline SQL
+    script needs to have these rendered inline.  While it should
+    always be noted that inline literal values are an **enormous**
+    security hole in an application that handles untrusted input,
+    a schema migration is not run in this context, so
+    literals are safe to render inline, with the caveat that
+    advanced types like dates may not be supported directly
+    by SQLAlchemy.
+
+    See :meth:`.Operations.execute` for an example usage of
+    :meth:`.Operations.inline_literal`.
+
+    The environment can also be configured to attempt to render
+    "literal" values inline automatically, for those simple types
+    that are supported by the dialect; see
+    :paramref:`.EnvironmentContext.configure.literal_binds` for this
+    more recently added feature.
+
+    :param value: The value to render.  Strings, integers, and simple
+     numerics should be supported.   Other types like boolean,
+     dates, etc. may or may not be supported yet by various
+     backends.
+    :param type\_: optional - a :class:`sqlalchemy.types.TypeEngine`
+     subclass stating the type of this value.  In SQLAlchemy
+     expressions, this is usually derived automatically
+     from the Python type of the value itself, as well as
+     based on the context in which the value is used.
+
+    .. seealso::
+
+        :paramref:`.EnvironmentContext.configure.literal_binds`
+
+    """
+
+@overload
+def invoke(operation: CreateTableOp) -> Table: ...
+@overload
+def invoke(
+    operation: Union[
+        AddConstraintOp,
+        DropConstraintOp,
+        CreateIndexOp,
+        DropIndexOp,
+        AddColumnOp,
+        AlterColumnOp,
+        AlterTableOp,
+        CreateTableCommentOp,
+        DropTableCommentOp,
+        DropColumnOp,
+        BulkInsertOp,
+        DropTableOp,
+        ExecuteSQLOp,
+    ]
+) -> None: ...
+@overload
+def invoke(operation: MigrateOperation) -> Any:
+    """Given a :class:`.MigrateOperation`, invoke it in terms of
+    this :class:`.Operations` instance.
+
+    """
+
+def register_operation(
+    name: str, sourcename: Optional[str] = None
+) -> Callable[[Type[_T]], Type[_T]]:
+    """Register a new operation for this class.
+
+    This method is normally used to add new operations
+    to the :class:`.Operations` class, and possibly the
+    :class:`.BatchOperations` class as well.   All Alembic migration
+    operations are implemented via this system, however the system
+    is also available as a public API to facilitate adding custom
+    operations.
+
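+    A brief sketch of a custom operation, following the pattern described
+    at :ref:`operation_plugins` (``CreateSequenceOp`` and
+    ``create_sequence`` are illustrative names)::
+
+        from alembic.operations import Operations, MigrateOperation
+
+        @Operations.register_operation("create_sequence")
+        class CreateSequenceOp(MigrateOperation):
+            def __init__(self, sequence_name):
+                self.sequence_name = sequence_name
+
+            @classmethod
+            def create_sequence(cls, operations, sequence_name, **kw):
+                """Issue a CREATE SEQUENCE instruction."""
+                return operations.invoke(cls(sequence_name))
+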
+    .. seealso::
+
+        :ref:`operation_plugins`
+
+
+    """
+
+def rename_table(
+    old_table_name: str, new_table_name: str, *, schema: Optional[str] = None
+) -> None:
+    """Emit an ALTER TABLE to rename a table.
+
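+    E.g., a brief sketch (names illustrative)::
+
+        from alembic import op
+
+        op.rename_table("account", "accounts")
+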
+    :param old_table_name: old name.
+    :param new_table_name: new name.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def run_async(
+    async_function: Callable[..., Awaitable[_T]], *args: Any, **kw_args: Any
+) -> _T:
+    """Invoke the given asynchronous callable, passing an asynchronous
+    :class:`~sqlalchemy.ext.asyncio.AsyncConnection` as the first
+    argument.
+
+    This method allows calling async functions from within the
+    synchronous ``upgrade()`` or ``downgrade()`` alembic migration
+    method.
+
+    The async connection passed to the callable shares the same
+    transaction as the connection running in the migration context.
+
+    Any additional positional or keyword arguments passed to this
+    function are passed along to the provided async function.
+
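+    E.g., a brief sketch; ``_do_upgrade`` is an illustrative helper
+    name::
+
+        import sqlalchemy as sa
+        from sqlalchemy.ext.asyncio import AsyncConnection
+
+        from alembic import op
+
+        async def _do_upgrade(connection: AsyncConnection) -> None:
+            # runs in the same transaction as the migration context
+            await connection.execute(sa.text("SELECT 1"))
+
+        def upgrade() -> None:
+            op.run_async(_do_upgrade)
+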
+    .. versionadded:: 1.11
+
+    .. note::
+
+        This method can be called only when alembic is called using
+        an async dialect.
+    """
diff --git a/.venv/lib/python3.12/site-packages/alembic/operations/__init__.py b/.venv/lib/python3.12/site-packages/alembic/operations/__init__.py
new file mode 100644
index 00000000..26197cbe
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/operations/__init__.py
@@ -0,0 +1,15 @@
+from . import toimpl
+from .base import AbstractOperations
+from .base import BatchOperations
+from .base import Operations
+from .ops import MigrateOperation
+from .ops import MigrationScript
+
+
+__all__ = [
+    "AbstractOperations",
+    "Operations",
+    "BatchOperations",
+    "MigrateOperation",
+    "MigrationScript",
+]
diff --git a/.venv/lib/python3.12/site-packages/alembic/operations/base.py b/.venv/lib/python3.12/site-packages/alembic/operations/base.py
new file mode 100644
index 00000000..456d1c75
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/operations/base.py
@@ -0,0 +1,1906 @@
+# mypy: allow-untyped-calls
+
+from __future__ import annotations
+
+from contextlib import contextmanager
+import re
+import textwrap
+from typing import Any
+from typing import Awaitable
+from typing import Callable
+from typing import Dict
+from typing import Iterator
+from typing import List  # noqa
+from typing import Mapping
+from typing import NoReturn
+from typing import Optional
+from typing import overload
+from typing import Sequence  # noqa
+from typing import Tuple
+from typing import Type  # noqa
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy.sql.elements import conv
+
+from . import batch
+from . import schemaobj
+from .. import util
+from ..util import sqla_compat
+from ..util.compat import formatannotation_fwdref
+from ..util.compat import inspect_formatargspec
+from ..util.compat import inspect_getfullargspec
+from ..util.sqla_compat import _literal_bindparam
+
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy import Table
+    from sqlalchemy.engine import Connection
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.expression import ColumnElement
+    from sqlalchemy.sql.expression import TableClause
+    from sqlalchemy.sql.expression import TextClause
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Computed
+    from sqlalchemy.sql.schema import Identity
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.types import TypeEngine
+
+    from .batch import BatchOperationsImpl
+    from .ops import AddColumnOp
+    from .ops import AddConstraintOp
+    from .ops import AlterColumnOp
+    from .ops import AlterTableOp
+    from .ops import BulkInsertOp
+    from .ops import CreateIndexOp
+    from .ops import CreateTableCommentOp
+    from .ops import CreateTableOp
+    from .ops import DropColumnOp
+    from .ops import DropConstraintOp
+    from .ops import DropIndexOp
+    from .ops import DropTableCommentOp
+    from .ops import DropTableOp
+    from .ops import ExecuteSQLOp
+    from .ops import MigrateOperation
+    from ..ddl import DefaultImpl
+    from ..runtime.migration import MigrationContext
+__all__ = ("Operations", "BatchOperations")
+_T = TypeVar("_T")
+
+_C = TypeVar("_C", bound=Callable[..., Any])
+
+
+class AbstractOperations(util.ModuleClsProxy):
+    """Base class for Operations and BatchOperations.
+
+    .. versionadded:: 1.11.0
+
+    """
+
+    impl: Union[DefaultImpl, BatchOperationsImpl]
+    _to_impl = util.Dispatcher()
+
+    def __init__(
+        self,
+        migration_context: MigrationContext,
+        impl: Optional[BatchOperationsImpl] = None,
+    ) -> None:
+        """Construct a new :class:`.Operations`
+
+        :param migration_context: a :class:`.MigrationContext`
+         instance.
+
+        """
+        self.migration_context = migration_context
+        if impl is None:
+            self.impl = migration_context.impl
+        else:
+            self.impl = impl
+
+        self.schema_obj = schemaobj.SchemaObjects(migration_context)
+
+    @classmethod
+    def register_operation(
+        cls, name: str, sourcename: Optional[str] = None
+    ) -> Callable[[Type[_T]], Type[_T]]:
+        """Register a new operation for this class.
+
+        This method is normally used to add new operations
+        to the :class:`.Operations` class, and possibly the
+        :class:`.BatchOperations` class as well.   All Alembic migration
+        operations are implemented via this system, however the system
+        is also available as a public API to facilitate adding custom
+        operations.
+
+        .. seealso::
+
+            :ref:`operation_plugins`
+
+
+        """
+
+        def register(op_cls: Type[_T]) -> Type[_T]:
+            if sourcename is None:
+                fn = getattr(op_cls, name)
+                source_name = fn.__name__
+            else:
+                fn = getattr(op_cls, sourcename)
+                source_name = fn.__name__
+
+            spec = inspect_getfullargspec(fn)
+
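+            # the wrapped function is a classmethod of the form
+            # (cls, operations, ...); rewrite its argument list into the
+            # (self, ...) form of the proxy method being generated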
+            name_args = spec[0]
+            assert name_args[0:2] == ["cls", "operations"]
+
+            name_args[0:2] = ["self"]
+
+            args = inspect_formatargspec(
+                *spec, formatannotation=formatannotation_fwdref
+            )
+            num_defaults = len(spec[3]) if spec[3] else 0
+
+            defaulted_vals: Tuple[Any, ...]
+
+            if num_defaults:
+                defaulted_vals = tuple(name_args[0 - num_defaults :])
+            else:
+                defaulted_vals = ()
+
+            defaulted_vals += tuple(spec[4])
+            # here, we are using formatargspec in a different way in order
+            # to get a string that will re-apply incoming arguments to a new
+            # function call
+
+            apply_kw = inspect_formatargspec(
+                name_args + spec[4],
+                spec[1],
+                spec[2],
+                defaulted_vals,
+                formatvalue=lambda x: "=" + x,
+                formatannotation=formatannotation_fwdref,
+            )
+
+            args = re.sub(
+                r'[_]?ForwardRef\(([\'"].+?[\'"])\)',
+                lambda m: m.group(1),
+                args,
+            )
+
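+            # render the source of the proxy method and exec() it; the
+            # generated method forwards its arguments to the classmethod
+            # on the operation class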
+            func_text = textwrap.dedent(
+                """\
+            def %(name)s%(args)s:
+                %(doc)r
+                return op_cls.%(source_name)s%(apply_kw)s
+            """
+                % {
+                    "name": name,
+                    "source_name": source_name,
+                    "args": args,
+                    "apply_kw": apply_kw,
+                    "doc": fn.__doc__,
+                }
+            )
+
+            globals_ = dict(globals())
+            globals_.update({"op_cls": op_cls})
+            lcl: Dict[str, Any] = {}
+
+            exec(func_text, globals_, lcl)
+            setattr(cls, name, lcl[name])
+            fn.__func__.__doc__ = (
+                "This method is proxied on "
+                "the :class:`.%s` class, via the :meth:`.%s.%s` method."
+                % (cls.__name__, cls.__name__, name)
+            )
+            if hasattr(fn, "_legacy_translations"):
+                lcl[name]._legacy_translations = fn._legacy_translations
+            return op_cls
+
+        return register
+
+    @classmethod
+    def implementation_for(cls, op_cls: Any) -> Callable[[_C], _C]:
+        """Register an implementation for a given :class:`.MigrateOperation`.
+
+        This is part of the operation extensibility API.
+
+        .. seealso::
+
+            :ref:`operation_plugins` - example of use
+
+        """
+
+        def decorate(fn: _C) -> _C:
+            cls._to_impl.dispatch_for(op_cls)(fn)
+            return fn
+
+        return decorate
+
+    @classmethod
+    @contextmanager
+    def context(
+        cls, migration_context: MigrationContext
+    ) -> Iterator[Operations]:
+        op = Operations(migration_context)
+        op._install_proxy()
+        yield op
+        op._remove_proxy()
+
+    @contextmanager
+    def batch_alter_table(
+        self,
+        table_name: str,
+        schema: Optional[str] = None,
+        recreate: Literal["auto", "always", "never"] = "auto",
+        partial_reordering: Optional[Tuple[Any, ...]] = None,
+        copy_from: Optional[Table] = None,
+        table_args: Tuple[Any, ...] = (),
+        table_kwargs: Mapping[str, Any] = util.immutabledict(),
+        reflect_args: Tuple[Any, ...] = (),
+        reflect_kwargs: Mapping[str, Any] = util.immutabledict(),
+        naming_convention: Optional[Dict[str, str]] = None,
+    ) -> Iterator[BatchOperations]:
+        """Invoke a series of per-table migrations in batch.
+
+        Batch mode allows a series of operations specific to a table
+        to be syntactically grouped together, and allows for alternate
+        modes of table migration, in particular the "recreate" style of
+        migration required by SQLite.
+
+        "recreate" style is as follows:
+
+        1. A new table is created with the new specification, based on the
+           migration directives within the batch, using a temporary name.
+
+        2. The data is copied from the existing table to the new table.
+
+        3. The existing table is dropped.
+
+        4. The new table is renamed to the existing table name.
+
+        The directive by default will only use "recreate" style on the
+        SQLite backend, and only if directives are present which require
+        this form, e.g. anything other than ``add_column()``.   The batch
+        operation on other backends will proceed using standard ALTER TABLE
+        operations.
+
+        The method is used as a context manager, which returns an instance
+        of :class:`.BatchOperations`; this object is the same as
+        :class:`.Operations` except that table names and schema names
+        are omitted.  E.g.::
+
+            with op.batch_alter_table("some_table") as batch_op:
+                batch_op.add_column(Column("foo", Integer))
+                batch_op.drop_column("bar")
+
+        The operations within the context manager are invoked at once
+        when the context is ended.   When run against SQLite, if the
+        migrations include operations not supported by SQLite's ALTER TABLE,
+        the entire table will be copied to a new one with the new
+        specification, moving all data across as well.
+
+        The copy operation by default uses reflection to retrieve the current
+        structure of the table, and therefore :meth:`.batch_alter_table`
+        in this mode requires that the migration is run in "online" mode.
+        The ``copy_from`` parameter may be passed which refers to an existing
+        :class:`.Table` object, which will bypass this reflection step.
+
+        .. note::  The table copy operation will currently not copy
+           CHECK constraints, and may not copy UNIQUE constraints that are
+           unnamed, as is possible on SQLite.   See the section
+           :ref:`sqlite_batch_constraints` for workarounds.
+
+        :param table_name: name of table
+        :param schema: optional schema name.
+        :param recreate: under what circumstances the table should be
+         recreated. At its default of ``"auto"``, the SQLite dialect will
+         recreate the table if any operations other than ``add_column()``,
+         ``create_index()``, or ``drop_index()`` are
+         present. Other options include ``"always"`` and ``"never"``.
+        :param copy_from: optional :class:`~sqlalchemy.schema.Table` object
+         that will act as the structure of the table being copied.  If omitted,
+         table reflection is used to retrieve the structure of the table.
+
+         .. seealso::
+
+            :ref:`batch_offline_mode`
+
+            :paramref:`~.Operations.batch_alter_table.reflect_args`
+
+            :paramref:`~.Operations.batch_alter_table.reflect_kwargs`
+
+        :param reflect_args: a sequence of additional positional arguments that
+         will be applied to the table structure being reflected / copied;
+         this may be used to pass column and constraint overrides to the
+         table that will be reflected, in lieu of passing the whole
+         :class:`~sqlalchemy.schema.Table` using
+         :paramref:`~.Operations.batch_alter_table.copy_from`.
+        :param reflect_kwargs: a dictionary of additional keyword arguments
+         that will be applied to the table structure being copied; this may be
+         used to pass additional table and reflection options to the table that
+         will be reflected, in lieu of passing the whole
+         :class:`~sqlalchemy.schema.Table` using
+         :paramref:`~.Operations.batch_alter_table.copy_from`.
+        :param table_args: a sequence of additional positional arguments that
+         will be applied to the new :class:`~sqlalchemy.schema.Table` when
+         created, in addition to those copied from the source table.
+         This may be used to provide additional constraints such as CHECK
+         constraints that may not be reflected.
+        :param table_kwargs: a dictionary of additional keyword arguments
+         that will be applied to the new :class:`~sqlalchemy.schema.Table`
+         when created, in addition to those copied from the source table.
+         This may be used to provide for additional table options that may
+         not be reflected.
+        :param naming_convention: a naming convention dictionary of the form
+         described at :ref:`autogen_naming_conventions` which will be applied
+         to the :class:`~sqlalchemy.schema.MetaData` during the reflection
+         process.  This is typically required if one wants to drop SQLite
+         constraints, as these constraints will not have names when
+         reflected on this backend.  Requires SQLAlchemy **0.9.4** or greater.
+
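+         E.g., a sketch of dropping an unnamed SQLite foreign key via a
+         convention (table, column, and constraint names here are
+         illustrative)::
+
+            with op.batch_alter_table(
+                "bar",
+                naming_convention={
+                    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
+                },
+            ) as batch_op:
+                batch_op.drop_constraint(
+                    "fk_bar_foo_id_foo", type_="foreignkey"
+                )
+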
+         .. seealso::
+
+            :ref:`dropping_sqlite_foreign_keys`
+
+        :param partial_reordering: a list of tuples, each suggesting a desired
+         ordering of two or more columns in the newly created table.  Requires
+         that :paramref:`.batch_alter_table.recreate` is set to ``"always"``.
+         Examples, given a table with columns "a", "b", "c", and "d":
+
+         Specify the order of all columns::
+
+            with op.batch_alter_table(
+                "some_table",
+                recreate="always",
+                partial_reordering=[("c", "d", "a", "b")],
+            ) as batch_op:
+                pass
+
+         Ensure "d" appears before "c", and "b", appears before "a"::
+
+            with op.batch_alter_table(
+                "some_table",
+                recreate="always",
+                partial_reordering=[("d", "c"), ("b", "a")],
+            ) as batch_op:
+                pass
+
+         The ordering of columns not included in the partial_reordering
+         set is undefined.   It is therefore best to specify the complete
+         ordering of all columns.
+
+        .. note:: batch mode requires SQLAlchemy 0.8 or above.
+
+        .. seealso::
+
+            :ref:`batch_migrations`
+
+        """
+        impl = batch.BatchOperationsImpl(
+            self,
+            table_name,
+            schema,
+            recreate,
+            copy_from,
+            table_args,
+            table_kwargs,
+            reflect_args,
+            reflect_kwargs,
+            naming_convention,
+            partial_reordering,
+        )
+        batch_op = BatchOperations(self.migration_context, impl=impl)
+        yield batch_op
+        impl.flush()
+
+    def get_context(self) -> MigrationContext:
+        """Return the :class:`.MigrationContext` object that's
+        currently in use.
+
+        """
+
+        return self.migration_context
+
+    @overload
+    def invoke(self, operation: CreateTableOp) -> Table: ...
+
+    @overload
+    def invoke(
+        self,
+        operation: Union[
+            AddConstraintOp,
+            DropConstraintOp,
+            CreateIndexOp,
+            DropIndexOp,
+            AddColumnOp,
+            AlterColumnOp,
+            AlterTableOp,
+            CreateTableCommentOp,
+            DropTableCommentOp,
+            DropColumnOp,
+            BulkInsertOp,
+            DropTableOp,
+            ExecuteSQLOp,
+        ],
+    ) -> None: ...
+
+    @overload
+    def invoke(self, operation: MigrateOperation) -> Any: ...
+
+    def invoke(self, operation: MigrateOperation) -> Any:
+        """Given a :class:`.MigrateOperation`, invoke it in terms of
+        this :class:`.Operations` instance.
+
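+        E.g., a hypothetical sketch constructing and invoking an
+        operation object directly::
+
+            from alembic.operations import ops
+
+            op.invoke(ops.DropTableOp("account"))
+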
+        """
+        fn = self._to_impl.dispatch(
+            operation, self.migration_context.impl.__dialect__
+        )
+        return fn(self, operation)
+
+    def f(self, name: str) -> conv:
+        """Indicate a string name that has already had a naming convention
+        applied to it.
+
+        This feature combines with the SQLAlchemy ``naming_convention`` feature
+        to disambiguate constraint names that have already had naming
+        conventions applied to them, versus those that have not.  This is
+        necessary in the case that the ``"%(constraint_name)s"`` token
+        is used within a naming convention, so that it can be identified
+        that this particular name should remain fixed.
+
+        If :meth:`.Operations.f` is used on a constraint, the naming
+        convention will not take effect::
+
+            op.add_column("t", "x", Boolean(name=op.f("ck_bool_t_x")))
+
+        Above, the CHECK constraint generated will have the name
+        ``ck_bool_t_x`` regardless of whether or not a naming convention is
+        in use.
+
+        Alternatively, if a naming convention is in use and
+        :meth:`.Operations.f` is not used, names will be converted
+        according to the convention.  If the ``target_metadata`` contains
+        the naming convention
+        ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the
+        output of the following::
+
+            op.add_column("t", "x", Boolean(name="x"))
+
+        will be::
+
+            CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))
+
+        The function is rendered in the output of autogenerate when
+        a particular constraint name is already converted.
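+
+        For example, autogenerate may emit a directive such as::
+
+            op.create_unique_constraint(op.f("uq_user_name"), "user", ["name"])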
+
+        """
+        return conv(name)
+
+    def inline_literal(
+        self, value: Union[str, int], type_: Optional[TypeEngine[Any]] = None
+    ) -> _literal_bindparam:
+        r"""Produce an 'inline literal' expression, suitable for
+        using in an INSERT, UPDATE, or DELETE statement.
+
+        When using Alembic in "offline" mode, CRUD operations aren't
+        compatible with SQLAlchemy's default behavior surrounding literal
+        values, which is that they are converted into bound values and
+        passed separately into the ``execute()`` method of the DBAPI
+        cursor.  An offline SQL script needs to have these rendered
+        inline.  While it should always be noted that inline literal
+        values are an **enormous** security hole in an application that
+        handles untrusted input, a schema migration is not run in this
+        context, so literals are safe to render inline, with the caveat
+        that advanced types like dates may not be supported directly by
+        SQLAlchemy.
+
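+        E.g., a minimal sketch, assuming an ad-hoc ``account`` table as in
+        the :meth:`.Operations.execute` examples::
+
+            op.execute(
+                account.update().values(
+                    {"name": op.inline_literal("account 2")}
+                )
+            )
+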
+        See :meth:`.Operations.execute` for an example usage of
+        :meth:`.Operations.inline_literal`.
+
+        The environment can also be configured to attempt to render
+        "literal" values inline automatically, for those simple types
+        that are supported by the dialect; see
+        :paramref:`.EnvironmentContext.configure.literal_binds` for this
+        more recently added feature.
+
+        :param value: The value to render.  Strings, integers, and simple
+         numerics should be supported.   Other types like boolean,
+         dates, etc. may or may not be supported yet by various
+         backends.
+        :param type\_: optional - a :class:`sqlalchemy.types.TypeEngine`
+         subclass stating the type of this value.  In SQLAlchemy
+         expressions, this is usually derived automatically
+         from the Python type of the value itself, as well as
+         based on the context in which the value is used.
+
+        .. seealso::
+
+            :paramref:`.EnvironmentContext.configure.literal_binds`
+
+        """
+        return sqla_compat._literal_bindparam(None, value, type_=type_)
+
+    def get_bind(self) -> Connection:
+        """Return the current 'bind'.
+
+        Under normal circumstances, this is the
+        :class:`~sqlalchemy.engine.Connection` currently being used
+        to emit SQL to the database.
+
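+        E.g., a minimal sketch::
+
+            from sqlalchemy import text
+
+            connection = op.get_bind()
+            connection.execute(text("SELECT 1"))
+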
+        In a SQL script ("offline") context, where no live database
+        connection is present, this value is typically ``None``.
+
+        """
+        return self.migration_context.impl.bind  # type: ignore[return-value]
+
+    def run_async(
+        self,
+        async_function: Callable[..., Awaitable[_T]],
+        *args: Any,
+        **kw_args: Any,
+    ) -> _T:
+        """Invoke the given asynchronous callable, passing an asynchronous
+        :class:`~sqlalchemy.ext.asyncio.AsyncConnection` as the first
+        argument.
+
+        This method allows calling async functions from within the
+        synchronous ``upgrade()`` or ``downgrade()`` alembic migration
+        method.
+
+        The async connection passed to the callable shares the same
+        transaction as the connection running in the migration context.
+
+        Any additional positional or keyword arguments passed to this
+        function are passed along to the provided async function.
+
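+        E.g., a hypothetical sketch within a migration's ``upgrade()``
+        (table and function names here are illustrative)::
+
+            from sqlalchemy import text
+
+
+            async def add_account(connection, name):
+                await connection.execute(
+                    text("INSERT INTO account (name) VALUES (:name)"),
+                    {"name": name},
+                )
+
+
+            def upgrade():
+                op.run_async(add_account, "A1")
+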
+        .. versionadded:: 1.11
+
+        .. note::
+
+            This method can be called only when alembic is called using
+            an async dialect.
+        """
+        if not sqla_compat.sqla_14_18:
+            raise NotImplementedError("SQLAlchemy 1.4.18+ required")
+        sync_conn = self.get_bind()
+        if sync_conn is None:
+            raise NotImplementedError("Cannot call run_async in SQL mode")
+        if not sync_conn.dialect.is_async:
+            raise ValueError("Cannot call run_async with a sync engine")
+        from sqlalchemy.ext.asyncio import AsyncConnection
+        from sqlalchemy.util import await_only
+
+        async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
+        return await_only(async_function(async_conn, *args, **kw_args))
+
+
+class Operations(AbstractOperations):
+    """Define high level migration operations.
+
+    Each operation corresponds to some schema migration operation,
+    executed against a particular :class:`.MigrationContext`
+    which in turn represents connectivity to a database,
+    or a file output stream.
+
+    While :class:`.Operations` is normally configured as
+    part of the :meth:`.EnvironmentContext.run_migrations`
+    method called from an ``env.py`` script, a standalone
+    :class:`.Operations` instance can be
+    made for use cases external to regular Alembic
+    migrations by passing in a :class:`.MigrationContext`::
+
+        from alembic.migration import MigrationContext
+        from alembic.operations import Operations
+
+        conn = myengine.connect()
+        ctx = MigrationContext.configure(conn)
+        op = Operations(ctx)
+
+        op.alter_column("t", "c", nullable=True)
+
+    Note that as of 0.8, most of the methods on this class are produced
+    dynamically using the :meth:`.Operations.register_operation`
+    method.
+
+    """
+
+    if TYPE_CHECKING:
+        # START STUB FUNCTIONS: op_cls
+        # ### the following stubs are generated by tools/write_pyi.py ###
+        # ### do not edit ###
+
+        def add_column(
+            self,
+            table_name: str,
+            column: Column[Any],
+            *,
+            schema: Optional[str] = None,
+        ) -> None:
+            """Issue an "add column" instruction using the current
+            migration context.
+
+            e.g.::
+
+                from alembic import op
+                from sqlalchemy import Column, String
+
+                op.add_column("organization", Column("name", String()))
+
+            The :meth:`.Operations.add_column` method typically corresponds
+            to the SQL command "ALTER TABLE... ADD COLUMN".    Within the scope
+            of this command, the column's name, datatype, nullability,
+            and optional server-generated defaults may be indicated.
+
+            .. note::
+
+                With the exception of NOT NULL constraints or single-column FOREIGN
+                KEY constraints, other kinds of constraints such as PRIMARY KEY,
+                UNIQUE or CHECK constraints **cannot** be generated using this
+                method; for these constraints, refer to operations such as
+                :meth:`.Operations.create_primary_key` and
+                :meth:`.Operations.create_check_constraint`. In particular, the
+                following :class:`~sqlalchemy.schema.Column` parameters are
+                **ignored**:
+
+                * :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases
+                  typically do not support an ALTER operation that can add
+                  individual columns one at a time to an existing primary key
+                  constraint, therefore it's less ambiguous to use the
+                  :meth:`.Operations.create_primary_key` method, which assumes no
+                  existing primary key constraint is present.
+                * :paramref:`~sqlalchemy.schema.Column.unique` - use the
+                  :meth:`.Operations.create_unique_constraint` method
+                * :paramref:`~sqlalchemy.schema.Column.index` - use the
+                  :meth:`.Operations.create_index` method
+
+
+            The provided :class:`~sqlalchemy.schema.Column` object may include a
+            :class:`~sqlalchemy.schema.ForeignKey` constraint directive,
+            referencing a remote table name. For this specific type of constraint,
+            Alembic will automatically emit a second ALTER statement in order to
+            add the single-column FOREIGN KEY constraint separately::
+
+                from alembic import op
+                from sqlalchemy import Column, INTEGER, ForeignKey
+
+                op.add_column(
+                    "organization",
+                    Column("account_id", INTEGER, ForeignKey("accounts.id")),
+                )
+
+            The column argument passed to :meth:`.Operations.add_column` is a
+            :class:`~sqlalchemy.schema.Column` construct, used in the same way it's
+            used in SQLAlchemy. In particular, values or functions to be indicated
+            as producing the column's default value on the database side are
+            specified using the ``server_default`` parameter, and not ``default``
+            which only specifies Python-side defaults::
+
+                from alembic import op
+                from sqlalchemy import Column, TIMESTAMP, func
+
+                # specify "DEFAULT NOW" along with the column add
+                op.add_column(
+                    "account",
+                    Column("timestamp", TIMESTAMP, server_default=func.now()),
+                )
+
+            :param table_name: String name of the parent table.
+            :param column: a :class:`sqlalchemy.schema.Column` object
+             representing the new column.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        def alter_column(
+            self,
+            table_name: str,
+            column_name: str,
+            *,
+            nullable: Optional[bool] = None,
+            comment: Union[str, Literal[False], None] = False,
+            server_default: Any = False,
+            new_column_name: Optional[str] = None,
+            type_: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None,
+            existing_type: Union[
+                TypeEngine[Any], Type[TypeEngine[Any]], None
+            ] = None,
+            existing_server_default: Union[
+                str, bool, Identity, Computed, None
+            ] = False,
+            existing_nullable: Optional[bool] = None,
+            existing_comment: Optional[str] = None,
+            schema: Optional[str] = None,
+            **kw: Any,
+        ) -> None:
+            r"""Issue an "alter column" instruction using the
+            current migration context.
+
+            Generally, only that aspect of the column which
+            is being changed, i.e. name, type, nullability,
+            default, needs to be specified.  Multiple changes
+            can also be specified at once and the backend should
+            "do the right thing", emitting each change either
+            separately or together as the backend allows.
+
+            MySQL has special requirements here, since MySQL
+            cannot ALTER a column without a full specification.
+            When producing MySQL-compatible migration files,
+            it is recommended that the ``existing_type``,
+            ``existing_server_default``, and ``existing_nullable``
+            parameters be present, if not being altered.
+
+            Type changes which are against the SQLAlchemy
+            "schema" types :class:`~sqlalchemy.types.Boolean`
+            and  :class:`~sqlalchemy.types.Enum` may also
+            add or drop constraints which accompany those
+            types on backends that don't support them natively.
+            The ``existing_type`` argument is
+            used in this case to identify and remove a previous
+            constraint that was bound to the type object.
+
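+            E.g., a hypothetical sketch changing type and nullability
+            together (MySQL-friendly, as ``existing_type`` is supplied)::
+
+                from alembic import op
+                from sqlalchemy import String
+
+                op.alter_column(
+                    "account",
+                    "name",
+                    existing_type=String(50),
+                    type_=String(100),
+                    nullable=False,
+                )
+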
+            :param table_name: string name of the target table.
+            :param column_name: string name of the target column,
+             as it exists before the operation begins.
+            :param nullable: Optional; specify ``True`` or ``False``
+             to alter the column's nullability.
+            :param server_default: Optional; specify a string
+             SQL expression, :func:`~sqlalchemy.sql.expression.text`,
+             or :class:`~sqlalchemy.schema.DefaultClause` to indicate
+             an alteration to the column's default value.
+             Set to ``None`` to have the default removed.
+            :param comment: optional string text of a new comment to add to the
+             column.
+            :param new_column_name: Optional; specify a string name here to
+             indicate the new name within a column rename operation.
+            :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine`
+             type object to specify a change to the column's type.
+             For SQLAlchemy types that also indicate a constraint (i.e.
+             :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
+             the constraint is also generated.
+            :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
+             currently understood by the MySQL dialect.
+            :param existing_type: Optional; a
+             :class:`~sqlalchemy.types.TypeEngine`
+             type object to specify the previous type.   This
+             is required for all MySQL column alter operations that
+             don't otherwise specify a new type, as well as for
+             when nullability is being changed on a SQL Server
+             column.  It is also used if the type is a so-called
+             SQLAlchemy "schema" type which may define a constraint (i.e.
+             :class:`~sqlalchemy.types.Boolean`,
+             :class:`~sqlalchemy.types.Enum`),
+             so that the constraint can be dropped.
+            :param existing_server_default: Optional; The existing
+             default value of the column.   Required on MySQL if
+             an existing default is not being changed; else MySQL
+             removes the default.
+            :param existing_nullable: Optional; the existing nullability
+             of the column.  Required on MySQL if the existing nullability
+             is not being changed; else MySQL sets this to NULL.
+            :param existing_autoincrement: Optional; the existing autoincrement
+             of the column.  Used for MySQL's system of altering a column
+             that specifies ``AUTO_INCREMENT``.
+            :param existing_comment: string text of the existing comment on the
+             column to be maintained.  Required on MySQL if the existing comment
+             on the column is not being changed.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+            :param postgresql_using: String argument which will indicate a
+             SQL expression to render within the Postgresql-specific USING clause
+             within ALTER COLUMN.    This string is taken directly as raw SQL which
+             must explicitly include any necessary quoting or escaping of tokens
+             within the expression.
+
+            """  # noqa: E501
+            ...
+
+        def bulk_insert(
+            self,
+            table: Union[Table, TableClause],
+            rows: List[Dict[str, Any]],
+            *,
+            multiinsert: bool = True,
+        ) -> None:
+            """Issue a "bulk insert" operation using the current
+            migration context.
+
+            This provides a means of representing an INSERT of multiple rows
+            which works equally well in the context of executing on a live
+            connection as well as that of generating a SQL script.   In the
+            case of a SQL script, the values are rendered inline into the
+            statement.
+
+            e.g.::
+
+                from alembic import op
+                from datetime import date
+                from sqlalchemy.sql import table, column
+                from sqlalchemy import String, Integer, Date
+
+                # Create an ad-hoc table to use for the insert statement.
+                accounts_table = table(
+                    "account",
+                    column("id", Integer),
+                    column("name", String),
+                    column("create_date", Date),
+                )
+
+                op.bulk_insert(
+                    accounts_table,
+                    [
+                        {
+                            "id": 1,
+                            "name": "John Smith",
+                            "create_date": date(2010, 10, 5),
+                        },
+                        {
+                            "id": 2,
+                            "name": "Ed Williams",
+                            "create_date": date(2007, 5, 27),
+                        },
+                        {
+                            "id": 3,
+                            "name": "Wendy Jones",
+                            "create_date": date(2008, 8, 15),
+                        },
+                    ],
+                )
+
+            When using --sql mode, some datatypes may not render inline
+            automatically, such as dates and other special types.   When this
+            issue is present, :meth:`.Operations.inline_literal` may be used::
+
+                op.bulk_insert(
+                    accounts_table,
+                    [
+                        {
+                            "id": 1,
+                            "name": "John Smith",
+                            "create_date": op.inline_literal("2010-10-05"),
+                        },
+                        {
+                            "id": 2,
+                            "name": "Ed Williams",
+                            "create_date": op.inline_literal("2007-05-27"),
+                        },
+                        {
+                            "id": 3,
+                            "name": "Wendy Jones",
+                            "create_date": op.inline_literal("2008-08-15"),
+                        },
+                    ],
+                    multiinsert=False,
+                )
+
+            When using :meth:`.Operations.inline_literal` in conjunction with
+            :meth:`.Operations.bulk_insert`, in order for the statement to work
+            in "online" (e.g. non --sql) mode, the
+            :paramref:`~.Operations.bulk_insert.multiinsert`
+            flag should be set to ``False``, which will have the effect of
+            individual INSERT statements being emitted to the database, each
+            with a distinct VALUES clause, so that the "inline" values can
+            still be rendered, rather than attempting to pass the values
+            as bound parameters.
+
+            :param table: a table object which represents the target of the INSERT.
+
+            :param rows: a list of dictionaries indicating rows.
+
+            :param multiinsert: when at its default of True and --sql mode is not
+               enabled, the INSERT statement will be executed using
+               "executemany()" style, where all elements in the list of
+               dictionaries are passed as bound parameters in a single
+               list.   Setting this to False results in individual INSERT
+               statements being emitted per parameter set, and is needed
+               in those cases where non-literal values are present in the
+               parameter sets.
+
+            """  # noqa: E501
+            ...
+
+        def create_check_constraint(
+            self,
+            constraint_name: Optional[str],
+            table_name: str,
+            condition: Union[str, ColumnElement[bool], TextClause],
+            *,
+            schema: Optional[str] = None,
+            **kw: Any,
+        ) -> None:
+            """Issue a "create check constraint" instruction using the
+            current migration context.
+
+            e.g.::
+
+                from alembic import op
+                from sqlalchemy.sql import column, func
+
+                op.create_check_constraint(
+                    "ck_user_name_len",
+                    "user",
+                    func.len(column("name")) > 5,
+                )
+
+            CHECK constraints are usually against a SQL expression, so ad-hoc
+            table metadata is usually needed.   The function will convert the given
+            arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
+            to an anonymous table in order to emit the CREATE statement.
+
+            :param constraint_name: Name of the check constraint.  The name is necessary
+             so that an ALTER statement can be emitted.  For setups that
+             use an automated naming scheme such as that described at
+             :ref:`sqla:constraint_naming_conventions`,
+             ``name`` here can be ``None``, as the event listener will
+             apply the name to the constraint object when it is associated
+             with the table.
+            :param table_name: String name of the source table.
+            :param condition: SQL expression that's the condition of the
+             constraint. Can be a string or SQLAlchemy expression language
+             structure.
+            :param deferrable: optional bool. If set, emit DEFERRABLE or
+             NOT DEFERRABLE when issuing DDL for this constraint.
+            :param initially: optional string. If set, emit INITIALLY <value>
+             when issuing DDL for this constraint.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        def create_exclude_constraint(
+            self,
+            constraint_name: str,
+            table_name: str,
+            *elements: Any,
+            **kw: Any,
+        ) -> Optional[Table]:
+            """Issue an alter to create an EXCLUDE constraint using the
+            current migration context.
+
+            .. note::  This method is Postgresql specific, and additionally
+               requires at least SQLAlchemy 1.0.
+
+            e.g.::
+
+                from alembic import op
+
+                op.create_exclude_constraint(
+                    "user_excl",
+                    "user",
+                    ("period", "&&"),
+                    ("group", "="),
+                    where=("group != 'some group'"),
+                )
+
+            Note that the expressions work the same way as that of
+            the ``ExcludeConstraint`` object itself; if plain strings are
+            passed, quoting rules must be applied manually.
+
+            :param constraint_name: Name of the constraint.
+            :param table_name: String name of the source table.
+            :param elements: exclude conditions.
+            :param where: SQL expression or SQL string with optional WHERE
+             clause.
+            :param deferrable: optional bool. If set, emit DEFERRABLE or
+             NOT DEFERRABLE when issuing DDL for this constraint.
+            :param initially: optional string. If set, emit INITIALLY <value>
+             when issuing DDL for this constraint.
+            :param schema: Optional schema name to operate within.
+
+            """  # noqa: E501
+            ...
+
+        def create_foreign_key(
+            self,
+            constraint_name: Optional[str],
+            source_table: str,
+            referent_table: str,
+            local_cols: List[str],
+            remote_cols: List[str],
+            *,
+            onupdate: Optional[str] = None,
+            ondelete: Optional[str] = None,
+            deferrable: Optional[bool] = None,
+            initially: Optional[str] = None,
+            match: Optional[str] = None,
+            source_schema: Optional[str] = None,
+            referent_schema: Optional[str] = None,
+            **dialect_kw: Any,
+        ) -> None:
+            """Issue a "create foreign key" instruction using the
+            current migration context.
+
+            e.g.::
+
+                from alembic import op
+
+                op.create_foreign_key(
+                    "fk_user_address",
+                    "address",
+                    "user",
+                    ["user_id"],
+                    ["id"],
+                )
+
+            This internally generates a :class:`~sqlalchemy.schema.Table` object
+            containing the necessary columns, then generates a new
+            :class:`~sqlalchemy.schema.ForeignKeyConstraint`
+            object which it then associates with the
+            :class:`~sqlalchemy.schema.Table`.
+            Any event listeners associated with this action will be fired
+            off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+            construct is ultimately used to generate the ALTER statement.
+
+            :param constraint_name: Name of the foreign key constraint.  The name
+             is necessary so that an ALTER statement can be emitted.  For setups
+             that use an automated naming scheme such as that described at
+             :ref:`sqla:constraint_naming_conventions`,
+             ``name`` here can be ``None``, as the event listener will
+             apply the name to the constraint object when it is associated
+             with the table.
+            :param source_table: String name of the source table.
+            :param referent_table: String name of the destination table.
+            :param local_cols: a list of string column names in the
+             source table.
+            :param remote_cols: a list of string column names in the
+             remote table.
+            :param onupdate: Optional string. If set, emit ON UPDATE <value> when
+             issuing DDL for this constraint. Typical values include CASCADE,
+             SET NULL and RESTRICT.
+            :param ondelete: Optional string. If set, emit ON DELETE <value> when
+             issuing DDL for this constraint. Typical values include CASCADE,
+             SET NULL and RESTRICT.
+            :param deferrable: optional bool. If set, emit DEFERRABLE or NOT
+             DEFERRABLE when issuing DDL for this constraint.
+            :param source_schema: Optional schema name of the source table.
+            :param referent_schema: Optional schema name of the destination table.
+
+            """  # noqa: E501
+            ...
+
+        def create_index(
+            self,
+            index_name: Optional[str],
+            table_name: str,
+            columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
+            *,
+            schema: Optional[str] = None,
+            unique: bool = False,
+            if_not_exists: Optional[bool] = None,
+            **kw: Any,
+        ) -> None:
+            r"""Issue a "create index" instruction using the current
+            migration context.
+
+            e.g.::
+
+                from alembic import op
+
+                op.create_index("ik_test", "t1", ["foo", "bar"])
+
+            Functional indexes can be produced by using the
+            :func:`sqlalchemy.sql.expression.text` construct::
+
+                from alembic import op
+                from sqlalchemy import text
+
+                op.create_index("ik_test", "t1", [text("lower(foo)")])
+
+            :param index_name: name of the index.
+            :param table_name: name of the owning table.
+            :param columns: a list consisting of string column names and/or
+             :func:`~sqlalchemy.sql.expression.text` constructs.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+            :param unique: If True, create a unique index.
+
+            :param quote: Force quoting of this column's name on or off,
+             corresponding to ``True`` or ``False``. When left at its default
+             of ``None``, the column identifier will be quoted according to
+             whether the name is case sensitive (identifiers with at least one
+             upper case character are treated as case sensitive), or if it's a
+             reserved word. This flag is only needed to force quoting of a
+             reserved word which is not known by the SQLAlchemy dialect.
+
+            :param if_not_exists: If True, adds IF NOT EXISTS operator when
+             creating the new index.
+
+             .. versionadded:: 1.12.0
+
+            :param \**kw: Additional keyword arguments not mentioned above are
+             dialect specific, and passed in the form
+             ``<dialectname>_<argname>``.
+             See the documentation regarding an individual dialect at
+             :ref:`dialect_toplevel` for detail on documented arguments.
+
+            """  # noqa: E501
+            ...
+
+        def create_primary_key(
+            self,
+            constraint_name: Optional[str],
+            table_name: str,
+            columns: List[str],
+            *,
+            schema: Optional[str] = None,
+        ) -> None:
+            """Issue a "create primary key" instruction using the current
+            migration context.
+
+            e.g.::
+
+                from alembic import op
+
+                op.create_primary_key("pk_my_table", "my_table", ["id", "version"])
+
+            This internally generates a :class:`~sqlalchemy.schema.Table` object
+            containing the necessary columns, then generates a new
+            :class:`~sqlalchemy.schema.PrimaryKeyConstraint`
+            object which it then associates with the
+            :class:`~sqlalchemy.schema.Table`.
+            Any event listeners associated with this action will be fired
+            off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+            construct is ultimately used to generate the ALTER statement.
+
+            :param constraint_name: Name of the primary key constraint.  The name
+             is necessary so that an ALTER statement can be emitted.  For setups
+             that use an automated naming scheme such as that described at
+             :ref:`sqla:constraint_naming_conventions`
+             ``name`` here can be ``None``, as the event listener will
+             apply the name to the constraint object when it is associated
+             with the table.
+            :param table_name: String name of the target table.
+            :param columns: a list of string column names to be applied to the
+             primary key constraint.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        def create_table(
+            self,
+            table_name: str,
+            *columns: SchemaItem,
+            if_not_exists: Optional[bool] = None,
+            **kw: Any,
+        ) -> Table:
+            r"""Issue a "create table" instruction using the current migration
+            context.
+
+            This directive receives an argument list similar to that of the
+            traditional :class:`sqlalchemy.schema.Table` construct, but without the
+            metadata::
+
+                from sqlalchemy import INTEGER, TIMESTAMP, VARCHAR, NVARCHAR, Column, func
+                from alembic import op
+
+                op.create_table(
+                    "account",
+                    Column("id", INTEGER, primary_key=True),
+                    Column("name", VARCHAR(50), nullable=False),
+                    Column("description", NVARCHAR(200)),
+                    Column("timestamp", TIMESTAMP, server_default=func.now()),
+                )
+
+            Note that :meth:`.create_table` accepts
+            :class:`~sqlalchemy.schema.Column`
+            constructs directly from the SQLAlchemy library.  In particular,
+            default values to be created on the database side are
+            specified using the ``server_default`` parameter, and not
+            ``default`` which only specifies Python-side defaults::
+
+                from alembic import op
+                from sqlalchemy import Column, TIMESTAMP, func
+
+                # specify "DEFAULT NOW" along with the "timestamp" column
+                op.create_table(
+                    "account",
+                    Column("id", INTEGER, primary_key=True),
+                    Column("timestamp", TIMESTAMP, server_default=func.now()),
+                )
+
+            The function also returns a newly created
+            :class:`~sqlalchemy.schema.Table` object, corresponding to the table
+            specification given, which is suitable for
+            immediate SQL operations, in particular
+            :meth:`.Operations.bulk_insert`::
+
+                from sqlalchemy import INTEGER, TIMESTAMP, VARCHAR, NVARCHAR, Column, func
+                from alembic import op
+
+                account_table = op.create_table(
+                    "account",
+                    Column("id", INTEGER, primary_key=True),
+                    Column("name", VARCHAR(50), nullable=False),
+                    Column("description", NVARCHAR(200)),
+                    Column("timestamp", TIMESTAMP, server_default=func.now()),
+                )
+
+                op.bulk_insert(
+                    account_table,
+                    [
+                        {"name": "A1", "description": "account 1"},
+                        {"name": "A2", "description": "account 2"},
+                    ],
+                )
+
+            :param table_name: Name of the table
+            :param \*columns: collection of :class:`~sqlalchemy.schema.Column`
+             objects within the table, as well as optional
+             :class:`~sqlalchemy.schema.Constraint` objects
+             and :class:`~sqlalchemy.schema.Index` objects.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+            :param if_not_exists: If True, adds IF NOT EXISTS operator when
+             creating the new table.
+
+             .. versionadded:: 1.13.3
+            :param \**kw: Other keyword arguments are passed to the underlying
+             :class:`sqlalchemy.schema.Table` object created for the command.
+
+            :return: the :class:`~sqlalchemy.schema.Table` object corresponding
+             to the parameters given.
+
+            """  # noqa: E501
+            ...
+
+        def create_table_comment(
+            self,
+            table_name: str,
+            comment: Optional[str],
+            *,
+            existing_comment: Optional[str] = None,
+            schema: Optional[str] = None,
+        ) -> None:
+            """Emit a COMMENT ON operation to set the comment for a table.
+
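+            e.g., a minimal sketch::
+
+                op.create_table_comment(
+                    "account",
+                    "Customer account records",
+                )
+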
+            :param table_name: string name of the target table.
+            :param comment: string value of the comment being registered against
+             the specified table.
+            :param existing_comment: String value of a comment
+             already registered on the specified table, used within autogenerate
+             so that the operation is reversible, but not required for direct
+             use.
+
+            .. seealso::
+
+                :meth:`.Operations.drop_table_comment`
+
+                :paramref:`.Operations.alter_column.comment`
+
+            """  # noqa: E501
+            ...
+
+        def create_unique_constraint(
+            self,
+            constraint_name: Optional[str],
+            table_name: str,
+            columns: Sequence[str],
+            *,
+            schema: Optional[str] = None,
+            **kw: Any,
+        ) -> Any:
+            """Issue a "create unique constraint" instruction using the
+            current migration context.
+
+            e.g.::
+
+                from alembic import op
+                op.create_unique_constraint("uq_user_name", "user", ["name"])
+
+            This internally generates a :class:`~sqlalchemy.schema.Table` object
+            containing the necessary columns, then generates a new
+            :class:`~sqlalchemy.schema.UniqueConstraint`
+            object which it then associates with the
+            :class:`~sqlalchemy.schema.Table`.
+            Any event listeners associated with this action will be fired
+            off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+            construct is ultimately used to generate the ALTER statement.
+
+            :param constraint_name: Name of the unique constraint.  The name is necessary
+             so that an ALTER statement can be emitted.  For setups that
+             use an automated naming scheme such as that described at
+             :ref:`sqla:constraint_naming_conventions`,
+             ``name`` here can be ``None``, as the event listener will
+             apply the name to the constraint object when it is associated
+             with the table.
+            :param table_name: String name of the source table.
+            :param columns: a list of string column names in the
+             source table.
+            :param deferrable: optional bool. If set, emit DEFERRABLE or
+             NOT DEFERRABLE when issuing DDL for this constraint.
+            :param initially: optional string. If set, emit INITIALLY <value>
+             when issuing DDL for this constraint.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        def drop_column(
+            self,
+            table_name: str,
+            column_name: str,
+            *,
+            schema: Optional[str] = None,
+            **kw: Any,
+        ) -> None:
+            """Issue a "drop column" instruction using the current
+            migration context.
+
+            e.g.::
+
+                op.drop_column("organization", "account_id")
+
+            :param table_name: name of table
+            :param column_name: name of column
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+            :param mssql_drop_check: Optional boolean.  When ``True``, on
+             Microsoft SQL Server only, first
+             drop the CHECK constraint on the column using a
+             SQL-script-compatible
+             block that selects into a @variable from sys.check_constraints,
+             then exec's a separate DROP CONSTRAINT for that constraint.
+            :param mssql_drop_default: Optional boolean.  When ``True``, on
+             Microsoft SQL Server only, first
+             drop the DEFAULT constraint on the column using a
+             SQL-script-compatible
+             block that selects into a @variable from sys.default_constraints,
+             then exec's a separate DROP CONSTRAINT for that default.
+            :param mssql_drop_foreign_key: Optional boolean.  When ``True``, on
+             Microsoft SQL Server only, first
+             drop a single FOREIGN KEY constraint on the column using a
+             SQL-script-compatible
+             block that selects into a @variable from
+             sys.foreign_keys/sys.foreign_key_columns,
+             then exec's a separate DROP CONSTRAINT for that constraint.  At
+             the moment, this only works if the column has exactly one FK
+             constraint which refers to it.
+
+            """  # noqa: E501
+            ...
+
+        def drop_constraint(
+            self,
+            constraint_name: str,
+            table_name: str,
+            type_: Optional[str] = None,
+            *,
+            schema: Optional[str] = None,
+        ) -> None:
+            r"""Drop a constraint of the given name, typically via DROP CONSTRAINT.
+
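+            e.g., a minimal sketch::
+
+                op.drop_constraint(
+                    "fk_user_address", "address", type_="foreignkey"
+                )
+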
+            :param constraint_name: name of the constraint.
+            :param table_name: table name.
+            :param type\_: optional, required on MySQL.  can be
+             'foreignkey', 'primary', 'unique', or 'check'.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        def drop_index(
+            self,
+            index_name: str,
+            table_name: Optional[str] = None,
+            *,
+            schema: Optional[str] = None,
+            if_exists: Optional[bool] = None,
+            **kw: Any,
+        ) -> None:
+            r"""Issue a "drop index" instruction using the current
+            migration context.
+
+            e.g.::
+
+                op.drop_index("accounts")
+
+            :param index_name: name of the index.
+            :param table_name: name of the owning table.  Some
+             backends such as Microsoft SQL Server require this.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            :param if_exists: If True, adds IF EXISTS operator when
+             dropping the index.
+
+             .. versionadded:: 1.12.0
+
+            :param \**kw: Additional keyword arguments not mentioned above are
+             dialect specific, and passed in the form
+             ``<dialectname>_<argname>``.
+             See the documentation regarding an individual dialect at
+             :ref:`dialect_toplevel` for detail on documented arguments.
+
+            """  # noqa: E501
+            ...
+
+        def drop_table(
+            self,
+            table_name: str,
+            *,
+            schema: Optional[str] = None,
+            if_exists: Optional[bool] = None,
+            **kw: Any,
+        ) -> None:
+            r"""Issue a "drop table" instruction using the current
+            migration context.
+
+            e.g.::
+
+                op.drop_table("accounts")
+
+            :param table_name: Name of the table
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+            :param if_exists: If True, adds IF EXISTS operator when
+             dropping the table.
+
+             .. versionadded:: 1.13.3
+            :param \**kw: Other keyword arguments are passed to the underlying
+             :class:`sqlalchemy.schema.Table` object created for the command.
+
+            """  # noqa: E501
+            ...
+
+        def drop_table_comment(
+            self,
+            table_name: str,
+            *,
+            existing_comment: Optional[str] = None,
+            schema: Optional[str] = None,
+        ) -> None:
+            """Issue a "drop table comment" operation to
+            remove an existing comment set on a table.
+
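+            e.g., a minimal sketch::
+
+                op.drop_table_comment("account")
+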
+            :param table_name: string name of the target table.
+            :param existing_comment: An optional string value of a comment already
+             registered on the specified table.
+
+            .. seealso::
+
+                :meth:`.Operations.create_table_comment`
+
+                :paramref:`.Operations.alter_column.comment`
+
+            """  # noqa: E501
+            ...
+
+        def execute(
+            self,
+            sqltext: Union[Executable, str],
+            *,
+            execution_options: Optional[dict[str, Any]] = None,
+        ) -> None:
+            r"""Execute the given SQL using the current migration context.
+
+            The given SQL can be a plain string, e.g.::
+
+                op.execute("INSERT INTO table (foo) VALUES ('some value')")
+
+            Or it can be any kind of Core SQL Expression construct, such as
+            below where we use an update construct::
+
+                from sqlalchemy.sql import table, column
+                from sqlalchemy import String
+                from alembic import op
+
+                account = table("account", column("name", String))
+                op.execute(
+                    account.update()
+                    .where(account.c.name == op.inline_literal("account 1"))
+                    .values({"name": op.inline_literal("account 2")})
+                )
+
+            Above, we made use of the SQLAlchemy
+            :func:`sqlalchemy.sql.expression.table` and
+            :func:`sqlalchemy.sql.expression.column` constructs to make a brief,
+            ad-hoc table construct just for our UPDATE statement.  A full
+            :class:`~sqlalchemy.schema.Table` construct of course works perfectly
+            fine as well, though note it's a recommended practice to at least
+            ensure the definition of a table is self-contained within the migration
+            script, rather than imported from a module that may break compatibility
+            with older migrations.
+
+            In a SQL script context, the statement is emitted directly to the
+            output stream.   There is *no* return result, however, as this
+            function is oriented towards generating a change script
+            that can run in "offline" mode.     Additionally, parameterized
+            statements are discouraged here, as they *will not work* in offline
+            mode.  Above, we use :meth:`.inline_literal` where parameters are
+            to be used.
+
+            For full interaction with a connected database where parameters can
+            also be used normally, use the "bind" available from the context::
+
+                from alembic import op
+
+                connection = op.get_bind()
+
+                connection.execute(
+                    account.update()
+                    .where(account.c.name == "account 1")
+                    .values({"name": "account 2"})
+                )
+
+            Additionally, when passing the statement as a plain string, it is first
+            coerced into a :func:`sqlalchemy.sql.expression.text` construct
+            before being passed along.  In the less likely case that the
+            literal SQL string contains a colon, it must be escaped with a
+            backslash, as::
+
+               op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')")
+
+
+            :param sqltext: Any legal SQLAlchemy expression, including:
+
+            * a string
+            * a :func:`sqlalchemy.sql.expression.text` construct.
+            * a :func:`sqlalchemy.sql.expression.insert` construct.
+            * a :func:`sqlalchemy.sql.expression.update` construct.
+            * a :func:`sqlalchemy.sql.expression.delete` construct.
+            * Any "executable" described in SQLAlchemy Core documentation,
+              noting that no result set is returned.
+
+            .. note::  when passing a plain string, the statement is coerced into
+               a :func:`sqlalchemy.sql.expression.text` construct. This construct
+               considers symbols with colons, e.g. ``:foo`` to be bound parameters.
+               To avoid this, ensure that colon symbols are escaped, e.g.
+               ``\:foo``.
+
+            :param execution_options: Optional dictionary of
+             execution options, will be passed to
+             :meth:`sqlalchemy.engine.Connection.execution_options`.
+            """  # noqa: E501
+            ...
+
+        def rename_table(
+            self,
+            old_table_name: str,
+            new_table_name: str,
+            *,
+            schema: Optional[str] = None,
+        ) -> None:
+            """Emit an ALTER TABLE to rename a table.
+
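+            e.g., a minimal sketch::
+
+                op.rename_table("user_account", "account")
+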
+            :param old_table_name: old name.
+            :param new_table_name: new name.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        # END STUB FUNCTIONS: op_cls
+
+
+class BatchOperations(AbstractOperations):
+    """Modifies the interface :class:`.Operations` for batch mode.
+
+    This basically omits the ``table_name`` and ``schema`` parameters
+    from associated methods, as these are a given when running under batch
+    mode.
+
+    .. seealso::
+
+        :meth:`.Operations.batch_alter_table`
+
+    Note that as of 0.8, most of the methods on this class are produced
+    dynamically using the :meth:`.Operations.register_operation`
+    method.
+
+    """
+
+    impl: BatchOperationsImpl
+
+    def _noop(self, operation: Any) -> NoReturn:
+        raise NotImplementedError(
+            "The %s method does not apply to a batch table alter operation."
+            % operation
+        )
+
+    if TYPE_CHECKING:
+        # START STUB FUNCTIONS: batch_op
+        # ### the following stubs are generated by tools/write_pyi.py ###
+        # ### do not edit ###
+
+        def add_column(
+            self,
+            column: Column[Any],
+            *,
+            insert_before: Optional[str] = None,
+            insert_after: Optional[str] = None,
+        ) -> None:
+            """Issue an "add column" instruction using the current
+            batch migration context.
+
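+            e.g., a sketch using the ``batch_op`` yielded by
+            :meth:`.Operations.batch_alter_table`::
+
+                from sqlalchemy import Column, Integer
+
+                with op.batch_alter_table("account") as batch_op:
+                    batch_op.add_column(Column("foo", Integer))
+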
+            .. seealso::
+
+                :meth:`.Operations.add_column`
+
+            """  # noqa: E501
+            ...
+
+        def alter_column(
+            self,
+            column_name: str,
+            *,
+            nullable: Optional[bool] = None,
+            comment: Union[str, Literal[False], None] = False,
+            server_default: Any = False,
+            new_column_name: Optional[str] = None,
+            type_: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None,
+            existing_type: Union[
+                TypeEngine[Any], Type[TypeEngine[Any]], None
+            ] = None,
+            existing_server_default: Union[
+                str, bool, Identity, Computed, None
+            ] = False,
+            existing_nullable: Optional[bool] = None,
+            existing_comment: Optional[str] = None,
+            insert_before: Optional[str] = None,
+            insert_after: Optional[str] = None,
+            **kw: Any,
+        ) -> None:
+            """Issue an "alter column" instruction using the current
+            batch migration context.
+
+            Parameters are the same as those of
+            :meth:`.Operations.alter_column`, as well as the following
+            option(s):
+
+            :param insert_before: String name of an existing column which this
+             column should be placed before, when creating the new table.
+
+            :param insert_after: String name of an existing column which this
+             column should be placed after, when creating the new table.  If
+             both :paramref:`.BatchOperations.alter_column.insert_before`
+             and :paramref:`.BatchOperations.alter_column.insert_after` are
+             omitted, the column is inserted after the last existing column
+             in the table.
+
+            .. seealso::
+
+                :meth:`.Operations.alter_column`
+
+
+            """  # noqa: E501
+            ...
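+        # Sketch, assuming a "user" table with existing "name" and
+        # "nickname" columns; insert_after only takes effect when the
+        # table is recreated:
+        #
+        #     with op.batch_alter_table("user", recreate="always") as batch_op:
+        #         batch_op.alter_column(
+        #             "nickname",
+        #             new_column_name="handle",
+        #             existing_type=sa.String(50),
+        #             insert_after="name",
+        #         )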
+
+        def create_check_constraint(
+            self,
+            constraint_name: str,
+            condition: Union[str, ColumnElement[bool], TextClause],
+            **kw: Any,
+        ) -> None:
+            """Issue a "create check constraint" instruction using the
+            current batch migration context.
+
+            The batch form of this call omits the ``source`` and ``schema``
+            arguments from the call.
+
+            .. seealso::
+
+                :meth:`.Operations.create_check_constraint`
+
+            """  # noqa: E501
+            ...
+
+        def create_exclude_constraint(
+            self, constraint_name: str, *elements: Any, **kw: Any
+        ) -> Optional[Table]:
+            """Issue a "create exclude constraint" instruction using the
+            current batch migration context.
+
+            .. note::  This method is PostgreSQL specific, and additionally
+               requires at least SQLAlchemy 1.0.
+
+            .. seealso::
+
+                :meth:`.Operations.create_exclude_constraint`
+
+            """  # noqa: E501
+            ...
+
+        def create_foreign_key(
+            self,
+            constraint_name: Optional[str],
+            referent_table: str,
+            local_cols: List[str],
+            remote_cols: List[str],
+            *,
+            referent_schema: Optional[str] = None,
+            onupdate: Optional[str] = None,
+            ondelete: Optional[str] = None,
+            deferrable: Optional[bool] = None,
+            initially: Optional[str] = None,
+            match: Optional[str] = None,
+            **dialect_kw: Any,
+        ) -> None:
+            """Issue a "create foreign key" instruction using the
+            current batch migration context.
+
+            The batch form of this call omits the ``source`` and ``source_schema``
+            arguments from the call.
+
+            e.g.::
+
+                with batch_alter_table("address") as batch_op:
+                    batch_op.create_foreign_key(
+                        "fk_user_address",
+                        "user",
+                        ["user_id"],
+                        ["id"],
+                    )
+
+            .. seealso::
+
+                :meth:`.Operations.create_foreign_key`
+
+            """  # noqa: E501
+            ...
+
+        def create_index(
+            self, index_name: str, columns: List[str], **kw: Any
+        ) -> None:
+            """Issue a "create index" instruction using the
+            current batch migration context.
+
+            .. seealso::
+
+                :meth:`.Operations.create_index`
+
+            """  # noqa: E501
+            ...
+
+        def create_primary_key(
+            self, constraint_name: Optional[str], columns: List[str]
+        ) -> None:
+            """Issue a "create primary key" instruction using the
+            current batch migration context.
+
+            The batch form of this call omits the ``table_name`` and ``schema``
+            arguments from the call.
+
+            .. seealso::
+
+                :meth:`.Operations.create_primary_key`
+
+            """  # noqa: E501
+            ...
+
+        def create_table_comment(
+            self,
+            comment: Optional[str],
+            *,
+            existing_comment: Optional[str] = None,
+        ) -> None:
+            """Emit a COMMENT ON operation to set the comment for a table
+            using the current batch migration context.
+
+            :param comment: string value of the comment being registered against
+             the specified table.
+            :param existing_comment: String value of a comment
+             already registered on the specified table, used within autogenerate
+             so that the operation is reversible, but not required for direct
+             use.
+
+            """  # noqa: E501
+            ...
+
+        def create_unique_constraint(
+            self, constraint_name: str, columns: Sequence[str], **kw: Any
+        ) -> Any:
+            """Issue a "create unique constraint" instruction using the
+            current batch migration context.
+
+            The batch form of this call omits the ``source`` and ``schema``
+            arguments from the call.
+
+            .. seealso::
+
+                :meth:`.Operations.create_unique_constraint`
+
+            """  # noqa: E501
+            ...
+
+        def drop_column(self, column_name: str, **kw: Any) -> None:
+            """Issue a "drop column" instruction using the current
+            batch migration context.
+
+            .. seealso::
+
+                :meth:`.Operations.drop_column`
+
+            """  # noqa: E501
+            ...
+
+        def drop_constraint(
+            self, constraint_name: str, type_: Optional[str] = None
+        ) -> None:
+            """Issue a "drop constraint" instruction using the
+            current batch migration context.
+
+            The batch form of this call omits the ``table_name`` and ``schema``
+            arguments from the call.
+
+            .. seealso::
+
+                :meth:`.Operations.drop_constraint`
+
+            """  # noqa: E501
+            ...
+
+        def drop_index(self, index_name: str, **kw: Any) -> None:
+            """Issue a "drop index" instruction using the
+            current batch migration context.
+
+            .. seealso::
+
+                :meth:`.Operations.drop_index`
+
+            """  # noqa: E501
+            ...
+
+        def drop_table_comment(
+            self, *, existing_comment: Optional[str] = None
+        ) -> None:
+            """Issue a "drop table comment" operation to
+            remove an existing comment set on a table using the current
+            batch operations context.
+
+            :param existing_comment: An optional string value of a comment already
+             registered on the specified table.
+
+            """  # noqa: E501
+            ...
+
+        def execute(
+            self,
+            sqltext: Union[Executable, str],
+            *,
+            execution_options: Optional[dict[str, Any]] = None,
+        ) -> None:
+            """Execute the given SQL using the current migration context.
+
+            .. seealso::
+
+                :meth:`.Operations.execute`
+
+            """  # noqa: E501
+            ...
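+        # Sketch (assuming ``import sqlalchemy as sa`` and a "user" table);
+        # note the statement runs immediately against the migration
+        # connection rather than being queued with the batch operations:
+        #
+        #     with op.batch_alter_table("user") as batch_op:
+        #         batch_op.execute(sa.text("UPDATE user SET active = 1"))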
+
+        # END STUB FUNCTIONS: batch_op
diff --git a/.venv/lib/python3.12/site-packages/alembic/operations/batch.py b/.venv/lib/python3.12/site-packages/alembic/operations/batch.py
new file mode 100644
index 00000000..fe183e9c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/operations/batch.py
@@ -0,0 +1,718 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import CheckConstraint
+from sqlalchemy import Column
+from sqlalchemy import ForeignKeyConstraint
+from sqlalchemy import Index
+from sqlalchemy import MetaData
+from sqlalchemy import PrimaryKeyConstraint
+from sqlalchemy import schema as sql_schema
+from sqlalchemy import select
+from sqlalchemy import Table
+from sqlalchemy import types as sqltypes
+from sqlalchemy.sql.schema import SchemaEventTarget
+from sqlalchemy.util import OrderedDict
+from sqlalchemy.util import topological
+
+from ..util import exc
+from ..util.sqla_compat import _columns_for_constraint
+from ..util.sqla_compat import _copy
+from ..util.sqla_compat import _copy_expression
+from ..util.sqla_compat import _ensure_scope_for_ddl
+from ..util.sqla_compat import _fk_is_self_referential
+from ..util.sqla_compat import _idx_table_bound_expressions
+from ..util.sqla_compat import _is_type_bound
+from ..util.sqla_compat import _remove_column_from_collection
+from ..util.sqla_compat import _resolve_for_variant
+from ..util.sqla_compat import constraint_name_defined
+from ..util.sqla_compat import constraint_name_string
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy.engine import Dialect
+    from sqlalchemy.sql.elements import ColumnClause
+    from sqlalchemy.sql.elements import quoted_name
+    from sqlalchemy.sql.functions import Function
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from ..ddl.impl import DefaultImpl
+
+
+class BatchOperationsImpl:
+    def __init__(
+        self,
+        operations,
+        table_name,
+        schema,
+        recreate,
+        copy_from,
+        table_args,
+        table_kwargs,
+        reflect_args,
+        reflect_kwargs,
+        naming_convention,
+        partial_reordering,
+    ):
+        self.operations = operations
+        self.table_name = table_name
+        self.schema = schema
+        if recreate not in ("auto", "always", "never"):
+            raise ValueError(
+                "recreate may be one of 'auto', 'always', or 'never'."
+            )
+        self.recreate = recreate
+        self.copy_from = copy_from
+        self.table_args = table_args
+        self.table_kwargs = dict(table_kwargs)
+        self.reflect_args = reflect_args
+        self.reflect_kwargs = dict(reflect_kwargs)
+        self.reflect_kwargs.setdefault(
+            "listeners", list(self.reflect_kwargs.get("listeners", ()))
+        )
+        self.reflect_kwargs["listeners"].append(
+            ("column_reflect", operations.impl.autogen_column_reflect)
+        )
+        self.naming_convention = naming_convention
+        self.partial_reordering = partial_reordering
+        self.batch = []
+
+    @property
+    def dialect(self) -> Dialect:
+        return self.operations.impl.dialect
+
+    @property
+    def impl(self) -> DefaultImpl:
+        return self.operations.impl
+
+    def _should_recreate(self) -> bool:
+        if self.recreate == "auto":
+            return self.operations.impl.requires_recreate_in_batch(self)
+        elif self.recreate == "always":
+            return True
+        else:
+            return False
+
+    def flush(self) -> None:
+        should_recreate = self._should_recreate()
+
+        with _ensure_scope_for_ddl(self.impl.connection):
+            if not should_recreate:
+                for opname, arg, kw in self.batch:
+                    fn = getattr(self.operations.impl, opname)
+                    fn(*arg, **kw)
+            else:
+                if self.naming_convention:
+                    m1 = MetaData(naming_convention=self.naming_convention)
+                else:
+                    m1 = MetaData()
+
+                if self.copy_from is not None:
+                    existing_table = self.copy_from
+                    reflected = False
+                else:
+                    if self.operations.migration_context.as_sql:
+                        raise exc.CommandError(
+                            f"This operation cannot proceed in --sql mode; "
+                            f"batch mode with dialect "
+                            f"{self.operations.migration_context.dialect.name} "  # noqa: E501
+                            f"requires a live database connection with which "
+                            f'to reflect the table "{self.table_name}". '
+                            f"To generate a batch SQL migration script using "
+                            "table "
+                            '"move and copy", a complete Table object '
+                            f'should be passed to the "copy_from" argument '
+                            "of the batch_alter_table() method so that table "
+                            "reflection can be skipped."
+                        )
+
+                    existing_table = Table(
+                        self.table_name,
+                        m1,
+                        schema=self.schema,
+                        autoload_with=self.operations.get_bind(),
+                        *self.reflect_args,
+                        **self.reflect_kwargs,
+                    )
+                    reflected = True
+
+                batch_impl = ApplyBatchImpl(
+                    self.impl,
+                    existing_table,
+                    self.table_args,
+                    self.table_kwargs,
+                    reflected,
+                    partial_reordering=self.partial_reordering,
+                )
+                for opname, arg, kw in self.batch:
+                    fn = getattr(batch_impl, opname)
+                    fn(*arg, **kw)
+
+                batch_impl._create(self.impl)
+
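+    # Note: the methods below only queue ("opname", args, kwargs) tuples
+    # onto self.batch; flush() above replays them either directly against
+    # the live impl (plain ALTER statements) or against an ApplyBatchImpl
+    # when the table is recreated via "move and copy".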
+    def alter_column(self, *arg, **kw) -> None:
+        self.batch.append(("alter_column", arg, kw))
+
+    def add_column(self, *arg, **kw) -> None:
+        if (
+            "insert_before" in kw or "insert_after" in kw
+        ) and not self._should_recreate():
+            raise exc.CommandError(
+                "Can't specify insert_before or insert_after when using "
+                "ALTER; please specify recreate='always'"
+            )
+        self.batch.append(("add_column", arg, kw))
+
+    def drop_column(self, *arg, **kw) -> None:
+        self.batch.append(("drop_column", arg, kw))
+
+    def add_constraint(self, const: Constraint) -> None:
+        self.batch.append(("add_constraint", (const,), {}))
+
+    def drop_constraint(self, const: Constraint) -> None:
+        self.batch.append(("drop_constraint", (const,), {}))
+
+    def rename_table(self, *arg, **kw):
+        self.batch.append(("rename_table", arg, kw))
+
+    def create_index(self, idx: Index, **kw: Any) -> None:
+        self.batch.append(("create_index", (idx,), kw))
+
+    def drop_index(self, idx: Index, **kw: Any) -> None:
+        self.batch.append(("drop_index", (idx,), kw))
+
+    def create_table_comment(self, table):
+        self.batch.append(("create_table_comment", (table,), {}))
+
+    def drop_table_comment(self, table):
+        self.batch.append(("drop_table_comment", (table,), {}))
+
+    def create_table(self, table):
+        raise NotImplementedError("Can't create table in batch mode")
+
+    def drop_table(self, table):
+        raise NotImplementedError("Can't drop table in batch mode")
+
+    def create_column_comment(self, column):
+        self.batch.append(("create_column_comment", (column,), {}))
+
+
+class ApplyBatchImpl:
+    def __init__(
+        self,
+        impl: DefaultImpl,
+        table: Table,
+        table_args: tuple,
+        table_kwargs: Dict[str, Any],
+        reflected: bool,
+        partial_reordering: tuple = (),
+    ) -> None:
+        self.impl = impl
+        self.table = table  # this is a Table object
+        self.table_args = table_args
+        self.table_kwargs = table_kwargs
+        self.temp_table_name = self._calc_temp_name(table.name)
+        self.new_table: Optional[Table] = None
+
+        self.partial_reordering = partial_reordering  # tuple of tuples
+        self.add_col_ordering: Tuple[
+            Tuple[str, str], ...
+        ] = ()  # tuple of tuples
+
+        self.column_transfers = OrderedDict(
+            (c.name, {"expr": c}) for c in self.table.c
+        )
+        self.existing_ordering = list(self.column_transfers)
+
+        self.reflected = reflected
+        self._grab_table_elements()
+
+    @classmethod
+    def _calc_temp_name(cls, tablename: Union[quoted_name, str]) -> str:
+        return ("_alembic_tmp_%s" % tablename)[0:50]
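+    # e.g. _calc_temp_name("user") -> "_alembic_tmp_user"; the [0:50]
+    # slice keeps the generated name within typical identifier length
+    # limits.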
+
+    def _grab_table_elements(self) -> None:
+        schema = self.table.schema
+        self.columns: Dict[str, Column[Any]] = OrderedDict()
+        for c in self.table.c:
+            c_copy = _copy(c, schema=schema)
+            c_copy.unique = c_copy.index = False
+            # ensure that the type object was copied,
+            # as we may need to modify it in-place
+            if isinstance(c.type, SchemaEventTarget):
+                assert c_copy.type is not c.type
+            self.columns[c.name] = c_copy
+        self.named_constraints: Dict[str, Constraint] = {}
+        self.unnamed_constraints = []
+        self.col_named_constraints = {}
+        self.indexes: Dict[str, Index] = {}
+        self.new_indexes: Dict[str, Index] = {}
+
+        for const in self.table.constraints:
+            if _is_type_bound(const):
+                continue
+            elif (
+                self.reflected
+                and isinstance(const, CheckConstraint)
+                and not const.name
+            ):
+                # TODO: we are skipping unnamed reflected CheckConstraint
+                # because we have no way to determine _is_type_bound()
+                # for these.
+                pass
+            elif constraint_name_string(const.name):
+                self.named_constraints[const.name] = const
+            else:
+                self.unnamed_constraints.append(const)
+
+        if not self.reflected:
+            for col in self.table.c:
+                for const in col.constraints:
+                    if const.name:
+                        self.col_named_constraints[const.name] = (col, const)
+
+        for idx in self.table.indexes:
+            self.indexes[idx.name] = idx  # type: ignore[index]
+
+        for k in self.table.kwargs:
+            self.table_kwargs.setdefault(k, self.table.kwargs[k])
+
+    def _adjust_self_columns_for_partial_reordering(self) -> None:
+        pairs = set()
+
+        col_by_idx = list(self.columns)
+
+        if self.partial_reordering:
+            for tuple_ in self.partial_reordering:
+                for index, elem in enumerate(tuple_):
+                    if index > 0:
+                        pairs.add((tuple_[index - 1], elem))
+        else:
+            for index, elem in enumerate(self.existing_ordering):
+                if index > 0:
+                    pairs.add((col_by_idx[index - 1], elem))
+
+        pairs.update(self.add_col_ordering)
+
+        # this can happen if some columns were dropped and not removed
+        # from existing_ordering.  this should be prevented already, but
+        # conservatively making sure this didn't happen
+        pairs_list = [p for p in pairs if p[0] != p[1]]
+
+        sorted_ = list(
+            topological.sort(pairs_list, col_by_idx, deterministic_order=True)
+        )
+        self.columns = OrderedDict((k, self.columns[k]) for k in sorted_)
+        self.column_transfers = OrderedDict(
+            (k, self.column_transfers[k]) for k in sorted_
+        )
+
+    def _transfer_elements_to_new_table(self) -> None:
+        assert self.new_table is None, "Can only create new table once"
+
+        m = MetaData()
+        schema = self.table.schema
+
+        if self.partial_reordering or self.add_col_ordering:
+            self._adjust_self_columns_for_partial_reordering()
+
+        self.new_table = new_table = Table(
+            self.temp_table_name,
+            m,
+            *(list(self.columns.values()) + list(self.table_args)),
+            schema=schema,
+            **self.table_kwargs,
+        )
+
+        for const in (
+            list(self.named_constraints.values()) + self.unnamed_constraints
+        ):
+            const_columns = {c.key for c in _columns_for_constraint(const)}
+
+            if not const_columns.issubset(self.column_transfers):
+                continue
+
+            const_copy: Constraint
+            if isinstance(const, ForeignKeyConstraint):
+                if _fk_is_self_referential(const):
+                    # for self-referential constraint, refer to the
+                    # *original* table name, and not _alembic_batch_temp.
+                    # This is consistent with how we're handling
+                    # FK constraints from other tables; we assume that
+                    # SQLite, with foreign key enforcement off, just keeps
+                    # the names unchanged, so when we rename back, they
+                    # match again.
+                    const_copy = _copy(
+                        const, schema=schema, target_table=self.table
+                    )
+                else:
+                    # "target_table" for ForeignKeyConstraint.copy() is
+                    # only used if the FK is detected as being
+                    # self-referential, which we are handling above.
+                    const_copy = _copy(const, schema=schema)
+            else:
+                const_copy = _copy(
+                    const, schema=schema, target_table=new_table
+                )
+            if isinstance(const, ForeignKeyConstraint):
+                self._setup_referent(m, const)
+            new_table.append_constraint(const_copy)
+
+    def _gather_indexes_from_both_tables(self) -> List[Index]:
+        assert self.new_table is not None
+        idx: List[Index] = []
+
+        for idx_existing in self.indexes.values():
+            # this is a lift-and-move from Table.to_metadata
+
+            if idx_existing._column_flag:
+                continue
+
+            idx_copy = Index(
+                idx_existing.name,
+                unique=idx_existing.unique,
+                *[
+                    _copy_expression(expr, self.new_table)
+                    for expr in _idx_table_bound_expressions(idx_existing)
+                ],
+                _table=self.new_table,
+                **idx_existing.kwargs,
+            )
+            idx.append(idx_copy)
+
+        for index in self.new_indexes.values():
+            idx.append(
+                Index(
+                    index.name,
+                    unique=index.unique,
+                    *[self.new_table.c[col] for col in index.columns.keys()],
+                    **index.kwargs,
+                )
+            )
+        return idx
+
+    def _setup_referent(
+        self, metadata: MetaData, constraint: ForeignKeyConstraint
+    ) -> None:
+        spec = constraint.elements[0]._get_colspec()
+        parts = spec.split(".")
+        tname = parts[-2]
+        if len(parts) == 3:
+            referent_schema = parts[0]
+        else:
+            referent_schema = None
+
+        if tname != self.temp_table_name:
+            key = sql_schema._get_table_key(tname, referent_schema)
+
+            def colspec(elem: Any):
+                return elem._get_colspec()
+
+            if key in metadata.tables:
+                t = metadata.tables[key]
+                for elem in constraint.elements:
+                    colname = colspec(elem).split(".")[-1]
+                    if colname not in t.c:
+                        t.append_column(Column(colname, sqltypes.NULLTYPE))
+            else:
+                Table(
+                    tname,
+                    metadata,
+                    *[
+                        Column(n, sqltypes.NULLTYPE)
+                        for n in [
+                            colspec(elem).split(".")[-1]
+                            for elem in constraint.elements
+                        ]
+                    ],
+                    schema=referent_schema,
+                )
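+    # Illustration: a constraint element spec of "user.id" yields
+    # tname="user", referent_schema=None; "public.user.id" yields
+    # tname="user", referent_schema="public".  The stub Table built here
+    # only needs the referenced column names, hence NULLTYPE columns.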
+
+    def _create(self, op_impl: DefaultImpl) -> None:
+        self._transfer_elements_to_new_table()
+
+        op_impl.prep_table_for_batch(self, self.table)
+        assert self.new_table is not None
+        op_impl.create_table(self.new_table)
+
+        try:
+            op_impl._exec(
+                self.new_table.insert()
+                .inline()
+                .from_select(
+                    list(
+                        k
+                        for k, transfer in self.column_transfers.items()
+                        if "expr" in transfer
+                    ),
+                    select(
+                        *[
+                            transfer["expr"]
+                            for transfer in self.column_transfers.values()
+                            if "expr" in transfer
+                        ]
+                    ),
+                )
+            )
+            op_impl.drop_table(self.table)
+        except:
+            # on any failure, drop the partially-built temp table and
+            # re-raise
+            op_impl.drop_table(self.new_table)
+            raise
+        else:
+            op_impl.rename_table(
+                self.temp_table_name, self.table.name, schema=self.table.schema
+            )
+            self.new_table.name = self.table.name
+            try:
+                for idx in self._gather_indexes_from_both_tables():
+                    op_impl.create_index(idx)
+            finally:
+                self.new_table.name = self.temp_table_name
+
+    def alter_column(
+        self,
+        table_name: str,
+        column_name: str,
+        nullable: Optional[bool] = None,
+        server_default: Optional[Union[Function[Any], str, bool]] = False,
+        name: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        autoincrement: Optional[Union[bool, Literal["auto"]]] = None,
+        comment: Union[str, Literal[False]] = False,
+        **kw,
+    ) -> None:
+        existing = self.columns[column_name]
+        existing_transfer: Dict[str, Any] = self.column_transfers[column_name]
+        if name is not None and name != column_name:
+            # note that we don't change '.key' - we keep referring
+            # to the renamed column by its old key in _create().  neat!
+            existing.name = name
+            existing_transfer["name"] = name
+
+            existing_type = kw.get("existing_type", None)
+            if existing_type:
+                resolved_existing_type = _resolve_for_variant(
+                    kw["existing_type"], self.impl.dialect
+                )
+
+                # pop named constraints for Boolean/Enum for rename
+                if (
+                    isinstance(resolved_existing_type, SchemaEventTarget)
+                    and resolved_existing_type.name  # type:ignore[attr-defined]  # noqa: E501
+                ):
+                    self.named_constraints.pop(
+                        resolved_existing_type.name,  # type:ignore[attr-defined]  # noqa: E501
+                        None,
+                    )
+
+        if type_ is not None:
+            type_ = sqltypes.to_instance(type_)
+            # old type is being discarded so turn off eventing
+            # rules. Alternatively we can
+            # erase the events set up by this type, but this is simpler.
+            # we also ignore the drop_constraint that will come here from
+            # Operations.implementation_for(alter_column)
+
+            if isinstance(existing.type, SchemaEventTarget):
+                existing.type._create_events = (  # type:ignore[attr-defined]
+                    existing.type.create_constraint  # type:ignore[attr-defined] # noqa
+                ) = False
+
+            self.impl.cast_for_batch_migrate(
+                existing, existing_transfer, type_
+            )
+
+            existing.type = type_
+
+            # we *dont* however set events for the new type, because
+            # alter_column is invoked from
+            # Operations.implementation_for(alter_column) which already
+            # will emit an add_constraint()
+
+        if nullable is not None:
+            existing.nullable = nullable
+        if server_default is not False:
+            if server_default is None:
+                existing.server_default = None
+            else:
+                sql_schema.DefaultClause(
+                    server_default  # type: ignore[arg-type]
+                )._set_parent(existing)
+        if autoincrement is not None:
+            existing.autoincrement = bool(autoincrement)
+
+        if comment is not False:
+            existing.comment = comment
+
+    def _setup_dependencies_for_add_column(
+        self,
+        colname: str,
+        insert_before: Optional[str],
+        insert_after: Optional[str],
+    ) -> None:
+        index_cols = self.existing_ordering
+        col_indexes = {name: i for i, name in enumerate(index_cols)}
+
+        if not self.partial_reordering:
+            if insert_after:
+                if not insert_before:
+                    if insert_after in col_indexes:
+                        # insert after an existing column
+                        idx = col_indexes[insert_after] + 1
+                        if idx < len(index_cols):
+                            insert_before = index_cols[idx]
+                    else:
+                        # insert after a column that is also new
+                        insert_before = dict(self.add_col_ordering)[
+                            insert_after
+                        ]
+            if insert_before:
+                if not insert_after:
+                    if insert_before in col_indexes:
+                        # insert before an existing column
+                        idx = col_indexes[insert_before] - 1
+                        if idx >= 0:
+                            insert_after = index_cols[idx]
+                    else:
+                        # insert before a column that is also new
+                        insert_after = {
+                            b: a for a, b in self.add_col_ordering
+                        }[insert_before]
+
+        if insert_before:
+            self.add_col_ordering += ((colname, insert_before),)
+        if insert_after:
+            self.add_col_ordering += ((insert_after, colname),)
+
+        if (
+            not self.partial_reordering
+            and not insert_before
+            and not insert_after
+            and col_indexes
+        ):
+            self.add_col_ordering += ((index_cols[-1], colname),)
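+    # Worked example: with existing columns ("id", "name") and
+    # add_column("email", insert_after="id"), this records the pairs
+    # ("id", "email") and ("email", "name"); the topological sort in
+    # _adjust_self_columns_for_partial_reordering() then yields
+    # ["id", "email", "name"].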
+
+    def add_column(
+        self,
+        table_name: str,
+        column: Column[Any],
+        insert_before: Optional[str] = None,
+        insert_after: Optional[str] = None,
+        **kw,
+    ) -> None:
+        self._setup_dependencies_for_add_column(
+            column.name, insert_before, insert_after
+        )
+        # we copy the column because operations.add_column()
+        # gives us a Column that is part of a Table already.
+        self.columns[column.name] = _copy(column, schema=self.table.schema)
+        self.column_transfers[column.name] = {}
+
+    def drop_column(
+        self,
+        table_name: str,
+        column: Union[ColumnClause[Any], Column[Any]],
+        **kw,
+    ) -> None:
+        if column.name in self.table.primary_key.columns:
+            _remove_column_from_collection(
+                self.table.primary_key.columns, column
+            )
+        del self.columns[column.name]
+        del self.column_transfers[column.name]
+        self.existing_ordering.remove(column.name)
+
+        # pop named constraints for Boolean/Enum for rename
+        if (
+            "existing_type" in kw
+            and isinstance(kw["existing_type"], SchemaEventTarget)
+            and kw["existing_type"].name  # type:ignore[attr-defined]
+        ):
+            self.named_constraints.pop(
+                kw["existing_type"].name, None  # type:ignore[attr-defined]
+            )
+
+    def create_column_comment(self, column):
+        """the batch table creation function will issue create_column_comment
+        on the real "impl" as part of the create table process.
+
+        That is, the Column object will have the comment on it already,
+        so when it is received by add_column() it will be a normal part of
+        the CREATE TABLE and doesn't need an extra step here.
+
+        """
+
+    def create_table_comment(self, table):
+        """the batch table creation function will issue create_table_comment
+        on the real "impl" as part of the create table process.
+
+        """
+
+    def drop_table_comment(self, table):
+        """the batch table creation function will issue drop_table_comment
+        on the real "impl" as part of the create table process.
+
+        """
+
+    def add_constraint(self, const: Constraint) -> None:
+        if not constraint_name_defined(const.name):
+            raise ValueError("Constraint must have a name")
+        if isinstance(const, sql_schema.PrimaryKeyConstraint):
+            if self.table.primary_key in self.unnamed_constraints:
+                self.unnamed_constraints.remove(self.table.primary_key)
+
+        if constraint_name_string(const.name):
+            self.named_constraints[const.name] = const
+        else:
+            self.unnamed_constraints.append(const)
+
+    def drop_constraint(self, const: Constraint) -> None:
+        if not const.name:
+            raise ValueError("Constraint must have a name")
+        try:
+            if const.name in self.col_named_constraints:
+                col, const = self.col_named_constraints.pop(const.name)
+
+                for col_const in list(self.columns[col.name].constraints):
+                    if col_const.name == const.name:
+                        self.columns[col.name].constraints.remove(col_const)
+            elif constraint_name_string(const.name):
+                const = self.named_constraints.pop(const.name)
+            elif const in self.unnamed_constraints:
+                self.unnamed_constraints.remove(const)
+
+        except KeyError:
+            if _is_type_bound(const):
+                # type-bound constraints are only included in the new
+                # table via their type object in any case, so ignore the
+                # drop_constraint() that comes here via the
+                # Operations.implementation_for(alter_column)
+                return
+            raise ValueError("No such constraint: '%s'" % const.name)
+        else:
+            if isinstance(const, PrimaryKeyConstraint):
+                for col in const.columns:
+                    self.columns[col.name].primary_key = False
+
+    def create_index(self, idx: Index) -> None:
+        self.new_indexes[idx.name] = idx  # type: ignore[index]
+
+    def drop_index(self, idx: Index) -> None:
+        try:
+            del self.indexes[idx.name]  # type: ignore[arg-type]
+        except KeyError:
+            raise ValueError("No such index: '%s'" % idx.name)
+
+    def rename_table(self, *arg, **kw):
+        raise NotImplementedError(
+            "TODO: table rename is not yet implemented for the "
+            "batch 'move and copy' workflow"
+        )
diff --git a/.venv/lib/python3.12/site-packages/alembic/operations/ops.py b/.venv/lib/python3.12/site-packages/alembic/operations/ops.py
new file mode 100644
index 00000000..bb4d825b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/operations/ops.py
@@ -0,0 +1,2799 @@
+from __future__ import annotations
+
+from abc import abstractmethod
+import re
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Dict
+from typing import FrozenSet
+from typing import Iterator
+from typing import List
+from typing import MutableMapping
+from typing import Optional
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy.types import NULLTYPE
+
+from . import schemaobj
+from .base import BatchOperations
+from .base import Operations
+from .. import util
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import conv
+    from sqlalchemy.sql.elements import quoted_name
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.schema import CheckConstraint
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Computed
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.schema import ForeignKeyConstraint
+    from sqlalchemy.sql.schema import Identity
+    from sqlalchemy.sql.schema import Index
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import PrimaryKeyConstraint
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.schema import UniqueConstraint
+    from sqlalchemy.sql.selectable import TableClause
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from ..autogenerate.rewriter import Rewriter
+    from ..runtime.migration import MigrationContext
+    from ..script.revision import _RevIdType
+
+_T = TypeVar("_T", bound=Any)
+_AC = TypeVar("_AC", bound="AddConstraintOp")
+
+
+class MigrateOperation:
+    """base class for migration command and organization objects.
+
+    This system is part of the operation extensibility API.
+
+    .. seealso::
+
+        :ref:`operation_objects`
+
+        :ref:`operation_plugins`
+
+        :ref:`customizing_revision`
+
+    """
+
+    @util.memoized_property
+    def info(self) -> Dict[Any, Any]:
+        """A dictionary that may be used to store arbitrary information
+        along with this :class:`.MigrateOperation` object.
+
+        """
+        return {}
+
+    _mutations: FrozenSet[Rewriter] = frozenset()
+
+    def reverse(self) -> MigrateOperation:
+        raise NotImplementedError
+
+    def to_diff_tuple(self) -> Tuple[Any, ...]:
+        raise NotImplementedError
+
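+# Extensibility sketch (hypothetical names): a custom operation subclasses
+# MigrateOperation and registers itself on Operations; a matching
+# Operations.implementation_for() handler (not shown) would emit the DDL.
+#
+#     @Operations.register_operation("create_sequence_hypothetical")
+#     class CreateSequenceHypotheticalOp(MigrateOperation):
+#         def __init__(self, sequence_name: str) -> None:
+#             self.sequence_name = sequence_name
+#
+#         @classmethod
+#         def create_sequence_hypothetical(cls, operations, sequence_name):
+#             return operations.invoke(cls(sequence_name))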
+
+class AddConstraintOp(MigrateOperation):
+    """Represent an add constraint operation."""
+
+    add_constraint_ops = util.Dispatcher()
+
+    @property
+    def constraint_type(self) -> str:
+        raise NotImplementedError()
+
+    @classmethod
+    def register_add_constraint(
+        cls, type_: str
+    ) -> Callable[[Type[_AC]], Type[_AC]]:
+        def go(klass: Type[_AC]) -> Type[_AC]:
+            cls.add_constraint_ops.dispatch_for(type_)(klass.from_constraint)
+            return klass
+
+        return go
+
+    @classmethod
+    def from_constraint(cls, constraint: Constraint) -> AddConstraintOp:
+        return cls.add_constraint_ops.dispatch(constraint.__visit_name__)(  # type: ignore[no-any-return]  # noqa: E501
+            constraint
+        )
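+    # Dispatch note: dialect modules register concrete subclasses here;
+    # e.g. Alembic's PostgreSQL support registers its exclude-constraint
+    # op under the "exclude_constraint" visit name, which from_constraint()
+    # routes on via constraint.__visit_name__.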
+
+    @abstractmethod
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Constraint:
+        pass
+
+    def reverse(self) -> DropConstraintOp:
+        return DropConstraintOp.from_constraint(self.to_constraint())
+
+    def to_diff_tuple(self) -> Tuple[str, Constraint]:
+        return ("add_constraint", self.to_constraint())
+
+
+@Operations.register_operation("drop_constraint")
+@BatchOperations.register_operation("drop_constraint", "batch_drop_constraint")
+class DropConstraintOp(MigrateOperation):
+    """Represent a drop constraint operation."""
+
+    def __init__(
+        self,
+        constraint_name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        type_: Optional[str] = None,
+        *,
+        schema: Optional[str] = None,
+        _reverse: Optional[AddConstraintOp] = None,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.table_name = table_name
+        self.constraint_type = type_
+        self.schema = schema
+        self._reverse = _reverse
+
+    def reverse(self) -> AddConstraintOp:
+        return AddConstraintOp.from_constraint(self.to_constraint())
+
+    def to_diff_tuple(
+        self,
+    ) -> Tuple[str, SchemaItem]:
+        if self.constraint_type == "foreignkey":
+            return ("remove_fk", self.to_constraint())
+        else:
+            return ("remove_constraint", self.to_constraint())
+
+    @classmethod
+    def from_constraint(cls, constraint: Constraint) -> DropConstraintOp:
+        types = {
+            "unique_constraint": "unique",
+            "foreign_key_constraint": "foreignkey",
+            "primary_key_constraint": "primary",
+            "check_constraint": "check",
+            "column_check_constraint": "check",
+            "table_or_column_check_constraint": "check",
+        }
+
+        constraint_table = sqla_compat._table_for_constraint(constraint)
+        return cls(
+            sqla_compat.constraint_name_or_none(constraint.name),
+            constraint_table.name,
+            schema=constraint_table.schema,
+            type_=types.get(constraint.__visit_name__),
+            _reverse=AddConstraintOp.from_constraint(constraint),
+        )
+
+    def to_constraint(self) -> Constraint:
+        if self._reverse is not None:
+            constraint = self._reverse.to_constraint()
+            constraint.name = self.constraint_name
+            constraint_table = sqla_compat._table_for_constraint(constraint)
+            constraint_table.name = self.table_name
+            constraint_table.schema = self.schema
+
+            return constraint
+        else:
+            raise ValueError(
+                "constraint cannot be produced; "
+                "original constraint is not present"
+            )
+
+    @classmethod
+    def drop_constraint(
+        cls,
+        operations: Operations,
+        constraint_name: str,
+        table_name: str,
+        type_: Optional[str] = None,
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        r"""Drop a constraint of the given name, typically via DROP CONSTRAINT.
+
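+        e.g. (hypothetical names)::
+
+            op.drop_constraint("fk_user_address", "address", type_="foreignkey")
+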
+        :param constraint_name: name of the constraint.
+        :param table_name: table name.
+        :param type\_: optional, required on MySQL.  May be one of
+         'foreignkey', 'primary', 'unique', or 'check'.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        """
+
+        op = cls(constraint_name, table_name, type_=type_, schema=schema)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_drop_constraint(
+        cls,
+        operations: BatchOperations,
+        constraint_name: str,
+        type_: Optional[str] = None,
+    ) -> None:
+        """Issue a "drop constraint" instruction using the
+        current batch migration context.
+
+        The batch form of this call omits the ``table_name`` and ``schema``
+        arguments from the call.
+
+        .. seealso::
+
+            :meth:`.Operations.drop_constraint`
+
+        """
+        op = cls(
+            constraint_name,
+            operations.impl.table_name,
+            type_=type_,
+            schema=operations.impl.schema,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_primary_key")
+@BatchOperations.register_operation(
+    "create_primary_key", "batch_create_primary_key"
+)
+@AddConstraintOp.register_add_constraint("primary_key_constraint")
+class CreatePrimaryKeyOp(AddConstraintOp):
+    """Represent a create primary key operation."""
+
+    constraint_type = "primarykey"
+
+    def __init__(
+        self,
+        constraint_name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        columns: Sequence[str],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.table_name = table_name
+        self.columns = columns
+        self.schema = schema
+        self.kw = kw
+
+    @classmethod
+    def from_constraint(cls, constraint: Constraint) -> CreatePrimaryKeyOp:
+        constraint_table = sqla_compat._table_for_constraint(constraint)
+        pk_constraint = cast("PrimaryKeyConstraint", constraint)
+        return cls(
+            sqla_compat.constraint_name_or_none(pk_constraint.name),
+            constraint_table.name,
+            pk_constraint.columns.keys(),
+            schema=constraint_table.schema,
+            **pk_constraint.dialect_kwargs,
+        )
+
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> PrimaryKeyConstraint:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        return schema_obj.primary_key_constraint(
+            self.constraint_name,
+            self.table_name,
+            self.columns,
+            schema=self.schema,
+            **self.kw,
+        )
+
+    @classmethod
+    def create_primary_key(
+        cls,
+        operations: Operations,
+        constraint_name: Optional[str],
+        table_name: str,
+        columns: List[str],
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        """Issue a "create primary key" instruction using the current
+        migration context.
+
+        e.g.::
+
+            from alembic import op
+
+            op.create_primary_key("pk_my_table", "my_table", ["id", "version"])
+
+        This internally generates a :class:`~sqlalchemy.schema.Table` object
+        containing the necessary columns, then generates a new
+        :class:`~sqlalchemy.schema.PrimaryKeyConstraint`
+        object which it then associates with the
+        :class:`~sqlalchemy.schema.Table`.
+        Any event listeners associated with this action will be fired
+        off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+        construct is ultimately used to generate the ALTER statement.
+
+        :param constraint_name: Name of the primary key constraint.  The name
+         is necessary so that an ALTER statement can be emitted.  For setups
+         that use an automated naming scheme such as that described at
+         :ref:`sqla:constraint_naming_conventions`
+         ``name`` here can be ``None``, as the event listener will
+         apply the name to the constraint object when it is associated
+         with the table.
+        :param table_name: String name of the target table.
+        :param columns: a list of string column names to be applied to the
+         primary key constraint.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        """
+        op = cls(constraint_name, table_name, columns, schema=schema)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_primary_key(
+        cls,
+        operations: BatchOperations,
+        constraint_name: Optional[str],
+        columns: List[str],
+    ) -> None:
+        """Issue a "create primary key" instruction using the
+        current batch migration context.
+
+        The batch form of this call omits the ``table_name`` and ``schema``
+        arguments from the call.
+
+        .. seealso::
+
+            :meth:`.Operations.create_primary_key`
+
+        """
+        op = cls(
+            constraint_name,
+            operations.impl.table_name,
+            columns,
+            schema=operations.impl.schema,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_unique_constraint")
+@BatchOperations.register_operation(
+    "create_unique_constraint", "batch_create_unique_constraint"
+)
+@AddConstraintOp.register_add_constraint("unique_constraint")
+class CreateUniqueConstraintOp(AddConstraintOp):
+    """Represent a create unique constraint operation."""
+
+    constraint_type = "unique"
+
+    def __init__(
+        self,
+        constraint_name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        columns: Sequence[str],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.table_name = table_name
+        self.columns = columns
+        self.schema = schema
+        self.kw = kw
+
+    @classmethod
+    def from_constraint(
+        cls, constraint: Constraint
+    ) -> CreateUniqueConstraintOp:
+        constraint_table = sqla_compat._table_for_constraint(constraint)
+
+        uq_constraint = cast("UniqueConstraint", constraint)
+
+        kw: Dict[str, Any] = {}
+        if uq_constraint.deferrable:
+            kw["deferrable"] = uq_constraint.deferrable
+        if uq_constraint.initially:
+            kw["initially"] = uq_constraint.initially
+        kw.update(uq_constraint.dialect_kwargs)
+        return cls(
+            sqla_compat.constraint_name_or_none(uq_constraint.name),
+            constraint_table.name,
+            [c.name for c in uq_constraint.columns],
+            schema=constraint_table.schema,
+            **kw,
+        )
+
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> UniqueConstraint:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        return schema_obj.unique_constraint(
+            self.constraint_name,
+            self.table_name,
+            self.columns,
+            schema=self.schema,
+            **self.kw,
+        )
+
+    @classmethod
+    def create_unique_constraint(
+        cls,
+        operations: Operations,
+        constraint_name: Optional[str],
+        table_name: str,
+        columns: Sequence[str],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> Any:
+        """Issue a "create unique constraint" instruction using the
+        current migration context.
+
+        e.g.::
+
+            from alembic import op
+
+            op.create_unique_constraint("uq_user_name", "user", ["name"])
+
+        This internally generates a :class:`~sqlalchemy.schema.Table` object
+        containing the necessary columns, then generates a new
+        :class:`~sqlalchemy.schema.UniqueConstraint`
+        object which it then associates with the
+        :class:`~sqlalchemy.schema.Table`.
+        Any event listeners associated with this action will be fired
+        off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+        construct is ultimately used to generate the ALTER statement.
+
+        :param constraint_name: Name of the unique constraint.  The name is
+         necessary so that an ALTER statement can be emitted.  For setups that
+         use an automated naming scheme such as that described at
+         :ref:`sqla:constraint_naming_conventions`,
+         ``name`` here can be ``None``, as the event listener will
+         apply the name to the constraint object when it is associated
+         with the table.
+        :param table_name: String name of the source table.
+        :param columns: a list of string column names in the
+         source table.
+        :param deferrable: optional bool. If set, emit DEFERRABLE or
+         NOT DEFERRABLE when issuing DDL for this constraint.
+        :param initially: optional string. If set, emit INITIALLY <value>
+         when issuing DDL for this constraint.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        """
+
+        op = cls(constraint_name, table_name, columns, schema=schema, **kw)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_unique_constraint(
+        cls,
+        operations: BatchOperations,
+        constraint_name: str,
+        columns: Sequence[str],
+        **kw: Any,
+    ) -> Any:
+        """Issue a "create unique constraint" instruction using the
+        current batch migration context.
+
+        The batch form of this call omits the ``source`` and ``schema``
+        arguments from the call.
+
+        .. seealso::
+
+            :meth:`.Operations.create_unique_constraint`
+
+        """
+        kw["schema"] = operations.impl.schema
+        op = cls(constraint_name, operations.impl.table_name, columns, **kw)
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_foreign_key")
+@BatchOperations.register_operation(
+    "create_foreign_key", "batch_create_foreign_key"
+)
+@AddConstraintOp.register_add_constraint("foreign_key_constraint")
+class CreateForeignKeyOp(AddConstraintOp):
+    """Represent a create foreign key constraint operation."""
+
+    constraint_type = "foreignkey"
+
+    def __init__(
+        self,
+        constraint_name: Optional[sqla_compat._ConstraintNameDefined],
+        source_table: str,
+        referent_table: str,
+        local_cols: List[str],
+        remote_cols: List[str],
+        **kw: Any,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.source_table = source_table
+        self.referent_table = referent_table
+        self.local_cols = local_cols
+        self.remote_cols = remote_cols
+        self.kw = kw
+
+    def to_diff_tuple(self) -> Tuple[str, ForeignKeyConstraint]:
+        return ("add_fk", self.to_constraint())
+
+    @classmethod
+    def from_constraint(cls, constraint: Constraint) -> CreateForeignKeyOp:
+        fk_constraint = cast("ForeignKeyConstraint", constraint)
+        kw: Dict[str, Any] = {}
+        if fk_constraint.onupdate:
+            kw["onupdate"] = fk_constraint.onupdate
+        if fk_constraint.ondelete:
+            kw["ondelete"] = fk_constraint.ondelete
+        if fk_constraint.initially:
+            kw["initially"] = fk_constraint.initially
+        if fk_constraint.deferrable:
+            kw["deferrable"] = fk_constraint.deferrable
+        if fk_constraint.use_alter:
+            kw["use_alter"] = fk_constraint.use_alter
+        if fk_constraint.match:
+            kw["match"] = fk_constraint.match
+
+        (
+            source_schema,
+            source_table,
+            source_columns,
+            target_schema,
+            target_table,
+            target_columns,
+            onupdate,
+            ondelete,
+            deferrable,
+            initially,
+        ) = sqla_compat._fk_spec(fk_constraint)
+
+        kw["source_schema"] = source_schema
+        kw["referent_schema"] = target_schema
+        kw.update(fk_constraint.dialect_kwargs)
+        return cls(
+            sqla_compat.constraint_name_or_none(fk_constraint.name),
+            source_table,
+            target_table,
+            source_columns,
+            target_columns,
+            **kw,
+        )
+
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> ForeignKeyConstraint:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        return schema_obj.foreign_key_constraint(
+            self.constraint_name,
+            self.source_table,
+            self.referent_table,
+            self.local_cols,
+            self.remote_cols,
+            **self.kw,
+        )
+
+    @classmethod
+    def create_foreign_key(
+        cls,
+        operations: Operations,
+        constraint_name: Optional[str],
+        source_table: str,
+        referent_table: str,
+        local_cols: List[str],
+        remote_cols: List[str],
+        *,
+        onupdate: Optional[str] = None,
+        ondelete: Optional[str] = None,
+        deferrable: Optional[bool] = None,
+        initially: Optional[str] = None,
+        match: Optional[str] = None,
+        source_schema: Optional[str] = None,
+        referent_schema: Optional[str] = None,
+        **dialect_kw: Any,
+    ) -> None:
+        """Issue a "create foreign key" instruction using the
+        current migration context.
+
+        e.g.::
+
+            from alembic import op
+
+            op.create_foreign_key(
+                "fk_user_address",
+                "address",
+                "user",
+                ["user_id"],
+                ["id"],
+            )
+
+        This internally generates a :class:`~sqlalchemy.schema.Table` object
+        containing the necessary columns, then generates a new
+        :class:`~sqlalchemy.schema.ForeignKeyConstraint`
+        object which it then associates with the
+        :class:`~sqlalchemy.schema.Table`.
+        Any event listeners associated with this action will be fired
+        off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+        construct is ultimately used to generate the ALTER statement.
+
+        :param constraint_name: Name of the foreign key constraint.  The name
+         is necessary so that an ALTER statement can be emitted.  For setups
+         that use an automated naming scheme such as that described at
+         :ref:`sqla:constraint_naming_conventions`,
+         ``constraint_name`` here can be ``None``, as the event listener
+         will apply the name to the constraint object when it is associated
+         with the table.
+        :param source_table: String name of the source table.
+        :param referent_table: String name of the destination table.
+        :param local_cols: a list of string column names in the
+         source table.
+        :param remote_cols: a list of string column names in the
+         remote table.
+        :param onupdate: Optional string. If set, emit ON UPDATE <value> when
+         issuing DDL for this constraint. Typical values include CASCADE,
+         SET NULL and RESTRICT.
+        :param ondelete: Optional string. If set, emit ON DELETE <value> when
+         issuing DDL for this constraint. Typical values include CASCADE,
+         SET NULL and RESTRICT.
+        :param deferrable: optional bool. If set, emit DEFERRABLE or NOT
+         DEFERRABLE when issuing DDL for this constraint.
+        :param initially: optional string. If set, emit INITIALLY <value>
+         when issuing DDL for this constraint.
+        :param match: optional string. If set, emit MATCH <value> when
+         issuing DDL for this constraint.
+        :param source_schema: Optional schema name of the source table.
+        :param referent_schema: Optional schema name of the destination table.
+
+        """
+
+        op = cls(
+            constraint_name,
+            source_table,
+            referent_table,
+            local_cols,
+            remote_cols,
+            onupdate=onupdate,
+            ondelete=ondelete,
+            deferrable=deferrable,
+            source_schema=source_schema,
+            referent_schema=referent_schema,
+            initially=initially,
+            match=match,
+            **dialect_kw,
+        )
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_foreign_key(
+        cls,
+        operations: BatchOperations,
+        constraint_name: Optional[str],
+        referent_table: str,
+        local_cols: List[str],
+        remote_cols: List[str],
+        *,
+        referent_schema: Optional[str] = None,
+        onupdate: Optional[str] = None,
+        ondelete: Optional[str] = None,
+        deferrable: Optional[bool] = None,
+        initially: Optional[str] = None,
+        match: Optional[str] = None,
+        **dialect_kw: Any,
+    ) -> None:
+        """Issue a "create foreign key" instruction using the
+        current batch migration context.
+
+        The batch form of this call omits the ``source_table`` and
+        ``source_schema`` arguments from the call.
+
+        e.g.::
+
+            with batch_alter_table("address") as batch_op:
+                batch_op.create_foreign_key(
+                    "fk_user_address",
+                    "user",
+                    ["user_id"],
+                    ["id"],
+                )
+
+        .. seealso::
+
+            :meth:`.Operations.create_foreign_key`
+
+        """
+        op = cls(
+            constraint_name,
+            operations.impl.table_name,
+            referent_table,
+            local_cols,
+            remote_cols,
+            onupdate=onupdate,
+            ondelete=ondelete,
+            deferrable=deferrable,
+            source_schema=operations.impl.schema,
+            referent_schema=referent_schema,
+            initially=initially,
+            match=match,
+            **dialect_kw,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_check_constraint")
+@BatchOperations.register_operation(
+    "create_check_constraint", "batch_create_check_constraint"
+)
+@AddConstraintOp.register_add_constraint("check_constraint")
+@AddConstraintOp.register_add_constraint("table_or_column_check_constraint")
+@AddConstraintOp.register_add_constraint("column_check_constraint")
+class CreateCheckConstraintOp(AddConstraintOp):
+    """Represent a create check constraint operation."""
+
+    constraint_type = "check"
+
+    def __init__(
+        self,
+        constraint_name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        condition: Union[str, TextClause, ColumnElement[Any]],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.table_name = table_name
+        self.condition = condition
+        self.schema = schema
+        self.kw = kw
+
+    @classmethod
+    def from_constraint(
+        cls, constraint: Constraint
+    ) -> CreateCheckConstraintOp:
+        constraint_table = sqla_compat._table_for_constraint(constraint)
+
+        ck_constraint = cast("CheckConstraint", constraint)
+        return cls(
+            sqla_compat.constraint_name_or_none(ck_constraint.name),
+            constraint_table.name,
+            cast("ColumnElement[Any]", ck_constraint.sqltext),
+            schema=constraint_table.schema,
+            **ck_constraint.dialect_kwargs,
+        )
+
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> CheckConstraint:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        return schema_obj.check_constraint(
+            self.constraint_name,
+            self.table_name,
+            self.condition,
+            schema=self.schema,
+            **self.kw,
+        )
+
+    @classmethod
+    def create_check_constraint(
+        cls,
+        operations: Operations,
+        constraint_name: Optional[str],
+        table_name: str,
+        condition: Union[str, ColumnElement[bool], TextClause],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        """Issue a "create check constraint" instruction using the
+        current migration context.
+
+        e.g.::
+
+            from alembic import op
+            from sqlalchemy.sql import column, func
+
+            op.create_check_constraint(
+                "ck_user_name_len",
+                "user",
+                func.len(column("name")) > 5,
+            )
+
+        CHECK constraints are usually against a SQL expression, so ad-hoc
+        table metadata is typically needed.   The function will convert the
+        given arguments into a :class:`sqlalchemy.schema.CheckConstraint`
+        bound to an anonymous table in order to emit the CREATE statement.
+
+        :param constraint_name: Name of the check constraint.  The name is
+         necessary so that an ALTER statement can be emitted.  For setups
+         that use an automated naming scheme such as that described at
+         :ref:`sqla:constraint_naming_conventions`,
+         ``constraint_name`` here can be ``None``, as the event listener will
+         apply the name to the constraint object when it is associated
+         with the table.
+        :param table_name: String name of the source table.
+        :param condition: SQL expression that's the condition of the
+         constraint. Can be a string or SQLAlchemy expression language
+         structure.
+        :param deferrable: optional bool. If set, emit DEFERRABLE or
+         NOT DEFERRABLE when issuing DDL for this constraint.
+        :param initially: optional string. If set, emit INITIALLY <value>
+         when issuing DDL for this constraint.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        """
+        op = cls(constraint_name, table_name, condition, schema=schema, **kw)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_check_constraint(
+        cls,
+        operations: BatchOperations,
+        constraint_name: str,
+        condition: Union[str, ColumnElement[bool], TextClause],
+        **kw: Any,
+    ) -> None:
+        """Issue a "create check constraint" instruction using the
+        current batch migration context.
+
+        The batch form of this call omits the ``table_name`` and ``schema``
+        arguments from the call.
+
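+        e.g., a minimal sketch, with illustrative table and column names::
+
+            from sqlalchemy.sql import column, func
+
+            with batch_alter_table("user") as batch_op:
+                batch_op.create_check_constraint(
+                    "ck_user_name_len",
+                    func.len(column("name")) > 5,
+                )
+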
+        .. seealso::
+
+            :meth:`.Operations.create_check_constraint`
+
+        """
+        op = cls(
+            constraint_name,
+            operations.impl.table_name,
+            condition,
+            schema=operations.impl.schema,
+            **kw,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_index")
+@BatchOperations.register_operation("create_index", "batch_create_index")
+class CreateIndexOp(MigrateOperation):
+    """Represent a create index operation."""
+
+    def __init__(
+        self,
+        index_name: Optional[str],
+        table_name: str,
+        columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
+        *,
+        schema: Optional[str] = None,
+        unique: bool = False,
+        if_not_exists: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        self.index_name = index_name
+        self.table_name = table_name
+        self.columns = columns
+        self.schema = schema
+        self.unique = unique
+        self.if_not_exists = if_not_exists
+        self.kw = kw
+
+    def reverse(self) -> DropIndexOp:
+        return DropIndexOp.from_index(self.to_index())
+
+    def to_diff_tuple(self) -> Tuple[str, Index]:
+        return ("add_index", self.to_index())
+
+    @classmethod
+    def from_index(cls, index: Index) -> CreateIndexOp:
+        assert index.table is not None
+        return cls(
+            index.name,
+            index.table.name,
+            index.expressions,
+            schema=index.table.schema,
+            unique=index.unique,
+            **index.kwargs,
+        )
+
+    def to_index(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Index:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        idx = schema_obj.index(
+            self.index_name,
+            self.table_name,
+            self.columns,
+            schema=self.schema,
+            unique=self.unique,
+            **self.kw,
+        )
+        return idx
+
+    @classmethod
+    def create_index(
+        cls,
+        operations: Operations,
+        index_name: Optional[str],
+        table_name: str,
+        columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
+        *,
+        schema: Optional[str] = None,
+        unique: bool = False,
+        if_not_exists: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        r"""Issue a "create index" instruction using the current
+        migration context.
+
+        e.g.::
+
+            from alembic import op
+
+            op.create_index("ik_test", "t1", ["foo", "bar"])
+
+        Functional indexes can be produced by using the
+        :func:`sqlalchemy.sql.expression.text` construct::
+
+            from alembic import op
+            from sqlalchemy import text
+
+            op.create_index("ik_test", "t1", [text("lower(foo)")])
+
+        :param index_name: name of the index.
+        :param table_name: name of the owning table.
+        :param columns: a list consisting of string column names and/or
+         :func:`~sqlalchemy.sql.expression.text` constructs.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+        :param unique: If True, create a unique index.
+
+        :param quote: Force quoting of the index's name on or off,
+         corresponding to ``True`` or ``False``. When left at its default
+         of ``None``, the identifier will be quoted according to
+         whether the name is case sensitive (identifiers with at least one
+         upper case character are treated as case sensitive), or if it's a
+         reserved word. This flag is only needed to force quoting of a
+         reserved word which is not known by the SQLAlchemy dialect.
+
+        :param if_not_exists: If True, adds IF NOT EXISTS operator when
+         creating the new index.
+
+         .. versionadded:: 1.12.0
+
+        :param \**kw: Additional keyword arguments not mentioned above are
+         dialect specific, and passed in the form
+         ``<dialectname>_<argname>``.
+         See the documentation regarding an individual dialect at
+         :ref:`dialect_toplevel` for detail on documented arguments.
+
+        """
+        op = cls(
+            index_name,
+            table_name,
+            columns,
+            schema=schema,
+            unique=unique,
+            if_not_exists=if_not_exists,
+            **kw,
+        )
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_index(
+        cls,
+        operations: BatchOperations,
+        index_name: str,
+        columns: List[str],
+        **kw: Any,
+    ) -> None:
+        """Issue a "create index" instruction using the
+        current batch migration context.
+
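+        e.g., a minimal sketch, with illustrative table and column names::
+
+            with batch_alter_table("t1") as batch_op:
+                batch_op.create_index("ik_test", ["foo", "bar"])
+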
+        .. seealso::
+
+            :meth:`.Operations.create_index`
+
+        """
+
+        op = cls(
+            index_name,
+            operations.impl.table_name,
+            columns,
+            schema=operations.impl.schema,
+            **kw,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("drop_index")
+@BatchOperations.register_operation("drop_index", "batch_drop_index")
+class DropIndexOp(MigrateOperation):
+    """Represent a drop index operation."""
+
+    def __init__(
+        self,
+        index_name: Union[quoted_name, str, conv],
+        table_name: Optional[str] = None,
+        *,
+        schema: Optional[str] = None,
+        if_exists: Optional[bool] = None,
+        _reverse: Optional[CreateIndexOp] = None,
+        **kw: Any,
+    ) -> None:
+        self.index_name = index_name
+        self.table_name = table_name
+        self.schema = schema
+        self.if_exists = if_exists
+        self._reverse = _reverse
+        self.kw = kw
+
+    def to_diff_tuple(self) -> Tuple[str, Index]:
+        return ("remove_index", self.to_index())
+
+    def reverse(self) -> CreateIndexOp:
+        return CreateIndexOp.from_index(self.to_index())
+
+    @classmethod
+    def from_index(cls, index: Index) -> DropIndexOp:
+        assert index.table is not None
+        return cls(
+            index.name,  # type: ignore[arg-type]
+            table_name=index.table.name,
+            schema=index.table.schema,
+            _reverse=CreateIndexOp.from_index(index),
+            unique=index.unique,
+            **index.kwargs,
+        )
+
+    def to_index(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Index:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        # need a dummy column name here since SQLAlchemy
+        # 0.7.6 and further raises on Index with no columns
+        return schema_obj.index(
+            self.index_name,
+            self.table_name,
+            self._reverse.columns if self._reverse else ["x"],
+            schema=self.schema,
+            **self.kw,
+        )
+
+    @classmethod
+    def drop_index(
+        cls,
+        operations: Operations,
+        index_name: str,
+        table_name: Optional[str] = None,
+        *,
+        schema: Optional[str] = None,
+        if_exists: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        r"""Issue a "drop index" instruction using the current
+        migration context.
+
+        e.g.::
+
+            op.drop_index("ik_test")
+
+        :param index_name: name of the index.
+        :param table_name: name of the owning table.  Some
+         backends such as Microsoft SQL Server require this.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        :param if_exists: If True, adds IF EXISTS operator when
+         dropping the index.
+
+         .. versionadded:: 1.12.0
+
+        :param \**kw: Additional keyword arguments not mentioned above are
+         dialect specific, and passed in the form
+         ``<dialectname>_<argname>``.
+         See the documentation regarding an individual dialect at
+         :ref:`dialect_toplevel` for detail on documented arguments.
+
+        """
+        op = cls(
+            index_name,
+            table_name=table_name,
+            schema=schema,
+            if_exists=if_exists,
+            **kw,
+        )
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_drop_index(
+        cls, operations: BatchOperations, index_name: str, **kw: Any
+    ) -> None:
+        """Issue a "drop index" instruction using the
+        current batch migration context.
+
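+        e.g., a minimal sketch, with an illustrative index name::
+
+            with batch_alter_table("t1") as batch_op:
+                batch_op.drop_index("ik_test")
+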
+        .. seealso::
+
+            :meth:`.Operations.drop_index`
+
+        """
+
+        op = cls(
+            index_name,
+            table_name=operations.impl.table_name,
+            schema=operations.impl.schema,
+            **kw,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_table")
+class CreateTableOp(MigrateOperation):
+    """Represent a create table operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        columns: Sequence[SchemaItem],
+        *,
+        schema: Optional[str] = None,
+        if_not_exists: Optional[bool] = None,
+        _namespace_metadata: Optional[MetaData] = None,
+        _constraints_included: bool = False,
+        **kw: Any,
+    ) -> None:
+        self.table_name = table_name
+        self.columns = columns
+        self.schema = schema
+        self.if_not_exists = if_not_exists
+        self.info = kw.pop("info", {})
+        self.comment = kw.pop("comment", None)
+        self.prefixes = kw.pop("prefixes", None)
+        self.kw = kw
+        self._namespace_metadata = _namespace_metadata
+        self._constraints_included = _constraints_included
+
+    def reverse(self) -> DropTableOp:
+        return DropTableOp.from_table(
+            self.to_table(), _namespace_metadata=self._namespace_metadata
+        )
+
+    def to_diff_tuple(self) -> Tuple[str, Table]:
+        return ("add_table", self.to_table())
+
+    @classmethod
+    def from_table(
+        cls, table: Table, *, _namespace_metadata: Optional[MetaData] = None
+    ) -> CreateTableOp:
+        if _namespace_metadata is None:
+            _namespace_metadata = table.metadata
+
+        return cls(
+            table.name,
+            list(table.c) + list(table.constraints),
+            schema=table.schema,
+            _namespace_metadata=_namespace_metadata,
+            # given a Table() object, this Table will contain full Index()
+            # and UniqueConstraint objects already constructed in response to
+            # each unique=True / index=True flag on a Column.  Carry this
+            # state along so that when we re-convert back into a Table, we
+            # skip unique=True/index=True so that these constraints are
+            # not doubled up. see #844 #848
+            _constraints_included=True,
+            comment=table.comment,
+            info=dict(table.info),
+            prefixes=list(table._prefixes),
+            **table.kwargs,
+        )
+
+    def to_table(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Table:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        return schema_obj.table(
+            self.table_name,
+            *self.columns,
+            schema=self.schema,
+            prefixes=list(self.prefixes) if self.prefixes else [],
+            comment=self.comment,
+            info=self.info.copy() if self.info else {},
+            _constraints_included=self._constraints_included,
+            **self.kw,
+        )
+
+    @classmethod
+    def create_table(
+        cls,
+        operations: Operations,
+        table_name: str,
+        *columns: SchemaItem,
+        if_not_exists: Optional[bool] = None,
+        **kw: Any,
+    ) -> Table:
+        r"""Issue a "create table" instruction using the current migration
+        context.
+
+        This directive receives an argument list similar to that of the
+        traditional :class:`sqlalchemy.schema.Table` construct, but without the
+        metadata::
+
+            from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, TIMESTAMP
+            from sqlalchemy import Column, func
+            from alembic import op
+
+            op.create_table(
+                "account",
+                Column("id", INTEGER, primary_key=True),
+                Column("name", VARCHAR(50), nullable=False),
+                Column("description", NVARCHAR(200)),
+                Column("timestamp", TIMESTAMP, server_default=func.now()),
+            )
+
+        Note that :meth:`.create_table` accepts
+        :class:`~sqlalchemy.schema.Column`
+        constructs directly from the SQLAlchemy library.  In particular,
+        default values to be created on the database side are
+        specified using the ``server_default`` parameter, and not
+        ``default`` which only specifies Python-side defaults::
+
+            from alembic import op
+            from sqlalchemy import Column, TIMESTAMP, func
+
+            # specify "DEFAULT NOW" along with the "timestamp" column
+            op.create_table(
+                "account",
+                Column("id", INTEGER, primary_key=True),
+                Column("timestamp", TIMESTAMP, server_default=func.now()),
+            )
+
+        The function also returns a newly created
+        :class:`~sqlalchemy.schema.Table` object, corresponding to the table
+        specification given, which is suitable for
+        immediate SQL operations, in particular
+        :meth:`.Operations.bulk_insert`::
+
+            from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, TIMESTAMP
+            from sqlalchemy import Column, func
+            from alembic import op
+
+            account_table = op.create_table(
+                "account",
+                Column("id", INTEGER, primary_key=True),
+                Column("name", VARCHAR(50), nullable=False),
+                Column("description", NVARCHAR(200)),
+                Column("timestamp", TIMESTAMP, server_default=func.now()),
+            )
+
+            op.bulk_insert(
+                account_table,
+                [
+                    {"name": "A1", "description": "account 1"},
+                    {"name": "A2", "description": "account 2"},
+                ],
+            )
+
+        :param table_name: Name of the table.
+        :param \*columns: collection of :class:`~sqlalchemy.schema.Column`
+         objects within the table, as well as optional
+         :class:`~sqlalchemy.schema.Constraint` objects
+         and :class:`~sqlalchemy.schema.Index` objects.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+        :param if_not_exists: If True, adds IF NOT EXISTS operator when
+         creating the new table.
+
+         .. versionadded:: 1.13.3
+
+        :param \**kw: Other keyword arguments are passed to the underlying
+         :class:`sqlalchemy.schema.Table` object created for the command.
+
+        :return: the :class:`~sqlalchemy.schema.Table` object corresponding
+         to the parameters given.
+
+        """
+        op = cls(table_name, columns, if_not_exists=if_not_exists, **kw)
+        return operations.invoke(op)
+
+
+@Operations.register_operation("drop_table")
+class DropTableOp(MigrateOperation):
+    """Represent a drop table operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        *,
+        schema: Optional[str] = None,
+        if_exists: Optional[bool] = None,
+        table_kw: Optional[MutableMapping[Any, Any]] = None,
+        _reverse: Optional[CreateTableOp] = None,
+    ) -> None:
+        self.table_name = table_name
+        self.schema = schema
+        self.if_exists = if_exists
+        self.table_kw = table_kw or {}
+        self.comment = self.table_kw.pop("comment", None)
+        self.info = self.table_kw.pop("info", None)
+        self.prefixes = self.table_kw.pop("prefixes", None)
+        self._reverse = _reverse
+
+    def to_diff_tuple(self) -> Tuple[str, Table]:
+        return ("remove_table", self.to_table())
+
+    def reverse(self) -> CreateTableOp:
+        return CreateTableOp.from_table(self.to_table())
+
+    @classmethod
+    def from_table(
+        cls, table: Table, *, _namespace_metadata: Optional[MetaData] = None
+    ) -> DropTableOp:
+        return cls(
+            table.name,
+            schema=table.schema,
+            table_kw={
+                "comment": table.comment,
+                "info": dict(table.info),
+                "prefixes": list(table._prefixes),
+                **table.kwargs,
+            },
+            _reverse=CreateTableOp.from_table(
+                table, _namespace_metadata=_namespace_metadata
+            ),
+        )
+
+    def to_table(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Table:
+        if self._reverse:
+            cols_and_constraints = self._reverse.columns
+        else:
+            cols_and_constraints = []
+
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        t = schema_obj.table(
+            self.table_name,
+            *cols_and_constraints,
+            comment=self.comment,
+            info=self.info.copy() if self.info else {},
+            prefixes=list(self.prefixes) if self.prefixes else [],
+            schema=self.schema,
+            _constraints_included=(
+                self._reverse._constraints_included if self._reverse else False
+            ),
+            **self.table_kw,
+        )
+        return t
+
+    @classmethod
+    def drop_table(
+        cls,
+        operations: Operations,
+        table_name: str,
+        *,
+        schema: Optional[str] = None,
+        if_exists: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        r"""Issue a "drop table" instruction using the current
+        migration context.
+
+        e.g.::
+
+            op.drop_table("accounts")
+
+        :param table_name: Name of the table
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+        :param if_exists: If True, adds IF EXISTS operator when
+         dropping the table.
+
+         .. versionadded:: 1.13.3
+
+        :param \**kw: Other keyword arguments are passed to the underlying
+         :class:`sqlalchemy.schema.Table` object created for the command.
+
+        """
+        op = cls(table_name, schema=schema, if_exists=if_exists, table_kw=kw)
+        operations.invoke(op)
+
+
+class AlterTableOp(MigrateOperation):
+    """Represent an alter table operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        self.table_name = table_name
+        self.schema = schema
+
+
+@Operations.register_operation("rename_table")
+class RenameTableOp(AlterTableOp):
+    """Represent a rename table operation."""
+
+    def __init__(
+        self,
+        old_table_name: str,
+        new_table_name: str,
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        super().__init__(old_table_name, schema=schema)
+        self.new_table_name = new_table_name
+
+    @classmethod
+    def rename_table(
+        cls,
+        operations: Operations,
+        old_table_name: str,
+        new_table_name: str,
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        """Emit an ALTER TABLE to rename a table.
+
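+        e.g., a minimal sketch, with illustrative table names::
+
+            from alembic import op
+
+            op.rename_table("account", "user_account")
+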
+        :param old_table_name: old name.
+        :param new_table_name: new name.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        """
+        op = cls(old_table_name, new_table_name, schema=schema)
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_table_comment")
+@BatchOperations.register_operation(
+    "create_table_comment", "batch_create_table_comment"
+)
+class CreateTableCommentOp(AlterTableOp):
+    """Represent a COMMENT ON `table` operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        comment: Optional[str],
+        *,
+        schema: Optional[str] = None,
+        existing_comment: Optional[str] = None,
+    ) -> None:
+        self.table_name = table_name
+        self.comment = comment
+        self.existing_comment = existing_comment
+        self.schema = schema
+
+    @classmethod
+    def create_table_comment(
+        cls,
+        operations: Operations,
+        table_name: str,
+        comment: Optional[str],
+        *,
+        existing_comment: Optional[str] = None,
+        schema: Optional[str] = None,
+    ) -> None:
+        """Emit a COMMENT ON operation to set the comment for a table.
+
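+        e.g., a minimal sketch; the table name and comment text are
+        illustrative::
+
+            from alembic import op
+
+            op.create_table_comment("account", "holds account records")
+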
+        :param table_name: string name of the target table.
+        :param comment: string value of the comment being registered against
+         the specified table.
+        :param existing_comment: String value of a comment
+         already registered on the specified table, used within autogenerate
+         so that the operation is reversible, but not required for direct
+         use.
+
+        .. seealso::
+
+            :meth:`.Operations.drop_table_comment`
+
+            :paramref:`.Operations.alter_column.comment`
+
+        """
+
+        op = cls(
+            table_name,
+            comment,
+            existing_comment=existing_comment,
+            schema=schema,
+        )
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_table_comment(
+        cls,
+        operations: BatchOperations,
+        comment: Optional[str],
+        *,
+        existing_comment: Optional[str] = None,
+    ) -> None:
+        """Emit a COMMENT ON operation to set the comment for a table
+        using the current batch migration context.
+
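+        e.g., a minimal sketch; the table name and comment text are
+        illustrative::
+
+            with batch_alter_table("account") as batch_op:
+                batch_op.create_table_comment("holds account records")
+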
+        :param comment: string value of the comment being registered against
+         the specified table.
+        :param existing_comment: String value of a comment
+         already registered on the specified table, used within autogenerate
+         so that the operation is reversible, but not required for direct
+         use.
+
+        """
+
+        op = cls(
+            operations.impl.table_name,
+            comment,
+            existing_comment=existing_comment,
+            schema=operations.impl.schema,
+        )
+        return operations.invoke(op)
+
+    def reverse(self) -> Union[CreateTableCommentOp, DropTableCommentOp]:
+        """Reverses the COMMENT ON operation against a table."""
+        if self.existing_comment is None:
+            return DropTableCommentOp(
+                self.table_name,
+                existing_comment=self.comment,
+                schema=self.schema,
+            )
+        else:
+            return CreateTableCommentOp(
+                self.table_name,
+                self.existing_comment,
+                existing_comment=self.comment,
+                schema=self.schema,
+            )
+
+    def to_table(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Table:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        return schema_obj.table(
+            self.table_name, schema=self.schema, comment=self.comment
+        )
+
+    def to_diff_tuple(self) -> Tuple[Any, ...]:
+        return ("add_table_comment", self.to_table(), self.existing_comment)
+
+
+@Operations.register_operation("drop_table_comment")
+@BatchOperations.register_operation(
+    "drop_table_comment", "batch_drop_table_comment"
+)
+class DropTableCommentOp(AlterTableOp):
+    """Represent an operation to remove the comment from a table."""
+
+    def __init__(
+        self,
+        table_name: str,
+        *,
+        schema: Optional[str] = None,
+        existing_comment: Optional[str] = None,
+    ) -> None:
+        self.table_name = table_name
+        self.existing_comment = existing_comment
+        self.schema = schema
+
+    @classmethod
+    def drop_table_comment(
+        cls,
+        operations: Operations,
+        table_name: str,
+        *,
+        existing_comment: Optional[str] = None,
+        schema: Optional[str] = None,
+    ) -> None:
+        """Issue a "drop table comment" operation to
+        remove an existing comment set on a table.
+
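+        e.g., a minimal sketch, with an illustrative table name::
+
+            from alembic import op
+
+            op.drop_table_comment("account")
+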
+        :param table_name: string name of the target table.
+        :param existing_comment: An optional string value of a comment already
+         registered on the specified table.
+
+        .. seealso::
+
+            :meth:`.Operations.create_table_comment`
+
+            :paramref:`.Operations.alter_column.comment`
+
+        """
+
+        op = cls(table_name, existing_comment=existing_comment, schema=schema)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_drop_table_comment(
+        cls,
+        operations: BatchOperations,
+        *,
+        existing_comment: Optional[str] = None,
+    ) -> None:
+        """Issue a "drop table comment" operation to
+        remove an existing comment set on a table using the current
+        batch operations context.
+
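+        e.g., a minimal sketch, with an illustrative table name::
+
+            with batch_alter_table("account") as batch_op:
+                batch_op.drop_table_comment()
+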
+        :param existing_comment: An optional string value of a comment already
+         registered on the specified table.
+
+        """
+
+        op = cls(
+            operations.impl.table_name,
+            existing_comment=existing_comment,
+            schema=operations.impl.schema,
+        )
+        return operations.invoke(op)
+
+    def reverse(self) -> CreateTableCommentOp:
+        """Reverses the COMMENT ON operation against a table."""
+        return CreateTableCommentOp(
+            self.table_name, self.existing_comment, schema=self.schema
+        )
+
+    def to_table(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Table:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        return schema_obj.table(self.table_name, schema=self.schema)
+
+    def to_diff_tuple(self) -> Tuple[Any, ...]:
+        return ("remove_table_comment", self.to_table())
+
+
+@Operations.register_operation("alter_column")
+@BatchOperations.register_operation("alter_column", "batch_alter_column")
+class AlterColumnOp(AlterTableOp):
+    """Represent an alter column operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        column_name: str,
+        *,
+        schema: Optional[str] = None,
+        existing_type: Optional[Any] = None,
+        existing_server_default: Any = False,
+        existing_nullable: Optional[bool] = None,
+        existing_comment: Optional[str] = None,
+        modify_nullable: Optional[bool] = None,
+        modify_comment: Optional[Union[str, Literal[False]]] = False,
+        modify_server_default: Any = False,
+        modify_name: Optional[str] = None,
+        modify_type: Optional[Any] = None,
+        **kw: Any,
+    ) -> None:
+        super().__init__(table_name, schema=schema)
+        self.column_name = column_name
+        self.existing_type = existing_type
+        self.existing_server_default = existing_server_default
+        self.existing_nullable = existing_nullable
+        self.existing_comment = existing_comment
+        self.modify_nullable = modify_nullable
+        self.modify_comment = modify_comment
+        self.modify_server_default = modify_server_default
+        self.modify_name = modify_name
+        self.modify_type = modify_type
+        self.kw = kw
+
+    def to_diff_tuple(self) -> Any:
+        col_diff = []
+        schema, tname, cname = self.schema, self.table_name, self.column_name
+
+        if self.modify_type is not None:
+            col_diff.append(
+                (
+                    "modify_type",
+                    schema,
+                    tname,
+                    cname,
+                    {
+                        "existing_nullable": self.existing_nullable,
+                        "existing_server_default": (
+                            self.existing_server_default
+                        ),
+                        "existing_comment": self.existing_comment,
+                    },
+                    self.existing_type,
+                    self.modify_type,
+                )
+            )
+
+        if self.modify_nullable is not None:
+            col_diff.append(
+                (
+                    "modify_nullable",
+                    schema,
+                    tname,
+                    cname,
+                    {
+                        "existing_type": self.existing_type,
+                        "existing_server_default": (
+                            self.existing_server_default
+                        ),
+                        "existing_comment": self.existing_comment,
+                    },
+                    self.existing_nullable,
+                    self.modify_nullable,
+                )
+            )
+
+        if self.modify_server_default is not False:
+            col_diff.append(
+                (
+                    "modify_default",
+                    schema,
+                    tname,
+                    cname,
+                    {
+                        "existing_nullable": self.existing_nullable,
+                        "existing_type": self.existing_type,
+                        "existing_comment": self.existing_comment,
+                    },
+                    self.existing_server_default,
+                    self.modify_server_default,
+                )
+            )
+
+        if self.modify_comment is not False:
+            col_diff.append(
+                (
+                    "modify_comment",
+                    schema,
+                    tname,
+                    cname,
+                    {
+                        "existing_nullable": self.existing_nullable,
+                        "existing_type": self.existing_type,
+                        "existing_server_default": (
+                            self.existing_server_default
+                        ),
+                    },
+                    self.existing_comment,
+                    self.modify_comment,
+                )
+            )
+
+        return col_diff
+
+    def has_changes(self) -> bool:
+        if (
+            self.modify_nullable is not None
+            or self.modify_server_default is not False
+            or self.modify_type is not None
+            or self.modify_comment is not False
+        ):
+            return True
+        # also consider any dialect-specific "modify_*" keyword arguments
+        return any(kw.startswith("modify_") for kw in self.kw)
+
+    def reverse(self) -> AlterColumnOp:
+        kw = self.kw.copy()
+        kw["existing_type"] = self.existing_type
+        kw["existing_nullable"] = self.existing_nullable
+        kw["existing_server_default"] = self.existing_server_default
+        kw["existing_comment"] = self.existing_comment
+        if self.modify_type is not None:
+            kw["modify_type"] = self.modify_type
+        if self.modify_nullable is not None:
+            kw["modify_nullable"] = self.modify_nullable
+        if self.modify_server_default is not False:
+            kw["modify_server_default"] = self.modify_server_default
+        if self.modify_comment is not False:
+            kw["modify_comment"] = self.modify_comment
+
+        # TODO: make this a little simpler
+        all_keys = {
+            m.group(1)
+            for m in [re.match(r"^(?:existing_|modify_)(.+)$", k) for k in kw]
+            if m
+        }
+
+        for k in all_keys:
+            if "modify_%s" % k in kw:
+                swap = kw["existing_%s" % k]
+                kw["existing_%s" % k] = kw["modify_%s" % k]
+                kw["modify_%s" % k] = swap
+
+        return self.__class__(
+            self.table_name, self.column_name, schema=self.schema, **kw
+        )
+
+    @classmethod
+    def alter_column(
+        cls,
+        operations: Operations,
+        table_name: str,
+        column_name: str,
+        *,
+        nullable: Optional[bool] = None,
+        comment: Optional[Union[str, Literal[False]]] = False,
+        server_default: Any = False,
+        new_column_name: Optional[str] = None,
+        type_: Optional[Union[TypeEngine[Any], Type[TypeEngine[Any]]]] = None,
+        existing_type: Optional[
+            Union[TypeEngine[Any], Type[TypeEngine[Any]]]
+        ] = None,
+        existing_server_default: Optional[
+            Union[str, bool, Identity, Computed]
+        ] = False,
+        existing_nullable: Optional[bool] = None,
+        existing_comment: Optional[str] = None,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        r"""Issue an "alter column" instruction using the
+        current migration context.
+
+        Generally, only that aspect of the column which
+        is being changed, i.e. name, type, nullability,
+        default, needs to be specified.  Multiple changes
+        can also be specified at once and the backend should
+        "do the right thing", emitting each change either
+        separately or together as the backend allows.
+
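+        e.g., a minimal sketch changing type and nullability at once, with
+        illustrative table and column names::
+
+            from alembic import op
+            import sqlalchemy as sa
+
+            op.alter_column(
+                "account",
+                "name",
+                existing_type=sa.VARCHAR(50),
+                type_=sa.VARCHAR(100),
+                nullable=False,
+            )
+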
+        MySQL has special requirements here, since MySQL
+        cannot ALTER a column without a full specification.
+        When producing MySQL-compatible migration files,
+        it is recommended that the ``existing_type``,
+        ``existing_server_default``, and ``existing_nullable``
+        parameters be present, if not being altered.
+
+        Type changes which are against the SQLAlchemy
+        "schema" types :class:`~sqlalchemy.types.Boolean`
+        and  :class:`~sqlalchemy.types.Enum` may also
+        add or drop constraints which accompany those
+        types on backends that don't support them natively.
+        The ``existing_type`` argument is
+        used in this case to identify and remove a previous
+        constraint that was bound to the type object.
+
+        :param table_name: string name of the target table.
+        :param column_name: string name of the target column,
+         as it exists before the operation begins.
+        :param nullable: Optional; specify ``True`` or ``False``
+         to alter the column's nullability.
+        :param server_default: Optional; specify a string
+         SQL expression, :func:`~sqlalchemy.sql.expression.text`,
+         or :class:`~sqlalchemy.schema.DefaultClause` to indicate
+         an alteration to the column's default value.
+         Set to ``None`` to have the default removed.
+        :param comment: optional string text of a new comment to add to the
+         column.
+        :param new_column_name: Optional; specify a string name here to
+         indicate the new name within a column rename operation.
+        :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine`
+         type object to specify a change to the column's type.
+         For SQLAlchemy types that also indicate a constraint (i.e.
+         :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
+         the constraint is also generated.
+        :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
+         currently understood by the MySQL dialect.
+        :param existing_type: Optional; a
+         :class:`~sqlalchemy.types.TypeEngine`
+         type object to specify the previous type.   This
+         is required for all MySQL column alter operations that
+         don't otherwise specify a new type, as well as for
+         when nullability is being changed on a SQL Server
+         column.  It is also used if the type is a so-called
+         SQLAlchemy "schema" type which may define a constraint (i.e.
+         :class:`~sqlalchemy.types.Boolean`,
+         :class:`~sqlalchemy.types.Enum`),
+         so that the constraint can be dropped.
+        :param existing_server_default: Optional; The existing
+         default value of the column.   Required on MySQL if
+         an existing default is not being changed; else MySQL
+         removes the default.
+        :param existing_nullable: Optional; the existing nullability
+         of the column.  Required on MySQL if the existing nullability
+         is not being changed; else MySQL sets this to NULL.
+        :param existing_autoincrement: Optional; the existing autoincrement
+         of the column.  Used for MySQL's system of altering a column
+         that specifies ``AUTO_INCREMENT``.
+        :param existing_comment: string text of the existing comment on the
+         column to be maintained.  Required on MySQL if the existing comment
+         on the column is not being changed.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+        :param postgresql_using: String argument which will indicate a
+         SQL expression to render within the Postgresql-specific USING clause
+         within ALTER COLUMN.    This string is taken directly as raw SQL which
+         must explicitly include any necessary quoting or escaping of tokens
+         within the expression.
+
+        """
+
+        alt = cls(
+            table_name,
+            column_name,
+            schema=schema,
+            existing_type=existing_type,
+            existing_server_default=existing_server_default,
+            existing_nullable=existing_nullable,
+            existing_comment=existing_comment,
+            modify_name=new_column_name,
+            modify_type=type_,
+            modify_server_default=server_default,
+            modify_nullable=nullable,
+            modify_comment=comment,
+            **kw,
+        )
+
+        return operations.invoke(alt)
+
+    @classmethod
+    def batch_alter_column(
+        cls,
+        operations: BatchOperations,
+        column_name: str,
+        *,
+        nullable: Optional[bool] = None,
+        comment: Optional[Union[str, Literal[False]]] = False,
+        server_default: Any = False,
+        new_column_name: Optional[str] = None,
+        type_: Optional[Union[TypeEngine[Any], Type[TypeEngine[Any]]]] = None,
+        existing_type: Optional[
+            Union[TypeEngine[Any], Type[TypeEngine[Any]]]
+        ] = None,
+        existing_server_default: Optional[
+            Union[str, bool, Identity, Computed]
+        ] = False,
+        existing_nullable: Optional[bool] = None,
+        existing_comment: Optional[str] = None,
+        insert_before: Optional[str] = None,
+        insert_after: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        """Issue an "alter column" instruction using the current
+        batch migration context.
+
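+        e.g., a minimal sketch, with illustrative table and column names::
+
+            import sqlalchemy as sa
+
+            with batch_alter_table("account") as batch_op:
+                batch_op.alter_column(
+                    "name",
+                    existing_type=sa.VARCHAR(50),
+                    nullable=False,
+                )
+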
+        Parameters are the same as those of :meth:`.Operations.alter_column`,
+        as well as the following option(s):
+
+        :param insert_before: String name of an existing column which this
+         column should be placed before, when creating the new table.
+
+        :param insert_after: String name of an existing column which this
+         column should be placed after, when creating the new table.  If
+         both :paramref:`.BatchOperations.alter_column.insert_before`
+         and :paramref:`.BatchOperations.alter_column.insert_after` are
+         omitted, the column is inserted after the last existing column
+         in the table.
+
+        .. seealso::
+
+            :meth:`.Operations.alter_column`
+
+
+        """
+        alt = cls(
+            operations.impl.table_name,
+            column_name,
+            schema=operations.impl.schema,
+            existing_type=existing_type,
+            existing_server_default=existing_server_default,
+            existing_nullable=existing_nullable,
+            existing_comment=existing_comment,
+            modify_name=new_column_name,
+            modify_type=type_,
+            modify_server_default=server_default,
+            modify_nullable=nullable,
+            modify_comment=comment,
+            insert_before=insert_before,
+            insert_after=insert_after,
+            **kw,
+        )
+
+        return operations.invoke(alt)
+
+
+@Operations.register_operation("add_column")
+@BatchOperations.register_operation("add_column", "batch_add_column")
+class AddColumnOp(AlterTableOp):
+    """Represent an add column operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        column: Column[Any],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        super().__init__(table_name, schema=schema)
+        self.column = column
+        self.kw = kw
+
+    def reverse(self) -> DropColumnOp:
+        return DropColumnOp.from_column_and_tablename(
+            self.schema, self.table_name, self.column
+        )
+
+    def to_diff_tuple(
+        self,
+    ) -> Tuple[str, Optional[str], str, Column[Any]]:
+        return ("add_column", self.schema, self.table_name, self.column)
+
+    def to_column(self) -> Column[Any]:
+        return self.column
+
+    @classmethod
+    def from_column(cls, col: Column[Any]) -> AddColumnOp:
+        return cls(col.table.name, col, schema=col.table.schema)
+
+    @classmethod
+    def from_column_and_tablename(
+        cls,
+        schema: Optional[str],
+        tname: str,
+        col: Column[Any],
+    ) -> AddColumnOp:
+        return cls(tname, col, schema=schema)
+
+    @classmethod
+    def add_column(
+        cls,
+        operations: Operations,
+        table_name: str,
+        column: Column[Any],
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        """Issue an "add column" instruction using the current
+        migration context.
+
+        e.g.::
+
+            from alembic import op
+            from sqlalchemy import Column, String
+
+            op.add_column("organization", Column("name", String()))
+
+        The :meth:`.Operations.add_column` method typically corresponds
+        to the SQL command "ALTER TABLE... ADD COLUMN".    Within the scope
+        of this command, the column's name, datatype, nullability,
+        and optional server-generated defaults may be indicated.
+
+        .. note::
+
+            With the exception of NOT NULL constraints or single-column FOREIGN
+            KEY constraints, other kinds of constraints such as PRIMARY KEY,
+            UNIQUE or CHECK constraints **cannot** be generated using this
+            method; for these constraints, refer to operations such as
+            :meth:`.Operations.create_primary_key` and
+            :meth:`.Operations.create_check_constraint`. In particular, the
+            following :class:`~sqlalchemy.schema.Column` parameters are
+            **ignored**:
+
+            * :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases
+              typically do not support an ALTER operation that can add
+              individual columns one at a time to an existing primary key
+              constraint, therefore it's less ambiguous to use the
+              :meth:`.Operations.create_primary_key` method, which assumes no
+              existing primary key constraint is present.
+            * :paramref:`~sqlalchemy.schema.Column.unique` - use the
+              :meth:`.Operations.create_unique_constraint` method
+            * :paramref:`~sqlalchemy.schema.Column.index` - use the
+              :meth:`.Operations.create_index` method
+
+
+        The provided :class:`~sqlalchemy.schema.Column` object may include a
+        :class:`~sqlalchemy.schema.ForeignKey` constraint directive,
+        referencing a remote table name. For this specific type of constraint,
+        Alembic will automatically emit a second ALTER statement in order to
+        add the single-column FOREIGN KEY constraint separately::
+
+            from alembic import op
+            from sqlalchemy import Column, INTEGER, ForeignKey
+
+            op.add_column(
+                "organization",
+                Column("account_id", INTEGER, ForeignKey("accounts.id")),
+            )
+
+        The column argument passed to :meth:`.Operations.add_column` is a
+        :class:`~sqlalchemy.schema.Column` construct, used in the same way it's
+        used in SQLAlchemy. In particular, values or functions to be indicated
+        as producing the column's default value on the database side are
+        specified using the ``server_default`` parameter, and not ``default``
+        which only specifies Python-side defaults::
+
+            from alembic import op
+            from sqlalchemy import Column, TIMESTAMP, func
+
+            # specify "DEFAULT NOW" along with the column add
+            op.add_column(
+                "account",
+                Column("timestamp", TIMESTAMP, server_default=func.now()),
+            )
+
+        :param table_name: String name of the parent table.
+        :param column: a :class:`sqlalchemy.schema.Column` object
+         representing the new column.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        """
+
+        op = cls(table_name, column, schema=schema)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_add_column(
+        cls,
+        operations: BatchOperations,
+        column: Column[Any],
+        *,
+        insert_before: Optional[str] = None,
+        insert_after: Optional[str] = None,
+    ) -> None:
+        """Issue an "add column" instruction using the current
+        batch migration context.
+
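+        e.g., a minimal sketch, with illustrative names::
+
+            from sqlalchemy import Column, String
+
+            with batch_alter_table("organization") as batch_op:
+                batch_op.add_column(Column("name", String()))
+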
+        .. seealso::
+
+            :meth:`.Operations.add_column`
+
+        """
+
+        kw = {}
+        if insert_before:
+            kw["insert_before"] = insert_before
+        if insert_after:
+            kw["insert_after"] = insert_after
+
+        op = cls(
+            operations.impl.table_name,
+            column,
+            schema=operations.impl.schema,
+            **kw,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("drop_column")
+@BatchOperations.register_operation("drop_column", "batch_drop_column")
+class DropColumnOp(AlterTableOp):
+    """Represent a drop column operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        column_name: str,
+        *,
+        schema: Optional[str] = None,
+        _reverse: Optional[AddColumnOp] = None,
+        **kw: Any,
+    ) -> None:
+        super().__init__(table_name, schema=schema)
+        self.column_name = column_name
+        self.kw = kw
+        self._reverse = _reverse
+
+    def to_diff_tuple(
+        self,
+    ) -> Tuple[str, Optional[str], str, Column[Any]]:
+        return (
+            "remove_column",
+            self.schema,
+            self.table_name,
+            self.to_column(),
+        )
+
+    def reverse(self) -> AddColumnOp:
+        if self._reverse is None:
+            raise ValueError(
+                "operation is not reversible; "
+                "original column is not present"
+            )
+
+        return AddColumnOp.from_column_and_tablename(
+            self.schema, self.table_name, self._reverse.column
+        )
+
+    @classmethod
+    def from_column_and_tablename(
+        cls,
+        schema: Optional[str],
+        tname: str,
+        col: Column[Any],
+    ) -> DropColumnOp:
+        return cls(
+            tname,
+            col.name,
+            schema=schema,
+            _reverse=AddColumnOp.from_column_and_tablename(schema, tname, col),
+        )
+
+    def to_column(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Column[Any]:
+        if self._reverse is not None:
+            return self._reverse.column
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        return schema_obj.column(self.column_name, NULLTYPE)
+
+    @classmethod
+    def drop_column(
+        cls,
+        operations: Operations,
+        table_name: str,
+        column_name: str,
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        """Issue a "drop column" instruction using the current
+        migration context.
+
+        e.g.::
+
+            drop_column("organization", "account_id")
+
+        :param table_name: name of table
+        :param column_name: name of column
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+        :param mssql_drop_check: Optional boolean.  When ``True``, on
+         Microsoft SQL Server only, first
+         drop the CHECK constraint on the column using a
+         SQL-script-compatible
+         block that selects into a @variable from sys.check_constraints,
+         then exec's a separate DROP CONSTRAINT for that constraint.
+        :param mssql_drop_default: Optional boolean.  When ``True``, on
+         Microsoft SQL Server only, first
+         drop the DEFAULT constraint on the column using a
+         SQL-script-compatible
+         block that selects into a @variable from sys.default_constraints,
+         then exec's a separate DROP CONSTRAINT for that default.
+        :param mssql_drop_foreign_key: Optional boolean.  When ``True``, on
+         Microsoft SQL Server only, first
+         drop a single FOREIGN KEY constraint on the column using a
+         SQL-script-compatible
+         block that selects into a @variable from
+         sys.foreign_keys/sys.foreign_key_columns,
+         then exec's a separate DROP CONSTRAINT for that foreign key.  At
+         the moment, this only works if exactly one FK constraint
+         refers to the column.
+
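+        For example, on SQL Server, to drop a column together with its
+        DEFAULT constraint (a sketch; table and column names are
+        illustrative)::
+
+            op.drop_column("account", "timestamp", mssql_drop_default=True)
+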
+        """
+
+        op = cls(table_name, column_name, schema=schema, **kw)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_drop_column(
+        cls, operations: BatchOperations, column_name: str, **kw: Any
+    ) -> None:
+        """Issue a "drop column" instruction using the current
+        batch migration context.
+
+        .. seealso::
+
+            :meth:`.Operations.drop_column`
+
+        """
+        op = cls(
+            operations.impl.table_name,
+            column_name,
+            schema=operations.impl.schema,
+            **kw,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("bulk_insert")
+class BulkInsertOp(MigrateOperation):
+    """Represent a bulk insert operation."""
+
+    def __init__(
+        self,
+        table: Union[Table, TableClause],
+        rows: List[Dict[str, Any]],
+        *,
+        multiinsert: bool = True,
+    ) -> None:
+        self.table = table
+        self.rows = rows
+        self.multiinsert = multiinsert
+
+    @classmethod
+    def bulk_insert(
+        cls,
+        operations: Operations,
+        table: Union[Table, TableClause],
+        rows: List[Dict[str, Any]],
+        *,
+        multiinsert: bool = True,
+    ) -> None:
+        """Issue a "bulk insert" operation using the current
+        migration context.
+
+        This provides a means of representing an INSERT of multiple rows
+        which works equally well when executing on a live connection
+        and when generating a SQL script.   In the
+        case of a SQL script, the values are rendered inline into the
+        statement.
+
+        e.g.::
+
+            from alembic import op
+            from datetime import date
+            from sqlalchemy.sql import table, column
+            from sqlalchemy import String, Integer, Date
+
+            # Create an ad-hoc table to use for the insert statement.
+            accounts_table = table(
+                "account",
+                column("id", Integer),
+                column("name", String),
+                column("create_date", Date),
+            )
+
+            op.bulk_insert(
+                accounts_table,
+                [
+                    {
+                        "id": 1,
+                        "name": "John Smith",
+                        "create_date": date(2010, 10, 5),
+                    },
+                    {
+                        "id": 2,
+                        "name": "Ed Williams",
+                        "create_date": date(2007, 5, 27),
+                    },
+                    {
+                        "id": 3,
+                        "name": "Wendy Jones",
+                        "create_date": date(2008, 8, 15),
+                    },
+                ],
+            )
+
+        When using --sql mode, some datatypes may not render inline
+        automatically, such as dates and other special types.   When this
+        issue is present, :meth:`.Operations.inline_literal` may be used::
+
+            op.bulk_insert(
+                accounts_table,
+                [
+                    {
+                        "id": 1,
+                        "name": "John Smith",
+                        "create_date": op.inline_literal("2010-10-05"),
+                    },
+                    {
+                        "id": 2,
+                        "name": "Ed Williams",
+                        "create_date": op.inline_literal("2007-05-27"),
+                    },
+                    {
+                        "id": 3,
+                        "name": "Wendy Jones",
+                        "create_date": op.inline_literal("2008-08-15"),
+                    },
+                ],
+                multiinsert=False,
+            )
+
+        When using :meth:`.Operations.inline_literal` in conjunction with
+        :meth:`.Operations.bulk_insert`, in order for the statement to work
+        in "online" (e.g. non --sql) mode, the
+        :paramref:`~.Operations.bulk_insert.multiinsert`
+        flag should be set to ``False``, which will have the effect of
+        individual INSERT statements being emitted to the database, each
+        with a distinct VALUES clause, so that the "inline" values can
+        still be rendered, rather than attempting to pass the values
+        as bound parameters.
+
+        :param table: a table object which represents the target of the INSERT.
+
+        :param rows: a list of dictionaries indicating rows.
+
+        :param multiinsert: when at its default of True and --sql mode is not
+           enabled, the INSERT statement will be executed using
+           "executemany()" style, where all elements in the list of
+           dictionaries are passed as bound parameters in a single
+           list.   Setting this to False results in individual INSERT
+           statements being emitted per parameter set, and is needed
+           in those cases where non-literal values are present in the
+           parameter sets.
+
+        """
+
+        op = cls(table, rows, multiinsert=multiinsert)
+        operations.invoke(op)
+
+
+@Operations.register_operation("execute")
+@BatchOperations.register_operation("execute", "batch_execute")
+class ExecuteSQLOp(MigrateOperation):
+    """Represent an execute SQL operation."""
+
+    def __init__(
+        self,
+        sqltext: Union[Executable, str],
+        *,
+        execution_options: Optional[dict[str, Any]] = None,
+    ) -> None:
+        self.sqltext = sqltext
+        self.execution_options = execution_options
+
+    @classmethod
+    def execute(
+        cls,
+        operations: Operations,
+        sqltext: Union[Executable, str],
+        *,
+        execution_options: Optional[dict[str, Any]] = None,
+    ) -> None:
+        r"""Execute the given SQL using the current migration context.
+
+        The given SQL can be a plain string, e.g.::
+
+            op.execute("INSERT INTO table (foo) VALUES ('some value')")
+
+        Or it can be any kind of Core SQL Expression construct, such as
+        below where we use an update construct::
+
+            from sqlalchemy.sql import table, column
+            from sqlalchemy import String
+            from alembic import op
+
+            account = table("account", column("name", String))
+            op.execute(
+                account.update()
+                .where(account.c.name == op.inline_literal("account 1"))
+                .values({"name": op.inline_literal("account 2")})
+            )
+
+        Above, we made use of the SQLAlchemy
+        :func:`sqlalchemy.sql.expression.table` and
+        :func:`sqlalchemy.sql.expression.column` constructs to make a brief,
+        ad-hoc table construct just for our UPDATE statement.  A full
+        :class:`~sqlalchemy.schema.Table` construct of course works perfectly
+        fine as well, though note it's a recommended practice to at least
+        ensure the definition of a table is self-contained within the migration
+        script, rather than imported from a module that may break compatibility
+        with older migrations.
+
+        In a SQL script context, the statement is emitted directly to the
+        output stream.   There is *no* return result, however, as this
+        function is oriented towards generating a change script
+        that can run in "offline" mode.  Additionally, parameterized
+        statements are discouraged here, as they *will not work* in offline
+        mode.  Above, we use :meth:`.inline_literal` where parameters are
+        to be used.
+
+        For full interaction with a connected database where parameters can
+        also be used normally, use the "bind" available from the context::
+
+            from alembic import op
+
+            connection = op.get_bind()
+
+            connection.execute(
+                account.update()
+                .where(account.c.name == "account 1")
+                .values({"name": "account 2"})
+            )
+
+        Additionally, when passing the statement as a plain string, it is first
+        coerced into a :func:`sqlalchemy.sql.expression.text` construct
+        before being passed along.  In the less likely case that the
+        literal SQL string contains a colon, it must be escaped with a
+        backslash, as::
+
+           op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')")
+
+
+        :param sqltext: Any legal SQLAlchemy expression, including:
+
+        * a string
+        * a :func:`sqlalchemy.sql.expression.text` construct.
+        * a :func:`sqlalchemy.sql.expression.insert` construct.
+        * a :func:`sqlalchemy.sql.expression.update` construct.
+        * a :func:`sqlalchemy.sql.expression.delete` construct.
+        * Any "executable" described in SQLAlchemy Core documentation,
+          noting that no result set is returned.
+
+        .. note::  when passing a plain string, the statement is coerced into
+           a :func:`sqlalchemy.sql.expression.text` construct. This construct
+           considers symbols with colons, e.g. ``:foo`` to be bound parameters.
+           To avoid this, ensure that colon symbols are escaped, e.g.
+           ``\:foo``.
+
+        :param execution_options: Optional dictionary of
+         execution options, will be passed to
+         :meth:`sqlalchemy.engine.Connection.execution_options`.
+        """
+        op = cls(sqltext, execution_options=execution_options)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_execute(
+        cls,
+        operations: Operations,
+        sqltext: Union[Executable, str],
+        *,
+        execution_options: Optional[dict[str, Any]] = None,
+    ) -> None:
+        """Execute the given SQL using the current migration context.
+
+        .. seealso::
+
+            :meth:`.Operations.execute`
+
+        """
+        return cls.execute(
+            operations, sqltext, execution_options=execution_options
+        )
+
+    def to_diff_tuple(self) -> Tuple[str, Union[Executable, str]]:
+        return ("execute", self.sqltext)
+
+
+class OpContainer(MigrateOperation):
+    """Represent a sequence of operations operation."""
+
+    def __init__(self, ops: Sequence[MigrateOperation] = ()) -> None:
+        self.ops = list(ops)
+
+    def is_empty(self) -> bool:
+        return not self.ops
+
+    def as_diffs(self) -> Any:
+        return list(OpContainer._ops_as_diffs(self))
+
+    @classmethod
+    def _ops_as_diffs(
+        cls, migrations: OpContainer
+    ) -> Iterator[Tuple[Any, ...]]:
+        for op in migrations.ops:
+            if hasattr(op, "ops"):
+                yield from cls._ops_as_diffs(cast("OpContainer", op))
+            else:
+                yield op.to_diff_tuple()
+
+
+class ModifyTableOps(OpContainer):
+    """Contains a sequence of operations that all apply to a single Table."""
+
+    def __init__(
+        self,
+        table_name: str,
+        ops: Sequence[MigrateOperation],
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        super().__init__(ops)
+        self.table_name = table_name
+        self.schema = schema
+
+    def reverse(self) -> ModifyTableOps:
+        return ModifyTableOps(
+            self.table_name,
+            ops=list(reversed([op.reverse() for op in self.ops])),
+            schema=self.schema,
+        )
+
+
+class UpgradeOps(OpContainer):
+    """contains a sequence of operations that would apply to the
+    'upgrade' stream of a script.
+
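+    For example, the stream may be inspected inside a
+    ``process_revision_directives`` hook, e.g. to skip generating an
+    empty revision (a sketch)::
+
+        def process_revision_directives(context, revision, directives):
+            script = directives[0]
+            if script.upgrade_ops.is_empty():
+                directives[:] = []
+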
+    .. seealso::
+
+        :ref:`customizing_revision`
+
+    """
+
+    def __init__(
+        self,
+        ops: Sequence[MigrateOperation] = (),
+        upgrade_token: str = "upgrades",
+    ) -> None:
+        super().__init__(ops=ops)
+        self.upgrade_token = upgrade_token
+
+    def reverse_into(self, downgrade_ops: DowngradeOps) -> DowngradeOps:
+        downgrade_ops.ops[:] = list(
+            reversed([op.reverse() for op in self.ops])
+        )
+        return downgrade_ops
+
+    def reverse(self) -> DowngradeOps:
+        return self.reverse_into(DowngradeOps(ops=[]))
+
+
+class DowngradeOps(OpContainer):
+    """contains a sequence of operations that would apply to the
+    'downgrade' stream of a script.
+
+    .. seealso::
+
+        :ref:`customizing_revision`
+
+    """
+
+    def __init__(
+        self,
+        ops: Sequence[MigrateOperation] = (),
+        downgrade_token: str = "downgrades",
+    ) -> None:
+        super().__init__(ops=ops)
+        self.downgrade_token = downgrade_token
+
+    def reverse(self) -> UpgradeOps:
+        return UpgradeOps(
+            ops=list(reversed([op.reverse() for op in self.ops]))
+        )
+
+
+class MigrationScript(MigrateOperation):
+    """represents a migration script.
+
+    E.g. when autogenerate encounters this object, this corresponds to the
+    production of an actual script file.
+
+    A normal :class:`.MigrationScript` object would contain a single
+    :class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive.
+    These are accessible via the ``.upgrade_ops`` and ``.downgrade_ops``
+    attributes.
+
+    In the case of an autogenerate operation that runs multiple times,
+    such as the multiple database example in the "multidb" template,
+    the ``.upgrade_ops`` and ``.downgrade_ops`` attributes are disabled,
+    and instead these objects should be accessed via the ``.upgrade_ops_list``
+    and ``.downgrade_ops_list`` list-based attributes.  These latter
+    attributes are always available at the very least as single-element lists.
+
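+    For example, code that must work in both single- and multi-database
+    environments can iterate the list form rather than the scalar
+    attribute (a sketch; ``script`` is a :class:`.MigrationScript`)::
+
+        for upgrade_ops in script.upgrade_ops_list:
+            print(upgrade_ops.upgrade_token)
+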
+    .. seealso::
+
+        :ref:`customizing_revision`
+
+    """
+
+    _needs_render: Optional[bool]
+    _upgrade_ops: List[UpgradeOps]
+    _downgrade_ops: List[DowngradeOps]
+
+    def __init__(
+        self,
+        rev_id: Optional[str],
+        upgrade_ops: UpgradeOps,
+        downgrade_ops: DowngradeOps,
+        *,
+        message: Optional[str] = None,
+        imports: Set[str] = set(),
+        head: Optional[str] = None,
+        splice: Optional[bool] = None,
+        branch_label: Optional[_RevIdType] = None,
+        version_path: Optional[str] = None,
+        depends_on: Optional[_RevIdType] = None,
+    ) -> None:
+        self.rev_id = rev_id
+        self.message = message
+        self.imports = imports
+        self.head = head
+        self.splice = splice
+        self.branch_label = branch_label
+        self.version_path = version_path
+        self.depends_on = depends_on
+        self.upgrade_ops = upgrade_ops
+        self.downgrade_ops = downgrade_ops
+
+    @property
+    def upgrade_ops(self) -> Optional[UpgradeOps]:
+        """An instance of :class:`.UpgradeOps`.
+
+        .. seealso::
+
+            :attr:`.MigrationScript.upgrade_ops_list`
+        """
+        if len(self._upgrade_ops) > 1:
+            raise ValueError(
+                "This MigrationScript instance has a multiple-entry "
+                "list for UpgradeOps; please use the "
+                "upgrade_ops_list attribute."
+            )
+        elif not self._upgrade_ops:
+            return None
+        else:
+            return self._upgrade_ops[0]
+
+    @upgrade_ops.setter
+    def upgrade_ops(
+        self, upgrade_ops: Union[UpgradeOps, List[UpgradeOps]]
+    ) -> None:
+        self._upgrade_ops = util.to_list(upgrade_ops)
+        for elem in self._upgrade_ops:
+            assert isinstance(elem, UpgradeOps)
+
+    @property
+    def downgrade_ops(self) -> Optional[DowngradeOps]:
+        """An instance of :class:`.DowngradeOps`.
+
+        .. seealso::
+
+            :attr:`.MigrationScript.downgrade_ops_list`
+        """
+        if len(self._downgrade_ops) > 1:
+            raise ValueError(
+                "This MigrationScript instance has a multiple-entry "
+                "list for DowngradeOps; please use the "
+                "downgrade_ops_list attribute."
+            )
+        elif not self._downgrade_ops:
+            return None
+        else:
+            return self._downgrade_ops[0]
+
+    @downgrade_ops.setter
+    def downgrade_ops(
+        self, downgrade_ops: Union[DowngradeOps, List[DowngradeOps]]
+    ) -> None:
+        self._downgrade_ops = util.to_list(downgrade_ops)
+        for elem in self._downgrade_ops:
+            assert isinstance(elem, DowngradeOps)
+
+    @property
+    def upgrade_ops_list(self) -> List[UpgradeOps]:
+        """A list of :class:`.UpgradeOps` instances.
+
+        This is used in place of the :attr:`.MigrationScript.upgrade_ops`
+        attribute when dealing with a revision operation that does
+        multiple autogenerate passes.
+
+        """
+        return self._upgrade_ops
+
+    @property
+    def downgrade_ops_list(self) -> List[DowngradeOps]:
+        """A list of :class:`.DowngradeOps` instances.
+
+        This is used in place of the :attr:`.MigrationScript.downgrade_ops`
+        attribute when dealing with a revision operation that does
+        multiple autogenerate passes.
+
+        """
+        return self._downgrade_ops
diff --git a/.venv/lib/python3.12/site-packages/alembic/operations/schemaobj.py b/.venv/lib/python3.12/site-packages/alembic/operations/schemaobj.py
new file mode 100644
index 00000000..59c1002f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/operations/schemaobj.py
@@ -0,0 +1,290 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import Sequence
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import schema as sa_schema
+from sqlalchemy.sql.schema import Column
+from sqlalchemy.sql.schema import Constraint
+from sqlalchemy.sql.schema import Index
+from sqlalchemy.types import Integer
+from sqlalchemy.types import NULLTYPE
+
+from .. import util
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.schema import CheckConstraint
+    from sqlalchemy.sql.schema import ForeignKey
+    from sqlalchemy.sql.schema import ForeignKeyConstraint
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import PrimaryKeyConstraint
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.schema import UniqueConstraint
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from ..runtime.migration import MigrationContext
+
+
+class SchemaObjects:
+    def __init__(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> None:
+        self.migration_context = migration_context
+
+    def primary_key_constraint(
+        self,
+        name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        cols: Sequence[str],
+        schema: Optional[str] = None,
+        **dialect_kw,
+    ) -> PrimaryKeyConstraint:
+        m = self.metadata()
+        columns = [sa_schema.Column(n, NULLTYPE) for n in cols]
+        t = sa_schema.Table(table_name, m, *columns, schema=schema)
+        # SQLAlchemy primary key constraint name arg is wrongly typed on
+        # the SQLAlchemy side through 2.0.5 at least
+        p = sa_schema.PrimaryKeyConstraint(
+            *[t.c[n] for n in cols], name=name, **dialect_kw  # type: ignore
+        )
+        return p
+
+    def foreign_key_constraint(
+        self,
+        name: Optional[sqla_compat._ConstraintNameDefined],
+        source: str,
+        referent: str,
+        local_cols: List[str],
+        remote_cols: List[str],
+        onupdate: Optional[str] = None,
+        ondelete: Optional[str] = None,
+        deferrable: Optional[bool] = None,
+        source_schema: Optional[str] = None,
+        referent_schema: Optional[str] = None,
+        initially: Optional[str] = None,
+        match: Optional[str] = None,
+        **dialect_kw,
+    ) -> ForeignKeyConstraint:
+        m = self.metadata()
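+        # for a self-referential FK (same table and schema), local and
+        # remote columns are placed on a single placeholder table;
+        # otherwise a separate placeholder table is built for the referent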
+        if source == referent and source_schema == referent_schema:
+            t1_cols = local_cols + remote_cols
+        else:
+            t1_cols = local_cols
+            sa_schema.Table(
+                referent,
+                m,
+                *[sa_schema.Column(n, NULLTYPE) for n in remote_cols],
+                schema=referent_schema,
+            )
+
+        t1 = sa_schema.Table(
+            source,
+            m,
+            *[
+                sa_schema.Column(n, NULLTYPE)
+                for n in util.unique_list(t1_cols)
+            ],
+            schema=source_schema,
+        )
+
+        tname = (
+            "%s.%s" % (referent_schema, referent)
+            if referent_schema
+            else referent
+        )
+
+        dialect_kw["match"] = match
+
+        f = sa_schema.ForeignKeyConstraint(
+            local_cols,
+            ["%s.%s" % (tname, n) for n in remote_cols],
+            name=name,
+            onupdate=onupdate,
+            ondelete=ondelete,
+            deferrable=deferrable,
+            initially=initially,
+            **dialect_kw,
+        )
+        t1.append_constraint(f)
+
+        return f
+
+    def unique_constraint(
+        self,
+        name: Optional[sqla_compat._ConstraintNameDefined],
+        source: str,
+        local_cols: Sequence[str],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> UniqueConstraint:
+        t = sa_schema.Table(
+            source,
+            self.metadata(),
+            *[sa_schema.Column(n, NULLTYPE) for n in local_cols],
+            schema=schema,
+        )
+        kw["name"] = name
+        uq = sa_schema.UniqueConstraint(*[t.c[n] for n in local_cols], **kw)
+        # TODO: need event tests to ensure the event
+        # is fired off here
+        t.append_constraint(uq)
+        return uq
+
+    def check_constraint(
+        self,
+        name: Optional[sqla_compat._ConstraintNameDefined],
+        source: str,
+        condition: Union[str, TextClause, ColumnElement[Any]],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> CheckConstraint:
+        t = sa_schema.Table(
+            source,
+            self.metadata(),
+            sa_schema.Column("x", Integer),
+            schema=schema,
+        )
+        ck = sa_schema.CheckConstraint(condition, name=name, **kw)
+        t.append_constraint(ck)
+        return ck
+
+    def generic_constraint(
+        self,
+        name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        type_: Optional[str],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> Any:
+        t = self.table(table_name, schema=schema)
+        types: Dict[Optional[str], Any] = {
+            "foreignkey": lambda name: sa_schema.ForeignKeyConstraint(
+                [], [], name=name
+            ),
+            "primary": sa_schema.PrimaryKeyConstraint,
+            "unique": sa_schema.UniqueConstraint,
+            "check": lambda name: sa_schema.CheckConstraint("", name=name),
+            None: sa_schema.Constraint,
+        }
+        try:
+            const = types[type_]
+        except KeyError as ke:
+            raise TypeError(
+                "'type' can be one of %s"
+                % ", ".join(sorted(repr(x) for x in types))
+            ) from ke
+        else:
+            const = const(name=name)
+            t.append_constraint(const)
+            return const
+
+    def metadata(self) -> MetaData:
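+        # carry over the naming convention from the user's target_metadata,
+        # when one is configured, so that constraints built for ad-hoc
+        # operations are named consistently with the application's schema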
+        kw = {}
+        if (
+            self.migration_context is not None
+            and "target_metadata" in self.migration_context.opts
+        ):
+            mt = self.migration_context.opts["target_metadata"]
+            if hasattr(mt, "naming_convention"):
+                kw["naming_convention"] = mt.naming_convention
+        return sa_schema.MetaData(**kw)
+
+    def table(self, name: str, *columns, **kw) -> Table:
+        m = self.metadata()
+
+        cols = [
+            sqla_compat._copy(c) if c.table is not None else c
+            for c in columns
+            if isinstance(c, Column)
+        ]
+        # these flags have already added their UniqueConstraint /
+        # Index objects to the table, so flip them off here.
+        # SQLAlchemy tometadata() avoids this instead by preserving the
+        # flags and skipping the constraints that have _type_bound on them,
+        # but for a migration we'd rather list out the constraints
+        # explicitly.
+        _constraints_included = kw.pop("_constraints_included", False)
+        if _constraints_included:
+            for c in cols:
+                c.unique = c.index = False
+
+        t = sa_schema.Table(name, m, *cols, **kw)
+
+        constraints = [
+            (
+                sqla_compat._copy(elem, target_table=t)
+                if getattr(elem, "parent", None) is not t
+                and getattr(elem, "parent", None) is not None
+                else elem
+            )
+            for elem in columns
+            if isinstance(elem, (Constraint, Index))
+        ]
+
+        for const in constraints:
+            t.append_constraint(const)
+
+        for f in t.foreign_keys:
+            self._ensure_table_for_fk(m, f)
+        return t
+
+    def column(self, name: str, type_: TypeEngine, **kw) -> Column:
+        return sa_schema.Column(name, type_, **kw)
+
+    def index(
+        self,
+        name: Optional[str],
+        tablename: Optional[str],
+        columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> Index:
+        t = sa_schema.Table(
+            tablename or "no_table",
+            self.metadata(),
+            schema=schema,
+        )
+        kw["_table"] = t
+        idx = sa_schema.Index(
+            name,
+            *[util.sqla_compat._textual_index_column(t, n) for n in columns],
+            **kw,
+        )
+        return idx
+
+    def _parse_table_key(self, table_key: str) -> Tuple[Optional[str], str]:
+        if "." in table_key:
+            tokens = table_key.split(".")
+            sname: Optional[str] = ".".join(tokens[0:-1])
+            tname = tokens[-1]
+        else:
+            tname = table_key
+            sname = None
+        return (sname, tname)
+
+    def _ensure_table_for_fk(self, metadata: MetaData, fk: ForeignKey) -> None:
+        """create a placeholder Table object for the referent of a
+        ForeignKey.
+
+        """
+        if isinstance(fk._colspec, str):
+            table_key, cname = fk._colspec.rsplit(".", 1)
+            sname, tname = self._parse_table_key(table_key)
+            if table_key not in metadata.tables:
+                rel_t = sa_schema.Table(tname, metadata, schema=sname)
+            else:
+                rel_t = metadata.tables[table_key]
+            if cname not in rel_t.c:
+                rel_t.append_column(sa_schema.Column(cname, NULLTYPE))
diff --git a/.venv/lib/python3.12/site-packages/alembic/operations/toimpl.py b/.venv/lib/python3.12/site-packages/alembic/operations/toimpl.py
new file mode 100644
index 00000000..528c0542
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/operations/toimpl.py
@@ -0,0 +1,225 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from typing import TYPE_CHECKING
+
+from sqlalchemy import schema as sa_schema
+
+from . import ops
+from .base import Operations
+from ..util.sqla_compat import _copy
+
+if TYPE_CHECKING:
+    from sqlalchemy.sql.schema import Table
+
+
+@Operations.implementation_for(ops.AlterColumnOp)
+def alter_column(
+    operations: "Operations", operation: "ops.AlterColumnOp"
+) -> None:
+    compiler = operations.impl.dialect.statement_compiler(
+        operations.impl.dialect, None
+    )
+
+    existing_type = operation.existing_type
+    existing_nullable = operation.existing_nullable
+    existing_server_default = operation.existing_server_default
+    type_ = operation.modify_type
+    column_name = operation.column_name
+    table_name = operation.table_name
+    schema = operation.schema
+    server_default = operation.modify_server_default
+    new_column_name = operation.modify_name
+    nullable = operation.modify_nullable
+    comment = operation.modify_comment
+    existing_comment = operation.existing_comment
+
+    def _count_constraint(constraint):
+        return not isinstance(constraint, sa_schema.PrimaryKeyConstraint) and (
+            not constraint._create_rule or constraint._create_rule(compiler)
+        )
+
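+    # when the column type is changing, drop constraints implied by the
+    # existing type (e.g. the CHECK constraint some backends render for
+    # Boolean / Enum) before the ALTER; constraints implied by the new
+    # type are added back afterwards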
+    if existing_type and type_:
+        t = operations.schema_obj.table(
+            table_name,
+            sa_schema.Column(column_name, existing_type),
+            schema=schema,
+        )
+        for constraint in t.constraints:
+            if _count_constraint(constraint):
+                operations.impl.drop_constraint(constraint)
+
+    operations.impl.alter_column(
+        table_name,
+        column_name,
+        nullable=nullable,
+        server_default=server_default,
+        name=new_column_name,
+        type_=type_,
+        schema=schema,
+        existing_type=existing_type,
+        existing_server_default=existing_server_default,
+        existing_nullable=existing_nullable,
+        comment=comment,
+        existing_comment=existing_comment,
+        **operation.kw,
+    )
+
+    if type_:
+        t = operations.schema_obj.table(
+            table_name,
+            operations.schema_obj.column(column_name, type_),
+            schema=schema,
+        )
+        for constraint in t.constraints:
+            if _count_constraint(constraint):
+                operations.impl.add_constraint(constraint)
+
+
+@Operations.implementation_for(ops.DropTableOp)
+def drop_table(operations: "Operations", operation: "ops.DropTableOp") -> None:
+    kw = {}
+    if operation.if_exists is not None:
+        kw["if_exists"] = operation.if_exists
+    operations.impl.drop_table(
+        operation.to_table(operations.migration_context), **kw
+    )
+
+
+@Operations.implementation_for(ops.DropColumnOp)
+def drop_column(
+    operations: "Operations", operation: "ops.DropColumnOp"
+) -> None:
+    column = operation.to_column(operations.migration_context)
+    operations.impl.drop_column(
+        operation.table_name, column, schema=operation.schema, **operation.kw
+    )
+
+
+@Operations.implementation_for(ops.CreateIndexOp)
+def create_index(
+    operations: "Operations", operation: "ops.CreateIndexOp"
+) -> None:
+    idx = operation.to_index(operations.migration_context)
+    kw = {}
+    if operation.if_not_exists is not None:
+        kw["if_not_exists"] = operation.if_not_exists
+    operations.impl.create_index(idx, **kw)
+
+
+@Operations.implementation_for(ops.DropIndexOp)
+def drop_index(operations: "Operations", operation: "ops.DropIndexOp") -> None:
+    kw = {}
+    if operation.if_exists is not None:
+        kw["if_exists"] = operation.if_exists
+
+    operations.impl.drop_index(
+        operation.to_index(operations.migration_context),
+        **kw,
+    )
+
+
+@Operations.implementation_for(ops.CreateTableOp)
+def create_table(
+    operations: "Operations", operation: "ops.CreateTableOp"
+) -> "Table":
+    kw = {}
+    if operation.if_not_exists is not None:
+        kw["if_not_exists"] = operation.if_not_exists
+    table = operation.to_table(operations.migration_context)
+    operations.impl.create_table(table, **kw)
+    return table
+
+
+@Operations.implementation_for(ops.RenameTableOp)
+def rename_table(
+    operations: "Operations", operation: "ops.RenameTableOp"
+) -> None:
+    operations.impl.rename_table(
+        operation.table_name, operation.new_table_name, schema=operation.schema
+    )
+
+
+@Operations.implementation_for(ops.CreateTableCommentOp)
+def create_table_comment(
+    operations: "Operations", operation: "ops.CreateTableCommentOp"
+) -> None:
+    table = operation.to_table(operations.migration_context)
+    operations.impl.create_table_comment(table)
+
+
+@Operations.implementation_for(ops.DropTableCommentOp)
+def drop_table_comment(
+    operations: "Operations", operation: "ops.DropTableCommentOp"
+) -> None:
+    table = operation.to_table(operations.migration_context)
+    operations.impl.drop_table_comment(table)
+
+
+@Operations.implementation_for(ops.AddColumnOp)
+def add_column(operations: "Operations", operation: "ops.AddColumnOp") -> None:
+    table_name = operation.table_name
+    column = operation.column
+    schema = operation.schema
+    kw = operation.kw
+
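+    # operate on a copy if the Column is already assigned to a Table,
+    # leaving the user's original construct unmodified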
+    if column.table is not None:
+        column = _copy(column)
+
+    t = operations.schema_obj.table(table_name, column, schema=schema)
+    operations.impl.add_column(table_name, column, schema=schema, **kw)
+
+    for constraint in t.constraints:
+        if not isinstance(constraint, sa_schema.PrimaryKeyConstraint):
+            operations.impl.add_constraint(constraint)
+    for index in t.indexes:
+        operations.impl.create_index(index)
+
+    with_comment = (
+        operations.impl.dialect.supports_comments
+        and not operations.impl.dialect.inline_comments
+    )
+    comment = column.comment
+    if comment and with_comment:
+        operations.impl.create_column_comment(column)
+
+
+@Operations.implementation_for(ops.AddConstraintOp)
+def create_constraint(
+    operations: "Operations", operation: "ops.AddConstraintOp"
+) -> None:
+    operations.impl.add_constraint(
+        operation.to_constraint(operations.migration_context)
+    )
+
+
+@Operations.implementation_for(ops.DropConstraintOp)
+def drop_constraint(
+    operations: "Operations", operation: "ops.DropConstraintOp"
+) -> None:
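+    # build a placeholder constraint from just the name, table and type,
+    # allowing DROP CONSTRAINT to be emitted without reflecting the real
+    # constraint from the database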
+    operations.impl.drop_constraint(
+        operations.schema_obj.generic_constraint(
+            operation.constraint_name,
+            operation.table_name,
+            operation.constraint_type,
+            schema=operation.schema,
+        )
+    )
+
+
+@Operations.implementation_for(ops.BulkInsertOp)
+def bulk_insert(
+    operations: "Operations", operation: "ops.BulkInsertOp"
+) -> None:
+    operations.impl.bulk_insert(  # type: ignore[union-attr]
+        operation.table, operation.rows, multiinsert=operation.multiinsert
+    )
+
+
+@Operations.implementation_for(ops.ExecuteSQLOp)
+def execute_sql(
+    operations: "Operations", operation: "ops.ExecuteSQLOp"
+) -> None:
+    operations.migration_context.impl.execute(
+        operation.sqltext, execution_options=operation.execution_options
+    )
diff --git a/.venv/lib/python3.12/site-packages/alembic/py.typed b/.venv/lib/python3.12/site-packages/alembic/py.typed
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/py.typed
diff --git a/.venv/lib/python3.12/site-packages/alembic/runtime/__init__.py b/.venv/lib/python3.12/site-packages/alembic/runtime/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/runtime/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/alembic/runtime/environment.py b/.venv/lib/python3.12/site-packages/alembic/runtime/environment.py
new file mode 100644
index 00000000..1ff71eef
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/runtime/environment.py
@@ -0,0 +1,1051 @@
+from __future__ import annotations
+
+from typing import Any
+from typing import Callable
+from typing import Collection
+from typing import Dict
+from typing import List
+from typing import Mapping
+from typing import MutableMapping
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import TextIO
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy.sql.schema import Column
+from sqlalchemy.sql.schema import FetchedValue
+from typing_extensions import ContextManager
+from typing_extensions import Literal
+
+from .migration import _ProxyTransaction
+from .migration import MigrationContext
+from .. import util
+from ..operations import Operations
+from ..script.revision import _GetRevArg
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine import URL
+    from sqlalchemy.engine.base import Connection
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .migration import MigrationInfo
+    from ..autogenerate.api import AutogenContext
+    from ..config import Config
+    from ..ddl import DefaultImpl
+    from ..operations.ops import MigrationScript
+    from ..script.base import ScriptDirectory
+
+_RevNumber = Optional[Union[str, Tuple[str, ...]]]
+
+ProcessRevisionDirectiveFn = Callable[
+    [MigrationContext, _GetRevArg, List["MigrationScript"]], None
+]
+
+RenderItemFn = Callable[
+    [str, Any, "AutogenContext"], Union[str, Literal[False]]
+]
+
+NameFilterType = Literal[
+    "schema",
+    "table",
+    "column",
+    "index",
+    "unique_constraint",
+    "foreign_key_constraint",
+]
+NameFilterParentNames = MutableMapping[
+    Literal["schema_name", "table_name", "schema_qualified_table_name"],
+    Optional[str],
+]
+IncludeNameFn = Callable[
+    [Optional[str], NameFilterType, NameFilterParentNames], bool
+]
+
+IncludeObjectFn = Callable[
+    [
+        "SchemaItem",
+        Optional[str],
+        NameFilterType,
+        bool,
+        Optional["SchemaItem"],
+    ],
+    bool,
+]
+
+OnVersionApplyFn = Callable[
+    [MigrationContext, "MigrationInfo", Collection[Any], Mapping[str, Any]],
+    None,
+]
+
+CompareServerDefault = Callable[
+    [
+        MigrationContext,
+        "Column[Any]",
+        "Column[Any]",
+        Optional[str],
+        Optional[FetchedValue],
+        Optional[str],
+    ],
+    Optional[bool],
+]
+
+CompareType = Callable[
+    [
+        MigrationContext,
+        "Column[Any]",
+        "Column[Any]",
+        "TypeEngine[Any]",
+        "TypeEngine[Any]",
+    ],
+    Optional[bool],
+]
+
+
+class EnvironmentContext(util.ModuleClsProxy):
+    """A configurational facade made available in an ``env.py`` script.
+
+    The :class:`.EnvironmentContext` acts as a *facade* to the more
+    nuts-and-bolts objects of :class:`.MigrationContext` as well as certain
+    aspects of :class:`.Config`,
+    within the context of the ``env.py`` script that is invoked by
+    most Alembic commands.
+
+    :class:`.EnvironmentContext` is normally instantiated
+    when a command in :mod:`alembic.command` is run.  It then makes
+    itself available in the ``alembic.context`` module for the scope
+    of the command.   From within an ``env.py`` script, the current
+    :class:`.EnvironmentContext` is available by importing this module.
+
+    :class:`.EnvironmentContext` also supports programmatic usage.
+    At this level, it acts as a Python context manager, that is, it is
+    intended to be used with the
+    ``with:`` statement.  A typical use of :class:`.EnvironmentContext`::
+
+        from alembic.config import Config
+        from alembic.script import ScriptDirectory
+
+        config = Config()
+        config.set_main_option("script_location", "myapp:migrations")
+        script = ScriptDirectory.from_config(config)
+
+
+        def my_function(rev, context):
+            '''do something with revision "rev", which
+            will be the current database revision,
+            and "context", which is the MigrationContext
+            that the env.py will create'''
+
+
+        with EnvironmentContext(
+            config,
+            script,
+            fn=my_function,
+            as_sql=False,
+            starting_rev="base",
+            destination_rev="head",
+            tag="sometag",
+        ):
+            script.run_env()
+
+    The above script will invoke the ``env.py`` script
+    within the migration environment.  If and when ``env.py``
+    calls :meth:`.MigrationContext.run_migrations`, the
+    ``my_function()`` function above will be called
+    by the :class:`.MigrationContext`, given the context
+    itself as well as the current revision in the database.
+
+    .. note::
+
+        For most API usages other than full blown
+        invocation of migration scripts, the :class:`.MigrationContext`
+        and :class:`.ScriptDirectory` objects can be created and
+        used directly.  The :class:`.EnvironmentContext` object
+        is *only* needed when you need to actually invoke the
+        ``env.py`` module present in the migration environment.
+
+    """
+
+    _migration_context: Optional[MigrationContext] = None
+
+    config: Config = None  # type:ignore[assignment]
+    """An instance of :class:`.Config` representing the
+    configuration file contents as well as other variables
+    set programmatically within it."""
+
+    script: ScriptDirectory = None  # type:ignore[assignment]
+    """An instance of :class:`.ScriptDirectory` which provides
+    programmatic access to version files within the ``versions/``
+    directory.
+
+    """
+
+    def __init__(
+        self, config: Config, script: ScriptDirectory, **kw: Any
+    ) -> None:
+        r"""Construct a new :class:`.EnvironmentContext`.
+
+        :param config: a :class:`.Config` instance.
+        :param script: a :class:`.ScriptDirectory` instance.
+        :param \**kw: keyword options that will be ultimately
+         passed along to the :class:`.MigrationContext` when
+         :meth:`.EnvironmentContext.configure` is called.
+
+        """
+        self.config = config
+        self.script = script
+        self.context_opts = kw
+
+    def __enter__(self) -> EnvironmentContext:
+        """Establish a context which provides a
+        :class:`.EnvironmentContext` object to
+        env.py scripts.
+
+        The :class:`.EnvironmentContext` will
+        be made available as ``from alembic import context``.
+
+        """
+        self._install_proxy()
+        return self
+
+    def __exit__(self, *arg: Any, **kw: Any) -> None:
+        self._remove_proxy()
+
+    def is_offline_mode(self) -> bool:
+        """Return True if the current migrations environment
+        is running in "offline mode".
+
+        This is ``True`` or ``False`` depending
+        on the ``--sql`` flag passed.
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
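+        E.g., the standard ``env.py`` template dispatches on this flag::
+
+            if context.is_offline_mode():
+                run_migrations_offline()
+            else:
+                run_migrations_online()
+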
+        """
+        return self.context_opts.get("as_sql", False)  # type: ignore[no-any-return]  # noqa: E501
+
+    def is_transactional_ddl(self) -> bool:
+        """Return True if the context is configured to expect a
+        transactional DDL capable backend.
+
+        This defaults to the type of database in use, and
+        can be overridden by the ``transactional_ddl`` argument
+        to :meth:`.configure`
+
+        This function requires that a :class:`.MigrationContext`
+        has first been made available via :meth:`.configure`.
+
+        """
+        return self.get_context().impl.transactional_ddl
+
+    def requires_connection(self) -> bool:
+        return not self.is_offline_mode()
+
+    def get_head_revision(self) -> _RevNumber:
+        """Return the hex identifier of the 'head' script revision.
+
+        If the script directory has multiple heads, this
+        method raises a :class:`.CommandError`;
+        :meth:`.EnvironmentContext.get_head_revisions` should be preferred.
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        .. seealso:: :meth:`.EnvironmentContext.get_head_revisions`
+
+        """
+        return self.script.as_revision_number("head")
+
+    def get_head_revisions(self) -> _RevNumber:
+        """Return the hex identifier of the 'heads' script revision(s).
+
+        This returns a tuple containing the version number of all
+        heads in the script directory.
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        """
+        return self.script.as_revision_number("heads")
+
+    def get_starting_revision_argument(self) -> _RevNumber:
+        """Return the 'starting revision' argument,
+        if the revision was passed using ``start:end``.
+
+        This is only meaningful in "offline" mode.
+        Returns ``None`` if no value is available
+        or was configured.
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        """
+        if self._migration_context is not None:
+            return self.script.as_revision_number(
+                self.get_context()._start_from_rev
+            )
+        elif "starting_rev" in self.context_opts:
+            return self.script.as_revision_number(
+                self.context_opts["starting_rev"]
+            )
+        else:
+            # this should raise only in the case that a command
+            # is being run where the "starting rev" is never applicable;
+            # this is to catch scripts which rely upon this in
+            # non-sql mode or similar
+            raise util.CommandError(
+                "No starting revision argument is available."
+            )
+
+    def get_revision_argument(self) -> _RevNumber:
+        """Get the 'destination' revision argument.
+
+        This is typically the argument passed to the
+        ``upgrade`` or ``downgrade`` command.
+
+        If it was specified as ``head``, the actual
+        version number is returned; if specified
+        as ``base``, ``None`` is returned.
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        """
+        return self.script.as_revision_number(
+            self.context_opts["destination_rev"]
+        )
+
+    def get_tag_argument(self) -> Optional[str]:
+        """Return the value passed for the ``--tag`` argument, if any.
+
+        The ``--tag`` argument is not used directly by Alembic,
+        but is available for custom ``env.py`` configurations that
+        wish to use it; particularly for offline generation scripts
+        that wish to generate tagged filenames.
+
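+        For example, an offline-mode ``env.py`` might use the tag to name
+        its output file (a sketch; ``url`` stands in for the configured
+        database URL)::
+
+            tag = context.get_tag_argument()
+            buffer = open(f"migration_{tag}.sql", "w") if tag else None
+            context.configure(url=url, output_buffer=buffer)
+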
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        .. seealso::
+
+            :meth:`.EnvironmentContext.get_x_argument` - a newer and more
+            open ended system of extending ``env.py`` scripts via the command
+            line.
+
+        """
+        return self.context_opts.get("tag", None)  # type: ignore[no-any-return]  # noqa: E501
+
+    @overload
+    def get_x_argument(self, as_dictionary: Literal[False]) -> List[str]: ...
+
+    @overload
+    def get_x_argument(
+        self, as_dictionary: Literal[True]
+    ) -> Dict[str, str]: ...
+
+    @overload
+    def get_x_argument(
+        self, as_dictionary: bool = ...
+    ) -> Union[List[str], Dict[str, str]]: ...
+
+    def get_x_argument(
+        self, as_dictionary: bool = False
+    ) -> Union[List[str], Dict[str, str]]:
+        """Return the value(s) passed for the ``-x`` argument, if any.
+
+        The ``-x`` argument is an open ended flag that allows any user-defined
+        value or values to be passed on the command line, then available
+        here for consumption by a custom ``env.py`` script.
+
+        The return value is a list, returned directly from the ``argparse``
+        structure.  If ``as_dictionary=True`` is passed, the ``x`` arguments
+        are parsed using ``key=value`` format into a dictionary that is
+        then returned. If an argument contains no ``=``, its value is an
+        empty string.
+
+        .. versionchanged:: 1.13.1 Support ``as_dictionary=True`` when
+           arguments are passed without the ``=`` symbol.
+
+        For example, to support passing a database URL on the command line,
+        the standard ``env.py`` script can be modified like this::
+
+            cmd_line_url = context.get_x_argument(
+                as_dictionary=True).get('dbname')
+            if cmd_line_url:
+                engine = create_engine(cmd_line_url)
+            else:
+                engine = engine_from_config(
+                        config.get_section(config.config_ini_section),
+                        prefix='sqlalchemy.',
+                        poolclass=pool.NullPool)
+
+        This then takes effect by running the ``alembic`` script as::
+
+            alembic -x dbname=postgresql://user:pass@host/dbname upgrade head
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        .. seealso::
+
+            :meth:`.EnvironmentContext.get_tag_argument`
+
+            :attr:`.Config.cmd_opts`
+
+        """
+        if self.config.cmd_opts is not None:
+            value = self.config.cmd_opts.x or []
+        else:
+            value = []
+        if as_dictionary:
+            dict_value = {}
+            for arg in value:
+                x_key, _, x_value = arg.partition("=")
+                dict_value[x_key] = x_value
+            value = dict_value
+
+        return value
+
+    def configure(
+        self,
+        connection: Optional[Connection] = None,
+        url: Optional[Union[str, URL]] = None,
+        dialect_name: Optional[str] = None,
+        dialect_opts: Optional[Dict[str, Any]] = None,
+        transactional_ddl: Optional[bool] = None,
+        transaction_per_migration: bool = False,
+        output_buffer: Optional[TextIO] = None,
+        starting_rev: Optional[str] = None,
+        tag: Optional[str] = None,
+        template_args: Optional[Dict[str, Any]] = None,
+        render_as_batch: bool = False,
+        target_metadata: Union[MetaData, Sequence[MetaData], None] = None,
+        include_name: Optional[IncludeNameFn] = None,
+        include_object: Optional[IncludeObjectFn] = None,
+        include_schemas: bool = False,
+        process_revision_directives: Optional[
+            ProcessRevisionDirectiveFn
+        ] = None,
+        compare_type: Union[bool, CompareType] = True,
+        compare_server_default: Union[bool, CompareServerDefault] = False,
+        render_item: Optional[RenderItemFn] = None,
+        literal_binds: bool = False,
+        upgrade_token: str = "upgrades",
+        downgrade_token: str = "downgrades",
+        alembic_module_prefix: str = "op.",
+        sqlalchemy_module_prefix: str = "sa.",
+        user_module_prefix: Optional[str] = None,
+        on_version_apply: Optional[OnVersionApplyFn] = None,
+        **kw: Any,
+    ) -> None:
+        """Configure a :class:`.MigrationContext` within this
+        :class:`.EnvironmentContext` which will provide database
+        connectivity and other configuration to a series of
+        migration scripts.
+
+        Many methods on :class:`.EnvironmentContext` require that
+        this method has been called in order to function, as they
+        ultimately need to have database access or at least access
+        to the dialect in use.  Those which do are documented as such.
+
+        The important thing needed by :meth:`.configure` is a
+        means to determine what kind of database dialect is in use.
+        An actual connection to that database is needed only if
+        the :class:`.MigrationContext` is to be used in
+        "online" mode.
+
+        If the :meth:`.is_offline_mode` function returns ``True``,
+        then no connection is needed here.  Otherwise, the
+        ``connection`` parameter should be present as an
+        instance of :class:`sqlalchemy.engine.Connection`.
+
+        This function is typically called from the ``env.py``
+        script within a migration environment.  It can be called
+        multiple times within a single invocation.  The most recent
+        :class:`~sqlalchemy.engine.Connection`
+        for which it was called is the one that will be operated upon
+        by the next call to :meth:`.run_migrations`.
+
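+        E.g., a minimal "online" configuration as used in the standard
+        ``env.py`` template (a sketch; ``connectable`` is an
+        :class:`~sqlalchemy.engine.Engine`)::
+
+            with connectable.connect() as connection:
+                context.configure(
+                    connection=connection,
+                    target_metadata=target_metadata,
+                )
+                with context.begin_transaction():
+                    context.run_migrations()
+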
+        General parameters:
+
+        :param connection: a :class:`~sqlalchemy.engine.Connection`
+         to use
+         for SQL execution in "online" mode.  When present, is also
+         used to determine the type of dialect in use.
+        :param url: a string database url, or a
+         :class:`sqlalchemy.engine.url.URL` object.
+         The type of dialect to be used will be derived from this if
+         ``connection`` is not passed.
+        :param dialect_name: string name of a dialect, such as
+         "postgresql", "mssql", etc.
+         The type of dialect to be used will be derived from this if
+         ``connection`` and ``url`` are not passed.
+        :param dialect_opts: dictionary of options to be passed to dialect
+         constructor.
+        :param transactional_ddl: Force the usage of "transactional"
+         DDL on or off;
+         this otherwise defaults to whether or not the dialect in
+         use supports it.
+        :param transaction_per_migration: if True, nest each migration script
+         in a transaction rather than the full series of migrations to
+         run.
+        :param output_buffer: a file-like object that will be used
+         for textual output
+         when the ``--sql`` option is used to generate SQL scripts.
+         Defaults to
+         ``sys.stdout`` if not passed here and also not present on
+         the :class:`.Config`
+         object.  The value here overrides that of the :class:`.Config`
+         object.
+        :param output_encoding: when using ``--sql`` to generate SQL
+         scripts, apply this encoding to the string output.
+        :param literal_binds: when using ``--sql`` to generate SQL
+         scripts, pass through the ``literal_binds`` flag to the compiler
+         so that any literal values that would ordinarily be bound
+         parameters are converted to plain strings.
+
+         .. warning:: Dialects can typically only handle simple datatypes
+            like strings and numbers for auto-literal generation.  Datatypes
+            like dates, intervals, and others may still require manual
+            formatting, typically using :meth:`.Operations.inline_literal`.
+
+         .. note:: the ``literal_binds`` flag is ignored on SQLAlchemy
+            versions prior to 0.8 where this feature is not supported.
+
+         .. seealso::
+
+            :meth:`.Operations.inline_literal`
+
+        :param starting_rev: Override the "starting revision" argument
+         when using ``--sql`` mode.
+        :param tag: a string tag for usage by custom ``env.py`` scripts.
+         Set via the ``--tag`` option, can be overridden here.
+        :param template_args: dictionary of template arguments which
+         will be added to the template argument environment when
+         running the "revision" command.   Note that the script environment
+         is only run within the "revision" command if the --autogenerate
+         option is used, or if the option "revision_environment=true"
+         is present in the alembic.ini file.
+
+        :param version_table: The name of the Alembic version table.
+         The default is ``'alembic_version'``.
+        :param version_table_schema: Optional schema to place version
+         table within.
+        :param version_table_pk: boolean, whether the Alembic version table
+         should use a primary key constraint for the "value" column; this
+         only takes effect when the table is first created.
+         Defaults to ``True``; setting to ``False`` should not be necessary
+         and is here for backwards compatibility reasons.
+        :param on_version_apply: a callable or collection of callables to be
+            run for each migration step.
+            The callables will be run in the order they are given, once for
+            each migration step, after the respective operation has been
+            applied but before its transaction is finalized.
+            Each callable accepts no positional arguments and the following
+            keyword arguments:
+
+            * ``ctx``: the :class:`.MigrationContext` running the migration,
+            * ``step``: a :class:`.MigrationInfo` representing the
+              step currently being applied,
+            * ``heads``: a collection of version strings representing the
+              current heads,
+            * ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`.
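+
+            E.g., a minimal sketch of such a listener (the name
+            ``report_step`` is illustrative only)::
+
+                def report_step(*, ctx, step, heads, run_args):
+                    print("applied %s; heads now %s" % (step, heads))
+
+                context.configure(
+                    # ...
+                    on_version_apply=report_step,
+                )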
+
+        Parameters specific to the autogenerate feature, when
+        ``alembic revision`` is run with the ``--autogenerate`` feature:
+
+        :param target_metadata: a :class:`sqlalchemy.schema.MetaData`
+         object, or a sequence of :class:`~sqlalchemy.schema.MetaData`
+         objects, that will be consulted during autogeneration.
+         The tables present in each :class:`~sqlalchemy.schema.MetaData`
+         will be compared against
+         what is locally available on the target
+         :class:`~sqlalchemy.engine.Connection`
+         to produce candidate upgrade/downgrade operations.
+        :param compare_type: Indicates type comparison behavior during
+         an autogenerate
+         operation.  Defaults to ``True`` turning on type comparison, which
+         has good accuracy on most backends.   See :ref:`compare_types`
+         for an example as well as information on other type
+         comparison options. Set to ``False`` to disable type
+         comparison. A callable can also be passed to provide custom type
+         comparison; see :ref:`compare_types` for additional details.
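+
+         When a callable is used, it receives the context, the inspected
+         and metadata columns, and the inspected and metadata types, in
+         that order; a sketch::
+
+            def my_compare_type(context, inspected_column,
+                        metadata_column, inspected_type, metadata_type):
+                # return True if the types are different, False if not,
+                # or None to allow the default implementation to
+                # compare these types
+                return None
+
+            context.configure(
+                # ...
+                compare_type = my_compare_type
+            )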
+
+         .. versionchanged:: 1.12.0 The default value of
+            :paramref:`.EnvironmentContext.configure.compare_type` has been
+            changed to ``True``.
+
+         .. seealso::
+
+            :ref:`compare_types`
+
+            :paramref:`.EnvironmentContext.configure.compare_server_default`
+
+        :param compare_server_default: Indicates server default comparison
+         behavior during
+         an autogenerate operation.  Defaults to ``False`` which disables
+         server default
+         comparison.  Set to  ``True`` to turn on server default comparison,
+         which has
+         varied accuracy depending on backend.
+
+         To customize server default comparison behavior, a callable may
+         be specified
+         which can filter server default comparisons during an
+         autogenerate operation.   The format of this
+         callable is::
+
+            def my_compare_server_default(context, inspected_column,
+                        metadata_column, inspected_default, metadata_default,
+                        rendered_metadata_default):
+                # return True if the defaults are different,
+                # False if not, or None to allow the default implementation
+                # to compare these defaults
+                return None
+
+            context.configure(
+                # ...
+                compare_server_default = my_compare_server_default
+            )
+
+         ``inspected_column`` is a dictionary structure as returned by
+         :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
+         ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
+         the local model environment.
+
+         A return value of ``None`` indicates to allow default server default
+         comparison
+         to proceed.  Note that some backends such as PostgreSQL actually
+         execute
+         the two defaults on the database side to compare for equivalence.
+
+         .. seealso::
+
+            :paramref:`.EnvironmentContext.configure.compare_type`
+
+        :param include_name: A callable function which is given
+         the chance to return ``True`` or ``False`` for any database reflected
+         object based on its name, including database schema names when
+         the :paramref:`.EnvironmentContext.configure.include_schemas` flag
+         is set to ``True``.
+
+         The function accepts the following positional arguments:
+
+         * ``name``: the name of the object, such as schema name or table name.
+           Will be ``None`` when indicating the default schema name of the
+           database connection.
+         * ``type``: a string describing the type of object; currently
+           ``"schema"``, ``"table"``, ``"column"``, ``"index"``,
+           ``"unique_constraint"``, or ``"foreign_key_constraint"``
+         * ``parent_names``: a dictionary of "parent" object names, that are
+           relative to the name being given.  Keys in this dictionary may
+           include:  ``"schema_name"``, ``"table_name"`` or
+           ``"schema_qualified_table_name"``.
+
+         E.g.::
+
+            def include_name(name, type_, parent_names):
+                if type_ == "schema":
+                    return name in ["schema_one", "schema_two"]
+                else:
+                    return True
+
+            context.configure(
+                # ...
+                include_schemas = True,
+                include_name = include_name
+            )
+
+         .. seealso::
+
+            :ref:`autogenerate_include_hooks`
+
+            :paramref:`.EnvironmentContext.configure.include_object`
+
+            :paramref:`.EnvironmentContext.configure.include_schemas`
+
+
+        :param include_object: A callable function which is given
+         the chance to return ``True`` or ``False`` for any object,
+         indicating if the given object should be considered in the
+         autogenerate sweep.
+
+         The function accepts the following positional arguments:
+
+         * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such
+           as a :class:`~sqlalchemy.schema.Table`,
+           :class:`~sqlalchemy.schema.Column`,
+           :class:`~sqlalchemy.schema.Index`,
+           :class:`~sqlalchemy.schema.UniqueConstraint`,
+           or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object
+         * ``name``: the name of the object. This is typically available
+           via ``object.name``.
+         * ``type``: a string describing the type of object; currently
+           ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``,
+           or ``"foreign_key_constraint"``
+         * ``reflected``: ``True`` if the given object was produced based on
+           table reflection, ``False`` if it's from a local :class:`.MetaData`
+           object.
+         * ``compare_to``: the object being compared against, if available,
+           else ``None``.
+
+         E.g.::
+
+            def include_object(object, name, type_, reflected, compare_to):
+                if (type_ == "column" and
+                    not reflected and
+                    object.info.get("skip_autogenerate", False)):
+                    return False
+                else:
+                    return True
+
+            context.configure(
+                # ...
+                include_object = include_object
+            )
+
+         For the use case of omitting specific schemas from a target database
+         when :paramref:`.EnvironmentContext.configure.include_schemas` is
+         set to ``True``, the :attr:`~sqlalchemy.schema.Table.schema`
+         attribute can be checked for each :class:`~sqlalchemy.schema.Table`
+         object passed to the hook, however it is much more efficient
+         to filter on schemas before reflection of objects takes place
+         using the :paramref:`.EnvironmentContext.configure.include_name`
+         hook.
+
+         .. seealso::
+
+            :ref:`autogenerate_include_hooks`
+
+            :paramref:`.EnvironmentContext.configure.include_name`
+
+            :paramref:`.EnvironmentContext.configure.include_schemas`
+
+        :param render_as_batch: if True, commands which alter elements
+         within a table will be placed under a ``with batch_alter_table():``
+         directive, so that batch migrations will take place.
+
+         .. seealso::
+
+            :ref:`batch_migrations`
+
+        :param include_schemas: If True, autogenerate will scan across
+         all schemas located by the SQLAlchemy
+         :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names`
+         method, and include all differences in tables found across all
+         those schemas.  When using this option, you may want to also
+         use the :paramref:`.EnvironmentContext.configure.include_name`
+         parameter to specify a callable which
+         can filter the tables/schemas that get included.
+
+         .. seealso::
+
+            :ref:`autogenerate_include_hooks`
+
+            :paramref:`.EnvironmentContext.configure.include_name`
+
+            :paramref:`.EnvironmentContext.configure.include_object`
+
+        :param render_item: Callable that can be used to override how
+         any schema item, i.e. column, constraint, type,
+         etc., is rendered for autogenerate.  The callable receives a
+         string describing the type of object, the object, and
+         the autogen context.  If it returns False, the
+         default rendering method will be used.  If it returns None,
+         the item will not be rendered in the context of a Table
+         construct; that is, the hook can be used to skip columns or
+         constraints within ``op.create_table()``::
+
+            def my_render_column(type_, col, autogen_context):
+                if type_ == "column" and isinstance(col, MySpecialCol):
+                    return repr(col)
+                else:
+                    return False
+
+            context.configure(
+                # ...
+                render_item = my_render_column
+            )
+
+         Available values for the type string include: ``"column"``,
+         ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``,
+         ``"type"``, ``"server_default"``.
+
+         .. seealso::
+
+            :ref:`autogen_render_types`
+
+        :param upgrade_token: When autogenerate completes, the text of the
+         candidate upgrade operations will be present in this template
+         variable when ``script.py.mako`` is rendered.  Defaults to
+         ``upgrades``.
+        :param downgrade_token: When autogenerate completes, the text of the
+         candidate downgrade operations will be present in this
+         template variable when ``script.py.mako`` is rendered.  Defaults to
+         ``downgrades``.
+
+        :param alembic_module_prefix: When autogenerate refers to Alembic
+         :mod:`alembic.operations` constructs, this prefix will be used
+         (e.g. ``op.create_table``).  Defaults to "``op.``".
+         Can be ``None`` to indicate no prefix.
+
+        :param sqlalchemy_module_prefix: When autogenerate refers to
+         SQLAlchemy
+         :class:`~sqlalchemy.schema.Column` or type classes, this prefix
+         will be used
+         (i.e. ``sa.Column("somename", sa.Integer)``)  Defaults to "``sa.``".
+         Can be ``None`` to indicate no prefix.
+         Note that when dialect-specific types are rendered, autogenerate
+         will render them using the dialect module name, i.e. ``mssql.BIT()``,
+         ``postgresql.UUID()``.
+
+        :param user_module_prefix: When autogenerate refers to a SQLAlchemy
+         type (e.g. :class:`.TypeEngine`) where the module name is not
+         under the ``sqlalchemy`` namespace, this prefix will be used
+         within autogenerate.  If left at its default of
+         ``None``, the ``__module__`` attribute of the type is used to
+         render the import module.   It's a good practice to set this
+         and to have all custom types be available from a fixed module space,
+         in order to future-proof migration files against reorganizations
+         in modules.
+
+         .. seealso::
+
+            :ref:`autogen_module_prefix`
+
+        :param process_revision_directives: a callable function that will
+         be passed a structure representing the end result of an autogenerate
+         or plain "revision" operation, which can be manipulated to affect
+         how the ``alembic revision`` command ultimately outputs new
+         revision scripts.   The structure of the callable is::
+
+            def process_revision_directives(context, revision, directives):
+                pass
+
+         The ``directives`` parameter is a Python list containing
+         a single :class:`.MigrationScript` directive, which represents
+         the revision file to be generated.    This list as well as its
+         contents may be freely modified to produce any set of commands.
+         The section :ref:`customizing_revision` shows an example of
+         doing this.  The ``context`` parameter is the
+         :class:`.MigrationContext` in use,
+         and ``revision`` is a tuple of revision identifiers representing the
+         current revision of the database.
+
+         The callable is always invoked when the ``--autogenerate``
+         option is passed to ``alembic revision``.  If ``--autogenerate``
+         is not passed, the callable is invoked only if the
+         ``revision_environment`` variable is set to True in the Alembic
+         configuration, in which case the given ``directives`` collection
+         will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps`
+         collections for ``.upgrade_ops`` and ``.downgrade_ops``.  The
+         ``--autogenerate`` option itself can be inferred by inspecting
+         ``context.config.cmd_opts.autogenerate``.
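+
+         E.g., a sketch of a hook that skips writing a revision file
+         when autogenerate detects no changes::
+
+            def process_revision_directives(context, revision, directives):
+                if context.config.cmd_opts.autogenerate:
+                    script = directives[0]
+                    if script.upgrade_ops.is_empty():
+                        directives[:] = []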
+
+         The callable function may optionally be an instance of
+         a :class:`.Rewriter` object.  This is a helper object that
+         assists in the production of autogenerate-stream rewriter functions.
+
+         .. seealso::
+
+             :ref:`customizing_revision`
+
+             :ref:`autogen_rewriter`
+
+             :paramref:`.command.revision.process_revision_directives`
+
+        Parameters specific to individual backends:
+
+        :param mssql_batch_separator: The "batch separator" which will
+         be placed between each statement when generating offline SQL Server
+         migrations.  Defaults to ``GO``.  Note this is in addition to the
+         customary semicolon ``;`` at the end of each statement; SQL Server
+         considers the "batch separator" to denote the end of an
+         individual statement execution, and cannot group certain
+         dependent operations in one step.
+        :param oracle_batch_separator: The "batch separator" which will
+         be placed between each statement when generating offline
+         Oracle migrations.  Defaults to ``/``.  Oracle doesn't add a
+         semicolon between statements like most other backends.
+
+        """
+        opts = self.context_opts
+        if transactional_ddl is not None:
+            opts["transactional_ddl"] = transactional_ddl
+        if output_buffer is not None:
+            opts["output_buffer"] = output_buffer
+        elif self.config.output_buffer is not None:
+            opts["output_buffer"] = self.config.output_buffer
+        if starting_rev:
+            opts["starting_rev"] = starting_rev
+        if tag:
+            opts["tag"] = tag
+        if template_args and "template_args" in opts:
+            opts["template_args"].update(template_args)
+        opts["transaction_per_migration"] = transaction_per_migration
+        opts["target_metadata"] = target_metadata
+        opts["include_name"] = include_name
+        opts["include_object"] = include_object
+        opts["include_schemas"] = include_schemas
+        opts["render_as_batch"] = render_as_batch
+        opts["upgrade_token"] = upgrade_token
+        opts["downgrade_token"] = downgrade_token
+        opts["sqlalchemy_module_prefix"] = sqlalchemy_module_prefix
+        opts["alembic_module_prefix"] = alembic_module_prefix
+        opts["user_module_prefix"] = user_module_prefix
+        opts["literal_binds"] = literal_binds
+        opts["process_revision_directives"] = process_revision_directives
+        opts["on_version_apply"] = util.to_tuple(on_version_apply, default=())
+
+        if render_item is not None:
+            opts["render_item"] = render_item
+        opts["compare_type"] = compare_type
+        if compare_server_default is not None:
+            opts["compare_server_default"] = compare_server_default
+        opts["script"] = self.script
+
+        opts.update(kw)
+
+        self._migration_context = MigrationContext.configure(
+            connection=connection,
+            url=url,
+            dialect_name=dialect_name,
+            environment_context=self,
+            dialect_opts=dialect_opts,
+            opts=opts,
+        )
+
+    def run_migrations(self, **kw: Any) -> None:
+        """Run migrations as determined by the current command line
+        configuration
+        as well as versioning information present (or not) in the current
+        database connection (if one is present).
+
+        The function accepts optional ``**kw`` arguments.   If these are
+        passed, they are sent directly to the ``upgrade()`` and
+        ``downgrade()``
+        functions within each target revision file.   By modifying the
+        ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()``
+        functions accept arguments, parameters can be passed here so that
+        contextual information, usually information to identify a particular
+        database in use, can be passed from a custom ``env.py`` script
+        to the migration functions.
+
+        This function requires that a :class:`.MigrationContext` has
+        first been made available via :meth:`.configure`.
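+
+        E.g., assuming ``script.py.mako`` has been modified so that the
+        ``upgrade()`` and ``downgrade()`` functions accept an
+        ``engine_name`` keyword argument, ``env.py`` may pass it through
+        here::
+
+            context.run_migrations(engine_name="engine1")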
+
+        """
+        assert self._migration_context is not None
+        with Operations.context(self._migration_context):
+            self.get_context().run_migrations(**kw)
+
+    def execute(
+        self,
+        sql: Union[Executable, str],
+        execution_options: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """Execute the given SQL using the current change context.
+
+        The behavior of :meth:`.execute` is the same
+        as that of :meth:`.Operations.execute`.  Please see that
+        function's documentation for full detail including
+        caveats and limitations.
+
+        This function requires that a :class:`.MigrationContext` has
+        first been made available via :meth:`.configure`.
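+
+        E.g.::
+
+            context.execute("UPDATE some_table SET y = 2")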
+
+        """
+        self.get_context().execute(sql, execution_options=execution_options)
+
+    def static_output(self, text: str) -> None:
+        """Emit text directly to the "offline" SQL stream.
+
+        Typically this is for emitting comments that
+        start with --.  The statement is not treated
+        as a SQL execution, no ; or batch separator
+        is added, etc.
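+
+        E.g.::
+
+            context.static_output("-- a comment for the SQL output stream")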
+
+        """
+        self.get_context().impl.static_output(text)
+
+    def begin_transaction(
+        self,
+    ) -> Union[_ProxyTransaction, ContextManager[None, Optional[bool]]]:
+        """Return a context manager that will
+        enclose an operation within a "transaction",
+        as defined by the environment's offline
+        and transactional DDL settings.
+
+        e.g.::
+
+            with context.begin_transaction():
+                context.run_migrations()
+
+        :meth:`.begin_transaction` is intended to
+        "do the right thing" regardless of
+        calling context:
+
+        * If :meth:`.is_transactional_ddl` is ``False``,
+          returns a "do nothing" context manager
+          which otherwise produces no transactional
+          state or directives.
+        * If :meth:`.is_offline_mode` is ``True``,
+          returns a context manager that will
+          invoke the :meth:`.DefaultImpl.emit_begin`
+          and :meth:`.DefaultImpl.emit_commit`
+          methods, which will produce the string
+          directives ``BEGIN`` and ``COMMIT`` on
+          the output stream, as rendered by the
+          target backend (e.g. SQL Server would
+          emit ``BEGIN TRANSACTION``).
+        * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin`
+          on the current online connection, which
+          returns a :class:`sqlalchemy.engine.Transaction`
+          object.  This object demarcates a real
+          transaction and is itself a context manager,
+          which will roll back if an exception
+          is raised.
+
+        Note that a custom ``env.py`` script which
+        has more specific transactional needs can of course
+        manipulate the :class:`~sqlalchemy.engine.Connection`
+        directly to produce transactional state in "online"
+        mode.
+
+        """
+
+        return self.get_context().begin_transaction()
+
+    def get_context(self) -> MigrationContext:
+        """Return the current :class:`.MigrationContext` object.
+
+        If :meth:`.EnvironmentContext.configure` has not been
+        called yet, raises an exception.
+
+        """
+
+        if self._migration_context is None:
+            raise Exception("No context has been configured yet.")
+        return self._migration_context
+
+    def get_bind(self) -> Connection:
+        """Return the current 'bind'.
+
+        In "online" mode, this is the
+        :class:`sqlalchemy.engine.Connection` currently being used
+        to emit SQL to the database.
+
+        This function requires that a :class:`.MigrationContext`
+        has first been made available via :meth:`.configure`.
+
+        """
+        return self.get_context().bind  # type: ignore[return-value]
+
+    def get_impl(self) -> DefaultImpl:
+        return self.get_context().impl
diff --git a/.venv/lib/python3.12/site-packages/alembic/runtime/migration.py b/.venv/lib/python3.12/site-packages/alembic/runtime/migration.py
new file mode 100644
index 00000000..ac431a62
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/runtime/migration.py
@@ -0,0 +1,1391 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+from contextlib import contextmanager
+from contextlib import nullcontext
+import logging
+import sys
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Collection
+from typing import Dict
+from typing import Iterable
+from typing import Iterator
+from typing import List
+from typing import Optional
+from typing import Set
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import Column
+from sqlalchemy import literal_column
+from sqlalchemy import select
+from sqlalchemy.engine import Engine
+from sqlalchemy.engine import url as sqla_url
+from sqlalchemy.engine.strategies import MockEngineStrategy
+from typing_extensions import ContextManager
+
+from .. import ddl
+from .. import util
+from ..util import sqla_compat
+from ..util.compat import EncodedIO
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine import Dialect
+    from sqlalchemy.engine import URL
+    from sqlalchemy.engine.base import Connection
+    from sqlalchemy.engine.base import Transaction
+    from sqlalchemy.engine.mock import MockConnection
+    from sqlalchemy.sql import Executable
+
+    from .environment import EnvironmentContext
+    from ..config import Config
+    from ..script.base import Script
+    from ..script.base import ScriptDirectory
+    from ..script.revision import _RevisionOrBase
+    from ..script.revision import Revision
+    from ..script.revision import RevisionMap
+
+log = logging.getLogger(__name__)
+
+
+class _ProxyTransaction:
+    def __init__(self, migration_context: MigrationContext) -> None:
+        self.migration_context = migration_context
+
+    @property
+    def _proxied_transaction(self) -> Optional[Transaction]:
+        return self.migration_context._transaction
+
+    def rollback(self) -> None:
+        t = self._proxied_transaction
+        assert t is not None
+        t.rollback()
+        self.migration_context._transaction = None
+
+    def commit(self) -> None:
+        t = self._proxied_transaction
+        assert t is not None
+        t.commit()
+        self.migration_context._transaction = None
+
+    def __enter__(self) -> _ProxyTransaction:
+        return self
+
+    def __exit__(self, type_: Any, value: Any, traceback: Any) -> None:
+        if self._proxied_transaction is not None:
+            self._proxied_transaction.__exit__(type_, value, traceback)
+            self.migration_context._transaction = None
+
+
+class MigrationContext:
+    """Represent the database state made available to a migration
+    script.
+
+    :class:`.MigrationContext` is the front end to an actual
+    database connection, or alternatively a string output
+    stream given a particular database dialect,
+    from an Alembic perspective.
+
+    When inside the ``env.py`` script, the :class:`.MigrationContext`
+    is available via the
+    :meth:`.EnvironmentContext.get_context` method,
+    which is available at ``alembic.context``::
+
+        # from within env.py script
+        from alembic import context
+
+        migration_context = context.get_context()
+
+    For usage outside of an ``env.py`` script, such as for
+    utility routines that want to check the current version
+    in the database, the :meth:`.MigrationContext.configure`
+    method may be used to create new :class:`.MigrationContext` objects.
+    For example, to get at the current revision in the
+    database using :meth:`.MigrationContext.get_current_revision`::
+
+        # in any application, outside of an env.py script
+        from alembic.migration import MigrationContext
+        from sqlalchemy import create_engine
+
+        engine = create_engine("postgresql://mydatabase")
+        conn = engine.connect()
+
+        context = MigrationContext.configure(conn)
+        current_rev = context.get_current_revision()
+
+    The above context can also be used to produce
+    Alembic migration operations with an :class:`.Operations`
+    instance::
+
+        # in any application, outside of the normal Alembic environment
+        from alembic.operations import Operations
+
+        op = Operations(context)
+        op.alter_column("mytable", "somecolumn", nullable=True)
+
+    """
+
+    def __init__(
+        self,
+        dialect: Dialect,
+        connection: Optional[Connection],
+        opts: Dict[str, Any],
+        environment_context: Optional[EnvironmentContext] = None,
+    ) -> None:
+        self.environment_context = environment_context
+        self.opts = opts
+        self.dialect = dialect
+        self.script: Optional[ScriptDirectory] = opts.get("script")
+        as_sql: bool = opts.get("as_sql", False)
+        transactional_ddl = opts.get("transactional_ddl")
+        self._transaction_per_migration = opts.get(
+            "transaction_per_migration", False
+        )
+        self.on_version_apply_callbacks = opts.get("on_version_apply", ())
+        self._transaction: Optional[Transaction] = None
+
+        if as_sql:
+            self.connection = cast(
+                Optional["Connection"], self._stdout_connection(connection)
+            )
+            assert self.connection is not None
+            self._in_external_transaction = False
+        else:
+            self.connection = connection
+            self._in_external_transaction = (
+                sqla_compat._get_connection_in_transaction(connection)
+            )
+
+        self._migrations_fn: Optional[
+            Callable[..., Iterable[RevisionStep]]
+        ] = opts.get("fn")
+        self.as_sql = as_sql
+
+        self.purge = opts.get("purge", False)
+
+        if "output_encoding" in opts:
+            self.output_buffer = EncodedIO(
+                opts.get("output_buffer")
+                or sys.stdout,  # type:ignore[arg-type]
+                opts["output_encoding"],
+            )
+        else:
+            self.output_buffer = opts.get("output_buffer", sys.stdout)
+
+        self._user_compare_type = opts.get("compare_type", True)
+        self._user_compare_server_default = opts.get(
+            "compare_server_default", False
+        )
+        self.version_table = version_table = opts.get(
+            "version_table", "alembic_version"
+        )
+        self.version_table_schema = version_table_schema = opts.get(
+            "version_table_schema", None
+        )
+
+        self._start_from_rev: Optional[str] = opts.get("starting_rev")
+        self.impl = ddl.DefaultImpl.get_by_dialect(dialect)(
+            dialect,
+            self.connection,
+            self.as_sql,
+            transactional_ddl,
+            self.output_buffer,
+            opts,
+        )
+
+        self._version = self.impl.version_table_impl(
+            version_table=version_table,
+            version_table_schema=version_table_schema,
+            version_table_pk=opts.get("version_table_pk", True),
+        )
+
+        log.info("Context impl %s.", self.impl.__class__.__name__)
+        if self.as_sql:
+            log.info("Generating static SQL")
+        log.info(
+            "Will assume %s DDL.",
+            (
+                "transactional"
+                if self.impl.transactional_ddl
+                else "non-transactional"
+            ),
+        )
+
+    @classmethod
+    def configure(
+        cls,
+        connection: Optional[Connection] = None,
+        url: Optional[Union[str, URL]] = None,
+        dialect_name: Optional[str] = None,
+        dialect: Optional[Dialect] = None,
+        environment_context: Optional[EnvironmentContext] = None,
+        dialect_opts: Optional[Dict[str, str]] = None,
+        opts: Optional[Any] = None,
+    ) -> MigrationContext:
+        """Create a new :class:`.MigrationContext`.
+
+        This is a factory method usually called
+        by :meth:`.EnvironmentContext.configure`.
+
+        :param connection: a :class:`~sqlalchemy.engine.Connection`
+         to use for SQL execution in "online" mode.  When present,
+         is also used to determine the type of dialect in use.
+        :param url: a string database url, or a
+         :class:`sqlalchemy.engine.url.URL` object.
+         The type of dialect to be used will be derived from this if
+         ``connection`` is not passed.
+        :param dialect_name: string name of a dialect, such as
+         "postgresql", "mssql", etc.  The type of dialect to be used will be
+         derived from this if ``connection`` and ``url`` are not passed.
+        :param opts: dictionary of options.  Most other options
+         accepted by :meth:`.EnvironmentContext.configure` are passed via
+         this dictionary.
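+
+        E.g., a sketch of creating a context from only a dialect name,
+        with no database connection::
+
+            context = MigrationContext.configure(dialect_name="postgresql")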
+
+        """
+        if opts is None:
+            opts = {}
+        if dialect_opts is None:
+            dialect_opts = {}
+
+        if connection:
+            if isinstance(connection, Engine):
+                raise util.CommandError(
+                    "'connection' argument to configure() is expected "
+                    "to be a sqlalchemy.engine.Connection instance, "
+                    "got %r" % connection,
+                )
+
+            dialect = connection.dialect
+        elif url:
+            url_obj = sqla_url.make_url(url)
+            dialect = url_obj.get_dialect()(**dialect_opts)
+        elif dialect_name:
+            url_obj = sqla_url.make_url("%s://" % dialect_name)
+            dialect = url_obj.get_dialect()(**dialect_opts)
+        elif not dialect:
+            raise Exception("Connection, url, or dialect_name is required.")
+        assert dialect is not None
+        return MigrationContext(dialect, connection, opts, environment_context)
+
+    @contextmanager
+    def autocommit_block(self) -> Iterator[None]:
+        """Enter an "autocommit" block, for databases that support AUTOCOMMIT
+        isolation levels.
+
+        This special directive is intended to support the occasional database
+        DDL or system operation that specifically has to be run outside of
+        any kind of transaction block.   The PostgreSQL database platform
+        is the most common target for this style of operation, as many
+        of its DDL operations must be run outside of transaction blocks, even
+        though the database overall supports transactional DDL.
+
+        The method is used as a context manager within a migration script, by
+        calling on :meth:`.Operations.get_context` to retrieve the
+        :class:`.MigrationContext`, then invoking
+        :meth:`.MigrationContext.autocommit_block` using the ``with:``
+        statement::
+
+            def upgrade():
+                with op.get_context().autocommit_block():
+                    op.execute("ALTER TYPE mood ADD VALUE 'soso'")
+
+        Above, a PostgreSQL "ALTER TYPE..ADD VALUE" directive is emitted,
+        which must be run outside of a transaction block at the database level.
+        The :meth:`.MigrationContext.autocommit_block` method makes use of the
+        SQLAlchemy ``AUTOCOMMIT`` isolation level setting, which against the
+        psycopg2 DBAPI corresponds to the ``connection.autocommit`` setting,
+        to ensure that the database driver is not inside of a DBAPI level
+        transaction block.
+
+        .. warning::
+
+            As is necessary, **the database transaction preceding the block is
+            unconditionally committed**.  This means that the run of migrations
+            preceding the operation will be committed, before the overall
+            migration operation is complete.
+
+            It is recommended that when an application includes migrations
+            with "autocommit" blocks,
+            :paramref:`.EnvironmentContext.configure.transaction_per_migration`
+            be used so that the calling environment is tuned to expect short
+            per-file migrations whether or not one of them has an autocommit
+            block.
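+
+        E.g., in ``env.py``::
+
+            context.configure(
+                connection=connection,
+                transaction_per_migration=True,
+            )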
+
+
+        """
+        _in_connection_transaction = self._in_connection_transaction()
+
+        if self.impl.transactional_ddl and self.as_sql:
+            self.impl.emit_commit()
+
+        elif _in_connection_transaction:
+            assert self._transaction is not None
+
+            self._transaction.commit()
+            self._transaction = None
+
+        if not self.as_sql:
+            assert self.connection is not None
+            current_level = self.connection.get_isolation_level()
+            base_connection = self.connection
+
+            # in 1.3 and 1.4 non-future mode, the connection gets switched
+            # out.  we can use the base connection with the new mode
+            # except that it will not know it's in "autocommit" and will
+            # emit deprecation warnings when an autocommit action takes
+            # place.
+            self.connection = self.impl.connection = (
+                base_connection.execution_options(isolation_level="AUTOCOMMIT")
+            )
+
+            # sqlalchemy future mode will "autobegin" in any case, so take
+            # control of that "transaction" here
+            fake_trans: Optional[Transaction] = self.connection.begin()
+        else:
+            fake_trans = None
+        try:
+            yield
+        finally:
+            if not self.as_sql:
+                assert self.connection is not None
+                if fake_trans is not None:
+                    fake_trans.commit()
+                self.connection.execution_options(
+                    isolation_level=current_level
+                )
+                self.connection = self.impl.connection = base_connection
+
+            if self.impl.transactional_ddl and self.as_sql:
+                self.impl.emit_begin()
+
+            elif _in_connection_transaction:
+                assert self.connection is not None
+                self._transaction = self.connection.begin()
+
+    def begin_transaction(
+        self, _per_migration: bool = False
+    ) -> Union[_ProxyTransaction, ContextManager[None, Optional[bool]]]:
+        """Begin a logical transaction for migration operations.
+
+        This method is used within an ``env.py`` script to demarcate where
+        the outer "transaction" for a series of migrations begins.  Example::
+
+            def run_migrations_online():
+                connectable = create_engine(...)
+
+                with connectable.connect() as connection:
+                    context.configure(
+                        connection=connection, target_metadata=target_metadata
+                    )
+
+                    with context.begin_transaction():
+                        context.run_migrations()
+
+        Above, :meth:`.MigrationContext.begin_transaction` is used to demarcate
+        where the outer logical transaction occurs around the
+        :meth:`.MigrationContext.run_migrations` operation.
+
+        A "Logical" transaction means that the operation may or may not
+        correspond to a real database transaction.   If the target database
+        supports transactional DDL (or
+        :paramref:`.EnvironmentContext.configure.transactional_ddl` is true),
+        the :paramref:`.EnvironmentContext.configure.transaction_per_migration`
+        flag is not set, and the migration is against a real database
+        connection (as opposed to using "offline" ``--sql`` mode), a real
+        transaction will be started.   If ``--sql`` mode is in effect, the
+        operation would instead correspond to a string such as "BEGIN" being
+        emitted to the string output.
+
+        The returned object is a Python context manager that should only be
+        used in the context of a ``with:`` statement as indicated above.
+        The object has no other guaranteed API features present.
+
+        .. seealso::
+
+            :meth:`.MigrationContext.autocommit_block`
+
+        """
+
+        if self._in_external_transaction:
+            return nullcontext()
+
+        if self.impl.transactional_ddl:
+            transaction_now = _per_migration == self._transaction_per_migration
+        else:
+            transaction_now = _per_migration is True
+
+        if not transaction_now:
+            return nullcontext()
+
+        elif not self.impl.transactional_ddl:
+            assert _per_migration
+
+            if self.as_sql:
+                return nullcontext()
+            else:
+                # track our own notion of a "transaction block", which must be
+                # committed when complete.   Don't rely upon whether or not the
+                # SQLAlchemy connection reports as "in transaction"; this
+                # because SQLAlchemy future connection features autobegin
+                # behavior, so it may already be in a transaction from our
+                # emitting of queries like "has_version_table", etc. While we
+                # could track these operations as well, that leaves open the
+                # possibility of new operations or other things happening in
+                # the user environment that still may be triggering
+                # "autobegin".
+
+                in_transaction = self._transaction is not None
+
+                if in_transaction:
+                    return nullcontext()
+                else:
+                    assert self.connection is not None
+                    self._transaction = (
+                        sqla_compat._safe_begin_connection_transaction(
+                            self.connection
+                        )
+                    )
+                    return _ProxyTransaction(self)
+        elif self.as_sql:
+
+            @contextmanager
+            def begin_commit():
+                self.impl.emit_begin()
+                yield
+                self.impl.emit_commit()
+
+            return begin_commit()
+        else:
+            assert self.connection is not None
+            self._transaction = sqla_compat._safe_begin_connection_transaction(
+                self.connection
+            )
+            return _ProxyTransaction(self)
+
+    def get_current_revision(self) -> Optional[str]:
+        """Return the current revision, usually that which is present
+        in the ``alembic_version`` table in the database.
+
+        This method intends to be used only for a migration stream that
+        does not contain unmerged branches in the target database;
+        if there are multiple branches present, an exception is raised.
+        The :meth:`.MigrationContext.get_current_heads` should be preferred
+        over this method going forward in order to be compatible with
+        branch migration support.
+
+        If this :class:`.MigrationContext` was configured in "offline"
+        mode, that is with ``as_sql=True``, the ``starting_rev``
+        parameter is returned instead, if any.
+
+        """
+        heads = self.get_current_heads()
+        if len(heads) == 0:
+            return None
+        elif len(heads) > 1:
+            raise util.CommandError(
+                "Version table '%s' has more than one head present; "
+                "please use get_current_heads()" % self.version_table
+            )
+        else:
+            return heads[0]
+
+    def get_current_heads(self) -> Tuple[str, ...]:
+        """Return a tuple of the current 'head versions' that are represented
+        in the target database.
+
+        For a migration stream without branches, this will be a single
+        value, synonymous with that of
+        :meth:`.MigrationContext.get_current_revision`.   However when multiple
+        unmerged branches exist within the target database, the returned tuple
+        will contain a value for each head.
+
+        If this :class:`.MigrationContext` was configured in "offline"
+        mode, that is with ``as_sql=True``, the ``starting_rev``
+        parameter is returned in a one-length tuple.
+
+        If no version table is present, or if there are no revisions
+        present, an empty tuple is returned.
+
+        """
+        if self.as_sql:
+            start_from_rev: Any = self._start_from_rev
+            if start_from_rev == "base":
+                start_from_rev = None
+            elif start_from_rev is not None and self.script:
+                start_from_rev = [
+                    self.script.get_revision(sfr).revision
+                    for sfr in util.to_list(start_from_rev)
+                    if sfr not in (None, "base")
+                ]
+            return util.to_tuple(start_from_rev, default=())
+        else:
+            if self._start_from_rev:
+                raise util.CommandError(
+                    "Can't specify current_rev to context "
+                    "when using a database connection"
+                )
+            if not self._has_version_table():
+                return ()
+        assert self.connection is not None
+        return tuple(
+            row[0]
+            for row in self.connection.execute(
+                select(self._version.c.version_num)
+            )
+        )
+
+    def _ensure_version_table(self, purge: bool = False) -> None:
+        with sqla_compat._ensure_scope_for_ddl(self.connection):
+            assert self.connection is not None
+            self._version.create(self.connection, checkfirst=True)
+            if purge:
+                assert self.connection is not None
+                self.connection.execute(self._version.delete())
+
+    def _has_version_table(self) -> bool:
+        assert self.connection is not None
+        return sqla_compat._connectable_has_table(
+            self.connection, self.version_table, self.version_table_schema
+        )
+
+    def stamp(self, script_directory: ScriptDirectory, revision: str) -> None:
+        """Stamp the version table with a specific revision.
+
+        This method calculates those branches to which the given revision
+        can apply, and updates those branches as though they were migrated
+        towards that revision (either up or down).  If no current branches
+        include the revision, it is added as a new branch head.
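+
+        E.g., a sketch of stamping a database at "head" from a utility
+        routine, given a :class:`.Config` object ``config``::
+
+            from alembic.script import ScriptDirectory
+
+            script = ScriptDirectory.from_config(config)
+            context.stamp(script, "head")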
+
+        """
+        heads = self.get_current_heads()
+        if not self.as_sql and not heads:
+            self._ensure_version_table()
+        head_maintainer = HeadMaintainer(self, heads)
+        for step in script_directory._stamp_revs(revision, heads):
+            head_maintainer.update_to_step(step)
+
+    def run_migrations(self, **kw: Any) -> None:
+        r"""Run the migration scripts established for this
+        :class:`.MigrationContext`, if any.
+
+        The commands in :mod:`alembic.command` will set up a function
+        that is ultimately passed to the :class:`.MigrationContext`
+        as the ``fn`` argument.  This function represents the "work"
+        that will be done when :meth:`.MigrationContext.run_migrations`
+        is called, typically from within the ``env.py`` script of the
+        migration environment.  The "work function" then provides an iterable
+        of version callables and other version information which
+        in the case of the ``upgrade`` or ``downgrade`` commands are the
+        list of version scripts to invoke.  Other commands yield nothing,
+        in the case that a command wants to run some other operation
+        against the database such as the ``current`` or ``stamp`` commands.
+
+        :param \**kw: keyword arguments here will be passed to each
+         migration callable, that is the ``upgrade()`` or ``downgrade()``
+         method within revision scripts.
+
+        """
+        self.impl.start_migrations()
+
+        heads: Tuple[str, ...]
+        if self.purge:
+            if self.as_sql:
+                raise util.CommandError("Can't use --purge with --sql mode")
+            self._ensure_version_table(purge=True)
+            heads = ()
+        else:
+            heads = self.get_current_heads()
+
+            dont_mutate = self.opts.get("dont_mutate", False)
+
+            if not self.as_sql and not heads and not dont_mutate:
+                self._ensure_version_table()
+
+        head_maintainer = HeadMaintainer(self, heads)
+
+        assert self._migrations_fn is not None
+        for step in self._migrations_fn(heads, self):
+            with self.begin_transaction(_per_migration=True):
+                if self.as_sql and not head_maintainer.heads:
+                    # for offline mode, include a CREATE TABLE from
+                    # the base
+                    assert self.connection is not None
+                    self._version.create(self.connection)
+                log.info("Running %s", step)
+                if self.as_sql:
+                    self.impl.static_output(
+                        "-- Running %s" % (step.short_log,)
+                    )
+                step.migration_fn(**kw)
+
+                # previously, we wouldn't stamp per migration
+                # if we were in a transaction, however given the more
+                # complex model that involves any number of inserts
+                # and row-targeted updates and deletes, it's simpler for now
+                # just to run the operations on every version
+                head_maintainer.update_to_step(step)
+                for callback in self.on_version_apply_callbacks:
+                    callback(
+                        ctx=self,
+                        step=step.info,
+                        heads=set(head_maintainer.heads),
+                        run_args=kw,
+                    )
+
+        if self.as_sql and not head_maintainer.heads:
+            assert self.connection is not None
+            self._version.drop(self.connection)
+
+    def _in_connection_transaction(self) -> bool:
+        try:
+            meth = self.connection.in_transaction  # type:ignore[union-attr]
+        except AttributeError:
+            return False
+        else:
+            return meth()
+
+    def execute(
+        self,
+        sql: Union[Executable, str],
+        execution_options: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """Execute a SQL construct or string statement.
+
+        The underlying execution mechanics are used, that is
+        if this is "offline mode" the SQL is written to the
+        output buffer, otherwise the SQL is emitted on
+        the current SQLAlchemy connection.
+
+        """
+        self.impl._exec(sql, execution_options)
+
+    def _stdout_connection(
+        self, connection: Optional[Connection]
+    ) -> MockConnection:
+        def dump(construct, *multiparams, **params):
+            self.impl._exec(construct)
+
+        return MockEngineStrategy.MockConnection(self.dialect, dump)
+
+    @property
+    def bind(self) -> Optional[Connection]:
+        """Return the current "bind".
+
+        In online mode, this is an instance of
+        :class:`sqlalchemy.engine.Connection`, and is suitable
+        for ad-hoc execution of any kind of usage described
+        in SQLAlchemy Core documentation as well as
+        for usage with the :meth:`sqlalchemy.schema.Table.create`
+        and :meth:`sqlalchemy.schema.MetaData.create_all` methods
+        of :class:`~sqlalchemy.schema.Table`,
+        :class:`~sqlalchemy.schema.MetaData`.
+
+        Note that when "standard output" mode is enabled,
+        this bind will be a "mock" connection handler that cannot
+        return results and is only appropriate for a very limited
+        subset of commands.
+
+        """
+        return self.connection
+
+    @property
+    def config(self) -> Optional[Config]:
+        """Return the :class:`.Config` used by the current environment,
+        if any."""
+
+        if self.environment_context:
+            return self.environment_context.config
+        else:
+            return None
+
+    def _compare_type(
+        self, inspector_column: Column[Any], metadata_column: Column
+    ) -> bool:
+        if self._user_compare_type is False:
+            return False
+
+        if callable(self._user_compare_type):
+            user_value = self._user_compare_type(
+                self,
+                inspector_column,
+                metadata_column,
+                inspector_column.type,
+                metadata_column.type,
+            )
+            if user_value is not None:
+                return user_value
+
+        return self.impl.compare_type(inspector_column, metadata_column)
+
+    def _compare_server_default(
+        self,
+        inspector_column: Column[Any],
+        metadata_column: Column[Any],
+        rendered_metadata_default: Optional[str],
+        rendered_column_default: Optional[str],
+    ) -> bool:
+        if self._user_compare_server_default is False:
+            return False
+
+        if callable(self._user_compare_server_default):
+            user_value = self._user_compare_server_default(
+                self,
+                inspector_column,
+                metadata_column,
+                rendered_column_default,
+                metadata_column.server_default,
+                rendered_metadata_default,
+            )
+            if user_value is not None:
+                return user_value
+
+        return self.impl.compare_server_default(
+            inspector_column,
+            metadata_column,
+            rendered_metadata_default,
+            rendered_column_default,
+        )
+
+
+class HeadMaintainer:
+    def __init__(self, context: MigrationContext, heads: Any) -> None:
+        self.context = context
+        self.heads = set(heads)
+
+    def _insert_version(self, version: str) -> None:
+        assert version not in self.heads
+        self.heads.add(version)
+
+        self.context.impl._exec(
+            self.context._version.insert().values(
+                version_num=literal_column("'%s'" % version)
+            )
+        )
+
+    def _delete_version(self, version: str) -> None:
+        self.heads.remove(version)
+
+        ret = self.context.impl._exec(
+            self.context._version.delete().where(
+                self.context._version.c.version_num
+                == literal_column("'%s'" % version)
+            )
+        )
+
+        if (
+            not self.context.as_sql
+            and self.context.dialect.supports_sane_rowcount
+            and ret is not None
+            and ret.rowcount != 1
+        ):
+            raise util.CommandError(
+                "Online migration expected to match one "
+                "row when deleting '%s' in '%s'; "
+                "%d found"
+                % (version, self.context.version_table, ret.rowcount)
+            )
+
+    def _update_version(self, from_: str, to_: str) -> None:
+        assert to_ not in self.heads
+        self.heads.remove(from_)
+        self.heads.add(to_)
+
+        ret = self.context.impl._exec(
+            self.context._version.update()
+            .values(version_num=literal_column("'%s'" % to_))
+            .where(
+                self.context._version.c.version_num
+                == literal_column("'%s'" % from_)
+            )
+        )
+
+        if (
+            not self.context.as_sql
+            and self.context.dialect.supports_sane_rowcount
+            and ret is not None
+            and ret.rowcount != 1
+        ):
+            raise util.CommandError(
+                "Online migration expected to match one "
+                "row when updating '%s' to '%s' in '%s'; "
+                "%d found"
+                % (from_, to_, self.context.version_table, ret.rowcount)
+            )
+
+    def update_to_step(self, step: Union[RevisionStep, StampStep]) -> None:
+        if step.should_delete_branch(self.heads):
+            vers = step.delete_version_num
+            log.debug("branch delete %s", vers)
+            self._delete_version(vers)
+        elif step.should_create_branch(self.heads):
+            vers = step.insert_version_num
+            log.debug("new branch insert %s", vers)
+            self._insert_version(vers)
+        elif step.should_merge_branches(self.heads):
+            # delete revs, update from rev, update to rev
+            (
+                delete_revs,
+                update_from_rev,
+                update_to_rev,
+            ) = step.merge_branch_idents(self.heads)
+            log.debug(
+                "merge, delete %s, update %s to %s",
+                delete_revs,
+                update_from_rev,
+                update_to_rev,
+            )
+            for delrev in delete_revs:
+                self._delete_version(delrev)
+            self._update_version(update_from_rev, update_to_rev)
+        elif step.should_unmerge_branches(self.heads):
+            (
+                update_from_rev,
+                update_to_rev,
+                insert_revs,
+            ) = step.unmerge_branch_idents(self.heads)
+            log.debug(
+                "unmerge, insert %s, update %s to %s",
+                insert_revs,
+                update_from_rev,
+                update_to_rev,
+            )
+            for insrev in insert_revs:
+                self._insert_version(insrev)
+            self._update_version(update_from_rev, update_to_rev)
+        else:
+            from_, to_ = step.update_version_num(self.heads)
+            log.debug("update %s to %s", from_, to_)
+            self._update_version(from_, to_)
+
+
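+# Illustrative walk-through of the branch logic above (hypothetical
+# revision ids): given heads {"a", "b"} and an upgrade step for a merge
+# revision "m" whose down revisions are ("a", "b"),
+# should_merge_branches() is true; merge_branch_idents() then yields
+# delete_revs ["a"] and an update of "b" to "m", so the version table
+# collapses from two rows to one.
+
+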
+class MigrationInfo:
+    """Exposes information about a migration step to a callback listener.
+
+    The :class:`.MigrationInfo` object is available exclusively for the
+    benefit of the :paramref:`.EnvironmentContext.on_version_apply`
+    callback hook.
+
+    """
+
+    is_upgrade: bool
+    """True/False: indicates whether this operation ascends or descends the
+    version tree."""
+
+    is_stamp: bool
+    """True/False: indicates whether this operation is a stamp (i.e. whether
+    it results in any actual database operations)."""
+
+    up_revision_id: Optional[str]
+    """Version string corresponding to :attr:`.Revision.revision`.
+
+    In the case of a stamp operation, it is advised to use the
+    :attr:`.MigrationInfo.up_revision_ids` tuple as a stamp operation can
+    make a single movement from one or more branches down to a single
+    branchpoint, in which case there will be multiple "up" revisions.
+
+    .. seealso::
+
+        :attr:`.MigrationInfo.up_revision_ids`
+
+    """
+
+    up_revision_ids: Tuple[str, ...]
+    """Tuple of version strings corresponding to :attr:`.Revision.revision`.
+
+    In the majority of cases, this tuple will contain a single value,
+    synonymous with the scalar value of
+    :attr:`.MigrationInfo.up_revision_id`.  It can contain multiple
+    revision identifiers only in the case of an ``alembic stamp``
+    operation which is moving downwards from multiple branches to their
+    common branch point.
+
+    """
+
+    down_revision_ids: Tuple[str, ...]
+    """Tuple of strings representing the base revisions of this migration step.
+
+    If empty, this represents a root revision; otherwise, the first item
+    corresponds to :attr:`.Revision.down_revision`, and the rest are inferred
+    from dependencies.
+    """
+
+    revision_map: RevisionMap
+    """The revision map inside of which this operation occurs."""
+
+    def __init__(
+        self,
+        revision_map: RevisionMap,
+        is_upgrade: bool,
+        is_stamp: bool,
+        up_revisions: Union[str, Tuple[str, ...]],
+        down_revisions: Union[str, Tuple[str, ...]],
+    ) -> None:
+        self.revision_map = revision_map
+        self.is_upgrade = is_upgrade
+        self.is_stamp = is_stamp
+        self.up_revision_ids = util.to_tuple(up_revisions, default=())
+        if self.up_revision_ids:
+            self.up_revision_id = self.up_revision_ids[0]
+        else:
+            # this should never be the case with
+            # "upgrade", "downgrade", or "stamp" as we are always
+            # measuring movement in terms of at least one upgrade version
+            self.up_revision_id = None
+        self.down_revision_ids = util.to_tuple(down_revisions, default=())
+
+    @property
+    def is_migration(self) -> bool:
+        """True/False: indicates whether this operation is a migration.
+
+        At present this is true if and only if the operation is not a stamp.
+        If other operation types are added in the future, both this attribute
+        and :attr:`~.MigrationInfo.is_stamp` will be false.
+        """
+        return not self.is_stamp
+
+    @property
+    def source_revision_ids(self) -> Tuple[str, ...]:
+        """Active revisions before this migration step is applied."""
+        return (
+            self.down_revision_ids if self.is_upgrade else self.up_revision_ids
+        )
+
+    @property
+    def destination_revision_ids(self) -> Tuple[str, ...]:
+        """Active revisions after this migration step is applied."""
+        return (
+            self.up_revision_ids if self.is_upgrade else self.down_revision_ids
+        )
+
+    @property
+    def up_revision(self) -> Optional[Revision]:
+        """Get :attr:`~.MigrationInfo.up_revision_id` as
+        a :class:`.Revision`.
+
+        """
+        return self.revision_map.get_revision(self.up_revision_id)
+
+    @property
+    def up_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """Get :attr:`~.MigrationInfo.up_revision_ids` as a
+        :class:`.Revision`."""
+        return self.revision_map.get_revisions(self.up_revision_ids)
+
+    @property
+    def down_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """Get :attr:`~.MigrationInfo.down_revision_ids` as a tuple of
+        :class:`Revisions <.Revision>`."""
+        return self.revision_map.get_revisions(self.down_revision_ids)
+
+    @property
+    def source_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """Get :attr:`~MigrationInfo.source_revision_ids` as a tuple of
+        :class:`Revisions <.Revision>`."""
+        return self.revision_map.get_revisions(self.source_revision_ids)
+
+    @property
+    def destination_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """Get :attr:`~MigrationInfo.destination_revision_ids` as a tuple of
+        :class:`Revisions <.Revision>`."""
+        return self.revision_map.get_revisions(self.destination_revision_ids)
+
+
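+# Sketch of an ``on_version_apply`` hook (assumed env.py usage, assumed
+# keyword names) consuming the attributes defined above; the hook
+# receives keyword arguments including ``step``, a MigrationInfo:
+#
+#     def report_version_apply(*, ctx, step, heads, run_args, **kw):
+#         direction = "upgrade" if step.is_upgrade else "downgrade"
+#         print(direction, step.source_revision_ids,
+#               "->", step.destination_revision_ids)
+#
+#     context.configure(
+#         connection=connection,
+#         on_version_apply=report_version_apply,
+#     )
+
+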
+class MigrationStep:
+    from_revisions_no_deps: Tuple[str, ...]
+    to_revisions_no_deps: Tuple[str, ...]
+    is_upgrade: bool
+    migration_fn: Any
+
+    if TYPE_CHECKING:
+
+        @property
+        def doc(self) -> Optional[str]: ...
+
+    @property
+    def name(self) -> str:
+        return self.migration_fn.__name__
+
+    @classmethod
+    def upgrade_from_script(
+        cls, revision_map: RevisionMap, script: Script
+    ) -> RevisionStep:
+        return RevisionStep(revision_map, script, True)
+
+    @classmethod
+    def downgrade_from_script(
+        cls, revision_map: RevisionMap, script: Script
+    ) -> RevisionStep:
+        return RevisionStep(revision_map, script, False)
+
+    @property
+    def is_downgrade(self) -> bool:
+        return not self.is_upgrade
+
+    @property
+    def short_log(self) -> str:
+        return "%s %s -> %s" % (
+            self.name,
+            util.format_as_comma(self.from_revisions_no_deps),
+            util.format_as_comma(self.to_revisions_no_deps),
+        )
+
+    def __str__(self):
+        if self.doc:
+            return "%s %s -> %s, %s" % (
+                self.name,
+                util.format_as_comma(self.from_revisions_no_deps),
+                util.format_as_comma(self.to_revisions_no_deps),
+                self.doc,
+            )
+        else:
+            return self.short_log
+
+
+class RevisionStep(MigrationStep):
+    def __init__(
+        self, revision_map: RevisionMap, revision: Script, is_upgrade: bool
+    ) -> None:
+        self.revision_map = revision_map
+        self.revision = revision
+        self.is_upgrade = is_upgrade
+        if is_upgrade:
+            self.migration_fn = revision.module.upgrade
+        else:
+            self.migration_fn = revision.module.downgrade
+
+    def __repr__(self):
+        return "RevisionStep(%r, is_upgrade=%r)" % (
+            self.revision.revision,
+            self.is_upgrade,
+        )
+
+    def __eq__(self, other: object) -> bool:
+        return (
+            isinstance(other, RevisionStep)
+            and other.revision == self.revision
+            and self.is_upgrade == other.is_upgrade
+        )
+
+    @property
+    def doc(self) -> Optional[str]:
+        return self.revision.doc
+
+    @property
+    def from_revisions(self) -> Tuple[str, ...]:
+        if self.is_upgrade:
+            return self.revision._normalized_down_revisions
+        else:
+            return (self.revision.revision,)
+
+    @property
+    def from_revisions_no_deps(  # type:ignore[override]
+        self,
+    ) -> Tuple[str, ...]:
+        if self.is_upgrade:
+            return self.revision._versioned_down_revisions
+        else:
+            return (self.revision.revision,)
+
+    @property
+    def to_revisions(self) -> Tuple[str, ...]:
+        if self.is_upgrade:
+            return (self.revision.revision,)
+        else:
+            return self.revision._normalized_down_revisions
+
+    @property
+    def to_revisions_no_deps(  # type:ignore[override]
+        self,
+    ) -> Tuple[str, ...]:
+        if self.is_upgrade:
+            return (self.revision.revision,)
+        else:
+            return self.revision._versioned_down_revisions
+
+    @property
+    def _has_scalar_down_revision(self) -> bool:
+        return len(self.revision._normalized_down_revisions) == 1
+
+    def should_delete_branch(self, heads: Set[str]) -> bool:
+        """A delete is when we are a. in a downgrade and b.
+        we are going to the "base" or we are going to a version that
+        is implied as a dependency on another version that is remaining.
+
+        """
+        if not self.is_downgrade:
+            return False
+
+        if self.revision.revision not in heads:
+            return False
+
+        downrevs = self.revision._normalized_down_revisions
+
+        if not downrevs:
+            # is a base
+            return True
+        else:
+            # determine what the ultimate "to_revisions" for an
+            # unmerge would be.  If there are none, then we're a delete.
+            to_revisions = self._unmerge_to_revisions(heads)
+            return not to_revisions
+
+    def merge_branch_idents(
+        self, heads: Set[str]
+    ) -> Tuple[List[str], str, str]:
+        other_heads = set(heads).difference(self.from_revisions)
+
+        if other_heads:
+            ancestors = {
+                r.revision
+                for r in self.revision_map._get_ancestor_nodes(
+                    self.revision_map.get_revisions(other_heads), check=False
+                )
+            }
+            from_revisions = list(
+                set(self.from_revisions).difference(ancestors)
+            )
+        else:
+            from_revisions = list(self.from_revisions)
+
+        return (
+            # delete revs, update from rev, update to rev
+            list(from_revisions[0:-1]),
+            from_revisions[-1],
+            self.to_revisions[0],
+        )
+
+    def _unmerge_to_revisions(self, heads: Set[str]) -> Tuple[str, ...]:
+        other_heads = set(heads).difference([self.revision.revision])
+        if other_heads:
+            ancestors = {
+                r.revision
+                for r in self.revision_map._get_ancestor_nodes(
+                    self.revision_map.get_revisions(other_heads), check=False
+                )
+            }
+            return tuple(set(self.to_revisions).difference(ancestors))
+        else:
+            # for each revision we plan to return, compute its ancestors
+            # (excluding self), and remove those from the final output since
+            # they are already accounted for.
+            ancestors = {
+                r.revision
+                for to_revision in self.to_revisions
+                for r in self.revision_map._get_ancestor_nodes(
+                    self.revision_map.get_revisions(to_revision), check=False
+                )
+                if r.revision != to_revision
+            }
+            return tuple(set(self.to_revisions).difference(ancestors))
+
+    def unmerge_branch_idents(
+        self, heads: Set[str]
+    ) -> Tuple[str, str, Tuple[str, ...]]:
+        to_revisions = self._unmerge_to_revisions(heads)
+
+        return (
+            # update from rev, update to rev, insert revs
+            self.from_revisions[0],
+            to_revisions[-1],
+            to_revisions[0:-1],
+        )
+
+    def should_create_branch(self, heads: Set[str]) -> bool:
+        if not self.is_upgrade:
+            return False
+
+        downrevs = self.revision._normalized_down_revisions
+
+        if not downrevs:
+            # is a base
+            return True
+        else:
+            # none of our downrevs are present, so...
+            # we have to insert our version.   This is true whether
+            # there is only one downrev or several (in the latter
+            # case, we're a merge point.)
+            if not heads.intersection(downrevs):
+                return True
+            else:
+                return False
+
+    def should_merge_branches(self, heads: Set[str]) -> bool:
+        if not self.is_upgrade:
+            return False
+
+        downrevs = self.revision._normalized_down_revisions
+
+        if len(downrevs) > 1 and len(heads.intersection(downrevs)) > 1:
+            return True
+
+        return False
+
+    def should_unmerge_branches(self, heads: Set[str]) -> bool:
+        if not self.is_downgrade:
+            return False
+
+        downrevs = self.revision._normalized_down_revisions
+
+        if self.revision.revision in heads and len(downrevs) > 1:
+            return True
+
+        return False
+
+    def update_version_num(self, heads: Set[str]) -> Tuple[str, str]:
+        if not self._has_scalar_down_revision:
+            downrev = heads.intersection(
+                self.revision._normalized_down_revisions
+            )
+            assert (
+                len(downrev) == 1
+            ), "Can't do an UPDATE because downrevision is ambiguous"
+            down_revision = list(downrev)[0]
+        else:
+            down_revision = self.revision._normalized_down_revisions[0]
+
+        if self.is_upgrade:
+            return down_revision, self.revision.revision
+        else:
+            return self.revision.revision, down_revision
+
+    @property
+    def delete_version_num(self) -> str:
+        return self.revision.revision
+
+    @property
+    def insert_version_num(self) -> str:
+        return self.revision.revision
+
+    @property
+    def info(self) -> MigrationInfo:
+        return MigrationInfo(
+            revision_map=self.revision_map,
+            up_revisions=self.revision.revision,
+            down_revisions=self.revision._normalized_down_revisions,
+            is_upgrade=self.is_upgrade,
+            is_stamp=False,
+        )
+
+
+class StampStep(MigrationStep):
+    def __init__(
+        self,
+        from_: Optional[Union[str, Collection[str]]],
+        to_: Optional[Union[str, Collection[str]]],
+        is_upgrade: bool,
+        branch_move: bool,
+        revision_map: Optional[RevisionMap] = None,
+    ) -> None:
+        self.from_: Tuple[str, ...] = util.to_tuple(from_, default=())
+        self.to_: Tuple[str, ...] = util.to_tuple(to_, default=())
+        self.is_upgrade = is_upgrade
+        self.branch_move = branch_move
+        self.migration_fn = self.stamp_revision
+        self.revision_map = revision_map
+
+    doc: Optional[str] = None
+
+    def stamp_revision(self, **kw: Any) -> None:
+        return None
+
+    def __eq__(self, other):
+        return (
+            isinstance(other, StampStep)
+            and other.from_revisions == self.from_revisions
+            and other.to_revisions == self.to_revisions
+            and other.branch_move == self.branch_move
+            and self.is_upgrade == other.is_upgrade
+        )
+
+    @property
+    def from_revisions(self):
+        return self.from_
+
+    @property
+    def to_revisions(self) -> Tuple[str, ...]:
+        return self.to_
+
+    @property
+    def from_revisions_no_deps(  # type:ignore[override]
+        self,
+    ) -> Tuple[str, ...]:
+        return self.from_
+
+    @property
+    def to_revisions_no_deps(  # type:ignore[override]
+        self,
+    ) -> Tuple[str, ...]:
+        return self.to_
+
+    @property
+    def delete_version_num(self) -> str:
+        assert len(self.from_) == 1
+        return self.from_[0]
+
+    @property
+    def insert_version_num(self) -> str:
+        assert len(self.to_) == 1
+        return self.to_[0]
+
+    def update_version_num(self, heads: Set[str]) -> Tuple[str, str]:
+        assert len(self.from_) == 1
+        assert len(self.to_) == 1
+        return self.from_[0], self.to_[0]
+
+    def merge_branch_idents(
+        self, heads: Union[Set[str], List[str]]
+    ) -> Union[Tuple[List[Any], str, str], Tuple[List[str], str, str]]:
+        return (
+            # delete revs, update from rev, update to rev
+            list(self.from_[0:-1]),
+            self.from_[-1],
+            self.to_[0],
+        )
+
+    def unmerge_branch_idents(
+        self, heads: Set[str]
+    ) -> Tuple[str, str, List[str]]:
+        return (
+            # update from rev, update to rev, insert revs
+            self.from_[0],
+            self.to_[-1],
+            list(self.to_[0:-1]),
+        )
+
+    def should_delete_branch(self, heads: Set[str]) -> bool:
+        # TODO: we probably need to look for self.to_ inside of heads,
+        # in a similar manner as should_create_branch, however we have
+        # no tests for this yet (stamp downgrades w/ branches)
+        return self.is_downgrade and self.branch_move
+
+    def should_create_branch(self, heads: Set[str]) -> Union[Set[str], bool]:
+        return (
+            self.is_upgrade
+            and (self.branch_move or set(self.from_).difference(heads))
+            and set(self.to_).difference(heads)
+        )
+
+    def should_merge_branches(self, heads: Set[str]) -> bool:
+        return len(self.from_) > 1
+
+    def should_unmerge_branches(self, heads: Set[str]) -> bool:
+        return len(self.to_) > 1
+
+    @property
+    def info(self) -> MigrationInfo:
+        up, down = (
+            (self.to_, self.from_)
+            if self.is_upgrade
+            else (self.from_, self.to_)
+        )
+        assert self.revision_map is not None
+        return MigrationInfo(
+            revision_map=self.revision_map,
+            up_revisions=up,
+            down_revisions=down,
+            is_upgrade=self.is_upgrade,
+            is_stamp=True,
+        )
diff --git a/.venv/lib/python3.12/site-packages/alembic/script/__init__.py b/.venv/lib/python3.12/site-packages/alembic/script/__init__.py
new file mode 100644
index 00000000..d78f3f1d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/script/__init__.py
@@ -0,0 +1,4 @@
+from .base import Script
+from .base import ScriptDirectory
+
+__all__ = ["ScriptDirectory", "Script"]
diff --git a/.venv/lib/python3.12/site-packages/alembic/script/base.py b/.venv/lib/python3.12/site-packages/alembic/script/base.py
new file mode 100644
index 00000000..30df6ddb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/script/base.py
@@ -0,0 +1,1066 @@
+from __future__ import annotations
+
+from contextlib import contextmanager
+import datetime
+import os
+import re
+import shutil
+import sys
+from types import ModuleType
+from typing import Any
+from typing import cast
+from typing import Iterator
+from typing import List
+from typing import Mapping
+from typing import Optional
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from . import revision
+from . import write_hooks
+from .. import util
+from ..runtime import migration
+from ..util import compat
+from ..util import not_none
+
+if TYPE_CHECKING:
+    from .revision import _GetRevArg
+    from .revision import _RevIdType
+    from .revision import Revision
+    from ..config import Config
+    from ..config import MessagingOptions
+    from ..runtime.migration import RevisionStep
+    from ..runtime.migration import StampStep
+
+try:
+    if compat.py39:
+        from zoneinfo import ZoneInfo
+        from zoneinfo import ZoneInfoNotFoundError
+    else:
+        from backports.zoneinfo import ZoneInfo  # type: ignore[import-not-found,no-redef] # noqa: E501
+        from backports.zoneinfo import ZoneInfoNotFoundError  # type: ignore[no-redef] # noqa: E501
+except ImportError:
+    ZoneInfo = None  # type: ignore[assignment, misc]
+
+_sourceless_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)(c|o)?$")
+_only_source_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)$")
+_legacy_rev = re.compile(r"([a-f0-9]+)\.py$")
+_slug_re = re.compile(r"\w+")
+_default_file_template = "%(rev)s_%(slug)s"
+_split_on_space_comma = re.compile(r", *|(?: +)")
+
+_split_on_space_comma_colon = re.compile(r", *|(?: +)|\:")
+
+
+class ScriptDirectory:
+    """Provides operations upon an Alembic script directory.
+
+    This object is useful to get information as to current revisions,
+    most notably being able to get at the "head" revision, for schemes
+    that want to test if the current revision in the database is the most
+    recent::
+
+        from alembic.script import ScriptDirectory
+        from alembic.config import Config
+        config = Config()
+        config.set_main_option("script_location", "myapp:migrations")
+        script = ScriptDirectory.from_config(config)
+
+        head_revision = script.get_current_head()
+
+    """
+
+    def __init__(
+        self,
+        dir: str,  # noqa
+        file_template: str = _default_file_template,
+        truncate_slug_length: Optional[int] = 40,
+        version_locations: Optional[List[str]] = None,
+        sourceless: bool = False,
+        output_encoding: str = "utf-8",
+        timezone: Optional[str] = None,
+        hook_config: Optional[Mapping[str, str]] = None,
+        recursive_version_locations: bool = False,
+        messaging_opts: MessagingOptions = cast(
+            "MessagingOptions", util.EMPTY_DICT
+        ),
+    ) -> None:
+        self.dir = dir
+        self.file_template = file_template
+        self.version_locations = version_locations
+        self.truncate_slug_length = truncate_slug_length or 40
+        self.sourceless = sourceless
+        self.output_encoding = output_encoding
+        self.revision_map = revision.RevisionMap(self._load_revisions)
+        self.timezone = timezone
+        self.hook_config = hook_config
+        self.recursive_version_locations = recursive_version_locations
+        self.messaging_opts = messaging_opts
+
+        if not os.access(dir, os.F_OK):
+            raise util.CommandError(
+                "Path doesn't exist: %r.  Please use "
+                "the 'init' command to create a new "
+                "scripts folder." % os.path.abspath(dir)
+            )
+
+    @property
+    def versions(self) -> str:
+        loc = self._version_locations
+        if len(loc) > 1:
+            raise util.CommandError("Multiple version_locations present")
+        else:
+            return loc[0]
+
+    @util.memoized_property
+    def _version_locations(self) -> Sequence[str]:
+        if self.version_locations:
+            return [
+                os.path.abspath(util.coerce_resource_to_filename(location))
+                for location in self.version_locations
+            ]
+        else:
+            return (os.path.abspath(os.path.join(self.dir, "versions")),)
+
+    def _load_revisions(self) -> Iterator[Script]:
+        if self.version_locations:
+            paths = [
+                vers
+                for vers in self._version_locations
+                if os.path.exists(vers)
+            ]
+        else:
+            paths = [self.versions]
+
+        dupes = set()
+        for vers in paths:
+            for file_path in Script._list_py_dir(self, vers):
+                real_path = os.path.realpath(file_path)
+                if real_path in dupes:
+                    util.warn(
+                        "File %s loaded twice! ignoring. Please ensure "
+                        "version_locations is unique." % real_path
+                    )
+                    continue
+                dupes.add(real_path)
+
+                filename = os.path.basename(real_path)
+                dir_name = os.path.dirname(real_path)
+                script = Script._from_filename(self, dir_name, filename)
+                if script is None:
+                    continue
+                yield script
+
+    @classmethod
+    def from_config(cls, config: Config) -> ScriptDirectory:
+        """Produce a new :class:`.ScriptDirectory` given a :class:`.Config`
+        instance.
+
+        The :class:`.Config` need only have the ``script_location`` key
+        present.
+
+        """
+        script_location = config.get_main_option("script_location")
+        if script_location is None:
+            raise util.CommandError(
+                "No 'script_location' key " "found in configuration."
+            )
+        truncate_slug_length: Optional[int]
+        tsl = config.get_main_option("truncate_slug_length")
+        if tsl is not None:
+            truncate_slug_length = int(tsl)
+        else:
+            truncate_slug_length = None
+
+        version_locations_str = config.get_main_option("version_locations")
+        version_locations: Optional[List[str]]
+        if version_locations_str:
+            version_path_separator = config.get_main_option(
+                "version_path_separator"
+            )
+
+            split_on_path = {
+                None: None,
+                "space": " ",
+                "newline": "\n",
+                "os": os.pathsep,
+                ":": ":",
+                ";": ";",
+            }
+
+            try:
+                split_char: Optional[str] = split_on_path[
+                    version_path_separator
+                ]
+            except KeyError as ke:
+                raise ValueError(
+                    "'%s' is not a valid value for "
+                    "version_path_separator; "
+                    "expected 'space', 'newline', 'os', ':', ';'"
+                    % version_path_separator
+                ) from ke
+            else:
+                if split_char is None:
+                    # legacy behaviour for backwards compatibility
+                    version_locations = _split_on_space_comma.split(
+                        version_locations_str
+                    )
+                else:
+                    version_locations = [
+                        x.strip()
+                        for x in version_locations_str.split(split_char)
+                        if x
+                    ]
+        else:
+            version_locations = None
+
+        prepend_sys_path = config.get_main_option("prepend_sys_path")
+        if prepend_sys_path:
+            sys.path[:0] = list(
+                _split_on_space_comma_colon.split(prepend_sys_path)
+            )
+
+        rvl = config.get_main_option("recursive_version_locations") == "true"
+        return ScriptDirectory(
+            util.coerce_resource_to_filename(script_location),
+            file_template=config.get_main_option(
+                "file_template", _default_file_template
+            ),
+            truncate_slug_length=truncate_slug_length,
+            sourceless=config.get_main_option("sourceless") == "true",
+            output_encoding=config.get_main_option("output_encoding", "utf-8"),
+            version_locations=version_locations,
+            timezone=config.get_main_option("timezone"),
+            hook_config=config.get_section("post_write_hooks", {}),
+            recursive_version_locations=rvl,
+            messaging_opts=config.messaging_opts,
+        )
+
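+    # Config sketch (assumed alembic.ini values) for the separator table
+    # above:
+    #
+    #     [alembic]
+    #     version_path_separator = os
+    #     version_locations = migrations/versions:lib/other/versions
+    #
+    # "os" splits on os.pathsep (":" on POSIX, ";" on Windows); "space",
+    # "newline", ":" and ";" select literal separators, and omitting the
+    # option falls back to the legacy split on spaces and/or commas.
+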
+    @contextmanager
+    def _catch_revision_errors(
+        self,
+        ancestor: Optional[str] = None,
+        multiple_heads: Optional[str] = None,
+        start: Optional[str] = None,
+        end: Optional[str] = None,
+        resolution: Optional[str] = None,
+    ) -> Iterator[None]:
+        try:
+            yield
+        except revision.RangeNotAncestorError as rna:
+            if start is None:
+                start = cast(Any, rna.lower)
+            if end is None:
+                end = cast(Any, rna.upper)
+            if not ancestor:
+                ancestor = (
+                    "Requested range %(start)s:%(end)s does not refer to "
+                    "ancestor/descendant revisions along the same branch"
+                )
+            ancestor = ancestor % {"start": start, "end": end}
+            raise util.CommandError(ancestor) from rna
+        except revision.MultipleHeads as mh:
+            if not multiple_heads:
+                multiple_heads = (
+                    "Multiple head revisions are present for given "
+                    "argument '%(head_arg)s'; please "
+                    "specify a specific target revision, "
+                    "'<branchname>@%(head_arg)s' to "
+                    "narrow to a specific head, or 'heads' for all heads"
+                )
+            multiple_heads = multiple_heads % {
+                "head_arg": end or mh.argument,
+                "heads": util.format_as_comma(mh.heads),
+            }
+            raise util.CommandError(multiple_heads) from mh
+        except revision.ResolutionError as re:
+            if resolution is None:
+                resolution = "Can't locate revision identified by '%s'" % (
+                    re.argument
+                )
+            raise util.CommandError(resolution) from re
+        except revision.RevisionError as err:
+            raise util.CommandError(err.args[0]) from err
+
+    def walk_revisions(
+        self, base: str = "base", head: str = "heads"
+    ) -> Iterator[Script]:
+        """Iterate through all revisions.
+
+        :param base: the base revision, or "base" to start from the
+         empty revision.
+
+        :param head: the head revision; defaults to "heads" to indicate
+         all head revisions.  May also be "head" to indicate a single
+         head revision.
+
+        """
+        with self._catch_revision_errors(start=base, end=head):
+            for rev in self.revision_map.iterate_revisions(
+                head, base, inclusive=True, assert_relative_length=False
+            ):
+                yield cast(Script, rev)
+
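+    # Usage sketch: walk every revision script from the head(s) down to
+    # the base (Script attributes as defined later in this module):
+    #
+    #     script_dir = ScriptDirectory.from_config(config)
+    #     for sc in script_dir.walk_revisions():
+    #         print(sc.revision, "-", sc.doc)
+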
+    def get_revisions(self, id_: _GetRevArg) -> Tuple[Script, ...]:
+        """Return the :class:`.Script` instance with the given rev identifier,
+        symbolic name, or sequence of identifiers.
+
+        """
+        with self._catch_revision_errors():
+            return cast(
+                Tuple[Script, ...],
+                self.revision_map.get_revisions(id_),
+            )
+
+    def get_all_current(self, id_: Tuple[str, ...]) -> Set[Script]:
+        with self._catch_revision_errors():
+            return cast(Set[Script], self.revision_map._get_all_current(id_))
+
+    def get_revision(self, id_: str) -> Script:
+        """Return the :class:`.Script` instance with the given rev id.
+
+        .. seealso::
+
+            :meth:`.ScriptDirectory.get_revisions`
+
+        """
+
+        with self._catch_revision_errors():
+            return cast(Script, self.revision_map.get_revision(id_))
+
+    def as_revision_number(
+        self, id_: Optional[str]
+    ) -> Optional[Union[str, Tuple[str, ...]]]:
+        """Convert a symbolic revision, i.e. 'head' or 'base', into
+        an actual revision number."""
+
+        with self._catch_revision_errors():
+            rev, branch_name = self.revision_map._resolve_revision_number(id_)
+
+        if not rev:
+            # convert () to None
+            return None
+        elif id_ == "heads":
+            return rev
+        else:
+            return rev[0]
+
+    def iterate_revisions(
+        self,
+        upper: Union[str, Tuple[str, ...], None],
+        lower: Union[str, Tuple[str, ...], None],
+        **kw: Any,
+    ) -> Iterator[Script]:
+        """Iterate through script revisions, starting at the given
+        upper revision identifier and ending at the lower.
+
+        The traversal uses strictly the ``down_revision``
+        marker inside each migration script, so it is a
+        requirement that ``upper`` is a descendant of ``lower``;
+        otherwise nothing is returned.
+
+        The iterator yields :class:`.Script` objects.
+
+        .. seealso::
+
+            :meth:`.RevisionMap.iterate_revisions`
+
+        """
+        return cast(
+            Iterator[Script],
+            self.revision_map.iterate_revisions(upper, lower, **kw),
+        )
+
+    def get_current_head(self) -> Optional[str]:
+        """Return the current head revision.
+
+        If the script directory has multiple heads
+        due to branching, an error is raised;
+        :meth:`.ScriptDirectory.get_heads` should be
+        preferred.
+
+        :return: a string revision number, or ``None`` if no
+         revisions are present.
+
+        .. seealso::
+
+            :meth:`.ScriptDirectory.get_heads`
+
+        """
+        with self._catch_revision_errors(
+            multiple_heads=(
+                "The script directory has multiple heads (due to branching)."
+                "Please use get_heads(), or merge the branches using "
+                "alembic merge."
+            )
+        ):
+            return self.revision_map.get_current_head()
+
+    def get_heads(self) -> List[str]:
+        """Return all "versioned head" revisions as strings.
+
+        This is normally a list of length one,
+        unless branches are present.  The
+        :meth:`.ScriptDirectory.get_current_head()` method
+        can be used normally when a script directory
+        has only one head.
+
+        :return: a list of string revision numbers.
+        """
+        return list(self.revision_map.heads)
+
+    def get_base(self) -> Optional[str]:
+        """Return the "base" revision as a string.
+
+        This is the revision number of the script that
+        has a ``down_revision`` of None.
+
+        If the script directory has multiple bases, an error is raised;
+        :meth:`.ScriptDirectory.get_bases` should be
+        preferred.
+
+        """
+        bases = self.get_bases()
+        if len(bases) > 1:
+            raise util.CommandError(
+                "The script directory has multiple bases. "
+                "Please use get_bases()."
+            )
+        elif bases:
+            return bases[0]
+        else:
+            return None
+
+    def get_bases(self) -> List[str]:
+        """return all "base" revisions as strings.
+
+        This is the revision number of all scripts that
+        have a ``down_revision`` of None.
+
+        """
+        return list(self.revision_map.bases)
+
+    def _upgrade_revs(
+        self, destination: str, current_rev: str
+    ) -> List[RevisionStep]:
+        with self._catch_revision_errors(
+            ancestor="Destination %(end)s is not a valid upgrade "
+            "target from current head(s)",
+            end=destination,
+        ):
+            revs = self.iterate_revisions(
+                destination, current_rev, implicit_base=True
+            )
+            return [
+                migration.MigrationStep.upgrade_from_script(
+                    self.revision_map, script
+                )
+                for script in reversed(list(revs))
+            ]
+
+    def _downgrade_revs(
+        self, destination: str, current_rev: Optional[str]
+    ) -> List[RevisionStep]:
+        with self._catch_revision_errors(
+            ancestor="Destination %(end)s is not a valid downgrade "
+            "target from current head(s)",
+            end=destination,
+        ):
+            revs = self.iterate_revisions(
+                current_rev, destination, select_for_downgrade=True
+            )
+            return [
+                migration.MigrationStep.downgrade_from_script(
+                    self.revision_map, script
+                )
+                for script in revs
+            ]
+
+    def _stamp_revs(
+        self, revision: _RevIdType, heads: _RevIdType
+    ) -> List[StampStep]:
+        with self._catch_revision_errors(
+            multiple_heads="Multiple heads are present; please specify a "
+            "single target revision"
+        ):
+            heads_revs = self.get_revisions(heads)
+
+            steps = []
+
+            if not revision:
+                revision = "base"
+
+            filtered_heads: List[Script] = []
+            for rev in util.to_tuple(revision):
+                if rev:
+                    filtered_heads.extend(
+                        self.revision_map.filter_for_lineage(
+                            cast(Sequence[Script], heads_revs),
+                            rev,
+                            include_dependencies=True,
+                        )
+                    )
+            filtered_heads = util.unique_list(filtered_heads)
+
+            dests = self.get_revisions(revision) or [None]
+
+            for dest in dests:
+                if dest is None:
+                    # dest is 'base'.  Return a "delete branch" migration
+                    # for all applicable heads.
+                    steps.extend(
+                        [
+                            migration.StampStep(
+                                head.revision,
+                                None,
+                                False,
+                                True,
+                                self.revision_map,
+                            )
+                            for head in filtered_heads
+                        ]
+                    )
+                    continue
+                elif dest in filtered_heads:
+                    # the dest is already in the version table, do nothing.
+                    continue
+
+                # figure out if the dest is a descendant or an
+                # ancestor of the selected nodes
+                descendants = set(
+                    self.revision_map._get_descendant_nodes([dest])
+                )
+                ancestors = set(self.revision_map._get_ancestor_nodes([dest]))
+
+                if descendants.intersection(filtered_heads):
+                    # heads are above the target, so this is a downgrade.
+                    # we can treat them as a "merge", single step.
+                    assert not ancestors.intersection(filtered_heads)
+                    todo_heads = [head.revision for head in filtered_heads]
+                    step = migration.StampStep(
+                        todo_heads,
+                        dest.revision,
+                        False,
+                        False,
+                        self.revision_map,
+                    )
+                    steps.append(step)
+                    continue
+                elif ancestors.intersection(filtered_heads):
+                    # heads are below the target, so this is an upgrade.
+                    # we can treat them as a "merge", single step.
+                    todo_heads = [head.revision for head in filtered_heads]
+                    step = migration.StampStep(
+                        todo_heads,
+                        dest.revision,
+                        True,
+                        False,
+                        self.revision_map,
+                    )
+                    steps.append(step)
+                    continue
+                else:
+                    # destination is in a branch not represented,
+                    # treat it as new branch
+                    step = migration.StampStep(
+                        (), dest.revision, True, True, self.revision_map
+                    )
+                    steps.append(step)
+                    continue
+
+            return steps
+
+    def run_env(self) -> None:
+        """Run the script environment.
+
+        This runs the ``env.py`` script present
+        in the migration environment.  It is called exclusively
+        by the command functions in :mod:`alembic.command`.
+
+        """
+        util.load_python_file(self.dir, "env.py")
+
+    @property
+    def env_py_location(self) -> str:
+        return os.path.abspath(os.path.join(self.dir, "env.py"))
+
+    def _generate_template(self, src: str, dest: str, **kw: Any) -> None:
+        with util.status(
+            f"Generating {os.path.abspath(dest)}", **self.messaging_opts
+        ):
+            util.template_to_file(src, dest, self.output_encoding, **kw)
+
+    def _copy_file(self, src: str, dest: str) -> None:
+        with util.status(
+            f"Generating {os.path.abspath(dest)}", **self.messaging_opts
+        ):
+            shutil.copy(src, dest)
+
+    def _ensure_directory(self, path: str) -> None:
+        path = os.path.abspath(path)
+        if not os.path.exists(path):
+            with util.status(
+                f"Creating directory {path}", **self.messaging_opts
+            ):
+                os.makedirs(path)
+
+    def _generate_create_date(self) -> datetime.datetime:
+        if self.timezone is not None:
+            if ZoneInfo is None:
+                raise util.CommandError(
+                    "Python >= 3.9 is required for timezone support or "
+                    "the 'backports.zoneinfo' package must be installed."
+                )
+            # First, assume correct capitalization
+            try:
+                tzinfo = ZoneInfo(self.timezone)
+            except ZoneInfoNotFoundError:
+                tzinfo = None
+            if tzinfo is None:
+                try:
+                    tzinfo = ZoneInfo(self.timezone.upper())
+                except ZoneInfoNotFoundError:
+                    raise util.CommandError(
+                        "Can't locate timezone: %s" % self.timezone
+                    ) from None
+            create_date = (
+                datetime.datetime.utcnow()
+                .replace(tzinfo=datetime.timezone.utc)
+                .astimezone(tzinfo)
+            )
+        else:
+            create_date = datetime.datetime.now()
+        return create_date
+
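+    # Config sketch (assumed alembic.ini value):
+    #
+    #     [alembic]
+    #     timezone = UTC
+    #
+    # makes the logic above produce a timezone-aware create_date via
+    # zoneinfo; with no timezone configured, a naive local
+    # datetime.now() is used.
+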
+    def generate_revision(
+        self,
+        revid: str,
+        message: Optional[str],
+        head: Optional[_RevIdType] = None,
+        splice: Optional[bool] = False,
+        branch_labels: Optional[_RevIdType] = None,
+        version_path: Optional[str] = None,
+        depends_on: Optional[_RevIdType] = None,
+        **kw: Any,
+    ) -> Optional[Script]:
+        """Generate a new revision file.
+
+        This runs the ``script.py.mako`` template, given
+        template arguments, and creates a new file.
+
+        :param revid: String revision id.  Typically this
+         comes from ``alembic.util.rev_id()``.
+        :param message: the revision message, the one passed
+         by the -m argument to the ``revision`` command.
+        :param head: the head revision to generate against.  Defaults
+         to the current "head" if no branches are present, else raises
+         an exception.
+        :param splice: if True, allow the "head" version to not be an
+         actual head; otherwise, the selected head must be a head
+         (e.g. endpoint) revision.
+
+        """
+        if head is None:
+            head = "head"
+
+        try:
+            Script.verify_rev_id(revid)
+        except revision.RevisionError as err:
+            raise util.CommandError(err.args[0]) from err
+
+        with self._catch_revision_errors(
+            multiple_heads=(
+                "Multiple heads are present; please specify the head "
+                "revision on which the new revision should be based, "
+                "or perform a merge."
+            )
+        ):
+            heads = cast(
+                Tuple[Optional["Revision"], ...],
+                self.revision_map.get_revisions(head),
+            )
+            for h in heads:
+                assert h != "base"  # type: ignore[comparison-overlap]
+
+        if len(set(heads)) != len(heads):
+            raise util.CommandError("Duplicate head revisions specified")
+
+        create_date = self._generate_create_date()
+
+        if version_path is None:
+            if len(self._version_locations) > 1:
+                for head_ in heads:
+                    if head_ is not None:
+                        assert isinstance(head_, Script)
+                        version_path = os.path.dirname(head_.path)
+                        break
+                else:
+                    raise util.CommandError(
+                        "Multiple version locations present, "
+                        "please specify --version-path"
+                    )
+            else:
+                version_path = self.versions
+
+        norm_path = os.path.normpath(os.path.abspath(version_path))
+        for vers_path in self._version_locations:
+            if os.path.normpath(vers_path) == norm_path:
+                break
+        else:
+            raise util.CommandError(
+                "Path %s is not represented in current "
+                "version locations" % version_path
+            )
+
+        if self.version_locations:
+            self._ensure_directory(version_path)
+
+        path = self._rev_path(version_path, revid, message, create_date)
+
+        if not splice:
+            for head_ in heads:
+                if head_ is not None and not head_.is_head:
+                    raise util.CommandError(
+                        "Revision %s is not a head revision; please specify "
+                        "--splice to create a new branch from this revision"
+                        % head_.revision
+                    )
+
+        resolved_depends_on: Optional[List[str]]
+        if depends_on:
+            with self._catch_revision_errors():
+                resolved_depends_on = [
+                    (
+                        dep
+                        if dep in rev.branch_labels  # maintain branch labels
+                        else rev.revision
+                    )  # resolve partial revision identifiers
+                    for rev, dep in [
+                        (not_none(self.revision_map.get_revision(dep)), dep)
+                        for dep in util.to_list(depends_on)
+                    ]
+                ]
+        else:
+            resolved_depends_on = None
+
+        self._generate_template(
+            os.path.join(self.dir, "script.py.mako"),
+            path,
+            up_revision=str(revid),
+            down_revision=revision.tuple_rev_as_scalar(
+                tuple(h.revision if h is not None else None for h in heads)
+            ),
+            branch_labels=util.to_tuple(branch_labels),
+            depends_on=revision.tuple_rev_as_scalar(resolved_depends_on),
+            create_date=create_date,
+            comma=util.format_as_comma,
+            message=message if message is not None else ("empty message"),
+            **kw,
+        )
+
+        post_write_hooks = self.hook_config
+        if post_write_hooks:
+            write_hooks._run_hooks(path, post_write_hooks)
+
+        try:
+            script = Script._from_path(self, path)
+        except revision.RevisionError as err:
+            raise util.CommandError(err.args[0]) from err
+        if script is None:
+            return None
+        if branch_labels and not script.branch_labels:
+            raise util.CommandError(
+                "Version %s specified branch_labels %s, however the "
+                "migration file %s does not have them; have you upgraded "
+                "your script.py.mako to include the "
+                "'branch_labels' section?"
+                % (script.revision, branch_labels, script.path)
+            )
+        self.revision_map.add_revision(script)
+        return script
+
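+    # Usage sketch (hypothetical message): create a new revision file
+    # against the current head, roughly what the ``revision`` command
+    # does:
+    #
+    #     from alembic.util import rev_id
+    #     new_script = script_dir.generate_revision(
+    #         rev_id(), "add users table"
+    #     )
+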
+    def _rev_path(
+        self,
+        path: str,
+        rev_id: str,
+        message: Optional[str],
+        create_date: datetime.datetime,
+    ) -> str:
+        epoch = int(create_date.timestamp())
+        slug = "_".join(_slug_re.findall(message or "")).lower()
+        if len(slug) > self.truncate_slug_length:
+            slug = slug[: self.truncate_slug_length].rsplit("_", 1)[0] + "_"
+        filename = "%s.py" % (
+            self.file_template
+            % {
+                "rev": rev_id,
+                "slug": slug,
+                "epoch": epoch,
+                "year": create_date.year,
+                "month": create_date.month,
+                "day": create_date.day,
+                "hour": create_date.hour,
+                "minute": create_date.minute,
+                "second": create_date.second,
+            }
+        )
+        return os.path.join(path, filename)
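+
+    # Template sketch: with the tokens rendered above, an alembic.ini
+    # setting such as (%% escapes configparser interpolation):
+    #
+    #     file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(rev)s_%%(slug)s
+    #
+    # would yield e.g. "2024_01_30_abc123_add_users_table.py"
+    # (hypothetical rev id and slug).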
+
+
+class Script(revision.Revision):
+    """Represent a single revision file in a ``versions/`` directory.
+
+    The :class:`.Script` instance is returned by methods
+    such as :meth:`.ScriptDirectory.iterate_revisions`.
+
+    """
+
+    def __init__(self, module: ModuleType, rev_id: str, path: str):
+        self.module = module
+        self.path = path
+        super().__init__(
+            rev_id,
+            module.down_revision,
+            branch_labels=util.to_tuple(
+                getattr(module, "branch_labels", None), default=()
+            ),
+            dependencies=util.to_tuple(
+                getattr(module, "depends_on", None), default=()
+            ),
+        )
+
+    module: ModuleType
+    """The Python module representing the actual script itself."""
+
+    path: str
+    """Filesystem path of the script."""
+
+    _db_current_indicator: Optional[bool] = None
+    """Utility variable which when set will cause string output to indicate
+    this is a "current" version in some database"""
+
+    @property
+    def doc(self) -> str:
+        """Return the docstring given in the script."""
+
+        return re.split("\n\n", self.longdoc)[0]
+
+    @property
+    def longdoc(self) -> str:
+        """Return the docstring given in the script."""
+
+        doc = self.module.__doc__
+        if doc:
+            if hasattr(self.module, "_alembic_source_encoding"):
+                doc = doc.decode(  # type: ignore[attr-defined]
+                    self.module._alembic_source_encoding
+                )
+            return doc.strip()  # type: ignore[union-attr]
+        else:
+            return ""
+
+    @property
+    def log_entry(self) -> str:
+        entry = "Rev: %s%s%s%s%s\n" % (
+            self.revision,
+            " (head)" if self.is_head else "",
+            " (branchpoint)" if self.is_branch_point else "",
+            " (mergepoint)" if self.is_merge_point else "",
+            " (current)" if self._db_current_indicator else "",
+        )
+        if self.is_merge_point:
+            entry += "Merges: %s\n" % (self._format_down_revision(),)
+        else:
+            entry += "Parent: %s\n" % (self._format_down_revision(),)
+
+        if self.dependencies:
+            entry += "Also depends on: %s\n" % (
+                util.format_as_comma(self.dependencies)
+            )
+
+        if self.is_branch_point:
+            entry += "Branches into: %s\n" % (
+                util.format_as_comma(self.nextrev)
+            )
+
+        if self.branch_labels:
+            entry += "Branch names: %s\n" % (
+                util.format_as_comma(self.branch_labels),
+            )
+
+        entry += "Path: %s\n" % (self.path,)
+
+        entry += "\n%s\n" % (
+            "\n".join("    %s" % para for para in self.longdoc.splitlines())
+        )
+        return entry
+
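+    # Illustrative rendering of log_entry (hypothetical values):
+    #
+    #     Rev: 3a4b5c6d7e8f (head)
+    #     Parent: 1a2b3c4d5e6f
+    #     Path: migrations/versions/3a4b5c6d7e8f_add_users.py
+    #
+    #         add users table
+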
+    def __str__(self) -> str:
+        return "%s -> %s%s%s%s, %s" % (
+            self._format_down_revision(),
+            self.revision,
+            " (head)" if self.is_head else "",
+            " (branchpoint)" if self.is_branch_point else "",
+            " (mergepoint)" if self.is_merge_point else "",
+            self.doc,
+        )
+
+    def _head_only(
+        self,
+        include_branches: bool = False,
+        include_doc: bool = False,
+        include_parents: bool = False,
+        tree_indicators: bool = True,
+        head_indicators: bool = True,
+    ) -> str:
+        text = self.revision
+        if include_parents:
+            if self.dependencies:
+                text = "%s (%s) -> %s" % (
+                    self._format_down_revision(),
+                    util.format_as_comma(self.dependencies),
+                    text,
+                )
+            else:
+                text = "%s -> %s" % (self._format_down_revision(), text)
+        assert text is not None
+        if include_branches and self.branch_labels:
+            text += " (%s)" % util.format_as_comma(self.branch_labels)
+        if head_indicators or tree_indicators:
+            text += "%s%s%s" % (
+                " (head)" if self._is_real_head else "",
+                (
+                    " (effective head)"
+                    if self.is_head and not self._is_real_head
+                    else ""
+                ),
+                " (current)" if self._db_current_indicator else "",
+            )
+        if tree_indicators:
+            text += "%s%s" % (
+                " (branchpoint)" if self.is_branch_point else "",
+                " (mergepoint)" if self.is_merge_point else "",
+            )
+        if include_doc:
+            text += ", %s" % self.doc
+        return text
+
+    def cmd_format(
+        self,
+        verbose: bool,
+        include_branches: bool = False,
+        include_doc: bool = False,
+        include_parents: bool = False,
+        tree_indicators: bool = True,
+    ) -> str:
+        if verbose:
+            return self.log_entry
+        else:
+            return self._head_only(
+                include_branches, include_doc, include_parents, tree_indicators
+            )
+
+    def _format_down_revision(self) -> str:
+        if not self.down_revision:
+            return "<base>"
+        else:
+            return util.format_as_comma(self._versioned_down_revisions)
+
+    @classmethod
+    def _from_path(
+        cls, scriptdir: ScriptDirectory, path: str
+    ) -> Optional[Script]:
+        dir_, filename = os.path.split(path)
+        return cls._from_filename(scriptdir, dir_, filename)
+
+    @classmethod
+    def _list_py_dir(cls, scriptdir: ScriptDirectory, path: str) -> List[str]:
+        paths = []
+        for root, dirs, files in os.walk(path, topdown=True):
+            if root.endswith("__pycache__"):
+                # a special case - we may include these files
+                # if a `sourceless` option is specified
+                continue
+
+            for filename in sorted(files):
+                paths.append(os.path.join(root, filename))
+
+            if scriptdir.sourceless:
+                # look for __pycache__
+                py_cache_path = os.path.join(root, "__pycache__")
+                if os.path.exists(py_cache_path):
+                    # add all files from __pycache__ whose filename is not
+                    # already in the names we got from the version directory.
+                    # add as relative paths including __pycache__ token
+                    names = {filename.split(".")[0] for filename in files}
+                    paths.extend(
+                        os.path.join(py_cache_path, pyc)
+                        for pyc in os.listdir(py_cache_path)
+                        if pyc.split(".")[0] not in names
+                    )
+
+            if not scriptdir.recursive_version_locations:
+                break
+
+            # the real script order is defined by revision, but it may
+            # be undefined if there are many files with the same
+            # `down_revision`; for a better user experience (e.g. when
+            # debugging), we use a deterministic order
+            dirs.sort()
+
+        return paths
+
+    @classmethod
+    def _from_filename(
+        cls, scriptdir: ScriptDirectory, dir_: str, filename: str
+    ) -> Optional[Script]:
+        if scriptdir.sourceless:
+            py_match = _sourceless_rev_file.match(filename)
+        else:
+            py_match = _only_source_rev_file.match(filename)
+
+        if not py_match:
+            return None
+
+        py_filename = py_match.group(1)
+
+        if scriptdir.sourceless:
+            is_c = py_match.group(2) == "c"
+            is_o = py_match.group(2) == "o"
+        else:
+            is_c = is_o = False
+
+        if is_o or is_c:
+            py_exists = os.path.exists(os.path.join(dir_, py_filename))
+            pyc_exists = os.path.exists(os.path.join(dir_, py_filename + "c"))
+
+            # prefer .py over .pyc because we'd like to get the
+            # source encoding; prefer .pyc over .pyo because we'd like to
+            # have the docstrings which a -OO file would not have
+            if py_exists or is_o and pyc_exists:
+                return None
+
+        module = util.load_python_file(dir_, filename)
+
+        if not hasattr(module, "revision"):
+            # attempt to get the revision id from the script name;
+            # this is for legacy scripts only
+            m = _legacy_rev.match(filename)
+            if not m:
+                raise util.CommandError(
+                    "Could not determine revision id from filename %s. "
+                    "Be sure the 'revision' variable is "
+                    "declared inside the script (please see 'Upgrading "
+                    "from Alembic 0.1 to 0.2' in the documentation)."
+                    % filename
+                )
+            else:
+                revision = m.group(1)
+        else:
+            revision = module.revision
+        return Script(module, revision, os.path.join(dir_, filename))
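+
+    # Matching sketch for _from_filename (filenames hypothetical):
+    #
+    #     "ab1234_add_users.py"   -> loaded as source
+    #     "ab1234_add_users.pyc"  -> sourceless mode only; skipped when a
+    #                                .py twin exists
+    #     "ab1234_add_users.pyo"  -> sourceless mode only; skipped when a
+    #                                .py or .pyc twin exists (-OO files
+    #                                lack docstrings)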
diff --git a/.venv/lib/python3.12/site-packages/alembic/script/revision.py b/.venv/lib/python3.12/site-packages/alembic/script/revision.py
new file mode 100644
index 00000000..c3108e98
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/script/revision.py
@@ -0,0 +1,1728 @@
+from __future__ import annotations
+
+import collections
+import re
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Collection
+from typing import Deque
+from typing import Dict
+from typing import FrozenSet
+from typing import Iterable
+from typing import Iterator
+from typing import List
+from typing import Optional
+from typing import overload
+from typing import Protocol
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy import util as sqlautil
+
+from .. import util
+from ..util import not_none
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+_RevIdType = Union[str, List[str], Tuple[str, ...]]
+_GetRevArg = Union[
+    str,
+    Iterable[Optional[str]],
+    Iterable[str],
+]
+_RevisionIdentifierType = Union[str, Tuple[str, ...], None]
+_RevisionOrStr = Union["Revision", str]
+_RevisionOrBase = Union["Revision", "Literal['base']"]
+_InterimRevisionMapType = Dict[str, "Revision"]
+_RevisionMapType = Dict[Union[None, str, Tuple[()]], Optional["Revision"]]
+_T = TypeVar("_T")
+_TR = TypeVar("_TR", bound=Optional[_RevisionOrStr])
+
+_relative_destination = re.compile(r"(?:(.+?)@)?(\w+)?((?:\+|-)\d+)")
+_revision_illegal_chars = ["@", "-", "+"]
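+
+# Examples of strings matched by _relative_destination, as
+# (branch, symbol, relative) groups; revision ids are hypothetical:
+#
+#     "+2"              -> (None, None, "+2")
+#     "head-1"          -> (None, "head", "-1")
+#     "ae1027ab+3"      -> (None, "ae1027ab", "+3")
+#     "mybranch@head-2" -> ("mybranch", "head", "-2")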
+
+
+class _CollectRevisionsProtocol(Protocol):
+    def __call__(
+        self,
+        upper: _RevisionIdentifierType,
+        lower: _RevisionIdentifierType,
+        inclusive: bool,
+        implicit_base: bool,
+        assert_relative_length: bool,
+    ) -> Tuple[Set[Revision], Tuple[Optional[_RevisionOrBase], ...]]: ...
+
+
+class RevisionError(Exception):
+    pass
+
+
+class RangeNotAncestorError(RevisionError):
+    def __init__(
+        self, lower: _RevisionIdentifierType, upper: _RevisionIdentifierType
+    ) -> None:
+        self.lower = lower
+        self.upper = upper
+        super().__init__(
+            "Revision %s is not an ancestor of revision %s"
+            % (lower or "base", upper or "base")
+        )
+
+
+class MultipleHeads(RevisionError):
+    def __init__(self, heads: Sequence[str], argument: Optional[str]) -> None:
+        self.heads = heads
+        self.argument = argument
+        super().__init__(
+            "Multiple heads are present for given argument '%s'; "
+            "%s" % (argument, ", ".join(heads))
+        )
+
+
+class ResolutionError(RevisionError):
+    def __init__(self, message: str, argument: str) -> None:
+        super().__init__(message)
+        self.argument = argument
+
+
+class CycleDetected(RevisionError):
+    kind = "Cycle"
+
+    def __init__(self, revisions: Sequence[str]) -> None:
+        self.revisions = revisions
+        super().__init__(
+            "%s is detected in revisions (%s)"
+            % (self.kind, ", ".join(revisions))
+        )
+
+
+class DependencyCycleDetected(CycleDetected):
+    kind = "Dependency cycle"
+
+    def __init__(self, revisions: Sequence[str]) -> None:
+        super().__init__(revisions)
+
+
+class LoopDetected(CycleDetected):
+    kind = "Self-loop"
+
+    def __init__(self, revision: str) -> None:
+        super().__init__([revision])
+
+
+class DependencyLoopDetected(DependencyCycleDetected, LoopDetected):
+    kind = "Dependency self-loop"
+
+    def __init__(self, revision: Sequence[str]) -> None:
+        super().__init__(revision)
+
+
+class RevisionMap:
+    """Maintains a map of :class:`.Revision` objects.
+
+    :class:`.RevisionMap` is used by :class:`.ScriptDirectory` to maintain
+    and traverse the collection of :class:`.Script` objects, which are
+    themselves instances of :class:`.Revision`.
+
+    """
+
+    def __init__(self, generator: Callable[[], Iterable[Revision]]) -> None:
+        """Construct a new :class:`.RevisionMap`.
+
+        :param generator: a zero-arg callable that will generate an iterable
+         of :class:`.Revision` instances to be used.  These are typically
+         instances of the :class:`.Script` subclass within regular
+         Alembic use.
+
+        """
+        self._generator = generator
+
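+    # A minimal construction sketch (``scripts`` is a hypothetical list
+    # of Revision/Script objects; regular Alembic use obtains this map
+    # via ScriptDirectory rather than constructing it directly):
+    #
+    #     rmap = RevisionMap(lambda: scripts)
+    #     rmap.heads   # e.g. ("ae1027ab",)
+    #     rmap.bases   # e.g. ("5c51fc4b",)
+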
+    @util.memoized_property
+    def heads(self) -> Tuple[str, ...]:
+        """All "head" revisions as strings.
+
+        This is normally a tuple of length one,
+        unless unmerged branches are present.
+
+        :return: a tuple of string revision numbers.
+
+        """
+        self._revision_map
+        return self.heads
+
+    @util.memoized_property
+    def bases(self) -> Tuple[str, ...]:
+        """All "base" revisions as strings.
+
+        These are revisions that have a ``down_revision`` of None,
+        or empty tuple.
+
+        :return: a tuple of string revision numbers.
+
+        """
+        self._revision_map
+        return self.bases
+
+    @util.memoized_property
+    def _real_heads(self) -> Tuple[str, ...]:
+        """All "real" head revisions as strings.
+
+        :return: a tuple of string revision numbers.
+
+        """
+        self._revision_map
+        return self._real_heads
+
+    @util.memoized_property
+    def _real_bases(self) -> Tuple[str, ...]:
+        """All "real" base revisions as strings.
+
+        :return: a tuple of string revision numbers.
+
+        """
+        self._revision_map
+        return self._real_bases
+
+    @util.memoized_property
+    def _revision_map(self) -> _RevisionMapType:
+        """memoized attribute, initializes the revision map from the
+        initial collection.
+
+        """
+        # Ordering required for some tests to pass (but not required in
+        # general)
+        map_: _InterimRevisionMapType = sqlautil.OrderedDict()
+
+        heads: Set[Revision] = sqlautil.OrderedSet()
+        _real_heads: Set[Revision] = sqlautil.OrderedSet()
+        bases: Tuple[Revision, ...] = ()
+        _real_bases: Tuple[Revision, ...] = ()
+
+        has_branch_labels = set()
+        all_revisions = set()
+
+        for revision in self._generator():
+            all_revisions.add(revision)
+
+            if revision.revision in map_:
+                util.warn(
+                    "Revision %s is present more than once" % revision.revision
+                )
+            map_[revision.revision] = revision
+            if revision.branch_labels:
+                has_branch_labels.add(revision)
+
+            heads.add(revision)
+            _real_heads.add(revision)
+            if revision.is_base:
+                bases += (revision,)
+            if revision._is_real_base:
+                _real_bases += (revision,)
+
+        # add the branch_labels to the map_.  We'll need these
+        # to resolve the dependencies.
+        rev_map = map_.copy()
+        self._map_branch_labels(
+            has_branch_labels, cast(_RevisionMapType, map_)
+        )
+
+        # resolve dependency names from branch labels and symbolic
+        # names
+        self._add_depends_on(all_revisions, cast(_RevisionMapType, map_))
+
+        for rev in map_.values():
+            for downrev in rev._all_down_revisions:
+                if downrev not in map_:
+                    util.warn(
+                        "Revision %s referenced from %s is not present"
+                        % (downrev, rev)
+                    )
+                down_revision = map_[downrev]
+                down_revision.add_nextrev(rev)
+                if downrev in rev._versioned_down_revisions:
+                    heads.discard(down_revision)
+                _real_heads.discard(down_revision)
+
+        # once the map has downrevisions populated, the dependencies
+        # can be further refined to include only those which are not
+        # already ancestors
+        self._normalize_depends_on(all_revisions, cast(_RevisionMapType, map_))
+        self._detect_cycles(rev_map, heads, bases, _real_heads, _real_bases)
+
+        revision_map: _RevisionMapType = dict(map_.items())
+        revision_map[None] = revision_map[()] = None
+        self.heads = tuple(rev.revision for rev in heads)
+        self._real_heads = tuple(rev.revision for rev in _real_heads)
+        self.bases = tuple(rev.revision for rev in bases)
+        self._real_bases = tuple(rev.revision for rev in _real_bases)
+
+        self._add_branches(has_branch_labels, revision_map)
+        return revision_map
+
+    def _detect_cycles(
+        self,
+        rev_map: _InterimRevisionMapType,
+        heads: Set[Revision],
+        bases: Tuple[Revision, ...],
+        _real_heads: Set[Revision],
+        _real_bases: Tuple[Revision, ...],
+    ) -> None:
+        if not rev_map:
+            return
+        if not heads or not bases:
+            raise CycleDetected(list(rev_map))
+        total_space = {
+            rev.revision
+            for rev in self._iterate_related_revisions(
+                lambda r: r._versioned_down_revisions,
+                heads,
+                map_=cast(_RevisionMapType, rev_map),
+            )
+        }.intersection(
+            rev.revision
+            for rev in self._iterate_related_revisions(
+                lambda r: r.nextrev,
+                bases,
+                map_=cast(_RevisionMapType, rev_map),
+            )
+        )
+        deleted_revs = set(rev_map.keys()) - total_space
+        if deleted_revs:
+            raise CycleDetected(sorted(deleted_revs))
+
+        if not _real_heads or not _real_bases:
+            raise DependencyCycleDetected(list(rev_map))
+        total_space = {
+            rev.revision
+            for rev in self._iterate_related_revisions(
+                lambda r: r._all_down_revisions,
+                _real_heads,
+                map_=cast(_RevisionMapType, rev_map),
+            )
+        }.intersection(
+            rev.revision
+            for rev in self._iterate_related_revisions(
+                lambda r: r._all_nextrev,
+                _real_bases,
+                map_=cast(_RevisionMapType, rev_map),
+            )
+        )
+        deleted_revs = set(rev_map.keys()) - total_space
+        if deleted_revs:
+            raise DependencyCycleDetected(sorted(deleted_revs))
+
+    def _map_branch_labels(
+        self, revisions: Collection[Revision], map_: _RevisionMapType
+    ) -> None:
+        for revision in revisions:
+            if revision.branch_labels:
+                assert revision._orig_branch_labels is not None
+                for branch_label in revision._orig_branch_labels:
+                    if branch_label in map_:
+                        map_rev = map_[branch_label]
+                        assert map_rev is not None
+                        raise RevisionError(
+                            "Branch name '%s' in revision %s already "
+                            "used by revision %s"
+                            % (
+                                branch_label,
+                                revision.revision,
+                                map_rev.revision,
+                            )
+                        )
+                    map_[branch_label] = revision
+
+    def _add_branches(
+        self, revisions: Collection[Revision], map_: _RevisionMapType
+    ) -> None:
+        for revision in revisions:
+            if revision.branch_labels:
+                revision.branch_labels.update(revision.branch_labels)
+                for node in self._get_descendant_nodes(
+                    [revision], map_, include_dependencies=False
+                ):
+                    node.branch_labels.update(revision.branch_labels)
+
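+                # note: ``node`` is the loop variable leaking from the
+                # for loop above, i.e. the last descendant updated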
+                parent = node
+                while (
+                    parent
+                    and not parent._is_real_branch_point
+                    and not parent.is_merge_point
+                ):
+                    parent.branch_labels.update(revision.branch_labels)
+                    if parent.down_revision:
+                        parent = map_[parent.down_revision]
+                    else:
+                        break
+
+    def _add_depends_on(
+        self, revisions: Collection[Revision], map_: _RevisionMapType
+    ) -> None:
+        """Resolve the 'dependencies' for each revision in a collection
+        in terms of actual revision ids, as opposed to branch labels or other
+        symbolic names.
+
+        The collection is then assigned to the _resolved_dependencies
+        attribute on each revision object.
+
+        """
+
+        for revision in revisions:
+            if revision.dependencies:
+                deps = [
+                    map_[dep] for dep in util.to_tuple(revision.dependencies)
+                ]
+                revision._resolved_dependencies = tuple(
+                    [d.revision for d in deps if d is not None]
+                )
+            else:
+                revision._resolved_dependencies = ()
+
+    def _normalize_depends_on(
+        self, revisions: Collection[Revision], map_: _RevisionMapType
+    ) -> None:
+        """Create a collection of "dependencies" that omits dependencies
+        that are already ancestor nodes for each revision in a given
+        collection.
+
+        This builds upon the _resolved_dependencies collection created in the
+        _add_depends_on() method, looking in the fully populated revision map
+        for ancestors, and omitting them from the _resolved_dependencies
+        collection as it is copied to a new collection. The new collection is
+        then assigned to the _normalized_resolved_dependencies attribute on
+        each revision object.
+
+        The collection is then used to determine the immediate "down revision"
+        identifiers for this revision.
+
+        """
+
+        for revision in revisions:
+            if revision._resolved_dependencies:
+                normalized_resolved = set(revision._resolved_dependencies)
+                for rev in self._get_ancestor_nodes(
+                    [revision],
+                    include_dependencies=False,
+                    map_=map_,
+                ):
+                    if rev is revision:
+                        continue
+                    elif rev._resolved_dependencies:
+                        normalized_resolved.difference_update(
+                            rev._resolved_dependencies
+                        )
+
+                revision._normalized_resolved_dependencies = tuple(
+                    normalized_resolved
+                )
+            else:
+                revision._normalized_resolved_dependencies = ()
+
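+    # Worked sketch of the normalization above (revision ids are
+    # hypothetical): if C has down_revision B, and both B and C declare a
+    # dependency on D, then D is dropped from C's normalized dependencies,
+    # as it is already implied through C's ancestor B.
+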
+    def add_revision(self, revision: Revision, _replace: bool = False) -> None:
+        """add a single revision to an existing map.
+
+        This method is for single-revision use cases; it's not
+        appropriate for fully populating an entire revision map.
+
+        """
+        map_ = self._revision_map
+        if not _replace and revision.revision in map_:
+            util.warn(
+                "Revision %s is present more than once" % revision.revision
+            )
+        elif _replace and revision.revision not in map_:
+            raise Exception("revision %s not in map" % revision.revision)
+
+        map_[revision.revision] = revision
+
+        revisions = [revision]
+        self._add_branches(revisions, map_)
+        self._map_branch_labels(revisions, map_)
+        self._add_depends_on(revisions, map_)
+
+        if revision.is_base:
+            self.bases += (revision.revision,)
+        if revision._is_real_base:
+            self._real_bases += (revision.revision,)
+
+        for downrev in revision._all_down_revisions:
+            if downrev not in map_:
+                util.warn(
+                    "Revision %s referenced from %s is not present"
+                    % (downrev, revision)
+                )
+            not_none(map_[downrev]).add_nextrev(revision)
+
+        self._normalize_depends_on(revisions, map_)
+
+        if revision._is_real_head:
+            self._real_heads = tuple(
+                head
+                for head in self._real_heads
+                if head
+                not in set(revision._all_down_revisions).union(
+                    [revision.revision]
+                )
+            ) + (revision.revision,)
+        if revision.is_head:
+            self.heads = tuple(
+                head
+                for head in self.heads
+                if head
+                not in set(revision._versioned_down_revisions).union(
+                    [revision.revision]
+                )
+            ) + (revision.revision,)
+
+    def get_current_head(
+        self, branch_label: Optional[str] = None
+    ) -> Optional[str]:
+        """Return the current head revision.
+
+        If the script directory has multiple heads
+        due to branching, an error is raised;
+        :meth:`.ScriptDirectory.get_heads` should be
+        preferred.
+
+        :param branch_label: optional branch name which will limit the
+         heads considered to those which include that branch_label.
+
+        :return: a string revision number.
+
+        .. seealso::
+
+            :meth:`.ScriptDirectory.get_heads`
+
+        """
+        current_heads: Sequence[str] = self.heads
+        if branch_label:
+            current_heads = self.filter_for_lineage(
+                current_heads, branch_label
+            )
+        if len(current_heads) > 1:
+            raise MultipleHeads(
+                current_heads,
+                "%s@head" % branch_label if branch_label else "head",
+            )
+
+        if current_heads:
+            return current_heads[0]
+        else:
+            return None
+
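+    # Usage sketch (revision ids hypothetical):
+    #
+    #     rmap.get_current_head()            # -> "ae1027ab", or None
+    #     rmap.get_current_head("mybranch")  # head limited to that branch
+    #
+    # MultipleHeads is raised when unmerged branches leave more than one
+    # candidate head.
+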
+    def _get_base_revisions(self, identifier: str) -> Tuple[str, ...]:
+        return self.filter_for_lineage(self.bases, identifier)
+
+    def get_revisions(
+        self, id_: Optional[_GetRevArg]
+    ) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """Return the :class:`.Revision` instances with the given rev id
+        or identifiers.
+
+        May be given a single identifier, a sequence of identifiers, or the
+        special symbols "head" or "base".  The result is a tuple of one
+        or more :class:`.Revision` objects, or an empty tuple in the case
+        of "base".
+
+        In the case where 'head' or 'heads' is requested and the
+        revision map is empty, an empty tuple is returned.
+
+        Supports partial identifiers, where the given identifier
+        is matched against all identifiers that start with the given
+        characters; if there is exactly one match, that determines the
+        full revision.
+
+        """
+
+        if isinstance(id_, (list, tuple, set, frozenset)):
+            return sum([self.get_revisions(id_elem) for id_elem in id_], ())
+        else:
+            resolved_id, branch_label = self._resolve_revision_number(id_)
+            if len(resolved_id) == 1:
+                try:
+                    rint = int(resolved_id[0])
+                    if rint < 0:
+                        # branch@-n -> walk down from heads
+                        select_heads = self.get_revisions("heads")
+                        if branch_label is not None:
+                            select_heads = tuple(
+                                head
+                                for head in select_heads
+                                if branch_label
+                                in is_revision(head).branch_labels
+                            )
+                        return tuple(
+                            self._walk(head, steps=rint)
+                            for head in select_heads
+                        )
+                except ValueError:
+                    # couldn't resolve as integer
+                    pass
+            return tuple(
+                self._revision_for_ident(rev_id, branch_label)
+                for rev_id in resolved_id
+            )
+
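+    # Resolution sketch (revision ids hypothetical):
+    #
+    #     rmap.get_revisions("ae1027ab")     # exact identifier
+    #     rmap.get_revisions("ae10")         # unique prefix (>= 4 chars)
+    #     rmap.get_revisions("heads")        # all current heads
+    #     rmap.get_revisions("mybranch@-1")  # one step down from that
+    #                                        # branch's head
+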
+    def get_revision(self, id_: Optional[str]) -> Optional[Revision]:
+        """Return the :class:`.Revision` instance with the given rev id.
+
+        If a symbolic name such as "head" or "base" is given, resolves
+        the identifier into the current head or base revision.  If the
+        symbolic name refers to multiple revisions, :class:`.MultipleHeads`
+        is raised.
+
+        Supports partial identifiers, where the given identifier
+        is matched against all identifiers that start with the given
+        characters; if there is exactly one match, that determines the
+        full revision.
+
+        """
+
+        resolved_id, branch_label = self._resolve_revision_number(id_)
+        if len(resolved_id) > 1:
+            raise MultipleHeads(resolved_id, id_)
+
+        resolved: Union[str, Tuple[()]] = resolved_id[0] if resolved_id else ()
+        return self._revision_for_ident(resolved, branch_label)
+
+    def _resolve_branch(self, branch_label: str) -> Optional[Revision]:
+        try:
+            branch_rev = self._revision_map[branch_label]
+        except KeyError:
+            try:
+                nonbranch_rev = self._revision_for_ident(branch_label)
+            except ResolutionError as re:
+                raise ResolutionError(
+                    "No such branch: '%s'" % branch_label, branch_label
+                ) from re
+
+            else:
+                return nonbranch_rev
+        else:
+            return branch_rev
+
+    def _revision_for_ident(
+        self,
+        resolved_id: Union[str, Tuple[()], None],
+        check_branch: Optional[str] = None,
+    ) -> Optional[Revision]:
+        branch_rev: Optional[Revision]
+        if check_branch:
+            branch_rev = self._resolve_branch(check_branch)
+        else:
+            branch_rev = None
+
+        revision: Union[Optional[Revision], Literal[False]]
+        try:
+            revision = self._revision_map[resolved_id]
+        except KeyError:
+            # break out to avoid misleading py3k stack traces
+            revision = False
+        revs: Sequence[str]
+        if revision is False:
+            assert resolved_id
+            # do a partial lookup
+            revs = [
+                x
+                for x in self._revision_map
+                if x and len(x) > 3 and x.startswith(resolved_id)
+            ]
+
+            if branch_rev:
+                revs = self.filter_for_lineage(revs, check_branch)
+            if not revs:
+                raise ResolutionError(
+                    "No such revision or branch '%s'%s"
+                    % (
+                        resolved_id,
+                        (
+                            "; please ensure at least four characters are "
+                            "present for partial revision identifier matches"
+                            if len(resolved_id) < 4
+                            else ""
+                        ),
+                    ),
+                    resolved_id,
+                )
+            elif len(revs) > 1:
+                raise ResolutionError(
+                    "Multiple revisions start "
+                    "with '%s': %s..."
+                    % (resolved_id, ", ".join("'%s'" % r for r in revs[0:3])),
+                    resolved_id,
+                )
+            else:
+                revision = self._revision_map[revs[0]]
+
+        if check_branch and revision is not None:
+            assert branch_rev is not None
+            assert resolved_id
+            if not self._shares_lineage(
+                revision.revision, branch_rev.revision
+            ):
+                raise ResolutionError(
+                    "Revision %s is not a member of branch '%s'"
+                    % (revision.revision, check_branch),
+                    resolved_id,
+                )
+        return revision
+
+    def _filter_into_branch_heads(
+        self, targets: Iterable[Optional[_RevisionOrBase]]
+    ) -> Set[Optional[_RevisionOrBase]]:
+        targets = set(targets)
+
+        for rev in list(targets):
+            assert rev
+            if targets.intersection(
+                self._get_descendant_nodes([rev], include_dependencies=False)
+            ).difference([rev]):
+                targets.discard(rev)
+        return targets
+
+    def filter_for_lineage(
+        self,
+        targets: Iterable[_TR],
+        check_against: Optional[str],
+        include_dependencies: bool = False,
+    ) -> Tuple[_TR, ...]:
+        id_, branch_label = self._resolve_revision_number(check_against)
+
+        shares = []
+        if branch_label:
+            shares.append(branch_label)
+        if id_:
+            shares.extend(id_)
+
+        return tuple(
+            tg
+            for tg in targets
+            if self._shares_lineage(
+                tg, shares, include_dependencies=include_dependencies
+            )
+        )
+
+    def _shares_lineage(
+        self,
+        target: Optional[_RevisionOrStr],
+        test_against_revs: Sequence[_RevisionOrStr],
+        include_dependencies: bool = False,
+    ) -> bool:
+        if not test_against_revs:
+            return True
+        if not isinstance(target, Revision):
+            resolved_target = not_none(self._revision_for_ident(target))
+        else:
+            resolved_target = target
+
+        resolved_test_against_revs = [
+            (
+                self._revision_for_ident(test_against_rev)
+                if not isinstance(test_against_rev, Revision)
+                else test_against_rev
+            )
+            for test_against_rev in util.to_tuple(
+                test_against_revs, default=()
+            )
+        ]
+
+        return bool(
+            set(
+                self._get_descendant_nodes(
+                    [resolved_target],
+                    include_dependencies=include_dependencies,
+                )
+            )
+            .union(
+                self._get_ancestor_nodes(
+                    [resolved_target],
+                    include_dependencies=include_dependencies,
+                )
+            )
+            .intersection(resolved_test_against_revs)
+        )
+
+    def _resolve_revision_number(
+        self, id_: Optional[_GetRevArg]
+    ) -> Tuple[Tuple[str, ...], Optional[str]]:
+        branch_label: Optional[str]
+        if isinstance(id_, str) and "@" in id_:
+            branch_label, id_ = id_.split("@", 1)
+
+        elif id_ is not None and (
+            (isinstance(id_, tuple) and id_ and not isinstance(id_[0], str))
+            or not isinstance(id_, (str, tuple))
+        ):
+            raise RevisionError(
+                "revision identifier %r is not a string; ensure database "
+                "driver settings are correct" % (id_,)
+            )
+
+        else:
+            branch_label = None
+
+        # ensure map is loaded
+        self._revision_map
+        if id_ == "heads":
+            if branch_label:
+                return (
+                    self.filter_for_lineage(self.heads, branch_label),
+                    branch_label,
+                )
+            else:
+                return self._real_heads, branch_label
+        elif id_ == "head":
+            current_head = self.get_current_head(branch_label)
+            if current_head:
+                return (current_head,), branch_label
+            else:
+                return (), branch_label
+        elif id_ == "base" or id_ is None:
+            return (), branch_label
+        else:
+            return util.to_tuple(id_, default=None), branch_label
+
+    def iterate_revisions(
+        self,
+        upper: _RevisionIdentifierType,
+        lower: _RevisionIdentifierType,
+        implicit_base: bool = False,
+        inclusive: bool = False,
+        assert_relative_length: bool = True,
+        select_for_downgrade: bool = False,
+    ) -> Iterator[Revision]:
+        """Iterate through script revisions, starting at the given
+        upper revision identifier and ending at the lower.
+
+        The traversal uses strictly the `down_revision`
+        marker inside each migration script, so
+        it is a requirement that upper >= lower;
+        otherwise nothing is returned.
+
+        The iterator yields :class:`.Revision` objects.
+
+        """
+        fn: _CollectRevisionsProtocol
+        if select_for_downgrade:
+            fn = self._collect_downgrade_revisions
+        else:
+            fn = self._collect_upgrade_revisions
+
+        revisions, heads = fn(
+            upper,
+            lower,
+            inclusive=inclusive,
+            implicit_base=implicit_base,
+            assert_relative_length=assert_relative_length,
+        )
+
+        for node in self._topological_sort(revisions, heads):
+            yield not_none(self.get_revision(node))
+
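+    # Iteration sketch (identifiers hypothetical): walk every revision
+    # from the current heads down to base, upgrade-style:
+    #
+    #     for sc in rmap.iterate_revisions("heads", "base"):
+    #         print(sc.revision)
+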
+    def _get_descendant_nodes(
+        self,
+        targets: Collection[Optional[_RevisionOrBase]],
+        map_: Optional[_RevisionMapType] = None,
+        check: bool = False,
+        omit_immediate_dependencies: bool = False,
+        include_dependencies: bool = True,
+    ) -> Iterator[Any]:
+        if omit_immediate_dependencies:
+
+            def fn(rev: Revision) -> Iterable[str]:
+                if rev not in targets:
+                    return rev._all_nextrev
+                else:
+                    return rev.nextrev
+
+        elif include_dependencies:
+
+            def fn(rev: Revision) -> Iterable[str]:
+                return rev._all_nextrev
+
+        else:
+
+            def fn(rev: Revision) -> Iterable[str]:
+                return rev.nextrev
+
+        return self._iterate_related_revisions(
+            fn, targets, map_=map_, check=check
+        )
+
+    def _get_ancestor_nodes(
+        self,
+        targets: Collection[Optional[_RevisionOrBase]],
+        map_: Optional[_RevisionMapType] = None,
+        check: bool = False,
+        include_dependencies: bool = True,
+    ) -> Iterator[Revision]:
+        if include_dependencies:
+
+            def fn(rev: Revision) -> Iterable[str]:
+                return rev._normalized_down_revisions
+
+        else:
+
+            def fn(rev: Revision) -> Iterable[str]:
+                return rev._versioned_down_revisions
+
+        return self._iterate_related_revisions(
+            fn, targets, map_=map_, check=check
+        )
+
+    def _iterate_related_revisions(
+        self,
+        fn: Callable[[Revision], Iterable[str]],
+        targets: Collection[Optional[_RevisionOrBase]],
+        map_: Optional[_RevisionMapType],
+        check: bool = False,
+    ) -> Iterator[Revision]:
+        if map_ is None:
+            map_ = self._revision_map
+
+        seen = set()
+        todo: Deque[Revision] = collections.deque()
+        for target_for in targets:
+            target = is_revision(target_for)
+            todo.append(target)
+            if check:
+                per_target = set()
+
+            while todo:
+                rev = todo.pop()
+                if check:
+                    per_target.add(rev)
+
+                if rev in seen:
+                    continue
+                seen.add(rev)
+                # Check for map errors before collecting.
+                for rev_id in fn(rev):
+                    next_rev = map_[rev_id]
+                    assert next_rev is not None
+                    if next_rev.revision != rev_id:
+                        raise RevisionError(
+                            "Dependency resolution failed; broken map"
+                        )
+                    todo.append(next_rev)
+                yield rev
+            if check:
+                overlaps = per_target.intersection(targets).difference(
+                    [target]
+                )
+                if overlaps:
+                    raise RevisionError(
+                        "Requested revision %s overlaps with "
+                        "other requested revisions %s"
+                        % (
+                            target.revision,
+                            ", ".join(r.revision for r in overlaps),
+                        )
+                    )
+
+    def _topological_sort(
+        self,
+        revisions: Collection[Revision],
+        heads: Any,
+    ) -> List[str]:
+        """Yield revision ids of a collection of Revision objects in
+        topological sorted order (i.e. revisions always come after their
+        down_revisions and dependencies). Uses the order of keys in
+        _revision_map to sort.
+
+        """
+
+        id_to_rev = self._revision_map
+
+        def get_ancestors(rev_id: str) -> Set[str]:
+            return {
+                r.revision
+                for r in self._get_ancestor_nodes([id_to_rev[rev_id]])
+            }
+
+        todo = {d.revision for d in revisions}
+
+        # Use revision map (ordered dict) key order to pre-sort.
+        inserted_order = list(self._revision_map)
+
+        current_heads = list(
+            sorted(
+                {d.revision for d in heads if d.revision in todo},
+                key=inserted_order.index,
+            )
+        )
+        ancestors_by_idx = [get_ancestors(rev_id) for rev_id in current_heads]
+
+        output = []
+
+        current_candidate_idx = 0
+        while current_heads:
+            candidate = current_heads[current_candidate_idx]
+
+            for check_head_index, ancestors in enumerate(ancestors_by_idx):
+                # scan all the heads.  see if we can continue walking
+                # down the current branch indicated by current_candidate_idx.
+                if (
+                    check_head_index != current_candidate_idx
+                    and candidate in ancestors
+                ):
+                    # nope, another head is dependent on us; it has
+                    # to be traversed first
+                    current_candidate_idx = check_head_index
+                    break
+            else:
+                # yup, we can emit
+                if candidate in todo:
+                    output.append(candidate)
+                    todo.remove(candidate)
+
+                # now update the heads with our ancestors.
+
+                candidate_rev = id_to_rev[candidate]
+                assert candidate_rev is not None
+
+                heads_to_add = [
+                    r
+                    for r in candidate_rev._normalized_down_revisions
+                    if r in todo and r not in current_heads
+                ]
+
+                if not heads_to_add:
+                    # no ancestors, so remove this head from the list
+                    del current_heads[current_candidate_idx]
+                    del ancestors_by_idx[current_candidate_idx]
+                    current_candidate_idx = max(current_candidate_idx - 1, 0)
+                else:
+                    if (
+                        not candidate_rev._normalized_resolved_dependencies
+                        and len(candidate_rev._versioned_down_revisions) == 1
+                    ):
+                        current_heads[current_candidate_idx] = heads_to_add[0]
+
+                        # for plain movement down a revision line without
+                        # any mergepoints, branchpoints, or deps, we
+                        # can update the ancestors collection directly
+                        # by popping out the candidate we just emitted
+                        ancestors_by_idx[current_candidate_idx].discard(
+                            candidate
+                        )
+
+                    else:
+                        # otherwise recalculate it again, things get
+                        # complicated otherwise.  This can possibly be
+                        # improved to not run the whole ancestor thing
+                        # each time but it was getting complicated
+                        current_heads[current_candidate_idx] = heads_to_add[0]
+                        current_heads.extend(heads_to_add[1:])
+                        ancestors_by_idx[current_candidate_idx] = (
+                            get_ancestors(heads_to_add[0])
+                        )
+                        ancestors_by_idx.extend(
+                            get_ancestors(head) for head in heads_to_add[1:]
+                        )
+
+        assert not todo
+        return output
+
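+    # Ordering sketch (revision ids hypothetical): for a linear history
+    # A <- B <- C, _topological_sort({A, B, C}, heads={C}) returns
+    # ["C", "B", "A"]: each revision precedes its down_revisions, i.e.
+    # newest-first along a simple chain.
+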
+    def _walk(
+        self,
+        start: Optional[Union[str, Revision]],
+        steps: int,
+        branch_label: Optional[str] = None,
+        no_overwalk: bool = True,
+    ) -> Optional[_RevisionOrBase]:
+        """
+        Walk the requested number of :steps up (steps > 0) or down (steps < 0)
+        the revision tree.
+
+        :branch_label is used to select branches only when walking up.
+
+        If the walk goes past the boundaries of the tree and :no_overwalk is
+        True, None is returned, otherwise the walk terminates early.
+
+        A RevisionError is raised if there is no unambiguous revision to
+        walk to.
+        """
+        initial: Optional[_RevisionOrBase]
+        if isinstance(start, str):
+            initial = self.get_revision(start)
+        else:
+            initial = start
+
+        children: Sequence[Optional[_RevisionOrBase]]
+        for _ in range(abs(steps)):
+            if steps > 0:
+                assert initial != "base"  # type: ignore[comparison-overlap]
+                # Walk up
+                walk_up = [
+                    is_revision(rev)
+                    for rev in self.get_revisions(
+                        self.bases if initial is None else initial.nextrev
+                    )
+                ]
+                if branch_label:
+                    children = self.filter_for_lineage(walk_up, branch_label)
+                else:
+                    children = walk_up
+            else:
+                # Walk down
+                if initial == "base":  # type: ignore[comparison-overlap]
+                    children = ()
+                else:
+                    children = self.get_revisions(
+                        self.heads
+                        if initial is None
+                        else initial.down_revision
+                    )
+                    if not children:
+                        children = ("base",)
+            if not children:
+                # we've walked past the boundary of the tree: return None
+                # if no_overwalk is set, otherwise stay where we are
+                ret = None if no_overwalk else initial
+                return ret
+            elif len(children) > 1:
+                raise RevisionError("Ambiguous walk")
+            initial = children[0]
+
+        return initial
+
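+    # Walk sketch (revision ids hypothetical) for history A <- B <- C:
+    #
+    #     rmap._walk("A", steps=2)    # -> C
+    #     rmap._walk("C", steps=-1)   # -> B
+    #     rmap._walk("C", steps=5)    # -> None (overwalked, with
+    #                                 #    no_overwalk=True)
+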
+    def _parse_downgrade_target(
+        self,
+        current_revisions: _RevisionIdentifierType,
+        target: _RevisionIdentifierType,
+        assert_relative_length: bool,
+    ) -> Tuple[Optional[str], Optional[_RevisionOrBase]]:
+        """
+        Parse downgrade command syntax :target to retrieve the target revision
+        and branch label (if any) given the :current_revisions stamp of the
+        database.
+
+        Returns a tuple (branch_label, target_revision) where branch_label
+        is a string from the command specifying the branch to consider (or
+        None if no branch given), and target_revision is a Revision object
+        which the command refers to. target_revision is None if the command
+        refers to 'base'. The target may be specified in absolute form, or
+        relative to :current_revisions.
+        """
+        if target is None:
+            return None, None
+        assert isinstance(
+            target, str
+        ), "Expected downgrade target in string form"
+        match = _relative_destination.match(target)
+        if match:
+            branch_label, symbol, relative = match.groups()
+            rel_int = int(relative)
+            if rel_int >= 0:
+                if symbol is None:
+                    # Downgrading to current + n is not valid.
+                    raise RevisionError(
+                        "Relative revision %s didn't "
+                        "produce %d migrations" % (relative, abs(rel_int))
+                    )
+                # Find target revision relative to given symbol.
+                rev = self._walk(
+                    symbol,
+                    rel_int,
+                    branch_label,
+                    no_overwalk=assert_relative_length,
+                )
+                if rev is None:
+                    raise RevisionError("Walked too far")
+                return branch_label, rev
+            else:
+                relative_revision = symbol is None
+                if relative_revision:
+                    # Find target revision relative to current state.
+                    if branch_label:
+                        cr_tuple = util.to_tuple(current_revisions)
+                        symbol_list: Sequence[str]
+                        symbol_list = self.filter_for_lineage(
+                            cr_tuple, branch_label
+                        )
+                        if not symbol_list:
+                            # check the case where there are multiple branches
+                            # but there is currently a single head, since all
+                            # other branch heads are dependent on the current
+                            # single head.
+                            all_current = cast(
+                                Set[Revision], self._get_all_current(cr_tuple)
+                            )
+                            sl_all_current = self.filter_for_lineage(
+                                all_current, branch_label
+                            )
+                            symbol_list = [
+                                r.revision if r else r  # type: ignore[misc]
+                                for r in sl_all_current
+                            ]
+
+                        assert len(symbol_list) == 1
+                        symbol = symbol_list[0]
+                    else:
+                        current_revisions = util.to_tuple(current_revisions)
+                        if not current_revisions:
+                            raise RevisionError(
+                                "Relative revision %s didn't "
+                                "produce %d migrations"
+                                % (relative, abs(rel_int))
+                            )
+                        # Have to check uniques here for duplicate rows test.
+                        if len(set(current_revisions)) > 1:
+                            util.warn(
+                                "downgrade -1 from multiple heads is "
+                                "ambiguous; "
+                                "this usage will be disallowed in a future "
+                                "release."
+                            )
+                        symbol = current_revisions[0]
+                        # Restrict iteration to just the selected branch when
+                        # ambiguous branches are involved.
+                        branch_label = symbol
+                # Walk down the tree to find downgrade target.
+                rev = self._walk(
+                    start=(
+                        self.get_revision(symbol)
+                        if branch_label is None
+                        else self.get_revision(
+                            "%s@%s" % (branch_label, symbol)
+                        )
+                    ),
+                    steps=rel_int,
+                    no_overwalk=assert_relative_length,
+                )
+                if rev is None:
+                    if relative_revision:
+                        raise RevisionError(
+                            "Relative revision %s didn't "
+                            "produce %d migrations" % (relative, abs(rel_int))
+                        )
+                    else:
+                        raise RevisionError("Walked too far")
+                return branch_label, rev
+
+        # No relative destination given, revision specified is absolute.
+        branch_label, _, symbol = target.rpartition("@")
+        if not branch_label:
+            branch_label = None
+        return branch_label, self.get_revision(symbol)
+
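+    # Parsing sketch for downgrade targets (identifiers hypothetical):
+    #
+    #     "-1"           -> one step down from the current revision
+    #     "mybranch@-2"  -> two steps down along that branch
+    #     "ae1027ab"     -> absolute: (None, <Revision ae1027ab>)
+    #     "base"         -> absolute: (None, None)
+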
+    def _parse_upgrade_target(
+        self,
+        current_revisions: _RevisionIdentifierType,
+        target: _RevisionIdentifierType,
+        assert_relative_length: bool,
+    ) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """
+        Parse upgrade command syntax :target to retrieve the target revision,
+        given the :current_revisions stamp of the database.
+
+        Returns a tuple of Revision objects which should be iterated/upgraded
+        to. The target may be specified in absolute form, or relative to
+        :current_revisions.
+        """
+        if isinstance(target, str):
+            match = _relative_destination.match(target)
+        else:
+            match = None
+
+        if not match:
+            # No relative destination, target is absolute.
+            return self.get_revisions(target)
+
+        current_revisions_tup: Union[str, Tuple[Optional[str], ...], None]
+        current_revisions_tup = util.to_tuple(current_revisions)
+
+        branch_label, symbol, relative_str = match.groups()
+        relative = int(relative_str)
+        if relative > 0:
+            if symbol is None:
+                if not current_revisions_tup:
+                    current_revisions_tup = (None,)
+                # Try to filter to a single target (avoid ambiguous branches).
+                start_revs = current_revisions_tup
+                if branch_label:
+                    start_revs = self.filter_for_lineage(
+                        self.get_revisions(current_revisions_tup),  # type: ignore[arg-type] # noqa: E501
+                        branch_label,
+                    )
+                    if not start_revs:
+                        # The requested branch is not a head, so we need to
+                        # backtrack to find a branchpoint.
+                        active_on_branch = self.filter_for_lineage(
+                            self._get_ancestor_nodes(
+                                self.get_revisions(current_revisions_tup)
+                            ),
+                            branch_label,
+                        )
+                        # Find the tips of this set of revisions (revisions
+                        # without children within the set).
+                        start_revs = tuple(
+                            {rev.revision for rev in active_on_branch}
+                            - {
+                                down
+                                for rev in active_on_branch
+                                for down in rev._normalized_down_revisions
+                            }
+                        )
+                        if not start_revs:
+                            # We need to go right back to base to find
+                            # a starting point for this branch.
+                            start_revs = (None,)
+                if len(start_revs) > 1:
+                    raise RevisionError(
+                        "Ambiguous upgrade from multiple current revisions"
+                    )
+                # Walk up from unique target revision.
+                rev = self._walk(
+                    start=start_revs[0],
+                    steps=relative,
+                    branch_label=branch_label,
+                    no_overwalk=assert_relative_length,
+                )
+                if rev is None:
+                    raise RevisionError(
+                        "Relative revision %s didn't "
+                        "produce %d migrations" % (relative_str, abs(relative))
+                    )
+                return (rev,)
+            else:
+                # Walk is relative to a given revision, not the current state.
+                return (
+                    self._walk(
+                        start=self.get_revision(symbol),
+                        steps=relative,
+                        branch_label=branch_label,
+                        no_overwalk=assert_relative_length,
+                    ),
+                )
+        else:
+            if symbol is None:
+                # Upgrading to current - n is not valid.
+                raise RevisionError(
+                    "Relative revision %s didn't "
+                    "produce %d migrations" % (relative, abs(relative))
+                )
+            return (
+                self._walk(
+                    start=(
+                        self.get_revision(symbol)
+                        if branch_label is None
+                        else self.get_revision(
+                            "%s@%s" % (branch_label, symbol)
+                        )
+                    ),
+                    steps=relative,
+                    no_overwalk=assert_relative_length,
+                ),
+            )
+
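+    # Parsing sketch for upgrade targets (identifiers hypothetical):
+    #
+    #     "ae1027ab"     -> absolute target
+    #     "+2"           -> two steps up from the current revision
+    #     "ae1027ab+2"   -> two steps up from the given revision
+    #     "mybranch@+1"  -> one step up along that branch
+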
+    def _collect_downgrade_revisions(
+        self,
+        upper: _RevisionIdentifierType,
+        lower: _RevisionIdentifierType,
+        inclusive: bool,
+        implicit_base: bool,
+        assert_relative_length: bool,
+    ) -> Tuple[Set[Revision], Tuple[Optional[_RevisionOrBase], ...]]:
+        """
+        Compute the set of current revisions specified by :upper, and the
+        downgrade target specified by :lower. Return all dependents of
+        the target which are currently active.
+
+        :inclusive=True includes the target revision in the set
+        """
+
+        branch_label, target_revision = self._parse_downgrade_target(
+            current_revisions=upper,
+            target=lower,
+            assert_relative_length=assert_relative_length,
+        )
+        if target_revision == "base":
+            target_revision = None
+        assert target_revision is None or isinstance(target_revision, Revision)
+
+        roots: List[Revision]
+        # Find candidates to drop.
+        if target_revision is None:
+            # Downgrading back to base: find all tree roots.
+            roots = [
+                rev
+                for rev in self._revision_map.values()
+                if rev is not None and rev.down_revision is None
+            ]
+        elif inclusive:
+            # inclusive implies target revision should also be dropped
+            roots = [target_revision]
+        else:
+            # Downgrading to fixed target: find all direct children.
+            roots = [
+                is_revision(rev)
+                for rev in self.get_revisions(target_revision.nextrev)
+            ]
+
+        if branch_label and len(roots) > 1:
+            # Need to filter roots.
+            ancestors = {
+                rev.revision
+                for rev in self._get_ancestor_nodes(
+                    [self._resolve_branch(branch_label)],
+                    include_dependencies=False,
+                )
+            }
+            # Intersection gives the root revisions we are trying to
+            # rollback with the downgrade.
+            roots = [
+                is_revision(rev)
+                for rev in self.get_revisions(
+                    {rev.revision for rev in roots}.intersection(ancestors)
+                )
+            ]
+
+            # Ensure we didn't throw everything away when filtering branches.
+            if len(roots) == 0:
+                raise RevisionError(
+                    "Not a valid downgrade target from current heads"
+                )
+
+        heads = self.get_revisions(upper)
+
+        # Aim is to drop the computed roots; to do so we also need to drop
+        # their descendants and anything dependent on them.
+        downgrade_revisions = set(
+            self._get_descendant_nodes(
+                roots,
+                include_dependencies=True,
+                omit_immediate_dependencies=False,
+            )
+        )
+        active_revisions = set(
+            self._get_ancestor_nodes(heads, include_dependencies=True)
+        )
+
+        # Emit revisions to drop in reverse topological sorted order.
+        downgrade_revisions.intersection_update(active_revisions)
+
+        if implicit_base:
+            # Wind other branches back to base.
+            downgrade_revisions.update(
+                active_revisions.difference(self._get_ancestor_nodes(roots))
+            )
+
+        if (
+            target_revision is not None
+            and not downgrade_revisions
+            and target_revision not in heads
+        ):
+            # Empty intersection: target revs are not present.
+            raise RangeNotAncestorError("Nothing to drop", upper)
+
+        return downgrade_revisions, heads
+
+    def _collect_upgrade_revisions(
+        self,
+        upper: _RevisionIdentifierType,
+        lower: _RevisionIdentifierType,
+        inclusive: bool,
+        implicit_base: bool,
+        assert_relative_length: bool,
+    ) -> Tuple[Set[Revision], Tuple[Revision, ...]]:
+        """
+        Compute the set of required revisions specified by :upper, and the
+        current set of active revisions specified by :lower. Find the
+        difference between the two to compute the required upgrades.
+
+        :inclusive=True includes the current/lower revisions in the set
+
+        :implicit_base=False only returns revisions which are downstream
+        of the current/lower revisions. Dependencies from branches with
+        different bases will not be included.
+        """
+        targets: Collection[Revision] = [
+            is_revision(rev)
+            for rev in self._parse_upgrade_target(
+                current_revisions=lower,
+                target=upper,
+                assert_relative_length=assert_relative_length,
+            )
+        ]
+
+        # assert type(targets) is tuple, "targets should be a tuple"
+
+        # Handle named bases (e.g. branch@... -> heads should only produce
+        # targets on the given branch)
+        if isinstance(lower, str) and "@" in lower:
+            branch, _, _ = lower.partition("@")
+            branch_rev = self.get_revision(branch)
+            if branch_rev is not None and branch_rev.revision == branch:
+                # A revision was used as a label; get its branch instead
+                assert len(branch_rev.branch_labels) == 1
+                branch = next(iter(branch_rev.branch_labels))
+            targets = {
+                need for need in targets if branch in need.branch_labels
+            }
+
+        required_node_set = set(
+            self._get_ancestor_nodes(
+                targets, check=True, include_dependencies=True
+            )
+        ).union(targets)
+
+        current_revisions = self.get_revisions(lower)
+        if not implicit_base and any(
+            rev not in required_node_set
+            for rev in current_revisions
+            if rev is not None
+        ):
+            raise RangeNotAncestorError(lower, upper)
+        assert (
+            type(current_revisions) is tuple
+        ), "current_revisions should be a tuple"
+
+        # Special case where lower is a relative value (get_revisions can't
+        # resolve it)
+        if current_revisions and current_revisions[0] is None:
+            _, rev = self._parse_downgrade_target(
+                current_revisions=upper,
+                target=lower,
+                assert_relative_length=assert_relative_length,
+            )
+            assert rev
+            if rev == "base":
+                current_revisions = tuple()
+                lower = None
+            else:
+                current_revisions = (rev,)
+                lower = rev.revision
+
+        current_node_set = set(
+            self._get_ancestor_nodes(
+                current_revisions, check=True, include_dependencies=True
+            )
+        ).union(current_revisions)
+
+        needs = required_node_set.difference(current_node_set)
+
+        # Include the lower revision (i.e. current_revisions) in the iteration
+        if inclusive:
+            needs.update(is_revision(rev) for rev in self.get_revisions(lower))
+        # By default, base is implicit as we want all dependencies returned.
+        # Base is also implicit if lower = base
+        # implicit_base=False -> only return direct downstreams of
+        # current_revisions
+        if current_revisions and not implicit_base:
+            lower_descendants = self._get_descendant_nodes(
+                [is_revision(rev) for rev in current_revisions],
+                check=True,
+                include_dependencies=False,
+            )
+            needs.intersection_update(lower_descendants)
+
+        return needs, tuple(targets)
+
+    def _get_all_current(
+        self, id_: Tuple[str, ...]
+    ) -> Set[Optional[_RevisionOrBase]]:
+        top_revs: Set[Optional[_RevisionOrBase]]
+        top_revs = set(self.get_revisions(id_))
+        top_revs.update(
+            self._get_ancestor_nodes(list(top_revs), include_dependencies=True)
+        )
+        return self._filter_into_branch_heads(top_revs)
+
+
+class Revision:
+    """Base class for revisioned objects.
+
+    The :class:`.Revision` class is the base of the more public-facing
+    :class:`.Script` object, which represents a migration script.
+    The mechanics of revision management and traversal are encapsulated
+    within :class:`.Revision`, while :class:`.Script` applies this logic
+    to Python files in a version directory.
+
+    """
+
+    nextrev: FrozenSet[str] = frozenset()
+    """following revisions, based on down_revision only."""
+
+    _all_nextrev: FrozenSet[str] = frozenset()
+
+    revision: str = None  # type: ignore[assignment]
+    """The string revision number."""
+
+    down_revision: Optional[_RevIdType] = None
+    """The ``down_revision`` identifier(s) within the migration script.
+
+    Note that the total set of "down" revisions is
+    down_revision + dependencies.
+
+    """
+
+    dependencies: Optional[_RevIdType] = None
+    """Additional revisions which this revision is dependent on.
+
+    From a migration standpoint, these dependencies are added to the
+    down_revision to form the full iteration.  However, the separation
+    of down_revision from "dependencies" is to assist in navigating
+    a history that contains many branches, typically a multi-root scenario.
+
+    """
+
+    branch_labels: Set[str] = None  # type: ignore[assignment]
+    """Optional string/tuple of symbolic names to apply to this
+    revision's branch"""
+
+    _resolved_dependencies: Tuple[str, ...]
+    _normalized_resolved_dependencies: Tuple[str, ...]
+
+    @classmethod
+    def verify_rev_id(cls, revision: str) -> None:
+        illegal_chars = set(revision).intersection(_revision_illegal_chars)
+        if illegal_chars:
+            raise RevisionError(
+                "Character(s) '%s' not allowed in revision identifier '%s'"
+                % (", ".join(sorted(illegal_chars)), revision)
+            )
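+        # e.g. verify_rev_id("abc@123") would raise RevisionError, assuming
+        # "@" is among _revision_illegal_chars (defined elsewhere in this
+        # module).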
+
+    def __init__(
+        self,
+        revision: str,
+        down_revision: Optional[Union[str, Tuple[str, ...]]],
+        dependencies: Optional[Union[str, Tuple[str, ...]]] = None,
+        branch_labels: Optional[Union[str, Tuple[str, ...]]] = None,
+    ) -> None:
+        if down_revision and revision in util.to_tuple(down_revision):
+            raise LoopDetected(revision)
+        elif dependencies is not None and revision in util.to_tuple(
+            dependencies
+        ):
+            raise DependencyLoopDetected(revision)
+
+        self.verify_rev_id(revision)
+        self.revision = revision
+        self.down_revision = tuple_rev_as_scalar(util.to_tuple(down_revision))
+        self.dependencies = tuple_rev_as_scalar(util.to_tuple(dependencies))
+        self._orig_branch_labels = util.to_tuple(branch_labels, default=())
+        self.branch_labels = set(self._orig_branch_labels)
+
+    def __repr__(self) -> str:
+        args = [repr(self.revision), repr(self.down_revision)]
+        if self.dependencies:
+            args.append("dependencies=%r" % (self.dependencies,))
+        if self.branch_labels:
+            args.append("branch_labels=%r" % (self.branch_labels,))
+        return "%s(%s)" % (self.__class__.__name__, ", ".join(args))
+
+    def add_nextrev(self, revision: Revision) -> None:
+        self._all_nextrev = self._all_nextrev.union([revision.revision])
+        if self.revision in revision._versioned_down_revisions:
+            self.nextrev = self.nextrev.union([revision.revision])
+
+    @property
+    def _all_down_revisions(self) -> Tuple[str, ...]:
+        return util.dedupe_tuple(
+            util.to_tuple(self.down_revision, default=())
+            + self._resolved_dependencies
+        )
+
+    @property
+    def _normalized_down_revisions(self) -> Tuple[str, ...]:
+        """return immediate down revisions for a rev, omitting dependencies
+        that are still dependencies of ancestors.
+
+        """
+        return util.dedupe_tuple(
+            util.to_tuple(self.down_revision, default=())
+            + self._normalized_resolved_dependencies
+        )
+
+    @property
+    def _versioned_down_revisions(self) -> Tuple[str, ...]:
+        return util.to_tuple(self.down_revision, default=())
+
+    @property
+    def is_head(self) -> bool:
+        """Return True if this :class:`.Revision` is a 'head' revision.
+
+        This is determined based on whether any other :class:`.Script`
+        within the :class:`.ScriptDirectory` refers to this
+        :class:`.Script`.   Multiple heads can be present.
+
+        """
+        return not bool(self.nextrev)
+
+    @property
+    def _is_real_head(self) -> bool:
+        return not bool(self._all_nextrev)
+
+    @property
+    def is_base(self) -> bool:
+        """Return True if this :class:`.Revision` is a 'base' revision."""
+
+        return self.down_revision is None
+
+    @property
+    def _is_real_base(self) -> bool:
+        """Return True if this :class:`.Revision` is a "real" base revision,
+        e.g. that it has no dependencies either."""
+
+        # we use self.dependencies here because this is called up
+        # in initialization where _real_dependencies isn't set up
+        # yet
+        return self.down_revision is None and self.dependencies is None
+
+    @property
+    def is_branch_point(self) -> bool:
+        """Return True if this :class:`.Script` is a branch point.
+
+        A branch point is defined as a :class:`.Script` which is referred
+        to by more than one succeeding :class:`.Script`; that is, more
+        than one :class:`.Script` has a `down_revision` identifier pointing
+        here.
+
+        """
+        return len(self.nextrev) > 1
+
+    @property
+    def _is_real_branch_point(self) -> bool:
+        """Return True if this :class:`.Script` is a 'real' branch point,
+        taking into account dependencies as well.
+
+        """
+        return len(self._all_nextrev) > 1
+
+    @property
+    def is_merge_point(self) -> bool:
+        """Return True if this :class:`.Script` is a merge point."""
+
+        return len(self._versioned_down_revisions) > 1
+
+
+@overload
+def tuple_rev_as_scalar(rev: None) -> None: ...
+
+
+@overload
+def tuple_rev_as_scalar(
+    rev: Union[Tuple[_T, ...], List[_T]]
+) -> Union[_T, Tuple[_T, ...], List[_T]]: ...
+
+
+def tuple_rev_as_scalar(
+    rev: Optional[Sequence[_T]],
+) -> Union[_T, Sequence[_T], None]:
+    if not rev:
+        return None
+    elif len(rev) == 1:
+        return rev[0]
+    else:
+        return rev
+
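+# Examples (illustrative): tuple_rev_as_scalar(()) -> None,
+# tuple_rev_as_scalar(("a",)) -> "a",
+# tuple_rev_as_scalar(("a", "b")) -> ("a", "b")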
+
+def is_revision(rev: Any) -> Revision:
+    assert isinstance(rev, Revision)
+    return rev
diff --git a/.venv/lib/python3.12/site-packages/alembic/script/write_hooks.py b/.venv/lib/python3.12/site-packages/alembic/script/write_hooks.py
new file mode 100644
index 00000000..99771479
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/script/write_hooks.py
@@ -0,0 +1,179 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import shlex
+import subprocess
+import sys
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import List
+from typing import Mapping
+from typing import Optional
+from typing import Union
+
+from .. import util
+from ..util import compat
+
+
+REVISION_SCRIPT_TOKEN = "REVISION_SCRIPT_FILENAME"
+
+_registry: dict = {}
+
+
+def register(name: str) -> Callable:
+    """A function decorator that will register that function as a write hook.
+
+    See the documentation linked below for an example.
+
+    .. seealso::
+
+        :ref:`post_write_hooks_custom`
+
+
+    """
+
+    def decorate(fn):
+        _registry[name] = fn
+        return fn
+
+    return decorate
+
+
+def _invoke(
+    name: str, revision: str, options: Mapping[str, Union[str, int]]
+) -> Any:
+    """Invokes the formatter registered for the given name.
+
+    :param name: The name of a formatter in the registry
+    :param revision: The path of the revision script being processed
+    :param options: A dict containing kwargs passed to the
+        specified formatter.
+    :raises: :class:`alembic.util.CommandError`
+    """
+    try:
+        hook = _registry[name]
+    except KeyError as ke:
+        raise util.CommandError(
+            f"No formatter with name '{name}' registered"
+        ) from ke
+    else:
+        return hook(revision, options)
+
+
+def _run_hooks(path: str, hook_config: Mapping[str, str]) -> None:
+    """Invoke hooks for a generated revision."""
+
+    from .base import _split_on_space_comma
+
+    names = _split_on_space_comma.split(hook_config.get("hooks", ""))
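+    # Illustrative example: a hook_config of {"hooks": "black",
+    # "black.type": "console_scripts", "black.entrypoint": "black"}
+    # produces one hook named "black" with opts {"type": "console_scripts",
+    # "entrypoint": "black", "_hook_name": "black"}.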
+
+    for name in names:
+        if not name:
+            continue
+        opts = {
+            key[len(name) + 1 :]: hook_config[key]
+            for key in hook_config
+            if key.startswith(name + ".")
+        }
+        opts["_hook_name"] = name
+        try:
+            type_ = opts["type"]
+        except KeyError as ke:
+            raise util.CommandError(
+                f"Key {name}.type is required for post write hook {name!r}"
+            ) from ke
+        else:
+            with util.status(
+                f"Running post write hook {name!r}", newline=True
+            ):
+                _invoke(type_, path, opts)
+
+
+def _parse_cmdline_options(cmdline_options_str: str, path: str) -> List[str]:
+    """Parse options from a string into a list.
+
+    Also substitutes the revision script token with the actual filename of
+    the revision script.
+
+    If the revision script token doesn't occur in the options string, it is
+    automatically prepended.
+    """
+    if REVISION_SCRIPT_TOKEN not in cmdline_options_str:
+        cmdline_options_str = REVISION_SCRIPT_TOKEN + " " + cmdline_options_str
+    cmdline_options_list = shlex.split(
+        cmdline_options_str, posix=compat.is_posix
+    )
+    cmdline_options_list = [
+        option.replace(REVISION_SCRIPT_TOKEN, path)
+        for option in cmdline_options_list
+    ]
+    return cmdline_options_list
+
+
+@register("console_scripts")
+def console_scripts(
+    path: str, options: dict, ignore_output: bool = False
+) -> None:
+    try:
+        entrypoint_name = options["entrypoint"]
+    except KeyError as ke:
+        raise util.CommandError(
+            f"Key {options['_hook_name']}.entrypoint is required for post "
+            f"write hook {options['_hook_name']!r}"
+        ) from ke
+    for entry in compat.importlib_metadata_get("console_scripts"):
+        if entry.name == entrypoint_name:
+            impl: Any = entry
+            break
+    else:
+        raise util.CommandError(
+            f"Could not find entrypoint console_scripts.{entrypoint_name}"
+        )
+    cwd: Optional[str] = options.get("cwd", None)
+    cmdline_options_str = options.get("options", "")
+    cmdline_options_list = _parse_cmdline_options(cmdline_options_str, path)
+
+    kw: Dict[str, Any] = {}
+    if ignore_output:
+        kw["stdout"] = kw["stderr"] = subprocess.DEVNULL
+
+    subprocess.run(
+        [
+            sys.executable,
+            "-c",
+            f"import {impl.module}; {impl.module}.{impl.attr}()",
+        ]
+        + cmdline_options_list,
+        cwd=cwd,
+        **kw,
+    )
+
+
+@register("exec")
+def exec_(path: str, options: dict, ignore_output: bool = False) -> None:
+    try:
+        executable = options["executable"]
+    except KeyError as ke:
+        raise util.CommandError(
+            f"Key {options['_hook_name']}.executable is required for post "
+            f"write hook {options['_hook_name']!r}"
+        ) from ke
+    cwd: Optional[str] = options.get("cwd", None)
+    cmdline_options_str = options.get("options", "")
+    cmdline_options_list = _parse_cmdline_options(cmdline_options_str, path)
+
+    kw: Dict[str, Any] = {}
+    if ignore_output:
+        kw["stdout"] = kw["stderr"] = subprocess.DEVNULL
+
+    subprocess.run(
+        [
+            executable,
+            *cmdline_options_list,
+        ],
+        cwd=cwd,
+        **kw,
+    )
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/async/README b/.venv/lib/python3.12/site-packages/alembic/templates/async/README
new file mode 100644
index 00000000..e0d0858f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/async/README
@@ -0,0 +1 @@
+Generic single-database configuration with an async dbapi.
\ No newline at end of file
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/async/alembic.ini.mako b/.venv/lib/python3.12/site-packages/alembic/templates/async/alembic.ini.mako
new file mode 100644
index 00000000..7ffd7926
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/async/alembic.ini.mako
@@ -0,0 +1,117 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts.
+# Use forward slashes (/) also on Windows to provide an OS-agnostic path
+script_location = ${script_location}
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# path that will be prepended to sys.path if present;
+# defaults to the current working directory.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires Python>=3.9 or the backports.zoneinfo library, plus the tzdata library.
+# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to ${script_location}/versions.  When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "version_path_separator" below.
+# version_locations = %(here)s/bar:%(here)s/bat:${script_location}/versions
+
+# version path separator; As mentioned above, this is the character used to split
+# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
+# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
+# Valid values for version_path_separator are:
+#
+# version_path_separator = :
+# version_path_separator = ;
+# version_path_separator = space
+# version_path_separator = newline
+#
+# Use os.pathsep. Default configuration used for new projects.
+version_path_separator = os
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts.  See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = %(here)s/.venv/bin/ruff
+# ruff.options = check --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARNING
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARNING
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/async/env.py b/.venv/lib/python3.12/site-packages/alembic/templates/async/env.py
new file mode 100644
index 00000000..9f2d5194
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/async/env.py
@@ -0,0 +1,89 @@
+import asyncio
+from logging.config import fileConfig
+
+from sqlalchemy import pool
+from sqlalchemy.engine import Connection
+from sqlalchemy.ext.asyncio import async_engine_from_config
+
+from alembic import context
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line essentially sets up the loggers.
+if config.config_file_name is not None:
+    fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = None
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline() -> None:
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(
+        url=url,
+        target_metadata=target_metadata,
+        literal_binds=True,
+        dialect_opts={"paramstyle": "named"},
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def do_run_migrations(connection: Connection) -> None:
+    context.configure(connection=connection, target_metadata=target_metadata)
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+async def run_async_migrations() -> None:
+    """In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+
+    connectable = async_engine_from_config(
+        config.get_section(config.config_ini_section, {}),
+        prefix="sqlalchemy.",
+        poolclass=pool.NullPool,
+    )
+
+    async with connectable.connect() as connection:
+        await connection.run_sync(do_run_migrations)
+
+    await connectable.dispose()
+
+
+def run_migrations_online() -> None:
+    """Run migrations in 'online' mode."""
+
+    asyncio.run(run_async_migrations())
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/async/script.py.mako b/.venv/lib/python3.12/site-packages/alembic/templates/async/script.py.mako
new file mode 100644
index 00000000..480b130d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/async/script.py.mako
@@ -0,0 +1,28 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    ${downgrades if downgrades else "pass"}
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/generic/README b/.venv/lib/python3.12/site-packages/alembic/templates/generic/README
new file mode 100644
index 00000000..98e4f9c4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/generic/README
@@ -0,0 +1 @@
+Generic single-database configuration.
\ No newline at end of file
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/generic/alembic.ini.mako b/.venv/lib/python3.12/site-packages/alembic/templates/generic/alembic.ini.mako
new file mode 100644
index 00000000..3e211d0d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/generic/alembic.ini.mako
@@ -0,0 +1,119 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts
+# Use forward slashes (/) also on Windows to provide an OS-agnostic path
+script_location = ${script_location}
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
+# for all available tokens
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# path that will be prepended to sys.path if present;
+# defaults to the current working directory.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires Python>=3.9 or the backports.zoneinfo library, plus the tzdata library.
+# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to ${script_location}/versions.  When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "version_path_separator" below.
+# version_locations = %(here)s/bar:%(here)s/bat:${script_location}/versions
+
+# version path separator; As mentioned above, this is the character used to split
+# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
+# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
+# Valid values for version_path_separator are:
+#
+# version_path_separator = :
+# version_path_separator = ;
+# version_path_separator = space
+# version_path_separator = newline
+#
+# Use os.pathsep. Default configuration used for new projects.
+version_path_separator = os
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts.  See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = %(here)s/.venv/bin/ruff
+# ruff.options = check --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARNING
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARNING
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/generic/env.py b/.venv/lib/python3.12/site-packages/alembic/templates/generic/env.py
new file mode 100644
index 00000000..36112a3c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/generic/env.py
@@ -0,0 +1,78 @@
+from logging.config import fileConfig
+
+from sqlalchemy import engine_from_config
+from sqlalchemy import pool
+
+from alembic import context
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line essentially sets up the loggers.
+if config.config_file_name is not None:
+    fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = None
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline() -> None:
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(
+        url=url,
+        target_metadata=target_metadata,
+        literal_binds=True,
+        dialect_opts={"paramstyle": "named"},
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def run_migrations_online() -> None:
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+    connectable = engine_from_config(
+        config.get_section(config.config_ini_section, {}),
+        prefix="sqlalchemy.",
+        poolclass=pool.NullPool,
+    )
+
+    with connectable.connect() as connection:
+        context.configure(
+            connection=connection, target_metadata=target_metadata
+        )
+
+        with context.begin_transaction():
+            context.run_migrations()
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/generic/script.py.mako b/.venv/lib/python3.12/site-packages/alembic/templates/generic/script.py.mako
new file mode 100644
index 00000000..480b130d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/generic/script.py.mako
@@ -0,0 +1,28 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    ${downgrades if downgrades else "pass"}
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/multidb/README b/.venv/lib/python3.12/site-packages/alembic/templates/multidb/README
new file mode 100644
index 00000000..f046ec91
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/multidb/README
@@ -0,0 +1,12 @@
+Rudimentary multi-database configuration.
+
+Multi-DB isn't vastly different from generic. The primary difference is that it
+will run the migrations N times (depending on how many databases you have
+configured), providing one engine name and associated context for each run.
+
+That engine name will then allow the migration to restrict what runs within it to
+just the appropriate migrations for that engine. You can see this behavior within
+the mako template.
+
+In the provided configuration, you'll need to have `databases` provided in
+alembic's config, and an `sqlalchemy.url` provided for each engine name.
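+
+For example (illustrative; engine names and URLs are placeholders):
+
+    databases = engine1, engine2
+
+    [engine1]
+    sqlalchemy.url = driver://user:pass@localhost/dbname
+
+    [engine2]
+    sqlalchemy.url = driver://user:pass@localhost/dbname2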
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/multidb/alembic.ini.mako b/.venv/lib/python3.12/site-packages/alembic/templates/multidb/alembic.ini.mako
new file mode 100644
index 00000000..00316456
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/multidb/alembic.ini.mako
@@ -0,0 +1,124 @@
+# a multi-database configuration.
+
+[alembic]
+# path to migration scripts
+# Use forward slashes (/) also on Windows to provide an OS-agnostic path
+script_location = ${script_location}
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
+# for all available tokens
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# path that will be prepended to sys.path if present;
+# defaults to the current working directory.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires Python>=3.9 or the backports.zoneinfo library, plus the tzdata library.
+# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to ${script_location}/versions.  When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "version_path_separator" below.
+# version_locations = %(here)s/bar:%(here)s/bat:${script_location}/versions
+
+# version path separator; As mentioned above, this is the character used to split
+# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
+# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
+# Valid values for version_path_separator are:
+#
+# version_path_separator = :
+# version_path_separator = ;
+# version_path_separator = space
+# version_path_separator = newline
+#
+# Use os.pathsep. Default configuration used for new projects.
+version_path_separator = os
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+databases = engine1, engine2
+
+[engine1]
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+[engine2]
+sqlalchemy.url = driver://user:pass@localhost/dbname2
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts.  See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = %(here)s/.venv/bin/ruff
+# ruff.options = check --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARNING
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARNING
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/multidb/env.py b/.venv/lib/python3.12/site-packages/alembic/templates/multidb/env.py
new file mode 100644
index 00000000..e937b64e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/multidb/env.py
@@ -0,0 +1,140 @@
+import logging
+from logging.config import fileConfig
+import re
+
+from sqlalchemy import engine_from_config
+from sqlalchemy import pool
+
+from alembic import context
+
+USE_TWOPHASE = False
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line essentially sets up the loggers.
+if config.config_file_name is not None:
+    fileConfig(config.config_file_name)
+logger = logging.getLogger("alembic.env")
+
+# gather section names referring to different
+# databases.  These are named "engine1", "engine2"
+# in the sample .ini file.
+db_names = config.get_main_option("databases", "")
+
+# add your model's MetaData objects here
+# for 'autogenerate' support.  These must be set
+# up to hold just those tables targeting a
+# particular database. table.to_metadata() may be
+# helpful here in case a "copy" of
+# a MetaData is needed.
+# from myapp import mymodel
+# target_metadata = {
+#       'engine1':mymodel.metadata1,
+#       'engine2':mymodel.metadata2
+# }
+target_metadata = {}
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline() -> None:
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    # for the --sql use case, run migrations for each URL into
+    # individual files.
+
+    engines = {}
+    for name in re.split(r",\s*", db_names):
+        engines[name] = rec = {}
+        rec["url"] = context.config.get_section_option(name, "sqlalchemy.url")
+
+    for name, rec in engines.items():
+        logger.info("Migrating database %s" % name)
+        file_ = "%s.sql" % name
+        logger.info("Writing output to %s" % file_)
+        with open(file_, "w") as buffer:
+            context.configure(
+                url=rec["url"],
+                output_buffer=buffer,
+                target_metadata=target_metadata.get(name),
+                literal_binds=True,
+                dialect_opts={"paramstyle": "named"},
+            )
+            with context.begin_transaction():
+                context.run_migrations(engine_name=name)
+
+
+def run_migrations_online() -> None:
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+
+    # for the direct-to-DB use case, start a transaction on all
+    # engines, then run all migrations, then commit all transactions.
+
+    engines = {}
+    for name in re.split(r",\s*", db_names):
+        engines[name] = rec = {}
+        rec["engine"] = engine_from_config(
+            context.config.get_section(name, {}),
+            prefix="sqlalchemy.",
+            poolclass=pool.NullPool,
+        )
+
+    for name, rec in engines.items():
+        engine = rec["engine"]
+        rec["connection"] = conn = engine.connect()
+
+        if USE_TWOPHASE:
+            rec["transaction"] = conn.begin_twophase()
+        else:
+            rec["transaction"] = conn.begin()
+
+    try:
+        for name, rec in engines.items():
+            logger.info("Migrating database %s" % name)
+            context.configure(
+                connection=rec["connection"],
+                upgrade_token="%s_upgrades" % name,
+                downgrade_token="%s_downgrades" % name,
+                target_metadata=target_metadata.get(name),
+            )
+            context.run_migrations(engine_name=name)
+
+        if USE_TWOPHASE:
+            for rec in engines.values():
+                rec["transaction"].prepare()
+
+        for rec in engines.values():
+            rec["transaction"].commit()
+    except:
+        for rec in engines.values():
+            rec["transaction"].rollback()
+        raise
+    finally:
+        for rec in engines.values():
+            rec["connection"].close()
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/.venv/lib/python3.12/site-packages/alembic/templates/multidb/script.py.mako b/.venv/lib/python3.12/site-packages/alembic/templates/multidb/script.py.mako
new file mode 100644
index 00000000..3caca7bf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/templates/multidb/script.py.mako
@@ -0,0 +1,51 @@
+<%!
+import re
+
+%>"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade(engine_name: str) -> None:
+    """Upgrade schema."""
+    globals()["upgrade_%s" % engine_name]()
+
+
+def downgrade(engine_name: str) -> None:
+    """Downgrade schema."""
+    globals()["downgrade_%s" % engine_name]()
+
+<%
+    db_names = config.get_main_option("databases")
+%>
+
+## generate an "upgrade_<xyz>() / downgrade_<xyz>()" function
+## for each database name in the ini file.
+
+% for db_name in re.split(r',\s*', db_names):
+
+def upgrade_${db_name}() -> None:
+    """Upgrade ${db_name} schema."""
+    ${context.get("%s_upgrades" % db_name, "pass")}
+
+
+def downgrade_${db_name}() -> None:
+    """Downgrade ${db_name} schema."""
+    ${context.get("%s_downgrades" % db_name, "pass")}
+
+% endfor
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/__init__.py b/.venv/lib/python3.12/site-packages/alembic/testing/__init__.py
new file mode 100644
index 00000000..0407adfe
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/__init__.py
@@ -0,0 +1,29 @@
+from sqlalchemy.testing import config
+from sqlalchemy.testing import emits_warning
+from sqlalchemy.testing import engines
+from sqlalchemy.testing import exclusions
+from sqlalchemy.testing import mock
+from sqlalchemy.testing import provide_metadata
+from sqlalchemy.testing import skip_if
+from sqlalchemy.testing import uses_deprecated
+from sqlalchemy.testing.config import combinations
+from sqlalchemy.testing.config import fixture
+from sqlalchemy.testing.config import requirements as requires
+
+from .assertions import assert_raises
+from .assertions import assert_raises_message
+from .assertions import emits_python_deprecation_warning
+from .assertions import eq_
+from .assertions import eq_ignore_whitespace
+from .assertions import expect_raises
+from .assertions import expect_raises_message
+from .assertions import expect_sqlalchemy_deprecated
+from .assertions import expect_sqlalchemy_deprecated_20
+from .assertions import expect_warnings
+from .assertions import is_
+from .assertions import is_false
+from .assertions import is_not_
+from .assertions import is_true
+from .assertions import ne_
+from .fixtures import TestBase
+from .util import resolve_lambda
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/assertions.py b/.venv/lib/python3.12/site-packages/alembic/testing/assertions.py
new file mode 100644
index 00000000..c08b2228
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/assertions.py
@@ -0,0 +1,175 @@
+from __future__ import annotations
+
+import contextlib
+import re
+import sys
+from typing import Any
+from typing import Dict
+
+from sqlalchemy import exc as sa_exc
+from sqlalchemy.engine import default
+from sqlalchemy.engine import URL
+from sqlalchemy.testing.assertions import _expect_warnings
+from sqlalchemy.testing.assertions import eq_  # noqa
+from sqlalchemy.testing.assertions import is_  # noqa
+from sqlalchemy.testing.assertions import is_false  # noqa
+from sqlalchemy.testing.assertions import is_not_  # noqa
+from sqlalchemy.testing.assertions import is_true  # noqa
+from sqlalchemy.testing.assertions import ne_  # noqa
+from sqlalchemy.util import decorator
+
+
+def _assert_proper_exception_context(exception):
+    """assert that any exception we're catching does not have a __context__
+    without a __cause__, and that __suppress_context__ is never set.
+
+    Python 3 will report nested exceptions as "during the handling of
+    error X, error Y occurred".  That's not what we want to do; we want
+    these exceptions in a cause chain.
+
+    """
+
+    if (
+        exception.__context__ is not exception.__cause__
+        and not exception.__suppress_context__
+    ):
+        assert False, (
+            "Exception %r was correctly raised but did not set its "
+            "context %r as its cause."
+            % (exception, exception.__context__)
+        )
+
+
+def assert_raises(except_cls, callable_, *args, **kw):
+    return _assert_raises(except_cls, callable_, args, kw, check_context=True)
+
+
+def assert_raises_context_ok(except_cls, callable_, *args, **kw):
+    return _assert_raises(except_cls, callable_, args, kw)
+
+
+def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
+    return _assert_raises(
+        except_cls, callable_, args, kwargs, msg=msg, check_context=True
+    )
+
+
+def assert_raises_message_context_ok(
+    except_cls, msg, callable_, *args, **kwargs
+):
+    return _assert_raises(except_cls, callable_, args, kwargs, msg=msg)
+
+
+def _assert_raises(
+    except_cls, callable_, args, kwargs, msg=None, check_context=False
+):
+    with _expect_raises(except_cls, msg, check_context) as ec:
+        callable_(*args, **kwargs)
+    return ec.error
+
+
+class _ErrorContainer:
+    error: Any = None
+
+
+@contextlib.contextmanager
+def _expect_raises(
+    except_cls, msg=None, check_context=False, text_exact=False
+):
+    ec = _ErrorContainer()
+    if check_context:
+        are_we_already_in_a_traceback = sys.exc_info()[0]
+    try:
+        yield ec
+        success = False
+    except except_cls as err:
+        ec.error = err
+        success = True
+        if msg is not None:
+            if text_exact:
+                assert str(err) == msg, f"{msg} != {err}"
+            else:
+                assert re.search(msg, str(err), re.UNICODE), f"{msg} !~ {err}"
+        if check_context and not are_we_already_in_a_traceback:
+            _assert_proper_exception_context(err)
+        print(str(err).encode("utf-8"))
+
+    # assert outside the block so it works for AssertionError too!
+    assert success, "Callable did not raise an exception"
+
+
+def expect_raises(except_cls, check_context=True):
+    return _expect_raises(except_cls, check_context=check_context)
+
+
+def expect_raises_message(
+    except_cls, msg, check_context=True, text_exact=False
+):
+    return _expect_raises(
+        except_cls, msg=msg, check_context=check_context, text_exact=text_exact
+    )
+
+
+def eq_ignore_whitespace(a, b, msg=None):
+    a = re.sub(r"^\s+?|\n", "", a)
+    a = re.sub(r" {2,}", " ", a)
+    b = re.sub(r"^\s+?|\n", "", b)
+    b = re.sub(r" {2,}", " ", b)
+
+    assert a == b, msg or "%r != %r" % (a, b)
+
+
+_dialect_mods: Dict[Any, Any] = {}
+
+
+def _get_dialect(name):
+    if name is None or name == "default":
+        return default.DefaultDialect()
+    else:
+        d = URL.create(name).get_dialect()()
+
+        if name == "postgresql":
+            d.implicit_returning = True
+        elif name == "mssql":
+            d.legacy_schema_aliasing = False
+        return d
+
+
+def expect_warnings(*messages, **kw):
+    """Context manager which expects one or more warnings.
+
+    With no arguments, squelches all SAWarnings emitted via
+    sqlalchemy.util.warn and sqlalchemy.util.warn_limited.   Otherwise
+    pass string expressions that will match selected warnings via regex;
+    all non-matching warnings are sent through.
+
+    The expect version **asserts** that the warnings were in fact seen.
+
+    Note that the test suite sets SAWarning warnings to raise exceptions.
+
+    """
+    return _expect_warnings(Warning, messages, **kw)
+
+
+def emits_python_deprecation_warning(*messages):
+    """Decorator form of expect_warnings().
+
+    Note that, unlike expect_warnings(), this decorator does **not**
+    assert that the warnings were in fact seen.
+
+    """
+
+    @decorator
+    def decorate(fn, *args, **kw):
+        with _expect_warnings(DeprecationWarning, assert_=False, *messages):
+            return fn(*args, **kw)
+
+    return decorate
+
+
+def expect_sqlalchemy_deprecated(*messages, **kw):
+    return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw)
+
+
+def expect_sqlalchemy_deprecated_20(*messages, **kw):
+    return _expect_warnings(sa_exc.RemovedIn20Warning, messages, **kw)
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/env.py b/.venv/lib/python3.12/site-packages/alembic/testing/env.py
new file mode 100644
index 00000000..9a457b7f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/env.py
@@ -0,0 +1,502 @@
+import importlib.machinery
+import os
+from pathlib import Path
+import shutil
+import textwrap
+
+from sqlalchemy.testing import config
+from sqlalchemy.testing import provision
+
+from . import util as testing_util
+from .. import command
+from .. import script
+from .. import util
+from ..script import Script
+from ..script import ScriptDirectory
+
+
+def _get_staging_directory():
+    if provision.FOLLOWER_IDENT:
+        return f"scratch_{provision.FOLLOWER_IDENT}"
+    else:
+        return "scratch"
+
+
+def staging_env(create=True, template="generic", sourceless=False):
+    cfg = _testing_config()
+    if create:
+        path = _join_path(_get_staging_directory(), "scripts")
+        assert not os.path.exists(path), (
+            "staging directory %s already exists; poor cleanup?" % path
+        )
+
+        command.init(cfg, path, template=template)
+        if sourceless:
+            try:
+                # do an import so that a .pyc/.pyo is generated.
+                util.load_python_file(path, "env.py")
+            except AttributeError:
+                # we don't have the migration context set up yet,
+                # so running env.py raises this exception.
+                # theoretically we could use py_compile here to
+                # generate .pyc/.pyo without importing, but it's not
+                # really worth it.
+                pass
+            assert sourceless in (
+                "pep3147_envonly",
+                "simple",
+                "pep3147_everything",
+            ), sourceless
+            make_sourceless(
+                _join_path(path, "env.py"),
+                "pep3147" if "pep3147" in sourceless else "simple",
+            )
+
+    sc = script.ScriptDirectory.from_config(cfg)
+    return sc
+
+
+def clear_staging_env():
+    from sqlalchemy.testing import engines
+
+    engines.testing_reaper.close_all()
+    shutil.rmtree(_get_staging_directory(), True)
+
+
+def script_file_fixture(txt):
+    dir_ = _join_path(_get_staging_directory(), "scripts")
+    path = _join_path(dir_, "script.py.mako")
+    with open(path, "w") as f:
+        f.write(txt)
+
+
+def env_file_fixture(txt):
+    dir_ = _join_path(_get_staging_directory(), "scripts")
+    txt = (
+        """
+from alembic import context
+
+config = context.config
+"""
+        + txt
+    )
+
+    path = _join_path(dir_, "env.py")
+    pyc_path = util.pyc_file_from_path(path)
+    if pyc_path:
+        os.unlink(pyc_path)
+
+    with open(path, "w") as f:
+        f.write(txt)
+
+
+def _sqlite_file_db(tempname="foo.db", future=False, scope=None, **options):
+    dir_ = _join_path(_get_staging_directory(), "scripts")
+    url = "sqlite:///%s/%s" % (dir_, tempname)
+    if scope:
+        options["scope"] = scope
+    return testing_util.testing_engine(url=url, future=future, options=options)
+
+
+def _sqlite_testing_config(sourceless=False, future=False):
+    dir_ = _join_path(_get_staging_directory(), "scripts")
+    url = f"sqlite:///{dir_}/foo.db"
+
+    sqlalchemy_future = future or ("future" in config.db.__class__.__module__)
+
+    return _write_config_file(
+        f"""
+[alembic]
+script_location = {dir_}
+sqlalchemy.url = {url}
+sourceless = {"true" if sourceless else "false"}
+{"sqlalchemy.future = true" if sqlalchemy_future else ""}
+
+[loggers]
+keys = root,sqlalchemy
+
+[handlers]
+keys = console
+
+[logger_root]
+level = WARNING
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = DEBUG
+handlers =
+qualname = sqlalchemy.engine
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatters]
+keys = generic
+
+[formatter_generic]
+format = %%(levelname)-5.5s [%%(name)s] %%(message)s
+datefmt = %%H:%%M:%%S
+    """
+    )
+
+
+def _multi_dir_testing_config(sourceless=False, extra_version_location=""):
+    dir_ = _join_path(_get_staging_directory(), "scripts")
+    sqlalchemy_future = "future" in config.db.__class__.__module__
+
+    url = "sqlite:///%s/foo.db" % dir_
+
+    return _write_config_file(
+        f"""
+[alembic]
+script_location = {dir_}
+sqlalchemy.url = {url}
+sqlalchemy.future = {"true" if sqlalchemy_future else "false"}
+sourceless = {"true" if sourceless else "false"}
+version_locations = %(here)s/model1/ %(here)s/model2/ %(here)s/model3/ \
+{extra_version_location}
+
+[loggers]
+keys = root
+
+[handlers]
+keys = console
+
+[logger_root]
+level = WARNING
+handlers = console
+qualname =
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatters]
+keys = generic
+
+[formatter_generic]
+format = %%(levelname)-5.5s [%%(name)s] %%(message)s
+datefmt = %%H:%%M:%%S
+    """
+    )
+
+
+def _no_sql_testing_config(dialect="postgresql", directives=""):
+    """use a postgresql url with no host so that
+    connections guaranteed to fail"""
+    dir_ = _join_path(_get_staging_directory(), "scripts")
+    return _write_config_file(
+        f"""
+[alembic]
+script_location = {dir_}
+sqlalchemy.url = {dialect}://
+{directives}
+
+[loggers]
+keys = root
+
+[handlers]
+keys = console
+
+[logger_root]
+level = WARNING
+handlers = console
+qualname =
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatters]
+keys = generic
+
+[formatter_generic]
+format = %%(levelname)-5.5s [%%(name)s] %%(message)s
+datefmt = %%H:%%M:%%S
+
+"""
+    )
+
+
+def _write_config_file(text):
+    cfg = _testing_config()
+    with open(cfg.config_file_name, "w") as f:
+        f.write(text)
+    return cfg
+
+
+def _testing_config():
+    from alembic.config import Config
+
+    if not os.access(_get_staging_directory(), os.F_OK):
+        os.mkdir(_get_staging_directory())
+    return Config(_join_path(_get_staging_directory(), "test_alembic.ini"))
+
+
+def write_script(
+    scriptdir, rev_id, content, encoding="ascii", sourceless=False
+):
+    old = scriptdir.revision_map.get_revision(rev_id)
+    path = old.path
+
+    content = textwrap.dedent(content)
+    if encoding:
+        content = content.encode(encoding)
+    with open(path, "wb") as fp:
+        fp.write(content)
+    pyc_path = util.pyc_file_from_path(path)
+    if pyc_path:
+        os.unlink(pyc_path)
+    script = Script._from_path(scriptdir, path)
+    old = scriptdir.revision_map.get_revision(script.revision)
+    if old.down_revision != script.down_revision:
+        raise Exception("Can't change down_revision on a refresh operation.")
+    scriptdir.revision_map.add_revision(script, _replace=True)
+
+    if sourceless:
+        make_sourceless(
+            path, "pep3147" if sourceless == "pep3147_everything" else "simple"
+        )
+
+
+def make_sourceless(path, style):
+    import py_compile
+
+    py_compile.compile(path)
+
+    if style == "simple":
+        pyc_path = util.pyc_file_from_path(path)
+        suffix = importlib.machinery.BYTECODE_SUFFIXES[0]
+        filepath, ext = os.path.splitext(path)
+        simple_pyc_path = filepath + suffix
+        shutil.move(pyc_path, simple_pyc_path)
+        pyc_path = simple_pyc_path
+    else:
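+        # "pep3147" style: leave the .pyc in its __pycache__ location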
+        assert style == "pep3147"
+        pyc_path = util.pyc_file_from_path(path)
+
+    assert os.access(pyc_path, os.F_OK)
+
+    os.unlink(path)
+
+
+def three_rev_fixture(cfg):
+    a = util.rev_id()
+    b = util.rev_id()
+    c = util.rev_id()
+
+    script = ScriptDirectory.from_config(cfg)
+    script.generate_revision(a, "revision a", refresh=True, head="base")
+    write_script(
+        script,
+        a,
+        f"""\
+"Rev A"
+revision = '{a}'
+down_revision = None
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 1")
+
+
+def downgrade():
+    op.execute("DROP STEP 1")
+
+""",
+    )
+
+    script.generate_revision(b, "revision b", refresh=True, head=a)
+    write_script(
+        script,
+        b,
+        f"""# coding: utf-8
+"Rev B, méil, %3"
+revision = '{b}'
+down_revision = '{a}'
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 2")
+
+
+def downgrade():
+    op.execute("DROP STEP 2")
+
+""",
+        encoding="utf-8",
+    )
+
+    script.generate_revision(c, "revision c", refresh=True, head=b)
+    write_script(
+        script,
+        c,
+        f"""\
+"Rev C"
+revision = '{c}'
+down_revision = '{b}'
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 3")
+
+
+def downgrade():
+    op.execute("DROP STEP 3")
+
+""",
+    )
+    return a, b, c
+
+
+def multi_heads_fixture(cfg, a, b, c):
+    """Create a multiple head fixture from the three-revs fixture"""
+
+    # a->b->c
+    #     -> d -> e
+    #     -> f
+    d = util.rev_id()
+    e = util.rev_id()
+    f = util.rev_id()
+
+    script = ScriptDirectory.from_config(cfg)
+    script.generate_revision(
+        d, "revision d from b", head=b, splice=True, refresh=True
+    )
+    write_script(
+        script,
+        d,
+        f"""\
+"Rev D"
+revision = '{d}'
+down_revision = '{b}'
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 4")
+
+
+def downgrade():
+    op.execute("DROP STEP 4")
+
+""",
+    )
+
+    script.generate_revision(
+        e, "revision e from d", head=d, splice=True, refresh=True
+    )
+    write_script(
+        script,
+        e,
+        f"""\
+"Rev E"
+revision = '{e}'
+down_revision = '{d}'
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 5")
+
+
+def downgrade():
+    op.execute("DROP STEP 5")
+
+""",
+    )
+
+    script.generate_revision(
+        f, "revision f from b", head=b, splice=True, refresh=True
+    )
+    write_script(
+        script,
+        f,
+        f"""\
+"Rev F"
+revision = '{f}'
+down_revision = '{b}'
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 6")
+
+
+def downgrade():
+    op.execute("DROP STEP 6")
+
+""",
+    )
+
+    return d, e, f
+
+
+def _multidb_testing_config(engines):
+    """alembic.ini fixture to work exactly with the 'multidb' template"""
+
+    dir_ = _join_path(_get_staging_directory(), "scripts")
+
+    sqlalchemy_future = "future" in config.db.__class__.__module__
+
+    databases = ", ".join(engines.keys())
+    engines = "\n\n".join(
+        f"[{key}]\nsqlalchemy.url = {value.url}"
+        for key, value in engines.items()
+    )
+
+    return _write_config_file(
+        f"""
+[alembic]
+script_location = {dir_}
+sourceless = false
+sqlalchemy.future = {"true" if sqlalchemy_future else "false"}
+databases = {databases}
+
+{engines}
+[loggers]
+keys = root
+
+[handlers]
+keys = console
+
+[logger_root]
+level = WARNING
+handlers = console
+qualname =
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatters]
+keys = generic
+
+[formatter_generic]
+format = %%(levelname)-5.5s [%%(name)s] %%(message)s
+datefmt = %%H:%%M:%%S
+    """
+    )
+
+
+def _join_path(base: str, *more: str):
+    return str(Path(base).joinpath(*more).as_posix())
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/fixtures.py b/.venv/lib/python3.12/site-packages/alembic/testing/fixtures.py
new file mode 100644
index 00000000..17d732f8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/fixtures.py
@@ -0,0 +1,306 @@
+from __future__ import annotations
+
+import configparser
+from contextlib import contextmanager
+import io
+import re
+from typing import Any
+from typing import Dict
+
+from sqlalchemy import Column
+from sqlalchemy import create_mock_engine
+from sqlalchemy import inspect
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy import testing
+from sqlalchemy import text
+from sqlalchemy.testing import config
+from sqlalchemy.testing import mock
+from sqlalchemy.testing.assertions import eq_
+from sqlalchemy.testing.fixtures import FutureEngineMixin
+from sqlalchemy.testing.fixtures import TablesTest as SQLAlchemyTablesTest
+from sqlalchemy.testing.fixtures import TestBase as SQLAlchemyTestBase
+
+import alembic
+from .assertions import _get_dialect
+from ..environment import EnvironmentContext
+from ..migration import MigrationContext
+from ..operations import Operations
+from ..util import sqla_compat
+from ..util.sqla_compat import sqla_2
+
+
+testing_config = configparser.ConfigParser()
+testing_config.read(["test.cfg"])
+
+
+class TestBase(SQLAlchemyTestBase):
+    is_sqlalchemy_future = sqla_2
+
+    @testing.fixture()
+    def ops_context(self, migration_context):
+        with migration_context.begin_transaction(_per_migration=True):
+            yield Operations(migration_context)
+
+    @testing.fixture
+    def migration_context(self, connection):
+        return MigrationContext.configure(
+            connection, opts=dict(transaction_per_migration=True)
+        )
+
+    @testing.fixture
+    def as_sql_migration_context(self, connection):
+        return MigrationContext.configure(
+            connection, opts=dict(transaction_per_migration=True, as_sql=True)
+        )
+
+    @testing.fixture
+    def connection(self):
+        with config.db.connect() as conn:
+            yield conn
+
+
+class TablesTest(TestBase, SQLAlchemyTablesTest):
+    pass
+
+
+FutureEngineMixin.is_sqlalchemy_future = True
+
+
+def capture_db(dialect="postgresql://"):
+    buf = []
+
+    def dump(sql, *multiparams, **params):
+        buf.append(str(sql.compile(dialect=engine.dialect)))
+
+    engine = create_mock_engine(dialect, dump)
+    return engine, buf
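+
+# Usage sketch (hypothetical): compile DDL against the mock engine and
+# inspect the rendered strings:
+#
+#     from sqlalchemy import Column, Integer, MetaData, Table
+#     engine, buf = capture_db()
+#     Table("t", MetaData(), Column("x", Integer)).create(engine)
+#     assert "CREATE TABLE t" in buf[0]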
+
+
+_engs: Dict[Any, Any] = {}
+
+
+@contextmanager
+def capture_context_buffer(**kw):
+    if kw.pop("bytes_io", False):
+        buf = io.BytesIO()
+    else:
+        buf = io.StringIO()
+
+    kw.update({"dialect_name": "sqlite", "output_buffer": buf})
+    conf = EnvironmentContext.configure
+
+    def configure(*arg, **opt):
+        opt.update(**kw)
+        return conf(*arg, **opt)
+
+    with mock.patch.object(EnvironmentContext, "configure", configure):
+        yield buf
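+
+# Usage sketch (hypothetical): given a Config ``cfg`` and a revision ``rev``,
+# run a command in offline mode and capture the SQL it would emit:
+#
+#     from alembic import command
+#     with capture_context_buffer() as buf:
+#         command.upgrade(cfg, rev, sql=True)
+#     assert "CREATE TABLE" in buf.getvalue()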
+
+
+@contextmanager
+def capture_engine_context_buffer(**kw):
+    from .env import _sqlite_file_db
+    from sqlalchemy import event
+
+    buf = io.StringIO()
+
+    eng = _sqlite_file_db()
+
+    conn = eng.connect()
+
+    @event.listens_for(conn, "before_cursor_execute")
+    def bce(conn, cursor, statement, parameters, context, executemany):
+        buf.write(statement + "\n")
+
+    kw.update({"connection": conn})
+    conf = EnvironmentContext.configure
+
+    def configure(*arg, **opt):
+        opt.update(**kw)
+        return conf(*arg, **opt)
+
+    with mock.patch.object(EnvironmentContext, "configure", configure):
+        yield buf
+
+
+def op_fixture(
+    dialect="default",
+    as_sql=False,
+    naming_convention=None,
+    literal_binds=False,
+    native_boolean=None,
+):
+    opts = {}
+    if naming_convention:
+        opts["target_metadata"] = MetaData(naming_convention=naming_convention)
+
+    class buffer_:
+        def __init__(self):
+            self.lines = []
+
+        def write(self, msg):
+            msg = msg.strip()
+            msg = re.sub(r"[\n\t]", "", msg)
+            if as_sql:
+                # the impl produces soft tabs,
+                # so search for blocks of 4 spaces
+                msg = re.sub(r"    ", "", msg)
+                msg = re.sub(r"\;\n*$", "", msg)
+
+            self.lines.append(msg)
+
+        def flush(self):
+            pass
+
+    buf = buffer_()
+
+    class ctx(MigrationContext):
+        def get_buf(self):
+            return buf
+
+        def clear_assertions(self):
+            buf.lines[:] = []
+
+        def assert_(self, *sql):
+            # TODO: make this more flexible about
+            # whitespace and such
+            eq_(buf.lines, [re.sub(r"[\n\t]", "", s) for s in sql])
+
+        def assert_contains(self, sql):
+            for stmt in buf.lines:
+                if re.sub(r"[\n\t]", "", sql) in stmt:
+                    return
+            else:
+                assert False, "Could not locate fragment %r in %r" % (
+                    sql,
+                    buf.lines,
+                )
+
+    if as_sql:
+        opts["as_sql"] = as_sql
+    if literal_binds:
+        opts["literal_binds"] = literal_binds
+
+    ctx_dialect = _get_dialect(dialect)
+    if native_boolean is not None:
+        ctx_dialect.supports_native_boolean = native_boolean
+        # this is new as of SQLAlchemy 1.2.7 and is used by SQL Server,
+        # which breaks assumptions in the alembic test suite
+        ctx_dialect.non_native_boolean_check_constraint = True
+    if not as_sql:
+
+        def execute(stmt, *multiparam, **param):
+            if isinstance(stmt, str):
+                stmt = text(stmt)
+            assert stmt.supports_execution
+            sql = str(stmt.compile(dialect=ctx_dialect))
+
+            buf.write(sql)
+
+        connection = mock.Mock(dialect=ctx_dialect, execute=execute)
+    else:
+        opts["output_buffer"] = buf
+        connection = None
+    context = ctx(ctx_dialect, connection, opts)
+
+    alembic.op._proxy = Operations(context)
+    return context
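+
+# Usage sketch (hypothetical): statements issued through the module-level
+# ``op`` proxy are captured by the returned context:
+#
+#     from alembic import op
+#     context = op_fixture("postgresql")
+#     op.drop_column("t1", "c1")
+#     context.assert_("ALTER TABLE t1 DROP COLUMN c1")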
+
+
+class AlterColRoundTripFixture:
+    # since these tests are about syntax, use a more recent SQLAlchemy; some
+    # of the type / server default compare logic might not work on older
+    # SQLAlchemy versions, as seems to be the case for SQLAlchemy 1.1 on
+    # Oracle
+
+    __requires__ = ("alter_column",)
+
+    def setUp(self):
+        self.conn = config.db.connect()
+        self.ctx = MigrationContext.configure(self.conn)
+        self.op = Operations(self.ctx)
+        self.metadata = MetaData()
+
+    def _compare_type(self, t1, t2):
+        c1 = Column("q", t1)
+        c2 = Column("q", t2)
+        assert not self.ctx.impl.compare_type(
+            c1, c2
+        ), "Type objects %r and %r didn't compare as equivalent" % (t1, t2)
+
+    def _compare_server_default(self, t1, s1, t2, s2):
+        c1 = Column("q", t1, server_default=s1)
+        c2 = Column("q", t2, server_default=s2)
+        assert not self.ctx.impl.compare_server_default(
+            c1, c2, s2, s1
+        ), "server defaults %r and %r didn't compare as equivalent" % (s1, s2)
+
+    def tearDown(self):
+        sqla_compat._safe_rollback_connection_transaction(self.conn)
+        with self.conn.begin():
+            self.metadata.drop_all(self.conn)
+        self.conn.close()
+
+    def _run_alter_col(self, from_, to_, compare=None):
+        column = Column(
+            from_.get("name", "colname"),
+            from_.get("type", String(10)),
+            nullable=from_.get("nullable", True),
+            server_default=from_.get("server_default", None),
+            # comment=from_.get("comment", None)
+        )
+        t = Table("x", self.metadata, column)
+
+        with sqla_compat._ensure_scope_for_ddl(self.conn):
+            t.create(self.conn)
+            insp = inspect(self.conn)
+            old_col = insp.get_columns("x")[0]
+
+            # TODO: conditional comment support
+            self.op.alter_column(
+                "x",
+                column.name,
+                existing_type=column.type,
+                existing_server_default=(
+                    column.server_default
+                    if column.server_default is not None
+                    else False
+                ),
+                existing_nullable=True if column.nullable else False,
+                # existing_comment=column.comment,
+                nullable=to_.get("nullable", None),
+                # modify_comment=False,
+                server_default=to_.get("server_default", False),
+                new_column_name=to_.get("name", None),
+                type_=to_.get("type", None),
+            )
+
+        insp = inspect(self.conn)
+        new_col = insp.get_columns("x")[0]
+
+        if compare is None:
+            compare = to_
+
+        eq_(
+            new_col["name"],
+            compare["name"] if "name" in compare else column.name,
+        )
+        self._compare_type(
+            new_col["type"], compare.get("type", old_col["type"])
+        )
+        eq_(new_col["nullable"], compare.get("nullable", column.nullable))
+        self._compare_server_default(
+            new_col["type"],
+            new_col.get("default", None),
+            compare.get("type", old_col["type"]),
+            (
+                compare["server_default"].text
+                if "server_default" in compare
+                else (
+                    column.server_default.arg.text
+                    if column.server_default is not None
+                    else None
+                )
+            ),
+        )
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/plugin/__init__.py b/.venv/lib/python3.12/site-packages/alembic/testing/plugin/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/plugin/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/plugin/bootstrap.py b/.venv/lib/python3.12/site-packages/alembic/testing/plugin/bootstrap.py
new file mode 100644
index 00000000..d4a2c552
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/plugin/bootstrap.py
@@ -0,0 +1,4 @@
+"""
+Bootstrapper for test framework plugins.
+
+"""
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/requirements.py b/.venv/lib/python3.12/site-packages/alembic/testing/requirements.py
new file mode 100644
index 00000000..8b63c16b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/requirements.py
@@ -0,0 +1,176 @@
+from sqlalchemy.testing.requirements import Requirements
+
+from alembic import util
+from ..testing import exclusions
+
+
+class SuiteRequirements(Requirements):
+    @property
+    def schemas(self):
+        """Target database must support external schemas, and have one
+        named 'test_schema'."""
+
+        return exclusions.open()
+
+    @property
+    def autocommit_isolation(self):
+        """target database should support 'AUTOCOMMIT' isolation level"""
+
+        return exclusions.closed()
+
+    @property
+    def materialized_views(self):
+        """needed for sqlalchemy compat"""
+        return exclusions.closed()
+
+    @property
+    def unique_constraint_reflection(self):
+        def doesnt_have_check_uq_constraints(config):
+            from sqlalchemy import inspect
+
+            insp = inspect(config.db)
+            try:
+                insp.get_unique_constraints("x")
+            except NotImplementedError:
+                return True
+            except TypeError:
+                return True
+            except Exception:
+                pass
+            return False
+
+        return exclusions.skip_if(doesnt_have_check_uq_constraints)
+
+    @property
+    def sequences(self):
+        """Target database must support SEQUENCEs."""
+
+        return exclusions.only_if(
+            [lambda config: config.db.dialect.supports_sequences],
+            "no sequence support",
+        )
+
+    @property
+    def foreign_key_match(self):
+        return exclusions.open()
+
+    @property
+    def foreign_key_constraint_reflection(self):
+        return exclusions.open()
+
+    @property
+    def check_constraints_w_enforcement(self):
+        """Target database must support check constraints
+        and also enforce them."""
+
+        return exclusions.open()
+
+    @property
+    def reflects_pk_names(self):
+        return exclusions.closed()
+
+    @property
+    def reflects_fk_options(self):
+        return exclusions.closed()
+
+    @property
+    def sqlalchemy_1x(self):
+        return exclusions.skip_if(
+            lambda config: util.sqla_2,
+            "SQLAlchemy 1.x test",
+        )
+
+    @property
+    def sqlalchemy_2(self):
+        return exclusions.skip_if(
+            lambda config: not util.sqla_2,
+            "SQLAlchemy 2.x test",
+        )
+
+    @property
+    def asyncio(self):
+        def go(config):
+            try:
+                import greenlet  # noqa: F401
+            except ImportError:
+                return False
+            else:
+                return True
+
+        return exclusions.only_if(go)
+
+    @property
+    def comments(self):
+        return exclusions.only_if(
+            lambda config: config.db.dialect.supports_comments
+        )
+
+    @property
+    def alter_column(self):
+        return exclusions.open()
+
+    @property
+    def computed_columns(self):
+        return exclusions.closed()
+
+    @property
+    def autoincrement_on_composite_pk(self):
+        return exclusions.closed()
+
+    @property
+    def fk_ondelete_is_reflected(self):
+        return exclusions.closed()
+
+    @property
+    def fk_onupdate_is_reflected(self):
+        return exclusions.closed()
+
+    @property
+    def fk_onupdate(self):
+        return exclusions.open()
+
+    @property
+    def fk_ondelete_restrict(self):
+        return exclusions.open()
+
+    @property
+    def fk_onupdate_restrict(self):
+        return exclusions.open()
+
+    @property
+    def fk_ondelete_noaction(self):
+        return exclusions.open()
+
+    @property
+    def fk_initially(self):
+        return exclusions.closed()
+
+    @property
+    def fk_deferrable(self):
+        return exclusions.closed()
+
+    @property
+    def fk_deferrable_is_reflected(self):
+        return exclusions.closed()
+
+    @property
+    def fk_names(self):
+        return exclusions.open()
+
+    @property
+    def integer_subtype_comparisons(self):
+        return exclusions.open()
+
+    @property
+    def no_name_normalize(self):
+        return exclusions.skip_if(
+            lambda config: config.db.dialect.requires_name_normalize
+        )
+
+    @property
+    def identity_columns(self):
+        return exclusions.closed()
+
+    @property
+    def identity_columns_alter(self):
+        return exclusions.closed()
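+
+
+# A backend-specific test suite would typically subclass SuiteRequirements
+# and open the individual exclusions it supports; a minimal sketch
+# (hypothetical subclass, not part of this module):
+#
+#     class DefaultRequirements(SuiteRequirements):
+#         @property
+#         def computed_columns(self):
+#             return exclusions.open()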
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/schemacompare.py b/.venv/lib/python3.12/site-packages/alembic/testing/schemacompare.py
new file mode 100644
index 00000000..204cc4dd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/schemacompare.py
@@ -0,0 +1,169 @@
+from itertools import zip_longest
+
+from sqlalchemy import schema
+from sqlalchemy.sql.elements import ClauseList
+
+
+class CompareTable:
+    def __init__(self, table):
+        self.table = table
+
+    def __eq__(self, other):
+        if self.table.name != other.name or self.table.schema != other.schema:
+            return False
+
+        for c1, c2 in zip_longest(self.table.c, other.c):
+            if (c1 is None and c2 is not None) or (
+                c2 is None and c1 is not None
+            ):
+                return False
+            if CompareColumn(c1) != c2:
+                return False
+
+        # TODO: compare constraints, indexes
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+class CompareColumn:
+    def __init__(self, column):
+        self.column = column
+
+    def __eq__(self, other):
+        return (
+            self.column.name == other.name
+            and self.column.nullable == other.nullable
+        )
+        # TODO: datatypes etc
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+class CompareIndex:
+    def __init__(self, index, name_only=False):
+        self.index = index
+        self.name_only = name_only
+
+    def __eq__(self, other):
+        if self.name_only:
+            return self.index.name == other.name
+        else:
+            return (
+                str(schema.CreateIndex(self.index))
+                == str(schema.CreateIndex(other))
+                and self.index.dialect_kwargs == other.dialect_kwargs
+            )
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __repr__(self):
+        expr = ClauseList(*self.index.expressions)
+        try:
+            expr_str = expr.compile().string
+        except Exception:
+            expr_str = str(expr)
+        return f"<CompareIndex {self.index.name}({expr_str})>"
+
+
+class CompareCheckConstraint:
+    def __init__(self, constraint):
+        self.constraint = constraint
+
+    def __eq__(self, other):
+        return (
+            isinstance(other, schema.CheckConstraint)
+            and self.constraint.name == other.name
+            and (str(self.constraint.sqltext) == str(other.sqltext))
+            and (other.table.name == self.constraint.table.name)
+            and other.table.schema == self.constraint.table.schema
+        )
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+class CompareForeignKey:
+    def __init__(self, constraint):
+        self.constraint = constraint
+
+    def __eq__(self, other):
+        r1 = (
+            isinstance(other, schema.ForeignKeyConstraint)
+            and self.constraint.name == other.name
+            and (other.table.name == self.constraint.table.name)
+            and other.table.schema == self.constraint.table.schema
+        )
+        if not r1:
+            return False
+        for c1, c2 in zip_longest(self.constraint.columns, other.columns):
+            if (c1 is None and c2 is not None) or (
+                c2 is None and c1 is not None
+            ):
+                return False
+            if CompareColumn(c1) != c2:
+                return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+class ComparePrimaryKey:
+    def __init__(self, constraint):
+        self.constraint = constraint
+
+    def __eq__(self, other):
+        r1 = (
+            isinstance(other, schema.PrimaryKeyConstraint)
+            and self.constraint.name == other.name
+            and (other.table.name == self.constraint.table.name)
+            and other.table.schema == self.constraint.table.schema
+        )
+        if not r1:
+            return False
+
+        for c1, c2 in zip_longest(self.constraint.columns, other.columns):
+            if (c1 is None and c2 is not None) or (
+                c2 is None and c1 is not None
+            ):
+                return False
+            if CompareColumn(c1) != c2:
+                return False
+
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+class CompareUniqueConstraint:
+    def __init__(self, constraint):
+        self.constraint = constraint
+
+    def __eq__(self, other):
+        r1 = (
+            isinstance(other, schema.UniqueConstraint)
+            and self.constraint.name == other.name
+            and (other.table.name == self.constraint.table.name)
+            and other.table.schema == self.constraint.table.schema
+        )
+        if not r1:
+            return False
+
+        for c1, c2 in zip_longest(self.constraint.columns, other.columns):
+            if (c1 is None and c2 is not None) or (
+                c2 is None and c1 is not None
+            ):
+                return False
+            if CompareColumn(c1) != c2:
+                return False
+
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
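+
+
+# Usage sketch (hypothetical): wrap the expected schema object so that an
+# ``eq_`` assertion compares by structure rather than identity, keeping the
+# wrapper on the left so its ``__eq__`` is consulted first:
+#
+#     eq_(CompareTable(expected_table), reflected_table)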
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/suite/__init__.py b/.venv/lib/python3.12/site-packages/alembic/testing/suite/__init__.py
new file mode 100644
index 00000000..3da498d2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/suite/__init__.py
@@ -0,0 +1,7 @@
+from .test_autogen_comments import *  # noqa
+from .test_autogen_computed import *  # noqa
+from .test_autogen_diffs import *  # noqa
+from .test_autogen_fks import *  # noqa
+from .test_autogen_identity import *  # noqa
+from .test_environment import *  # noqa
+from .test_op import *  # noqa
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/suite/_autogen_fixtures.py b/.venv/lib/python3.12/site-packages/alembic/testing/suite/_autogen_fixtures.py
new file mode 100644
index 00000000..d838ebef
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/suite/_autogen_fixtures.py
@@ -0,0 +1,335 @@
+from __future__ import annotations
+
+from typing import Any
+from typing import Dict
+from typing import Set
+
+from sqlalchemy import CHAR
+from sqlalchemy import CheckConstraint
+from sqlalchemy import Column
+from sqlalchemy import event
+from sqlalchemy import ForeignKey
+from sqlalchemy import Index
+from sqlalchemy import inspect
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import Numeric
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy import Text
+from sqlalchemy import text
+from sqlalchemy import UniqueConstraint
+
+from ... import autogenerate
+from ... import util
+from ...autogenerate import api
+from ...ddl.base import _fk_spec
+from ...migration import MigrationContext
+from ...operations import ops
+from ...testing import config
+from ...testing import eq_
+from ...testing.env import clear_staging_env
+from ...testing.env import staging_env
+
+names_in_this_test: Set[Any] = set()
+
+
+@event.listens_for(Table, "after_parent_attach")
+def new_table(table, parent):
+    names_in_this_test.add(table.name)
+
+
+def _default_include_object(obj, name, type_, reflected, compare_to):
+    if type_ == "table":
+        return name in names_in_this_test
+    else:
+        return True
+
+
+_default_object_filters: Any = _default_include_object
+
+_default_name_filters: Any = None
+
+
+class ModelOne:
+    __requires__ = ("unique_constraint_reflection",)
+
+    schema: Any = None
+
+    @classmethod
+    def _get_db_schema(cls):
+        schema = cls.schema
+
+        m = MetaData(schema=schema)
+
+        Table(
+            "user",
+            m,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50)),
+            Column("a1", Text),
+            Column("pw", String(50)),
+            Index("pw_idx", "pw"),
+        )
+
+        Table(
+            "address",
+            m,
+            Column("id", Integer, primary_key=True),
+            Column("email_address", String(100), nullable=False),
+        )
+
+        Table(
+            "order",
+            m,
+            Column("order_id", Integer, primary_key=True),
+            Column(
+                "amount",
+                Numeric(8, 2),
+                nullable=False,
+                server_default=text("0"),
+            ),
+            CheckConstraint("amount >= 0", name="ck_order_amount"),
+        )
+
+        Table(
+            "extra",
+            m,
+            Column("x", CHAR),
+            Column("uid", Integer, ForeignKey("user.id")),
+        )
+
+        return m
+
+    @classmethod
+    def _get_model_schema(cls):
+        schema = cls.schema
+
+        m = MetaData(schema=schema)
+
+        Table(
+            "user",
+            m,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", Text, server_default="x"),
+        )
+
+        Table(
+            "address",
+            m,
+            Column("id", Integer, primary_key=True),
+            Column("email_address", String(100), nullable=False),
+            Column("street", String(50)),
+            UniqueConstraint("email_address", name="uq_email"),
+        )
+
+        Table(
+            "order",
+            m,
+            Column("order_id", Integer, primary_key=True),
+            Column(
+                "amount",
+                Numeric(10, 2),
+                nullable=True,
+                server_default=text("0"),
+            ),
+            Column("user_id", Integer, ForeignKey("user.id")),
+            CheckConstraint("amount > -1", name="ck_order_amount"),
+        )
+
+        Table(
+            "item",
+            m,
+            Column("id", Integer, primary_key=True),
+            Column("description", String(100)),
+            Column("order_id", Integer, ForeignKey("order.order_id")),
+            CheckConstraint("len(description) > 5"),
+        )
+        return m
+
+
+class _ComparesFKs:
+    def _assert_fk_diff(
+        self,
+        diff,
+        type_,
+        source_table,
+        source_columns,
+        target_table,
+        target_columns,
+        name=None,
+        conditional_name=None,
+        source_schema=None,
+        onupdate=None,
+        ondelete=None,
+        initially=None,
+        deferrable=None,
+    ):
+        # the public API for ForeignKeyConstraint was not very rich
+        # in 0.7, 0.8, so here we use the well-known but slightly
+        # private API to get at its elements
+        (
+            fk_source_schema,
+            fk_source_table,
+            fk_source_columns,
+            fk_target_schema,
+            fk_target_table,
+            fk_target_columns,
+            fk_onupdate,
+            fk_ondelete,
+            fk_deferrable,
+            fk_initially,
+        ) = _fk_spec(diff[1])
+
+        eq_(diff[0], type_)
+        eq_(fk_source_table, source_table)
+        eq_(fk_source_columns, source_columns)
+        eq_(fk_target_table, target_table)
+        eq_(fk_source_schema, source_schema)
+        eq_(fk_onupdate, onupdate)
+        eq_(fk_ondelete, ondelete)
+        eq_(fk_initially, initially)
+        eq_(fk_deferrable, deferrable)
+
+        eq_([elem.column.name for elem in diff[1].elements], target_columns)
+        if conditional_name is not None:
+            if conditional_name == "servergenerated":
+                fks = inspect(self.bind).get_foreign_keys(source_table)
+                server_fk_name = fks[0]["name"]
+                eq_(diff[1].name, server_fk_name)
+            else:
+                eq_(diff[1].name, conditional_name)
+        else:
+            eq_(diff[1].name, name)
+
+
+class AutogenTest(_ComparesFKs):
+    def _flatten_diffs(self, diffs):
+        for d in diffs:
+            if isinstance(d, list):
+                yield from self._flatten_diffs(d)
+            else:
+                yield d
+
+    @classmethod
+    def _get_bind(cls):
+        return config.db
+
+    configure_opts: Dict[Any, Any] = {}
+
+    @classmethod
+    def setup_class(cls):
+        staging_env()
+        cls.bind = cls._get_bind()
+        cls.m1 = cls._get_db_schema()
+        cls.m1.create_all(cls.bind)
+        cls.m2 = cls._get_model_schema()
+
+    @classmethod
+    def teardown_class(cls):
+        cls.m1.drop_all(cls.bind)
+        clear_staging_env()
+
+    def setUp(self):
+        self.conn = conn = self.bind.connect()
+        ctx_opts = {
+            "compare_type": True,
+            "compare_server_default": True,
+            "target_metadata": self.m2,
+            "upgrade_token": "upgrades",
+            "downgrade_token": "downgrades",
+            "alembic_module_prefix": "op.",
+            "sqlalchemy_module_prefix": "sa.",
+            "include_object": _default_object_filters,
+            "include_name": _default_name_filters,
+        }
+        if self.configure_opts:
+            ctx_opts.update(self.configure_opts)
+        self.context = context = MigrationContext.configure(
+            connection=conn, opts=ctx_opts
+        )
+
+        self.autogen_context = api.AutogenContext(context, self.m2)
+
+    def tearDown(self):
+        self.conn.close()
+
+    def _update_context(
+        self, object_filters=None, name_filters=None, include_schemas=None
+    ):
+        if include_schemas is not None:
+            self.autogen_context.opts["include_schemas"] = include_schemas
+        if object_filters is not None:
+            self.autogen_context._object_filters = [object_filters]
+        if name_filters is not None:
+            self.autogen_context._name_filters = [name_filters]
+        return self.autogen_context
+
+
+class AutogenFixtureTest(_ComparesFKs):
+    def _fixture(
+        self,
+        m1,
+        m2,
+        include_schemas=False,
+        opts=None,
+        object_filters=_default_object_filters,
+        name_filters=_default_name_filters,
+        return_ops=False,
+        max_identifier_length=None,
+    ):
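+        # optionally pin the dialect to a smaller identifier length for the
+        # duration of the comparison; the original value is restored in the
+        # ``finally`` block below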
+        if max_identifier_length:
+            dialect = self.bind.dialect
+            existing_length = dialect.max_identifier_length
+            dialect.max_identifier_length = (
+                dialect._user_defined_max_identifier_length
+            ) = max_identifier_length
+        try:
+            self._alembic_metadata, model_metadata = m1, m2
+            for m in util.to_list(self._alembic_metadata):
+                m.create_all(self.bind)
+
+            with self.bind.connect() as conn:
+                ctx_opts = {
+                    "compare_type": True,
+                    "compare_server_default": True,
+                    "target_metadata": model_metadata,
+                    "upgrade_token": "upgrades",
+                    "downgrade_token": "downgrades",
+                    "alembic_module_prefix": "op.",
+                    "sqlalchemy_module_prefix": "sa.",
+                    "include_object": object_filters,
+                    "include_name": name_filters,
+                    "include_schemas": include_schemas,
+                }
+                if opts:
+                    ctx_opts.update(opts)
+                self.context = context = MigrationContext.configure(
+                    connection=conn, opts=ctx_opts
+                )
+
+                autogen_context = api.AutogenContext(context, model_metadata)
+                uo = ops.UpgradeOps(ops=[])
+                autogenerate._produce_net_changes(autogen_context, uo)
+
+                if return_ops:
+                    return uo
+                else:
+                    return uo.as_diffs()
+        finally:
+            if max_identifier_length:
+                dialect = self.bind.dialect
+                dialect.max_identifier_length = (
+                    dialect._user_defined_max_identifier_length
+                ) = existing_length
+
+    def setUp(self):
+        staging_env()
+        self.bind = config.db
+
+    def tearDown(self):
+        if hasattr(self, "_alembic_metadata"):
+            for m in util.to_list(self._alembic_metadata):
+                m.drop_all(self.bind)
+        clear_staging_env()
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_comments.py b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_comments.py
new file mode 100644
index 00000000..7ef074f5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_comments.py
@@ -0,0 +1,242 @@
+from sqlalchemy import Column
+from sqlalchemy import Float
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+
+from ._autogen_fixtures import AutogenFixtureTest
+from ...testing import eq_
+from ...testing import mock
+from ...testing import TestBase
+
+
+class AutogenerateCommentsTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+
+    __requires__ = ("comments",)
+
+    def test_existing_table_comment_no_change(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            comment="this is some table",
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            comment="this is some table",
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+    def test_add_table_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("some_table", m1, Column("test", String(10), primary_key=True))
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            comment="this is some table",
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "add_table_comment")
+        eq_(diffs[0][1].comment, "this is some table")
+        eq_(diffs[0][2], None)
+
+    def test_remove_table_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            comment="this is some table",
+        )
+
+        Table("some_table", m2, Column("test", String(10), primary_key=True))
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "remove_table_comment")
+        eq_(diffs[0][1].comment, None)
+
+    def test_alter_table_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            comment="this is some table",
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            comment="this is also some table",
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "add_table_comment")
+        eq_(diffs[0][1].comment, "this is also some table")
+        eq_(diffs[0][2], "this is some table")
+
+    def test_existing_column_comment_no_change(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the amount"),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the amount"),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+    def test_add_column_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the amount"),
+        )
+
+        diffs = self._fixture(m1, m2)
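+        # each entry is a "modify_comment" tuple: (directive, schema,
+        # table name, column name, alter_column kwargs, old comment,
+        # new comment)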
+        eq_(
+            diffs,
+            [
+                [
+                    (
+                        "modify_comment",
+                        None,
+                        "some_table",
+                        "amount",
+                        {
+                            "existing_nullable": True,
+                            "existing_type": mock.ANY,
+                            "existing_server_default": False,
+                        },
+                        None,
+                        "the amount",
+                    )
+                ]
+            ],
+        )
+
+    def test_remove_column_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the amount"),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float),
+        )
+
+        diffs = self._fixture(m1, m2)
+        eq_(
+            diffs,
+            [
+                [
+                    (
+                        "modify_comment",
+                        None,
+                        "some_table",
+                        "amount",
+                        {
+                            "existing_nullable": True,
+                            "existing_type": mock.ANY,
+                            "existing_server_default": False,
+                        },
+                        "the amount",
+                        None,
+                    )
+                ]
+            ],
+        )
+
+    def test_alter_column_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the amount"),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the adjusted amount"),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(
+            diffs,
+            [
+                [
+                    (
+                        "modify_comment",
+                        None,
+                        "some_table",
+                        "amount",
+                        {
+                            "existing_nullable": True,
+                            "existing_type": mock.ANY,
+                            "existing_server_default": False,
+                        },
+                        "the amount",
+                        "the adjusted amount",
+                    )
+                ]
+            ],
+        )
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_computed.py b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_computed.py
new file mode 100644
index 00000000..fe7eb7a5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_computed.py
@@ -0,0 +1,144 @@
+import sqlalchemy as sa
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import Table
+
+from ._autogen_fixtures import AutogenFixtureTest
+from ... import testing
+from ...testing import eq_
+from ...testing import is_
+from ...testing import is_true
+from ...testing import mock
+from ...testing import TestBase
+
+
+class AutogenerateComputedTest(AutogenFixtureTest, TestBase):
+    __requires__ = ("computed_columns",)
+    __backend__ = True
+
+    def test_add_computed_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("user", m1, Column("id", Integer, primary_key=True))
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("foo", Integer, sa.Computed("5")),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "add_column")
+        eq_(diffs[0][2], "user")
+        eq_(diffs[0][3].name, "foo")
+        c = diffs[0][3].computed
+
+        is_true(isinstance(c, sa.Computed))
+        is_(c.persisted, None)
+        eq_(str(c.sqltext), "5")
+
+    def test_remove_computed_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("foo", Integer, sa.Computed("5")),
+        )
+
+        Table("user", m2, Column("id", Integer, primary_key=True))
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "remove_column")
+        eq_(diffs[0][2], "user")
+        c = diffs[0][3]
+        eq_(c.name, "foo")
+
+        is_true(isinstance(c.computed, sa.Computed))
+        is_true(isinstance(c.server_default, sa.Computed))
+
+    @testing.combinations(
+        lambda: (None, sa.Computed("bar*5")),
+        (lambda: (sa.Computed("bar*5"), None)),
+        lambda: (
+            sa.Computed("bar*5"),
+            sa.Computed("bar * 42", persisted=True),
+        ),
+        lambda: (sa.Computed("bar*5"), sa.Computed("bar * 42")),
+    )
+    def test_cant_change_computed_warning(self, test_case):
+        arg_before, arg_after = testing.resolve_lambda(test_case, **locals())
+        m1 = MetaData()
+        m2 = MetaData()
+
+        arg_before = [] if arg_before is None else [arg_before]
+        arg_after = [] if arg_after is None else [arg_after]
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("bar", Integer),
+            Column("foo", Integer, *arg_before),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("bar", Integer),
+            Column("foo", Integer, *arg_after),
+        )
+
+        with mock.patch("alembic.util.warn") as mock_warn:
+            diffs = self._fixture(m1, m2)
+
+        eq_(
+            mock_warn.mock_calls,
+            [mock.call("Computed default on user.foo cannot be modified")],
+        )
+
+        eq_(list(diffs), [])
+
+    @testing.combinations(
+        lambda: (None, None),
+        lambda: (sa.Computed("5"), sa.Computed("5")),
+        lambda: (sa.Computed("bar*5"), sa.Computed("bar*5")),
+        lambda: (sa.Computed("bar*5"), sa.Computed("bar * \r\n\t5")),
+    )
+    def test_computed_unchanged(self, test_case):
+        arg_before, arg_after = testing.resolve_lambda(test_case, **locals())
+        m1 = MetaData()
+        m2 = MetaData()
+
+        arg_before = [] if arg_before is None else [arg_before]
+        arg_after = [] if arg_after is None else [arg_after]
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("bar", Integer),
+            Column("foo", Integer, *arg_before),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("bar", Integer),
+            Column("foo", Integer, *arg_after),
+        )
+
+        with mock.patch("alembic.util.warn") as mock_warn:
+            diffs = self._fixture(m1, m2)
+        eq_(mock_warn.mock_calls, [])
+
+        eq_(list(diffs), [])
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_diffs.py b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_diffs.py
new file mode 100644
index 00000000..75bcd37a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_diffs.py
@@ -0,0 +1,273 @@
+from sqlalchemy import BigInteger
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import Table
+from sqlalchemy.testing import in_
+
+from ._autogen_fixtures import AutogenFixtureTest
+from ... import testing
+from ...testing import config
+from ...testing import eq_
+from ...testing import is_
+from ...testing import TestBase
+
+
+class AlterColumnTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+
+    @testing.combinations((True,), (False,))
+    @config.requirements.comments
+    def test_all_existings_filled(self, pk):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("a", m1, Column("x", Integer, primary_key=pk))
+        Table("a", m2, Column("x", Integer, comment="x", primary_key=pk))
+
+        alter_col = self._assert_alter_col(m1, m2, pk)
+        eq_(alter_col.modify_comment, "x")
+
+    @testing.combinations((True,), (False,))
+    @config.requirements.comments
+    def test_all_existings_filled_in_notnull(self, pk):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("a", m1, Column("x", Integer, nullable=False, primary_key=pk))
+        Table(
+            "a",
+            m2,
+            Column("x", Integer, nullable=False, comment="x", primary_key=pk),
+        )
+
+        self._assert_alter_col(m1, m2, pk, nullable=False)
+
+    @testing.combinations((True,), (False,))
+    @config.requirements.comments
+    def test_all_existings_filled_in_comment(self, pk):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("a", m1, Column("x", Integer, comment="old", primary_key=pk))
+        Table("a", m2, Column("x", Integer, comment="new", primary_key=pk))
+
+        alter_col = self._assert_alter_col(m1, m2, pk)
+        eq_(alter_col.existing_comment, "old")
+
+    @testing.combinations((True,), (False,))
+    @config.requirements.comments
+    def test_all_existings_filled_in_server_default(self, pk):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a", m1, Column("x", Integer, server_default="5", primary_key=pk)
+        )
+        Table(
+            "a",
+            m2,
+            Column(
+                "x", Integer, server_default="5", comment="new", primary_key=pk
+            ),
+        )
+
+        alter_col = self._assert_alter_col(m1, m2, pk)
+        in_("5", alter_col.existing_server_default.arg.text)
+
+    def _assert_alter_col(self, m1, m2, pk, nullable=None):
+        ops = self._fixture(m1, m2, return_ops=True)
+        modify_table = ops.ops[-1]
+        alter_col = modify_table.ops[0]
+
+        if nullable is None:
+            eq_(alter_col.existing_nullable, not pk)
+        else:
+            eq_(alter_col.existing_nullable, nullable)
+        assert alter_col.existing_type._compare_type_affinity(Integer())
+        return alter_col
+
+
+class AutoincrementTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+    __requires__ = ("integer_subtype_comparisons",)
+
+    def test_alter_column_autoincrement_none(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("a", m1, Column("x", Integer, nullable=False))
+        Table("a", m2, Column("x", Integer, nullable=True))
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        assert "autoincrement" not in ops.ops[0].ops[0].kw
+
+    def test_alter_column_autoincrement_pk_false(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("x", Integer, primary_key=True, autoincrement=False),
+        )
+        Table(
+            "a",
+            m2,
+            Column("x", BigInteger, primary_key=True, autoincrement=False),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], False)
+
+    def test_alter_column_autoincrement_pk_implicit_true(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("a", m1, Column("x", Integer, primary_key=True))
+        Table("a", m2, Column("x", BigInteger, primary_key=True))
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], True)
+
+    def test_alter_column_autoincrement_pk_explicit_true(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a", m1, Column("x", Integer, primary_key=True, autoincrement=True)
+        )
+        Table(
+            "a",
+            m2,
+            Column("x", BigInteger, primary_key=True, autoincrement=True),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], True)
+
+    def test_alter_column_autoincrement_nonpk_false(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("x", Integer, autoincrement=False),
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("x", BigInteger, autoincrement=False),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], False)
+
+    def test_alter_column_autoincrement_nonpk_implicit_false(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("x", Integer),
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("x", BigInteger),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        assert "autoincrement" not in ops.ops[0].ops[0].kw
+
+    def test_alter_column_autoincrement_nonpk_explicit_true(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True, autoincrement=False),
+            Column("x", Integer, autoincrement=True),
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True, autoincrement=False),
+            Column("x", BigInteger, autoincrement=True),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], True)
+
+    def test_alter_column_autoincrement_compositepk_false(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("x", Integer, primary_key=True, autoincrement=False),
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("x", BigInteger, primary_key=True, autoincrement=False),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], False)
+
+    def test_alter_column_autoincrement_compositepk_implicit_false(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("x", Integer, primary_key=True),
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("x", BigInteger, primary_key=True),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        assert "autoincrement" not in ops.ops[0].ops[0].kw
+
+    @config.requirements.autoincrement_on_composite_pk
+    def test_alter_column_autoincrement_compositepk_explicit_true(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True, autoincrement=False),
+            Column("x", Integer, primary_key=True, autoincrement=True),
+            # on SQLA 1.0 and earlier, the presence of this keyword
+            # trips the "add KEY for the primary key" logic so that the
+            # AUTO_INCREMENT keyword is accepted by MySQL.  On SQLA 1.1 and
+            # greater, the columns are just reorganized.
+            mysql_engine="InnoDB",
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True, autoincrement=False),
+            Column("x", BigInteger, primary_key=True, autoincrement=True),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], True)
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_fks.py b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_fks.py
new file mode 100644
index 00000000..0240b98d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_fks.py
@@ -0,0 +1,1190 @@
+from sqlalchemy import Column
+from sqlalchemy import ForeignKeyConstraint
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+
+from ._autogen_fixtures import AutogenFixtureTest
+from ...testing import combinations
+from ...testing import config
+from ...testing import eq_
+from ...testing import mock
+from ...testing import TestBase
+
+
+class AutogenerateForeignKeysTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+    __requires__ = ("foreign_key_constraint_reflection",)
+
+    def test_remove_fk(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+            ForeignKeyConstraint(["test2"], ["some_table.test"]),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+        )
+
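+        # _fixture creates the m1 schema in the database, then runs an
+        # autogenerate comparison against m2 and returns the diffs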
+        diffs = self._fixture(m1, m2)
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["test2"],
+            "some_table",
+            ["test"],
+            conditional_name="servergenerated",
+        )
+
+    def test_add_fk(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+            ForeignKeyConstraint(["test2"], ["some_table.test"]),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        self._assert_fk_diff(
+            diffs[0], "add_fk", "user", ["test2"], "some_table", ["test"]
+        )
+
+    def test_no_change(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", Integer),
+            ForeignKeyConstraint(["test2"], ["some_table.id"]),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", Integer),
+            ForeignKeyConstraint(["test2"], ["some_table.id"]),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+    def test_no_change_composite_fk(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+            ForeignKeyConstraint(
+                ["other_id_1", "other_id_2"],
+                ["some_table.id_1", "some_table.id_2"],
+            ),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+            ForeignKeyConstraint(
+                ["other_id_1", "other_id_2"],
+                ["some_table.id_1", "some_table.id_2"],
+            ),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+    def test_casing_convention_changed_so_put_drops_first(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+            ForeignKeyConstraint(["test2"], ["some_table.test"], name="MyFK"),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+        )
+
+        # foreign key autogen currently does not take "name" into account,
+        # so change the def just for the purposes of testing the
+        # add/drop order for now.
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+            ForeignKeyConstraint(["a1"], ["some_table.test"], name="myfk"),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["test2"],
+            "some_table",
+            ["test"],
+            name="MyFK" if config.requirements.fk_names.enabled else None,
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["a1"],
+            "some_table",
+            ["test"],
+            name="myfk",
+        )
+
+    def test_add_composite_fk_with_name(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+            ForeignKeyConstraint(
+                ["other_id_1", "other_id_2"],
+                ["some_table.id_1", "some_table.id_2"],
+                name="fk_test_name",
+            ),
+        )
+
+        diffs = self._fixture(m1, m2)
+        self._assert_fk_diff(
+            diffs[0],
+            "add_fk",
+            "user",
+            ["other_id_1", "other_id_2"],
+            "some_table",
+            ["id_1", "id_2"],
+            name="fk_test_name",
+        )
+
+    @config.requirements.no_name_normalize
+    def test_remove_composite_fk(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+            ForeignKeyConstraint(
+                ["other_id_1", "other_id_2"],
+                ["some_table.id_1", "some_table.id_2"],
+                name="fk_test_name",
+            ),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["other_id_1", "other_id_2"],
+            "some_table",
+            ["id_1", "id_2"],
+            conditional_name="fk_test_name",
+        )
+
+    def test_add_fk_colkeys(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id_1", String(10), key="tid1", primary_key=True),
+            Column("id_2", String(10), key="tid2", primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("other_id_1", String(10), key="oid1"),
+            Column("other_id_2", String(10), key="oid2"),
+            ForeignKeyConstraint(
+                ["oid1", "oid2"],
+                ["some_table.tid1", "some_table.tid2"],
+                name="fk_test_name",
+            ),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        self._assert_fk_diff(
+            diffs[0],
+            "add_fk",
+            "user",
+            ["other_id_1", "other_id_2"],
+            "some_table",
+            ["id_1", "id_2"],
+            name="fk_test_name",
+        )
+
+    def test_no_change_colkeys(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+            ForeignKeyConstraint(
+                ["other_id_1", "other_id_2"],
+                ["some_table.id_1", "some_table.id_2"],
+            ),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id_1", String(10), key="tid1", primary_key=True),
+            Column("id_2", String(10), key="tid2", primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("other_id_1", String(10), key="oid1"),
+            Column("other_id_2", String(10), key="oid2"),
+            ForeignKeyConstraint(
+                ["oid1", "oid2"], ["some_table.tid1", "some_table.tid2"]
+            ),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+
+class IncludeHooksTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+    __requires__ = ("fk_names",)
+
+    @combinations(("object",), ("name",))
+    @config.requirements.no_name_normalize
+    def test_remove_connection_fk(self, hook_type):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        ref = Table(
+            "ref",
+            m1,
+            Column("id", Integer, primary_key=True),
+        )
+        t1 = Table(
+            "t",
+            m1,
+            Column("x", Integer),
+            Column("y", Integer),
+        )
+        t1.append_constraint(
+            ForeignKeyConstraint([t1.c.x], [ref.c.id], name="fk1")
+        )
+        t1.append_constraint(
+            ForeignKeyConstraint([t1.c.y], [ref.c.id], name="fk2")
+        )
+
+        ref = Table(
+            "ref",
+            m2,
+            Column("id", Integer, primary_key=True),
+        )
+        Table(
+            "t",
+            m2,
+            Column("x", Integer),
+            Column("y", Integer),
+        )
+
+        if hook_type == "object":
+
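+            # same signature as the include_object hook accepted by
+            # EnvironmentContext.configure()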
+            def include_object(object_, name, type_, reflected, compare_to):
+                return not (
+                    isinstance(object_, ForeignKeyConstraint)
+                    and type_ == "foreign_key_constraint"
+                    and reflected
+                    and name == "fk1"
+                )
+
+            diffs = self._fixture(m1, m2, object_filters=include_object)
+        elif hook_type == "name":
+
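+            # the newer name-based hook: receives the reflected name plus
+            # a dict of parent object names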
+            def include_name(name, type_, parent_names):
+                if name == "fk1":
+                    # MariaDB also reflects the FK's backing index
+                    if type_ == "index":
+                        return True
+                    eq_(type_, "foreign_key_constraint")
+                    eq_(
+                        parent_names,
+                        {
+                            "schema_name": None,
+                            "table_name": "t",
+                            "schema_qualified_table_name": "t",
+                        },
+                    )
+                    return False
+                else:
+                    return True
+
+            diffs = self._fixture(m1, m2, name_filters=include_name)
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "t",
+            ["y"],
+            "ref",
+            ["id"],
+            conditional_name="fk2",
+        )
+        eq_(len(diffs), 1)
+
+    def test_add_metadata_fk(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "ref",
+            m1,
+            Column("id", Integer, primary_key=True),
+        )
+        Table(
+            "t",
+            m1,
+            Column("x", Integer),
+            Column("y", Integer),
+        )
+
+        ref = Table(
+            "ref",
+            m2,
+            Column("id", Integer, primary_key=True),
+        )
+        t2 = Table(
+            "t",
+            m2,
+            Column("x", Integer),
+            Column("y", Integer),
+        )
+        t2.append_constraint(
+            ForeignKeyConstraint([t2.c.x], [ref.c.id], name="fk1")
+        )
+        t2.append_constraint(
+            ForeignKeyConstraint([t2.c.y], [ref.c.id], name="fk2")
+        )
+
+        def include_object(object_, name, type_, reflected, compare_to):
+            return not (
+                isinstance(object_, ForeignKeyConstraint)
+                and type_ == "foreign_key_constraint"
+                and not reflected
+                and name == "fk1"
+            )
+
+        diffs = self._fixture(m1, m2, object_filters=include_object)
+
+        self._assert_fk_diff(
+            diffs[0], "add_fk", "t", ["y"], "ref", ["id"], name="fk2"
+        )
+        eq_(len(diffs), 1)
+
+    @combinations(("object",), ("name",))
+    @config.requirements.no_name_normalize
+    def test_change_fk(self, hook_type):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        r1a = Table(
+            "ref_a",
+            m1,
+            Column("a", Integer, primary_key=True),
+        )
+        Table(
+            "ref_b",
+            m1,
+            Column("a", Integer, primary_key=True),
+            Column("b", Integer, primary_key=True),
+        )
+        t1 = Table(
+            "t",
+            m1,
+            Column("x", Integer),
+            Column("y", Integer),
+            Column("z", Integer),
+        )
+        t1.append_constraint(
+            ForeignKeyConstraint([t1.c.x], [r1a.c.a], name="fk1")
+        )
+        t1.append_constraint(
+            ForeignKeyConstraint([t1.c.y], [r1a.c.a], name="fk2")
+        )
+
+        Table(
+            "ref_a",
+            m2,
+            Column("a", Integer, primary_key=True),
+        )
+        r2b = Table(
+            "ref_b",
+            m2,
+            Column("a", Integer, primary_key=True),
+            Column("b", Integer, primary_key=True),
+        )
+        t2 = Table(
+            "t",
+            m2,
+            Column("x", Integer),
+            Column("y", Integer),
+            Column("z", Integer),
+        )
+        t2.append_constraint(
+            ForeignKeyConstraint(
+                [t2.c.x, t2.c.z], [r2b.c.a, r2b.c.b], name="fk1"
+            )
+        )
+        t2.append_constraint(
+            ForeignKeyConstraint(
+                [t2.c.y, t2.c.z], [r2b.c.a, r2b.c.b], name="fk2"
+            )
+        )
+
+        if hook_type == "object":
+
+            def include_object(object_, name, type_, reflected, compare_to):
+                return not (
+                    isinstance(object_, ForeignKeyConstraint)
+                    and type_ == "foreign_key_constraint"
+                    and name == "fk1"
+                )
+
+            diffs = self._fixture(m1, m2, object_filters=include_object)
+        elif hook_type == "name":
+
+            def include_name(name, type_, parent_names):
+                if type_ == "index":
+                    # MariaDB also reflects the FK's backing index
+                    return True
+
+                if name == "fk1":
+                    eq_(type_, "foreign_key_constraint")
+                    eq_(
+                        parent_names,
+                        {
+                            "schema_name": None,
+                            "table_name": "t",
+                            "schema_qualified_table_name": "t",
+                        },
+                    )
+                    return False
+                else:
+                    return True
+
+            diffs = self._fixture(m1, m2, name_filters=include_name)
+
+        if hook_type == "object":
+            self._assert_fk_diff(
+                diffs[0], "remove_fk", "t", ["y"], "ref_a", ["a"], name="fk2"
+            )
+            self._assert_fk_diff(
+                diffs[1],
+                "add_fk",
+                "t",
+                ["y", "z"],
+                "ref_b",
+                ["a", "b"],
+                name="fk2",
+            )
+            eq_(len(diffs), 2)
+        elif hook_type == "name":
+            eq_(
+                {(d[0], d[1].name) for d in diffs},
+                {("add_fk", "fk2"), ("add_fk", "fk1"), ("remove_fk", "fk2")},
+            )
+
+
+class AutogenerateFKOptionsTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+
+    def _fk_opts_fixture(self, old_opts, new_opts):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("tid", Integer),
+            ForeignKeyConstraint(["tid"], ["some_table.id"], **old_opts),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("tid", Integer),
+            ForeignKeyConstraint(["tid"], ["some_table.id"], **new_opts),
+        )
+
+        return self._fixture(m1, m2)
+
+    @config.requirements.fk_ondelete_is_reflected
+    def test_add_ondelete(self):
+        diffs = self._fk_opts_fixture({}, {"ondelete": "cascade"})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            ondelete=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            ondelete="cascade",
+        )
+
+    @config.requirements.fk_ondelete_is_reflected
+    def test_remove_ondelete(self):
+        diffs = self._fk_opts_fixture({"ondelete": "CASCADE"}, {})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            ondelete="CASCADE",
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            ondelete=None,
+        )
+
+    def test_nochange_ondelete(self):
+        """test case sensitivity"""
+        diffs = self._fk_opts_fixture(
+            {"ondelete": "caSCAde"}, {"ondelete": "CasCade"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_onupdate_is_reflected
+    def test_add_onupdate(self):
+        diffs = self._fk_opts_fixture({}, {"onupdate": "cascade"})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate="cascade",
+        )
+
+    @config.requirements.fk_onupdate_is_reflected
+    def test_remove_onupdate(self):
+        diffs = self._fk_opts_fixture({"onupdate": "CASCADE"}, {})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate="CASCADE",
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate=None,
+        )
+
+    @config.requirements.fk_onupdate
+    def test_nochange_onupdate(self):
+        """test case sensitivity"""
+        diffs = self._fk_opts_fixture(
+            {"onupdate": "caSCAde"}, {"onupdate": "CasCade"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_ondelete_restrict
+    def test_nochange_ondelete_restrict(self):
+        """test the RESTRICT option which MySQL doesn't report on"""
+
+        diffs = self._fk_opts_fixture(
+            {"ondelete": "restrict"}, {"ondelete": "restrict"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_onupdate_restrict
+    def test_nochange_onupdate_restrict(self):
+        """test the RESTRICT option which MySQL doesn't report on"""
+
+        diffs = self._fk_opts_fixture(
+            {"onupdate": "restrict"}, {"onupdate": "restrict"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_ondelete_noaction
+    def test_nochange_ondelete_noaction(self):
+        """test the NO ACTION option which generally comes back as None"""
+
+        diffs = self._fk_opts_fixture(
+            {"ondelete": "no action"}, {"ondelete": "no action"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_onupdate
+    def test_nochange_onupdate_noaction(self):
+        """test the NO ACTION option which generally comes back as None"""
+
+        diffs = self._fk_opts_fixture(
+            {"onupdate": "no action"}, {"onupdate": "no action"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_ondelete_restrict
+    def test_change_ondelete_from_restrict(self):
+        """test the RESTRICT option which MySQL doesn't report on"""
+
+        # note that this is impossible to detect if we change
+        # from RESTRICT to NO ACTION on MySQL.
+        diffs = self._fk_opts_fixture(
+            {"ondelete": "restrict"}, {"ondelete": "cascade"}
+        )
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate=None,
+            ondelete=mock.ANY,  # MySQL reports None, PG reports RESTRICT
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate=None,
+            ondelete="cascade",
+        )
+
+    @config.requirements.fk_onupdate_restrict
+    def test_change_onupdate_from_restrict(self):
+        """test the RESTRICT option which MySQL doesn't report on"""
+
+        # note that this is impossible to detect if we change
+        # from RESTRICT to NO ACTION on MySQL.
+        diffs = self._fk_opts_fixture(
+            {"onupdate": "restrict"}, {"onupdate": "cascade"}
+        )
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate=mock.ANY,  # MySQL reports None, PG reports RESTRICT
+            ondelete=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate="cascade",
+            ondelete=None,
+        )
+
+    @config.requirements.fk_ondelete_is_reflected
+    @config.requirements.fk_onupdate_is_reflected
+    def test_ondelete_onupdate_combo(self):
+        diffs = self._fk_opts_fixture(
+            {"onupdate": "CASCADE", "ondelete": "SET NULL"},
+            {"onupdate": "RESTRICT", "ondelete": "RESTRICT"},
+        )
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate="CASCADE",
+            ondelete="SET NULL",
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate="RESTRICT",
+            ondelete="RESTRICT",
+        )
+
+    @config.requirements.fk_initially
+    def test_add_initially_deferred(self):
+        diffs = self._fk_opts_fixture({}, {"initially": "deferred"})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially="deferred",
+        )
+
+    @config.requirements.fk_initially
+    def test_remove_initially_deferred(self):
+        diffs = self._fk_opts_fixture({"initially": "deferred"}, {})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially="DEFERRED",
+            deferrable=True,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially=None,
+        )
+
+    @config.requirements.fk_deferrable
+    @config.requirements.fk_initially
+    def test_add_initially_immediate_plus_deferrable(self):
+        diffs = self._fk_opts_fixture(
+            {}, {"initially": "immediate", "deferrable": True}
+        )
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially="immediate",
+            deferrable=True,
+        )
+
+    @config.requirements.fk_deferrable
+    @config.requirements.fk_initially
+    def test_remove_initially_immediate_plus_deferrable(self):
+        diffs = self._fk_opts_fixture(
+            {"initially": "immediate", "deferrable": True}, {}
+        )
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially=None,  # immediate is the default
+            deferrable=True,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially=None,
+            deferrable=None,
+        )
+
+    @config.requirements.fk_initially
+    @config.requirements.fk_deferrable
+    def test_add_initially_deferrable_nochange_one(self):
+        diffs = self._fk_opts_fixture(
+            {"deferrable": True, "initially": "immediate"},
+            {"deferrable": True, "initially": "immediate"},
+        )
+
+        eq_(diffs, [])
+
+    @config.requirements.fk_initially
+    @config.requirements.fk_deferrable
+    def test_add_initially_deferrable_nochange_two(self):
+        diffs = self._fk_opts_fixture(
+            {"deferrable": True, "initially": "deferred"},
+            {"deferrable": True, "initially": "deferred"},
+        )
+
+        eq_(diffs, [])
+
+    @config.requirements.fk_initially
+    @config.requirements.fk_deferrable
+    def test_add_initially_deferrable_nochange_three(self):
+        diffs = self._fk_opts_fixture(
+            {"deferrable": None, "initially": "deferred"},
+            {"deferrable": None, "initially": "deferred"},
+        )
+
+        eq_(diffs, [])
+
+    @config.requirements.fk_deferrable
+    def test_add_deferrable(self):
+        diffs = self._fk_opts_fixture({}, {"deferrable": True})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            deferrable=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            deferrable=True,
+        )
+
+    @config.requirements.fk_deferrable_is_reflected
+    def test_remove_deferrable(self):
+        diffs = self._fk_opts_fixture({"deferrable": True}, {})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            deferrable=True,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            deferrable=None,
+        )
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_identity.py b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_identity.py
new file mode 100644
index 00000000..3dee9fc9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_autogen_identity.py
@@ -0,0 +1,226 @@
+import sqlalchemy as sa
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import Table
+
+from alembic.util import sqla_compat
+from ._autogen_fixtures import AutogenFixtureTest
+from ... import testing
+from ...testing import config
+from ...testing import eq_
+from ...testing import is_true
+from ...testing import TestBase
+
+
+class AutogenerateIdentityTest(AutogenFixtureTest, TestBase):
+    __requires__ = ("identity_columns",)
+    __backend__ = True
+
+    def test_add_identity_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("user", m1, Column("other", sa.Text))
+
+        Table(
+            "user",
+            m2,
+            Column("other", sa.Text),
+            Column(
+                "id",
+                Integer,
+                sa.Identity(start=5, increment=7),
+                primary_key=True,
+            ),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "add_column")
+        eq_(diffs[0][2], "user")
+        eq_(diffs[0][3].name, "id")
+        i = diffs[0][3].identity
+
+        is_true(isinstance(i, sa.Identity))
+        eq_(i.start, 5)
+        eq_(i.increment, 7)
+
+    def test_remove_identity_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "user",
+            m1,
+            Column(
+                "id",
+                Integer,
+                sa.Identity(start=2, increment=3),
+                primary_key=True,
+            ),
+        )
+
+        Table("user", m2)
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "remove_column")
+        eq_(diffs[0][2], "user")
+        c = diffs[0][3]
+        eq_(c.name, "id")
+
+        is_true(isinstance(c.identity, sa.Identity))
+        eq_(c.identity.start, 2)
+        eq_(c.identity.increment, 3)
+
+    def test_no_change_identity_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        for m in (m1, m2):
+            id_ = sa.Identity(start=2)
+            Table("user", m, Column("id", Integer, id_))
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+    def test_dialect_kwargs_changes(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
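+        # newer SQLAlchemy exposes the Oracle-only Identity options as
+        # dialect kwargs (with the "oracle_" prefix); older versions take
+        # them as plain keyword arguments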
+        if sqla_compat.identity_has_dialect_kwargs:
+            args = {"oracle_on_null": True, "oracle_order": True}
+        else:
+            args = {"on_null": True, "order": True}
+
+        Table("user", m1, Column("id", Integer, sa.Identity(start=2)))
+        id_ = sa.Identity(start=2, **args)
+        Table("user", m2, Column("id", Integer, id_))
+
+        diffs = self._fixture(m1, m2)
+        if config.db.name == "oracle":
+            eq_(len(diffs), 1)
+            eq_(diffs[0][0][0], "modify_default")
+        else:
+            eq_(diffs, [])
+
+    @testing.combinations(
+        (None, dict(start=2)),
+        (dict(start=2), None),
+        (dict(start=2), dict(start=2, increment=7)),
+        (dict(always=False), dict(always=True)),
+        (
+            dict(start=1, minvalue=0, maxvalue=100, cycle=True),
+            dict(start=1, minvalue=0, maxvalue=100, cycle=False),
+        ),
+        (
+            dict(start=10, increment=3, maxvalue=9999),
+            dict(start=10, increment=1, maxvalue=3333),
+        ),
+    )
+    @config.requirements.identity_columns_alter
+    def test_change_identity(self, before, after):
+        arg_before = (sa.Identity(**before),) if before else ()
+        arg_after = (sa.Identity(**after),) if after else ()
+
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, *arg_before),
+            Column("other", sa.Text),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, *arg_after),
+            Column("other", sa.Text),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(len(diffs[0]), 1)
+        diffs = diffs[0][0]
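+        # modify_default diff tuple layout, as asserted below:
+        # (op, schema, table, column, kw, existing, new)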
+        eq_(diffs[0], "modify_default")
+        eq_(diffs[2], "user")
+        eq_(diffs[3], "id")
+        old = diffs[5]
+        new = diffs[6]
+
+        def check(kw, idt):
+            if kw:
+                is_true(isinstance(idt, sa.Identity))
+                for k, v in kw.items():
+                    eq_(getattr(idt, k), v)
+            else:
+                is_true(idt in (None, False))
+
+        check(before, old)
+        check(after, new)
+
+    def test_add_identity_to_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer),
+            Column("other", sa.Text),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, sa.Identity(start=2, maxvalue=1000)),
+            Column("other", sa.Text),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(len(diffs[0]), 1)
+        diffs = diffs[0][0]
+        eq_(diffs[0], "modify_default")
+        eq_(diffs[2], "user")
+        eq_(diffs[3], "id")
+        eq_(diffs[5], None)
+        added = diffs[6]
+
+        is_true(isinstance(added, sa.Identity))
+        eq_(added.start, 2)
+        eq_(added.maxvalue, 1000)
+
+    def test_remove_identity_from_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, sa.Identity(start=2, maxvalue=1000)),
+            Column("other", sa.Text),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer),
+            Column("other", sa.Text),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(len(diffs[0]), 1)
+        diffs = diffs[0][0]
+        eq_(diffs[0], "modify_default")
+        eq_(diffs[2], "user")
+        eq_(diffs[3], "id")
+        eq_(diffs[6], None)
+        removed = diffs[5]
+
+        is_true(isinstance(removed, sa.Identity))
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_environment.py b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_environment.py
new file mode 100644
index 00000000..df2d9afb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_environment.py
@@ -0,0 +1,364 @@
+import io
+
+from ...migration import MigrationContext
+from ...testing import assert_raises
+from ...testing import config
+from ...testing import eq_
+from ...testing import is_
+from ...testing import is_false
+from ...testing import is_not_
+from ...testing import is_true
+from ...testing import ne_
+from ...testing.fixtures import TestBase
+
+
+class MigrationTransactionTest(TestBase):
+    __backend__ = True
+
+    conn = None
+
+    def _fixture(self, opts):
+        self.conn = conn = config.db.connect()
+
+        if opts.get("as_sql", False):
+            self.context = MigrationContext.configure(
+                dialect=conn.dialect, opts=opts
+            )
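+            # "as_sql" (offline / --sql) mode: statements are rendered
+            # into a buffer rather than executed on a connection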
+            self.context.output_buffer = self.context.impl.output_buffer = (
+                io.StringIO()
+            )
+        else:
+            self.context = MigrationContext.configure(
+                connection=conn, opts=opts
+            )
+        return self.context
+
+    def teardown_method(self):
+        if self.conn:
+            self.conn.close()
+
+    def test_proxy_transaction_rollback(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+
+        is_false(self.conn.in_transaction())
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+        proxy.rollback()
+        is_false(self.conn.in_transaction())
+
+    def test_proxy_transaction_commit(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+        proxy.commit()
+        is_false(self.conn.in_transaction())
+
+    def test_proxy_transaction_contextmanager_commit(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+        with proxy:
+            pass
+        is_false(self.conn.in_transaction())
+
+    def test_proxy_transaction_contextmanager_rollback(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+
+        def go():
+            with proxy:
+                raise Exception("hi")
+
+        assert_raises(Exception, go)
+        is_false(self.conn.in_transaction())
+
+    def test_proxy_transaction_contextmanager_explicit_rollback(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+
+        with proxy:
+            is_true(self.conn.in_transaction())
+            proxy.rollback()
+            is_false(self.conn.in_transaction())
+
+        is_false(self.conn.in_transaction())
+
+    def test_proxy_transaction_contextmanager_explicit_commit(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+
+        with proxy:
+            is_true(self.conn.in_transaction())
+            proxy.commit()
+            is_false(self.conn.in_transaction())
+
+        is_false(self.conn.in_transaction())
+
+    def test_transaction_per_migration_transactional_ddl(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+
+        is_false(self.conn.in_transaction())
+
+        with context.begin_transaction():
+            is_false(self.conn.in_transaction())
+            with context.begin_transaction(_per_migration=True):
+                is_true(self.conn.in_transaction())
+
+            is_false(self.conn.in_transaction())
+        is_false(self.conn.in_transaction())
+
+    def test_transaction_per_migration_non_transactional_ddl(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": False}
+        )
+
+        is_false(self.conn.in_transaction())
+
+        with context.begin_transaction():
+            is_false(self.conn.in_transaction())
+            with context.begin_transaction(_per_migration=True):
+                is_true(self.conn.in_transaction())
+
+            is_false(self.conn.in_transaction())
+        is_false(self.conn.in_transaction())
+
+    def test_transaction_per_all_transactional_ddl(self):
+        context = self._fixture({"transactional_ddl": True})
+
+        is_false(self.conn.in_transaction())
+
+        with context.begin_transaction():
+            is_true(self.conn.in_transaction())
+            with context.begin_transaction(_per_migration=True):
+                is_true(self.conn.in_transaction())
+
+            is_true(self.conn.in_transaction())
+        is_false(self.conn.in_transaction())
+
+    def test_transaction_per_all_non_transactional_ddl(self):
+        context = self._fixture({"transactional_ddl": False})
+
+        is_false(self.conn.in_transaction())
+
+        with context.begin_transaction():
+            is_false(self.conn.in_transaction())
+            with context.begin_transaction(_per_migration=True):
+                is_true(self.conn.in_transaction())
+
+            is_false(self.conn.in_transaction())
+        is_false(self.conn.in_transaction())
+
+    def test_transaction_per_all_sqlmode(self):
+        context = self._fixture({"as_sql": True})
+
+        context.execute("step 1")
+        with context.begin_transaction():
+            context.execute("step 2")
+            with context.begin_transaction(_per_migration=True):
+                context.execute("step 3")
+
+            context.execute("step 4")
+        context.execute("step 5")
+
+        if context.impl.transactional_ddl:
+            self._assert_impl_steps(
+                "step 1",
+                "BEGIN",
+                "step 2",
+                "step 3",
+                "step 4",
+                "COMMIT",
+                "step 5",
+            )
+        else:
+            self._assert_impl_steps(
+                "step 1", "step 2", "step 3", "step 4", "step 5"
+            )
+
+    def test_transaction_per_migration_sqlmode(self):
+        context = self._fixture(
+            {"as_sql": True, "transaction_per_migration": True}
+        )
+
+        context.execute("step 1")
+        with context.begin_transaction():
+            context.execute("step 2")
+            with context.begin_transaction(_per_migration=True):
+                context.execute("step 3")
+
+            context.execute("step 4")
+        context.execute("step 5")
+
+        if context.impl.transactional_ddl:
+            self._assert_impl_steps(
+                "step 1",
+                "step 2",
+                "BEGIN",
+                "step 3",
+                "COMMIT",
+                "step 4",
+                "step 5",
+            )
+        else:
+            self._assert_impl_steps(
+                "step 1", "step 2", "step 3", "step 4", "step 5"
+            )
+
+    @config.requirements.autocommit_isolation
+    def test_autocommit_block(self):
+        context = self._fixture({"transaction_per_migration": True})
+
+        is_false(self.conn.in_transaction())
+
+        with context.begin_transaction():
+            is_false(self.conn.in_transaction())
+            with context.begin_transaction(_per_migration=True):
+                is_true(self.conn.in_transaction())
+
+                with context.autocommit_block():
+                    # under SQLAlchemy 1.x, context.connection is a separate
+                    # connection due to the execution_options() call; under
+                    # the future (2.0-style) engine it is the same connection
+                    # and a "transaction" block exists despite autocommit
+                    if self.is_sqlalchemy_future:
+                        is_(context.connection, self.conn)
+                    else:
+                        is_not_(context.connection, self.conn)
+                        is_false(self.conn.in_transaction())
+
+                    eq_(
+                        context.connection._execution_options[
+                            "isolation_level"
+                        ],
+                        "AUTOCOMMIT",
+                    )
+
+                ne_(
+                    context.connection._execution_options.get(
+                        "isolation_level", None
+                    ),
+                    "AUTOCOMMIT",
+                )
+                is_true(self.conn.in_transaction())
+
+            is_false(self.conn.in_transaction())
+        is_false(self.conn.in_transaction())
+
+    @config.requirements.autocommit_isolation
+    def test_autocommit_block_no_transaction(self):
+        context = self._fixture({"transaction_per_migration": True})
+
+        is_false(self.conn.in_transaction())
+
+        with context.autocommit_block():
+            is_true(context.connection.in_transaction())
+
+            # under SQLAlchemy 1.x, context.connection is a separate
+            # connection due to the execution_options() call; under the
+            # future (2.0-style) engine it is the same connection and a
+            # "transaction" block exists despite autocommit
+            if self.is_sqlalchemy_future:
+                is_(context.connection, self.conn)
+            else:
+                is_not_(context.connection, self.conn)
+                is_false(self.conn.in_transaction())
+
+            eq_(
+                context.connection._execution_options["isolation_level"],
+                "AUTOCOMMIT",
+            )
+
+        ne_(
+            context.connection._execution_options.get("isolation_level", None),
+            "AUTOCOMMIT",
+        )
+
+        is_false(self.conn.in_transaction())
+
+    def test_autocommit_block_transactional_ddl_sqlmode(self):
+        context = self._fixture(
+            {
+                "transaction_per_migration": True,
+                "transactional_ddl": True,
+                "as_sql": True,
+            }
+        )
+
+        with context.begin_transaction():
+            context.execute("step 1")
+            with context.begin_transaction(_per_migration=True):
+                context.execute("step 2")
+
+                with context.autocommit_block():
+                    context.execute("step 3")
+
+                context.execute("step 4")
+
+            context.execute("step 5")
+
+        self._assert_impl_steps(
+            "step 1",
+            "BEGIN",
+            "step 2",
+            "COMMIT",
+            "step 3",
+            "BEGIN",
+            "step 4",
+            "COMMIT",
+            "step 5",
+        )
+
+    def test_autocommit_block_nontransactional_ddl_sqlmode(self):
+        context = self._fixture(
+            {
+                "transaction_per_migration": True,
+                "transactional_ddl": False,
+                "as_sql": True,
+            }
+        )
+
+        with context.begin_transaction():
+            context.execute("step 1")
+            with context.begin_transaction(_per_migration=True):
+                context.execute("step 2")
+
+                with context.autocommit_block():
+                    context.execute("step 3")
+
+                context.execute("step 4")
+
+            context.execute("step 5")
+
+        self._assert_impl_steps(
+            "step 1", "step 2", "step 3", "step 4", "step 5"
+        )
+
+    def _assert_impl_steps(self, *steps):
+        to_check = self.context.output_buffer.getvalue()
+
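+        # replay the expected steps through a fresh buffer and compare
+        # the rendered SQL against what the context actually emitted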
+        self.context.impl.output_buffer = buf = io.StringIO()
+        for step in steps:
+            if step == "BEGIN":
+                self.context.impl.emit_begin()
+            elif step == "COMMIT":
+                self.context.impl.emit_commit()
+            else:
+                self.context.impl._exec(step)
+
+        eq_(to_check, buf.getvalue())
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_op.py b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_op.py
new file mode 100644
index 00000000..a63b3f2f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/suite/test_op.py
@@ -0,0 +1,42 @@
+"""Test against the builders in the op.* module."""
+
+from sqlalchemy import Column
+from sqlalchemy import event
+from sqlalchemy import Integer
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy.sql import text
+
+from ...testing.fixtures import AlterColRoundTripFixture
+from ...testing.fixtures import TestBase
+
+
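+# fixture hook: any Table named "tbl_with_auto_appended_column" gets an
+# extra column appended as soon as it is attached to its MetaData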
+@event.listens_for(Table, "after_parent_attach")
+def _add_cols(table, metadata):
+    if table.name == "tbl_with_auto_appended_column":
+        table.append_column(Column("bat", Integer))
+
+
+class BackendAlterColumnTest(AlterColRoundTripFixture, TestBase):
+    __backend__ = True
+
+    def test_rename_column(self):
+        self._run_alter_col({}, {"name": "newname"})
+
+    def test_modify_type_int_str(self):
+        self._run_alter_col({"type": Integer()}, {"type": String(50)})
+
+    def test_add_server_default_int(self):
+        self._run_alter_col({"type": Integer}, {"server_default": text("5")})
+
+    def test_modify_server_default_int(self):
+        self._run_alter_col(
+            {"type": Integer, "server_default": text("2")},
+            {"server_default": text("5")},
+        )
+
+    def test_modify_nullable_to_non(self):
+        self._run_alter_col({}, {"nullable": False})
+
+    def test_modify_non_nullable_to_nullable(self):
+        self._run_alter_col({"nullable": False}, {"nullable": True})
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/util.py b/.venv/lib/python3.12/site-packages/alembic/testing/util.py
new file mode 100644
index 00000000..4517a69f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/util.py
@@ -0,0 +1,126 @@
+# testing/util.py
+# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+from __future__ import annotations
+
+import types
+from typing import Union
+
+from sqlalchemy.util import inspect_getfullargspec
+
+from ..util import sqla_2
+
+
+def flag_combinations(*combinations):
+    """A facade around @testing.combinations() oriented towards boolean
+    keyword-based arguments.
+
+    Basically generates a nice-looking identifier based on the keywords
+    and also sets up the argument names.
+
+    E.g.::
+
+        @testing.flag_combinations(
+            dict(lazy=False, passive=False),
+            dict(lazy=True, passive=False),
+            dict(lazy=False, passive=True),
+            dict(lazy=False, passive=True, raiseload=True),
+        )
+
+
+    would result in::
+
+        @testing.combinations(
+            ('', False, False, False),
+            ('lazy', True, False, False),
+            ('lazy_passive', True, True, False),
+            ('lazy_passive', True, True, True),
+            id_='iaaa',
+            argnames='lazy,passive,raiseload'
+        )
+
+    """
+    from sqlalchemy.testing import config
+
+    keys = set()
+
+    for d in combinations:
+        keys.update(d)
+
+    keys = sorted(keys)
+
+    return config.combinations(
+        *[
+            ("_".join(k for k in keys if d.get(k, False)),)
+            + tuple(d.get(k, False) for k in keys)
+            for d in combinations
+        ],
+        id_="i" + ("a" * len(keys)),
+        argnames=",".join(keys),
+    )
+
+
+def resolve_lambda(__fn, **kw):
+    """Given a no-arg lambda and a namespace, return a new lambda that
+    has all the values filled in.
+
+    This is used so that we can have module-level fixtures that
+    refer to instance-level variables using lambdas.
+
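+    E.g., illustratively::
+
+        resolve_lambda(lambda value: value + 1, value=5)  # returns 6
+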
+    """
+
+    pos_args = inspect_getfullargspec(__fn)[0]
+    pass_pos_args = {arg: kw.pop(arg) for arg in pos_args}
+    glb = dict(__fn.__globals__)
+    glb.update(kw)
+    new_fn = types.FunctionType(__fn.__code__, glb)
+    return new_fn(**pass_pos_args)
+
+
+def metadata_fixture(ddl="function"):
+    """Provide MetaData for a pytest fixture."""
+
+    from sqlalchemy.testing import config
+    from . import fixture_functions
+
+    def decorate(fn):
+        def run_ddl(self):
+            from sqlalchemy import schema
+
+            metadata = self.metadata = schema.MetaData()
+            try:
+                result = fn(self, metadata)
+                metadata.create_all(config.db)
+                # TODO:
+                # somehow get a per-function dml erase fixture here
+                yield result
+            finally:
+                metadata.drop_all(config.db)
+
+        return fixture_functions.fixture(scope=ddl)(run_ddl)
+
+    return decorate
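+
+
+# Usage sketch (hypothetical fixture; assumes ``Table``/``Column``/``Integer``
+# from sqlalchemy): the decorated function receives a fresh MetaData, and the
+# tables defined on it are created before, and dropped after, the scope.
+#
+#     @metadata_fixture(ddl="class")
+#     def tables(self, metadata):
+#         return Table("foo", metadata, Column("id", Integer, primary_key=True))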
+
+
+def _safe_int(value: str) -> Union[int, str]:
+    try:
+        return int(value)
+    except ValueError:
+        return value
+
+
+def testing_engine(url=None, options=None, future=False):
+    from sqlalchemy.testing import config
+    from sqlalchemy.testing.engines import testing_engine
+
+    if not future:
+        future = getattr(config._current.options, "future_engine", False)
+
+    if not sqla_2:
+        kw = {"future": future} if future else {}
+    else:
+        kw = {}
+    return testing_engine(url, options, **kw)
diff --git a/.venv/lib/python3.12/site-packages/alembic/testing/warnings.py b/.venv/lib/python3.12/site-packages/alembic/testing/warnings.py
new file mode 100644
index 00000000..86d45a0d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/testing/warnings.py
@@ -0,0 +1,31 @@
+# testing/warnings.py
+# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+
+import warnings
+
+from sqlalchemy import exc as sa_exc
+
+
+def setup_filters():
+    """Set global warning behavior for the test suite."""
+
+    warnings.resetwarnings()
+
+    warnings.filterwarnings("error", category=sa_exc.SADeprecationWarning)
+    warnings.filterwarnings("error", category=sa_exc.SAWarning)
+
+    # some selected deprecations...
+    warnings.filterwarnings("error", category=DeprecationWarning)
+    try:
+        import pytest
+    except ImportError:
+        pass
+    else:
+        warnings.filterwarnings(
+            "once", category=pytest.PytestDeprecationWarning
+        )
diff --git a/.venv/lib/python3.12/site-packages/alembic/util/__init__.py b/.venv/lib/python3.12/site-packages/alembic/util/__init__.py
new file mode 100644
index 00000000..786baa2b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/util/__init__.py
@@ -0,0 +1,28 @@
+from .editor import open_in_editor as open_in_editor
+from .exc import AutogenerateDiffsDetected as AutogenerateDiffsDetected
+from .exc import CommandError as CommandError
+from .langhelpers import _with_legacy_names as _with_legacy_names
+from .langhelpers import asbool as asbool
+from .langhelpers import dedupe_tuple as dedupe_tuple
+from .langhelpers import Dispatcher as Dispatcher
+from .langhelpers import EMPTY_DICT as EMPTY_DICT
+from .langhelpers import immutabledict as immutabledict
+from .langhelpers import memoized_property as memoized_property
+from .langhelpers import ModuleClsProxy as ModuleClsProxy
+from .langhelpers import not_none as not_none
+from .langhelpers import rev_id as rev_id
+from .langhelpers import to_list as to_list
+from .langhelpers import to_tuple as to_tuple
+from .langhelpers import unique_list as unique_list
+from .messaging import err as err
+from .messaging import format_as_comma as format_as_comma
+from .messaging import msg as msg
+from .messaging import obfuscate_url_pw as obfuscate_url_pw
+from .messaging import status as status
+from .messaging import warn as warn
+from .messaging import write_outstream as write_outstream
+from .pyfiles import coerce_resource_to_filename as coerce_resource_to_filename
+from .pyfiles import load_python_file as load_python_file
+from .pyfiles import pyc_file_from_path as pyc_file_from_path
+from .pyfiles import template_to_file as template_to_file
+from .sqla_compat import sqla_2 as sqla_2
diff --git a/.venv/lib/python3.12/site-packages/alembic/util/compat.py b/.venv/lib/python3.12/site-packages/alembic/util/compat.py
new file mode 100644
index 00000000..fa8bc02b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/util/compat.py
@@ -0,0 +1,90 @@
+# mypy: no-warn-unused-ignores
+
+from __future__ import annotations
+
+from configparser import ConfigParser
+import io
+import os
+import sys
+import typing
+from typing import Any
+from typing import List
+from typing import Optional
+from typing import Sequence
+from typing import Union
+
+if True:
+    # zimports hack for too-long names
+    from sqlalchemy.util import (  # noqa: F401
+        inspect_getfullargspec as inspect_getfullargspec,
+    )
+    from sqlalchemy.util.compat import (  # noqa: F401
+        inspect_formatargspec as inspect_formatargspec,
+    )
+
+is_posix = os.name == "posix"
+
+py313 = sys.version_info >= (3, 13)
+py311 = sys.version_info >= (3, 11)
+py310 = sys.version_info >= (3, 10)
+py39 = sys.version_info >= (3, 9)
+
+
+# produce a wrapper that allows encoded text to stream
+# into a given buffer, but doesn't close it.
+# not sure of a more idiomatic approach to this.
+class EncodedIO(io.TextIOWrapper):
+    def close(self) -> None:
+        pass
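+
+
+# Minimal sketch of the intent (the ``_demo_*`` name is illustrative): text
+# written through the wrapper is encoded into the underlying buffer, and
+# close() leaves that buffer usable.
+def _demo_encoded_io() -> bytes:
+    buf = io.BytesIO()
+    wrapper = EncodedIO(buf, encoding="utf-8")
+    wrapper.write("héllo")
+    wrapper.flush()  # close() is a no-op, so flush explicitly
+    wrapper.close()  # ``buf`` stays open and readable
+    return buf.getvalue()  # b'h\xc3\xa9llo'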
+
+
+if py39:
+    from importlib import resources as _resources
+
+    importlib_resources = _resources
+    from importlib import metadata as _metadata
+
+    importlib_metadata = _metadata
+    from importlib.metadata import EntryPoint as EntryPoint
+else:
+    import importlib_resources  # type:ignore # noqa
+    import importlib_metadata  # type:ignore # noqa
+    from importlib_metadata import EntryPoint  # type:ignore # noqa
+
+
+def importlib_metadata_get(group: str) -> Sequence[EntryPoint]:
+    ep = importlib_metadata.entry_points()
+    if hasattr(ep, "select"):
+        return ep.select(group=group)
+    else:
+        return ep.get(group, ())  # type: ignore
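+
+
+# Sketch: the same call works across both entry-point API generations, e.g.
+# listing registered console scripts (illustrative, not upstream API):
+def _demo_list_console_scripts() -> List[str]:
+    return [ep.name for ep in importlib_metadata_get("console_scripts")]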
+
+
+def formatannotation_fwdref(
+    annotation: Any, base_module: Optional[Any] = None
+) -> str:
+    """vendored from python 3.7"""
+    # copied over _formatannotation from sqlalchemy 2.0
+
+    if isinstance(annotation, str):
+        return annotation
+
+    if getattr(annotation, "__module__", None) == "typing":
+        return repr(annotation).replace("typing.", "").replace("~", "")
+    if isinstance(annotation, type):
+        if annotation.__module__ in ("builtins", base_module):
+            return repr(annotation.__qualname__)
+        return annotation.__module__ + "." + annotation.__qualname__
+    elif isinstance(annotation, typing.TypeVar):
+        return repr(annotation).replace("~", "")
+    return repr(annotation).replace("~", "")
+
+
+def read_config_parser(
+    file_config: ConfigParser,
+    file_argument: Sequence[Union[str, os.PathLike[str]]],
+) -> List[str]:
+    if py310:
+        return file_config.read(file_argument, encoding="locale")
+    else:
+        return file_config.read(file_argument)
diff --git a/.venv/lib/python3.12/site-packages/alembic/util/editor.py b/.venv/lib/python3.12/site-packages/alembic/util/editor.py
new file mode 100644
index 00000000..f1d1557f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/util/editor.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+import os
+from os.path import exists
+from os.path import join
+from os.path import splitext
+from subprocess import check_call
+from typing import Dict
+from typing import List
+from typing import Mapping
+from typing import Optional
+
+from .compat import is_posix
+from .exc import CommandError
+
+
+def open_in_editor(
+    filename: str, environ: Optional[Dict[str, str]] = None
+) -> None:
+    """
+    Opens the given file in a text editor. If one of the environment
+    variables ``EDITOR`` or ``VISUAL`` is set, it is taken as preference.
+
+    Otherwise, a list of commonly installed editors is tried.
+
+    If no editor matches, an :py:exc:`OSError` is raised.
+
+    :param filename: The filename to open. Will be passed verbatim to the
+        editor command.
+    :param environ: An optional drop-in replacement for ``os.environ``. Used
+        mainly for testing.
+    """
+    env = os.environ if environ is None else environ
+    try:
+        editor = _find_editor(env)
+        check_call([editor, filename])
+    except Exception as exc:
+        raise CommandError("Error executing editor (%s)" % (exc,)) from exc
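+
+
+# Usage sketch (side-effecting; launches the resolved editor and blocks):
+#
+#     open_in_editor("alembic/versions/abc123_add_table.py")
+#
+# or, with an explicit environment for testing:
+#
+#     open_in_editor("file.py", environ={"EDITOR": "vim", "PATH": "/usr/bin"})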
+
+
+def _find_editor(environ: Mapping[str, str]) -> str:
+    candidates = _default_editors()
+    for i, var in enumerate(("EDITOR", "VISUAL")):
+        if var in environ:
+            user_choice = environ[var]
+            if exists(user_choice):
+                return user_choice
+            if os.sep not in user_choice:
+                candidates.insert(i, user_choice)
+
+    for candidate in candidates:
+        path = _find_executable(candidate, environ)
+        if path is not None:
+            return path
+    raise OSError(
+        "No suitable editor found. Please set the "
+        '"EDITOR" or "VISUAL" environment variables'
+    )
+
+
+def _find_executable(
+    candidate: str, environ: Mapping[str, str]
+) -> Optional[str]:
+    # Assuming this is on the PATH, we need to determine its absolute
+    # location. Otherwise, ``check_call`` will fail.
+    if not is_posix and splitext(candidate)[1] != ".exe":
+        candidate += ".exe"
+    for path in environ.get("PATH", "").split(os.pathsep):
+        value = join(path, candidate)
+        if exists(value):
+            return value
+    return None
+
+
+def _default_editors() -> List[str]:
+    # Fallback list of commonly installed editors, tried in order when the
+    # EDITOR/VISUAL environment variables yield no usable choice
+    if is_posix:
+        return ["sensible-editor", "editor", "nano", "vim", "code"]
+    else:
+        return ["code.exe", "notepad++.exe", "notepad.exe"]
diff --git a/.venv/lib/python3.12/site-packages/alembic/util/exc.py b/.venv/lib/python3.12/site-packages/alembic/util/exc.py
new file mode 100644
index 00000000..c790e18a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/util/exc.py
@@ -0,0 +1,25 @@
+from __future__ import annotations
+
+from typing import Any
+from typing import List
+from typing import Tuple
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from alembic.autogenerate import RevisionContext
+
+
+class CommandError(Exception):
+    pass
+
+
+class AutogenerateDiffsDetected(CommandError):
+    def __init__(
+        self,
+        message: str,
+        revision_context: RevisionContext,
+        diffs: List[Tuple[Any, ...]],
+    ) -> None:
+        super().__init__(message)
+        self.revision_context = revision_context
+        self.diffs = diffs
diff --git a/.venv/lib/python3.12/site-packages/alembic/util/langhelpers.py b/.venv/lib/python3.12/site-packages/alembic/util/langhelpers.py
new file mode 100644
index 00000000..80d88cbc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/util/langhelpers.py
@@ -0,0 +1,332 @@
+from __future__ import annotations
+
+import collections
+from collections.abc import Iterable
+import textwrap
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Dict
+from typing import List
+from typing import Mapping
+from typing import MutableMapping
+from typing import NoReturn
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+import uuid
+import warnings
+
+from sqlalchemy.util import asbool as asbool  # noqa: F401
+from sqlalchemy.util import immutabledict as immutabledict  # noqa: F401
+from sqlalchemy.util import to_list as to_list  # noqa: F401
+from sqlalchemy.util import unique_list as unique_list
+
+from .compat import inspect_getfullargspec
+
+if True:
+    # zimports workaround :(
+    from sqlalchemy.util import (  # noqa: F401
+        memoized_property as memoized_property,
+    )
+
+
+EMPTY_DICT: Mapping[Any, Any] = immutabledict()
+_T = TypeVar("_T", bound=Any)
+
+_C = TypeVar("_C", bound=Callable[..., Any])
+
+
+class _ModuleClsMeta(type):
+    def __setattr__(cls, key: str, value: Callable[..., Any]) -> None:
+        super().__setattr__(key, value)
+        cls._update_module_proxies(key)  # type: ignore
+
+
+class ModuleClsProxy(metaclass=_ModuleClsMeta):
+    """Create module level proxy functions for the
+    methods on a given class.
+
+    The functions will have a compatible signature
+    as the methods.
+
+    """
+
+    _setups: Dict[
+        Type[Any],
+        Tuple[
+            Set[str],
+            List[Tuple[MutableMapping[str, Any], MutableMapping[str, Any]]],
+        ],
+    ] = collections.defaultdict(lambda: (set(), []))
+
+    @classmethod
+    def _update_module_proxies(cls, name: str) -> None:
+        attr_names, modules = cls._setups[cls]
+        for globals_, locals_ in modules:
+            cls._add_proxied_attribute(name, globals_, locals_, attr_names)
+
+    def _install_proxy(self) -> None:
+        attr_names, modules = self._setups[self.__class__]
+        for globals_, locals_ in modules:
+            globals_["_proxy"] = self
+            for attr_name in attr_names:
+                globals_[attr_name] = getattr(self, attr_name)
+
+    def _remove_proxy(self) -> None:
+        attr_names, modules = self._setups[self.__class__]
+        for globals_, locals_ in modules:
+            globals_["_proxy"] = None
+            for attr_name in attr_names:
+                del globals_[attr_name]
+
+    @classmethod
+    def create_module_class_proxy(
+        cls,
+        globals_: MutableMapping[str, Any],
+        locals_: MutableMapping[str, Any],
+    ) -> None:
+        attr_names, modules = cls._setups[cls]
+        modules.append((globals_, locals_))
+        cls._setup_proxy(globals_, locals_, attr_names)
+
+    @classmethod
+    def _setup_proxy(
+        cls,
+        globals_: MutableMapping[str, Any],
+        locals_: MutableMapping[str, Any],
+        attr_names: Set[str],
+    ) -> None:
+        for methname in dir(cls):
+            cls._add_proxied_attribute(methname, globals_, locals_, attr_names)
+
+    @classmethod
+    def _add_proxied_attribute(
+        cls,
+        methname: str,
+        globals_: MutableMapping[str, Any],
+        locals_: MutableMapping[str, Any],
+        attr_names: Set[str],
+    ) -> None:
+        if not methname.startswith("_"):
+            meth = getattr(cls, methname)
+            if callable(meth):
+                locals_[methname] = cls._create_method_proxy(
+                    methname, globals_, locals_
+                )
+            else:
+                attr_names.add(methname)
+
+    @classmethod
+    def _create_method_proxy(
+        cls,
+        name: str,
+        globals_: MutableMapping[str, Any],
+        locals_: MutableMapping[str, Any],
+    ) -> Callable[..., Any]:
+        fn = getattr(cls, name)
+
+        def _name_error(name: str, from_: Exception) -> NoReturn:
+            raise NameError(
+                "Can't invoke function '%s', as the proxy object has "
+                "not yet been "
+                "established for the Alembic '%s' class.  "
+                "Try placing this code inside a callable."
+                % (name, cls.__name__)
+            ) from from_
+
+        globals_["_name_error"] = _name_error
+
+        translations = getattr(fn, "_legacy_translations", [])
+        if translations:
+            spec = inspect_getfullargspec(fn)
+            if spec[0] and spec[0][0] == "self":
+                spec[0].pop(0)
+
+            outer_args = inner_args = "*args, **kw"
+            translate_str = "args, kw = _translate(%r, %r, %r, args, kw)" % (
+                fn.__name__,
+                tuple(spec),
+                translations,
+            )
+
+            def translate(
+                fn_name: str, spec: Any, translations: Any, args: Any, kw: Any
+            ) -> Any:
+                return_kw = {}
+                return_args = []
+
+                for oldname, newname in translations:
+                    if oldname in kw:
+                        warnings.warn(
+                            "Argument %r is now named %r "
+                            "for method %s()." % (oldname, newname, fn_name)
+                        )
+                        return_kw[newname] = kw.pop(oldname)
+                return_kw.update(kw)
+
+                args = list(args)
+                if spec[3]:
+                    pos_only = spec[0][: -len(spec[3])]
+                else:
+                    pos_only = spec[0]
+                for arg in pos_only:
+                    if arg not in return_kw:
+                        try:
+                            return_args.append(args.pop(0))
+                        except IndexError:
+                            raise TypeError(
+                                "missing required positional argument: %s"
+                                % arg
+                            )
+                return_args.extend(args)
+
+                return return_args, return_kw
+
+            globals_["_translate"] = translate
+        else:
+            outer_args = "*args, **kw"
+            inner_args = "*args, **kw"
+            translate_str = ""
+
+        func_text = textwrap.dedent(
+            """\
+        def %(name)s(%(args)s):
+            %(doc)r
+            %(translate)s
+            try:
+                p = _proxy
+            except NameError as ne:
+                _name_error('%(name)s', ne)
+            return _proxy.%(name)s(%(apply_kw)s)
+        """
+            % {
+                "name": name,
+                "translate": translate_str,
+                "args": outer_args,
+                "apply_kw": inner_args,
+                "doc": fn.__doc__,
+            }
+        )
+        lcl: MutableMapping[str, Any] = {}
+
+        exec(func_text, cast("Dict[str, Any]", globals_), lcl)
+        return cast("Callable[..., Any]", lcl[name])
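+
+
+# Sketch of the pattern (names below are illustrative); this is the
+# mechanism behind module-level facades such as ``alembic.op``:
+#
+#     class Operations(ModuleClsProxy):
+#         def add_column(self, table_name, column): ...
+#
+#     # in the facade module's body:
+#     Operations.create_module_class_proxy(globals(), locals())
+#
+#     # at migration time, an instance activates the module functions:
+#     operations._install_proxy()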
+
+
+def _with_legacy_names(translations: Any) -> Any:
+    def decorate(fn: _C) -> _C:
+        fn._legacy_translations = translations  # type: ignore[attr-defined]
+        return fn
+
+    return decorate
+
+
+def rev_id() -> str:
+    return uuid.uuid4().hex[-12:]
+
+
+@overload
+def to_tuple(x: Any, default: Tuple[Any, ...]) -> Tuple[Any, ...]: ...
+
+
+@overload
+def to_tuple(x: None, default: Optional[_T] = ...) -> _T: ...
+
+
+@overload
+def to_tuple(
+    x: Any, default: Optional[Tuple[Any, ...]] = None
+) -> Tuple[Any, ...]: ...
+
+
+def to_tuple(
+    x: Any, default: Optional[Tuple[Any, ...]] = None
+) -> Optional[Tuple[Any, ...]]:
+    if x is None:
+        return default
+    elif isinstance(x, str):
+        return (x,)
+    elif isinstance(x, Iterable):
+        return tuple(x)
+    else:
+        return (x,)
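+
+
+# Behavior sketch:
+#   to_tuple(None, ())  ->  ()
+#   to_tuple("ab")      ->  ("ab",)   # strings are not iterated
+#   to_tuple([1, 2])    ->  (1, 2)
+#   to_tuple(5)         ->  (5,)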
+
+
+def dedupe_tuple(tup: Tuple[str, ...]) -> Tuple[str, ...]:
+    return tuple(unique_list(tup))
+
+
+class Dispatcher:
+    def __init__(self, uselist: bool = False) -> None:
+        self._registry: Dict[Tuple[Any, ...], Any] = {}
+        self.uselist = uselist
+
+    def dispatch_for(
+        self, target: Any, qualifier: str = "default"
+    ) -> Callable[[_C], _C]:
+        def decorate(fn: _C) -> _C:
+            if self.uselist:
+                self._registry.setdefault((target, qualifier), []).append(fn)
+            else:
+                assert (target, qualifier) not in self._registry
+                self._registry[(target, qualifier)] = fn
+            return fn
+
+        return decorate
+
+    def dispatch(self, obj: Any, qualifier: str = "default") -> Any:
+        if isinstance(obj, str):
+            targets: Sequence[Any] = [obj]
+        elif isinstance(obj, type):
+            targets = obj.__mro__
+        else:
+            targets = type(obj).__mro__
+
+        for spcls in targets:
+            if qualifier != "default" and (spcls, qualifier) in self._registry:
+                return self._fn_or_list(self._registry[(spcls, qualifier)])
+            elif (spcls, "default") in self._registry:
+                return self._fn_or_list(self._registry[(spcls, "default")])
+        else:
+            raise ValueError("no dispatch function for object: %s" % obj)
+
+    def _fn_or_list(
+        self, fn_or_list: Union[List[Callable[..., Any]], Callable[..., Any]]
+    ) -> Callable[..., Any]:
+        if self.uselist:
+
+            def go(*arg: Any, **kw: Any) -> None:
+                if TYPE_CHECKING:
+                    assert isinstance(fn_or_list, Sequence)
+                for fn in fn_or_list:
+                    fn(*arg, **kw)
+
+            return go
+        else:
+            return fn_or_list  # type: ignore
+
+    def branch(self) -> Dispatcher:
+        """Return a copy of this dispatcher that is independently
+        writable."""
+
+        d = Dispatcher()
+        if self.uselist:
+            d._registry.update(
+                (k, [fn for fn in self._registry[k]]) for k in self._registry
+            )
+        else:
+            d._registry.update(self._registry)
+        return d
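+
+
+# Minimal sketch of dispatch-by-MRO (the ``_demo_*`` name is illustrative):
+def _demo_dispatcher() -> str:
+    dispatch = Dispatcher()
+
+    @dispatch.dispatch_for(int)
+    def handle_int(value):
+        return "int:%s" % value
+
+    # bool subclasses int, so the handler is located via type(True).__mro__
+    return dispatch.dispatch(True)("demo")  # -> "int:demo"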
+
+
+def not_none(value: Optional[_T]) -> _T:
+    assert value is not None
+    return value
diff --git a/.venv/lib/python3.12/site-packages/alembic/util/messaging.py b/.venv/lib/python3.12/site-packages/alembic/util/messaging.py
new file mode 100644
index 00000000..a2dbefa6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/util/messaging.py
@@ -0,0 +1,118 @@
+from __future__ import annotations
+
+from collections.abc import Iterable
+from contextlib import contextmanager
+import logging
+import sys
+import textwrap
+from typing import Iterator
+from typing import Optional
+from typing import TextIO
+from typing import Union
+import warnings
+
+from sqlalchemy.engine import url
+
+log = logging.getLogger(__name__)
+
+# disable "no handler found" errors
+logging.getLogger("alembic").addHandler(logging.NullHandler())
+
+
+try:
+    import fcntl
+    import termios
+    import struct
+
+    ioctl = fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0))
+    _h, TERMWIDTH, _hp, _wp = struct.unpack("HHHH", ioctl)
+    if TERMWIDTH <= 0:  # can occur if running in emacs pseudo-tty
+        TERMWIDTH = None
+except (ImportError, OSError):
+    TERMWIDTH = None
+
+
+def write_outstream(
+    stream: TextIO, *text: Union[str, bytes], quiet: bool = False
+) -> None:
+    if quiet:
+        return
+    encoding = getattr(stream, "encoding", "ascii") or "ascii"
+    for t in text:
+        if not isinstance(t, bytes):
+            t = t.encode(encoding, "replace")
+        t = t.decode(encoding)
+        try:
+            stream.write(t)
+        except OSError:
+            # suppress "broken pipe" errors.
+            # no known way to handle this on Python 3 however
+            # as the exception is "ignored" (noisily) in TextIOWrapper.
+            break
+
+
+@contextmanager
+def status(
+    status_msg: str, newline: bool = False, quiet: bool = False
+) -> Iterator[None]:
+    msg(status_msg + " ...", newline, flush=True, quiet=quiet)
+    try:
+        yield
+    except BaseException:
+        if not quiet:
+            write_outstream(sys.stdout, "  FAILED\n")
+        raise
+    else:
+        if not quiet:
+            write_outstream(sys.stdout, "  done\n")
+
+
+def err(message: str, quiet: bool = False) -> None:
+    log.error(message)
+    msg(f"FAILED: {message}", quiet=quiet)
+    sys.exit(-1)
+
+
+def obfuscate_url_pw(input_url: str) -> str:
+    return url.make_url(input_url).render_as_string(hide_password=True)
+
+
+def warn(msg: str, stacklevel: int = 2) -> None:
+    warnings.warn(msg, UserWarning, stacklevel=stacklevel)
+
+
+def msg(
+    msg: str, newline: bool = True, flush: bool = False, quiet: bool = False
+) -> None:
+    if quiet:
+        return
+    if TERMWIDTH is None:
+        write_outstream(sys.stdout, msg)
+        if newline:
+            write_outstream(sys.stdout, "\n")
+    else:
+        # left indent output lines
+        indent = "  "
+        lines = textwrap.wrap(
+            msg,
+            TERMWIDTH,
+            initial_indent=indent,
+            subsequent_indent=indent,
+        )
+        if len(lines) > 1:
+            for line in lines[0:-1]:
+                write_outstream(sys.stdout, line, "\n")
+        write_outstream(sys.stdout, lines[-1], ("\n" if newline else ""))
+    if flush:
+        sys.stdout.flush()
+
+
+def format_as_comma(value: Optional[Union[str, Iterable[str]]]) -> str:
+    if value is None:
+        return ""
+    elif isinstance(value, str):
+        return value
+    elif isinstance(value, Iterable):
+        return ", ".join(value)
+    else:
+        raise ValueError("Don't know how to comma-format %r" % value)
diff --git a/.venv/lib/python3.12/site-packages/alembic/util/pyfiles.py b/.venv/lib/python3.12/site-packages/alembic/util/pyfiles.py
new file mode 100644
index 00000000..973bd458
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/util/pyfiles.py
@@ -0,0 +1,114 @@
+from __future__ import annotations
+
+import atexit
+from contextlib import ExitStack
+import importlib
+import importlib.machinery
+import importlib.util
+import os
+import re
+import tempfile
+from types import ModuleType
+from typing import Any
+from typing import Optional
+
+from mako import exceptions
+from mako.template import Template
+
+from . import compat
+from .exc import CommandError
+
+
+def template_to_file(
+    template_file: str, dest: str, output_encoding: str, **kw: Any
+) -> None:
+    template = Template(filename=template_file)
+    try:
+        output = template.render_unicode(**kw).encode(output_encoding)
+    except Exception:
+        with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as ntf:
+            ntf.write(
+                exceptions.text_error_template()
+                .render_unicode()
+                .encode(output_encoding)
+            )
+            fname = ntf.name
+        raise CommandError(
+            "Template rendering failed; see %s for a "
+            "template-oriented traceback." % fname
+        )
+    else:
+        with open(dest, "wb") as f:
+            f.write(output)
+
+
+def coerce_resource_to_filename(fname: str) -> str:
+    """Interpret a filename as either a filesystem location or as a package
+    resource.
+
+    Names that are non-absolute paths and contain a colon
+    are interpreted as resources and coerced to a file location.
+
+    """
+    if not os.path.isabs(fname) and ":" in fname:
+        tokens = fname.split(":")
+
+        # from https://importlib-resources.readthedocs.io/en/latest/migration.html#pkg-resources-resource-filename  # noqa E501
+
+        file_manager = ExitStack()
+        atexit.register(file_manager.close)
+
+        ref = compat.importlib_resources.files(tokens[0])
+        for tok in tokens[1:]:
+            ref = ref / tok
+        fname = file_manager.enter_context(  # type: ignore[assignment]
+            compat.importlib_resources.as_file(ref)
+        )
+    return fname
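+
+
+# Sketch: a non-absolute, colon-separated name is resolved as a package
+# resource, e.g. "alembic:templates" names the ``templates`` directory of
+# the installed ``alembic`` package and comes back as a real filesystem
+# path, materialized via as_file() when needed.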
+
+
+def pyc_file_from_path(path: str) -> Optional[str]:
+    """Given a python source path, locate the .pyc."""
+
+    candidate = importlib.util.cache_from_source(path)
+    if os.path.exists(candidate):
+        return candidate
+
+    # even for PEP 3147, fall back to the old way of finding .pyc files,
+    # to support sourceless operation
+    filepath, ext = os.path.splitext(path)
+    for ext in importlib.machinery.BYTECODE_SUFFIXES:
+        if os.path.exists(filepath + ext):
+            return filepath + ext
+    else:
+        return None
+
+
+def load_python_file(dir_: str, filename: str) -> ModuleType:
+    """Load a file from the given path as a Python module."""
+
+    module_id = re.sub(r"\W", "_", filename)
+    path = os.path.join(dir_, filename)
+    _, ext = os.path.splitext(filename)
+    if ext == ".py":
+        if os.path.exists(path):
+            module = load_module_py(module_id, path)
+        else:
+            pyc_path = pyc_file_from_path(path)
+            if pyc_path is None:
+                raise ImportError("Can't find Python file %s" % path)
+            else:
+                module = load_module_py(module_id, pyc_path)
+    elif ext in (".pyc", ".pyo"):
+        module = load_module_py(module_id, path)
+    else:
+        assert False, "unexpected file extension: %r" % ext
+    return module
+
+
+def load_module_py(module_id: str, path: str) -> ModuleType:
+    spec = importlib.util.spec_from_file_location(module_id, path)
+    assert spec
+    module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(module)  # type: ignore
+    return module
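+
+
+# Usage sketch: this is how migration scripts such as env.py are loaded
+# without requiring the scripts directory to be an importable package:
+#
+#     module = load_python_file("/path/to/migrations", "env.py")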
diff --git a/.venv/lib/python3.12/site-packages/alembic/util/sqla_compat.py b/.venv/lib/python3.12/site-packages/alembic/util/sqla_compat.py
new file mode 100644
index 00000000..a427d3c8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/alembic/util/sqla_compat.py
@@ -0,0 +1,497 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import contextlib
+import re
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import Iterable
+from typing import Iterator
+from typing import Optional
+from typing import Protocol
+from typing import Set
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy import __version__
+from sqlalchemy import schema
+from sqlalchemy import sql
+from sqlalchemy import types as sqltypes
+from sqlalchemy.schema import CheckConstraint
+from sqlalchemy.schema import Column
+from sqlalchemy.schema import ForeignKeyConstraint
+from sqlalchemy.sql import visitors
+from sqlalchemy.sql.base import DialectKWArgs
+from sqlalchemy.sql.elements import BindParameter
+from sqlalchemy.sql.elements import ColumnClause
+from sqlalchemy.sql.elements import TextClause
+from sqlalchemy.sql.elements import UnaryExpression
+from sqlalchemy.sql.visitors import traverse
+from typing_extensions import TypeGuard
+
+if True:
+    from sqlalchemy.sql.naming import _NONE_NAME as _NONE_NAME  # type: ignore[attr-defined] # noqa: E501
+
+if TYPE_CHECKING:
+    from sqlalchemy import ClauseElement
+    from sqlalchemy import Identity
+    from sqlalchemy import Index
+    from sqlalchemy import Table
+    from sqlalchemy.engine import Connection
+    from sqlalchemy.engine import Dialect
+    from sqlalchemy.engine import Transaction
+    from sqlalchemy.sql.base import ColumnCollection
+    from sqlalchemy.sql.compiler import SQLCompiler
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.schema import SchemaItem
+
+_CE = TypeVar("_CE", bound=Union["ColumnElement[Any]", "SchemaItem"])
+
+
+class _CompilerProtocol(Protocol):
+    def __call__(self, element: Any, compiler: Any, **kw: Any) -> str: ...
+
+
+def _safe_int(value: str) -> Union[int, str]:
+    try:
+        return int(value)
+    except ValueError:
+        return value
+
+
+_vers = tuple(
+    [_safe_int(x) for x in re.findall(r"(\d+|[abc]\d)", __version__)]
+)
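+# e.g. "1.4.27" -> (1, 4, 27); "2.0.0b1" -> (2, 0, 0, "b1")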
+# https://docs.sqlalchemy.org/en/latest/changelog/changelog_14.html#change-0c6e0cc67dfe6fac5164720e57ef307d
+sqla_14_18 = _vers >= (1, 4, 18)
+sqla_14_26 = _vers >= (1, 4, 26)
+sqla_2 = _vers >= (2,)
+sqlalchemy_version = __version__
+
+if TYPE_CHECKING:
+
+    def compiles(
+        element: Type[ClauseElement], *dialects: str
+    ) -> Callable[[_CompilerProtocol], _CompilerProtocol]: ...
+
+else:
+    from sqlalchemy.ext.compiler import compiles
+
+
+identity_has_dialect_kwargs = issubclass(schema.Identity, DialectKWArgs)
+
+
+def _get_identity_options_dict(
+    identity: Union[Identity, schema.Sequence, None],
+    dialect_kwargs: bool = False,
+) -> Dict[str, Any]:
+    if identity is None:
+        return {}
+    elif identity_has_dialect_kwargs:
+        assert hasattr(identity, "_as_dict")
+        as_dict = identity._as_dict()
+        if dialect_kwargs:
+            assert isinstance(identity, DialectKWArgs)
+            as_dict.update(identity.dialect_kwargs)
+    else:
+        as_dict = {}
+        if isinstance(identity, schema.Identity):
+            # always=None means something different than always=False
+            as_dict["always"] = identity.always
+            if identity.on_null is not None:
+                as_dict["on_null"] = identity.on_null
+        # attributes common to Identity and Sequence
+        attrs = (
+            "start",
+            "increment",
+            "minvalue",
+            "maxvalue",
+            "nominvalue",
+            "nomaxvalue",
+            "cycle",
+            "cache",
+            "order",
+        )
+        as_dict.update(
+            {
+                key: getattr(identity, key, None)
+                for key in attrs
+                if getattr(identity, key, None) is not None
+            }
+        )
+    return as_dict
+
+
+if sqla_2:
+    from sqlalchemy.sql.base import _NoneName
+else:
+    from sqlalchemy.util import symbol as _NoneName  # type: ignore[assignment]
+
+
+_ConstraintName = Union[None, str, _NoneName]
+_ConstraintNameDefined = Union[str, _NoneName]
+
+
+def constraint_name_defined(
+    name: _ConstraintName,
+) -> TypeGuard[_ConstraintNameDefined]:
+    return name is _NONE_NAME or isinstance(name, (str, _NoneName))
+
+
+def constraint_name_string(name: _ConstraintName) -> TypeGuard[str]:
+    return isinstance(name, str)
+
+
+def constraint_name_or_none(name: _ConstraintName) -> Optional[str]:
+    return name if constraint_name_string(name) else None
+
+
+AUTOINCREMENT_DEFAULT = "auto"
+
+
+@contextlib.contextmanager
+def _ensure_scope_for_ddl(
+    connection: Optional[Connection],
+) -> Iterator[None]:
+    try:
+        in_transaction = connection.in_transaction  # type: ignore[union-attr]
+    except AttributeError:
+        # catch for MockConnection or a None connection
+        in_transaction = None
+
+    # yield outside the catch
+    if in_transaction is None:
+        yield
+    else:
+        if not in_transaction():
+            assert connection is not None
+            with connection.begin():
+                yield
+        else:
+            yield
+
+
+def _safe_begin_connection_transaction(
+    connection: Connection,
+) -> Transaction:
+    transaction = connection.get_transaction()
+    if transaction:
+        return transaction
+    else:
+        return connection.begin()
+
+
+def _safe_commit_connection_transaction(
+    connection: Connection,
+) -> None:
+    transaction = connection.get_transaction()
+    if transaction:
+        transaction.commit()
+
+
+def _safe_rollback_connection_transaction(
+    connection: Connection,
+) -> None:
+    transaction = connection.get_transaction()
+    if transaction:
+        transaction.rollback()
+
+
+def _get_connection_in_transaction(connection: Optional[Connection]) -> bool:
+    try:
+        in_transaction = connection.in_transaction  # type: ignore
+    except AttributeError:
+        # catch for MockConnection
+        return False
+    else:
+        return in_transaction()
+
+
+def _idx_table_bound_expressions(idx: Index) -> Iterable[ColumnElement[Any]]:
+    return idx.expressions  # type: ignore
+
+
+def _copy(schema_item: _CE, **kw) -> _CE:
+    if hasattr(schema_item, "_copy"):
+        return schema_item._copy(**kw)
+    else:
+        return schema_item.copy(**kw)  # type: ignore[union-attr]
+
+
+def _connectable_has_table(
+    connectable: Connection, tablename: str, schemaname: Union[str, None]
+) -> bool:
+    return connectable.dialect.has_table(connectable, tablename, schemaname)
+
+
+def _exec_on_inspector(inspector, statement, **params):
+    with inspector._operation_context() as conn:
+        return conn.execute(statement, params)
+
+
+def _nullability_might_be_unset(metadata_column):
+    from sqlalchemy.sql import schema
+
+    return metadata_column._user_defined_nullable is schema.NULL_UNSPECIFIED
+
+
+def _server_default_is_computed(*server_default) -> bool:
+    return any(isinstance(sd, schema.Computed) for sd in server_default)
+
+
+def _server_default_is_identity(*server_default) -> bool:
+    return any(isinstance(sd, schema.Identity) for sd in server_default)
+
+
+def _table_for_constraint(constraint: Constraint) -> Table:
+    if isinstance(constraint, ForeignKeyConstraint):
+        table = constraint.parent
+        assert table is not None
+        return table  # type: ignore[return-value]
+    else:
+        return constraint.table
+
+
+def _columns_for_constraint(constraint):
+    if isinstance(constraint, ForeignKeyConstraint):
+        return [fk.parent for fk in constraint.elements]
+    elif isinstance(constraint, CheckConstraint):
+        return _find_columns(constraint.sqltext)
+    else:
+        return list(constraint.columns)
+
+
+def _resolve_for_variant(type_, dialect):
+    if _type_has_variants(type_):
+        base_type, mapping = _get_variant_mapping(type_)
+        return mapping.get(dialect.name, base_type)
+    else:
+        return type_
+
+
+if hasattr(sqltypes.TypeEngine, "_variant_mapping"):  # 2.0
+
+    def _type_has_variants(type_):
+        return bool(type_._variant_mapping)
+
+    def _get_variant_mapping(type_):
+        return type_, type_._variant_mapping
+
+else:
+
+    def _type_has_variants(type_):
+        return type(type_) is sqltypes.Variant
+
+    def _get_variant_mapping(type_):
+        return type_.impl, type_.mapping
+
+
+def _fk_spec(constraint: ForeignKeyConstraint) -> Any:
+    if TYPE_CHECKING:
+        assert constraint.columns is not None
+        assert constraint.elements is not None
+        assert isinstance(constraint.parent, Table)
+
+    source_columns = [
+        constraint.columns[key].name for key in constraint.column_keys
+    ]
+
+    source_table = constraint.parent.name
+    source_schema = constraint.parent.schema
+    target_schema = constraint.elements[0].column.table.schema
+    target_table = constraint.elements[0].column.table.name
+    target_columns = [element.column.name for element in constraint.elements]
+    ondelete = constraint.ondelete
+    onupdate = constraint.onupdate
+    deferrable = constraint.deferrable
+    initially = constraint.initially
+    return (
+        source_schema,
+        source_table,
+        source_columns,
+        target_schema,
+        target_table,
+        target_columns,
+        onupdate,
+        ondelete,
+        deferrable,
+        initially,
+    )
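+
+
+# Sketch: for ``ForeignKeyConstraint(["user_id"], ["user.id"])`` on a table
+# "order" with no explicit schema, this returns approximately:
+# (None, "order", ["user_id"], None, "user", ["id"], None, None, None, None)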
+
+
+def _fk_is_self_referential(constraint: ForeignKeyConstraint) -> bool:
+    spec = constraint.elements[0]._get_colspec()
+    tokens = spec.split(".")
+    tokens.pop(-1)  # colname
+    tablekey = ".".join(tokens)
+    assert constraint.parent is not None
+    return tablekey == constraint.parent.key
+
+
+def _is_type_bound(constraint: Constraint) -> bool:
+    # this deals with SQLAlchemy #3260, don't copy CHECK constraints
+    # that will be generated by the type.
+    # new feature added for #3260
+    return constraint._type_bound
+
+
+def _find_columns(clause):
+    """locate Column objects within the given expression."""
+
+    cols: Set[ColumnElement[Any]] = set()
+    traverse(clause, {}, {"column": cols.add})
+    return cols
+
+
+def _remove_column_from_collection(
+    collection: ColumnCollection, column: Union[Column[Any], ColumnClause[Any]]
+) -> None:
+    """remove a column from a ColumnCollection."""
+
+    # workaround for older SQLAlchemy, remove the
+    # same object that's present
+    assert column.key is not None
+    to_remove = collection[column.key]
+
+    # SQLAlchemy 2.0 makes broader use of ReadOnlyColumnCollection
+    # (renamed from ImmutableColumnCollection)
+    if hasattr(collection, "_immutable") or hasattr(collection, "_readonly"):
+        collection._parent.remove(to_remove)
+    else:
+        collection.remove(to_remove)
+
+
+def _textual_index_column(
+    table: Table, text_: Union[str, TextClause, ColumnElement[Any]]
+) -> Union[ColumnElement[Any], Column[Any]]:
+    """a workaround for the Index construct's severe lack of flexibility"""
+    if isinstance(text_, str):
+        c = Column(text_, sqltypes.NULLTYPE)
+        table.append_column(c)
+        return c
+    elif isinstance(text_, TextClause):
+        return _textual_index_element(table, text_)
+    elif isinstance(text_, _textual_index_element):
+        return _textual_index_column(table, text_.text)
+    elif isinstance(text_, sql.ColumnElement):
+        return _copy_expression(text_, table)
+    else:
+        raise ValueError("String or text() construct expected")
+
+
+def _copy_expression(expression: _CE, target_table: Table) -> _CE:
+    def replace(col):
+        if (
+            isinstance(col, Column)
+            and col.table is not None
+            and col.table is not target_table
+        ):
+            if col.name in target_table.c:
+                return target_table.c[col.name]
+            else:
+                c = _copy(col)
+                target_table.append_column(c)
+                return c
+        else:
+            return None
+
+    return visitors.replacement_traverse(  # type: ignore[call-overload]
+        expression, {}, replace
+    )
+
+
+class _textual_index_element(sql.ColumnElement):
+    """Wrap around a sqlalchemy text() construct in such a way that
+    we appear like a column-oriented SQL expression to an Index
+    construct.
+
+    The issue here is that currently the PostgreSQL dialect, the biggest
+    recipient of functional indexes, keys all the index expressions to
+    the corresponding column expressions when rendering CREATE INDEX,
+    so the Index we create here needs to have a .columns collection that
+    is the same length as the .expressions collection.  Ultimately
+    SQLAlchemy should support text() expressions in indexes.
+
+    See SQLAlchemy issue 3174.
+
+    """
+
+    __visit_name__ = "_textual_idx_element"
+
+    def __init__(self, table: Table, text: TextClause) -> None:
+        self.table = table
+        self.text = text
+        self.key = text.text
+        self.fake_column = schema.Column(self.text.text, sqltypes.NULLTYPE)
+        table.append_column(self.fake_column)
+
+    def get_children(self, **kw):
+        return [self.fake_column]
+
+
+@compiles(_textual_index_element)
+def _render_textual_index_column(
+    element: _textual_index_element, compiler: SQLCompiler, **kw
+) -> str:
+    return compiler.process(element.text, **kw)
+
+
+class _literal_bindparam(BindParameter):
+    pass
+
+
+@compiles(_literal_bindparam)
+def _render_literal_bindparam(
+    element: _literal_bindparam, compiler: SQLCompiler, **kw
+) -> str:
+    return compiler.render_literal_bindparam(element, **kw)
+
+
+def _get_constraint_final_name(
+    constraint: Union[Index, Constraint], dialect: Optional[Dialect]
+) -> Optional[str]:
+    if constraint.name is None:
+        return None
+    assert dialect is not None
+    # for SQLAlchemy 1.4 we would like to have the option to expand
+    # the use of "deferred" names for constraints as well as to have
+    # some flexibility with "None" name and similar; make use of new
+    # SQLAlchemy API to return what would be the final compiled form of
+    # the name for this dialect.
+    return dialect.identifier_preparer.format_constraint(
+        constraint, _alembic_quote=False
+    )
+
+
+def _constraint_is_named(
+    constraint: Union[Constraint, Index], dialect: Optional[Dialect]
+) -> bool:
+    if constraint.name is None:
+        return False
+    assert dialect is not None
+    name = dialect.identifier_preparer.format_constraint(
+        constraint, _alembic_quote=False
+    )
+    return name is not None
+
+
+def is_expression_index(index: Index) -> bool:
+    for expr in index.expressions:
+        if is_expression(expr):
+            return True
+    return False
+
+
+def is_expression(expr: Any) -> bool:
+    while isinstance(expr, UnaryExpression):
+        expr = expr.element
+    return not isinstance(expr, ColumnClause) or expr.is_literal