author     S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql')
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/__init__.py          167
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/_psycopg_common.py   187
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/array.py             435
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/asyncpg.py          1287
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/base.py             5041
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/dml.py               339
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/ext.py               501
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/hstore.py            406
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/json.py              367
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/named_types.py       505
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/operators.py         129
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/pg8000.py            666
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/pg_catalog.py        300
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/provision.py         175
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg.py           783
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py          892
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg2cffi.py       61
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/ranges.py           1031
-rw-r--r--  .venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/types.py             313
19 files changed, 13585 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/__init__.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/__init__.py
new file mode 100644
index 00000000..88935e20
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/__init__.py
@@ -0,0 +1,167 @@
+# dialects/postgresql/__init__.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+
+from types import ModuleType
+
+from . import array as arraylib  # noqa # keep above base and other dialects
+from . import asyncpg  # noqa
+from . import base
+from . import pg8000  # noqa
+from . import psycopg  # noqa
+from . import psycopg2  # noqa
+from . import psycopg2cffi  # noqa
+from .array import All
+from .array import Any
+from .array import ARRAY
+from .array import array
+from .base import BIGINT
+from .base import BOOLEAN
+from .base import CHAR
+from .base import DATE
+from .base import DOMAIN
+from .base import DOUBLE_PRECISION
+from .base import FLOAT
+from .base import INTEGER
+from .base import NUMERIC
+from .base import REAL
+from .base import SMALLINT
+from .base import TEXT
+from .base import UUID
+from .base import VARCHAR
+from .dml import Insert
+from .dml import insert
+from .ext import aggregate_order_by
+from .ext import array_agg
+from .ext import ExcludeConstraint
+from .ext import phraseto_tsquery
+from .ext import plainto_tsquery
+from .ext import to_tsquery
+from .ext import to_tsvector
+from .ext import ts_headline
+from .ext import websearch_to_tsquery
+from .hstore import HSTORE
+from .hstore import hstore
+from .json import JSON
+from .json import JSONB
+from .json import JSONPATH
+from .named_types import CreateDomainType
+from .named_types import CreateEnumType
+from .named_types import DropDomainType
+from .named_types import DropEnumType
+from .named_types import ENUM
+from .named_types import NamedType
+from .ranges import AbstractMultiRange
+from .ranges import AbstractRange
+from .ranges import AbstractSingleRange
+from .ranges import DATEMULTIRANGE
+from .ranges import DATERANGE
+from .ranges import INT4MULTIRANGE
+from .ranges import INT4RANGE
+from .ranges import INT8MULTIRANGE
+from .ranges import INT8RANGE
+from .ranges import MultiRange
+from .ranges import NUMMULTIRANGE
+from .ranges import NUMRANGE
+from .ranges import Range
+from .ranges import TSMULTIRANGE
+from .ranges import TSRANGE
+from .ranges import TSTZMULTIRANGE
+from .ranges import TSTZRANGE
+from .types import BIT
+from .types import BYTEA
+from .types import CIDR
+from .types import CITEXT
+from .types import INET
+from .types import INTERVAL
+from .types import MACADDR
+from .types import MACADDR8
+from .types import MONEY
+from .types import OID
+from .types import REGCLASS
+from .types import REGCONFIG
+from .types import TIME
+from .types import TIMESTAMP
+from .types import TSQUERY
+from .types import TSVECTOR
+
+
+# Alias psycopg also as psycopg_async
+psycopg_async = type(
+    "psycopg_async", (ModuleType,), {"dialect": psycopg.dialect_async}
+)
+
+base.dialect = dialect = psycopg2.dialect
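+
+# The assignment above makes this package's psycopg2 dialect the default for
+# plain "postgresql://" URLs; a comment-only sketch (URL is illustrative):
+#
+#     from sqlalchemy import create_engine
+#
+#     engine = create_engine("postgresql://scott:tiger@localhost/test")
+#     engine.dialect  # resolves to the psycopg2 dialect from this package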
+
+
+__all__ = (
+    "INTEGER",
+    "BIGINT",
+    "SMALLINT",
+    "VARCHAR",
+    "CHAR",
+    "TEXT",
+    "NUMERIC",
+    "FLOAT",
+    "REAL",
+    "INET",
+    "CIDR",
+    "CITEXT",
+    "UUID",
+    "BIT",
+    "MACADDR",
+    "MACADDR8",
+    "MONEY",
+    "OID",
+    "REGCLASS",
+    "REGCONFIG",
+    "TSQUERY",
+    "TSVECTOR",
+    "DOUBLE_PRECISION",
+    "TIMESTAMP",
+    "TIME",
+    "DATE",
+    "BYTEA",
+    "BOOLEAN",
+    "INTERVAL",
+    "ARRAY",
+    "ENUM",
+    "DOMAIN",
+    "dialect",
+    "array",
+    "HSTORE",
+    "hstore",
+    "INT4RANGE",
+    "INT8RANGE",
+    "NUMRANGE",
+    "DATERANGE",
+    "INT4MULTIRANGE",
+    "INT8MULTIRANGE",
+    "NUMMULTIRANGE",
+    "DATEMULTIRANGE",
+    "TSVECTOR",
+    "TSRANGE",
+    "TSTZRANGE",
+    "TSMULTIRANGE",
+    "TSTZMULTIRANGE",
+    "JSON",
+    "JSONB",
+    "JSONPATH",
+    "Any",
+    "All",
+    "DropEnumType",
+    "DropDomainType",
+    "CreateDomainType",
+    "NamedType",
+    "CreateEnumType",
+    "ExcludeConstraint",
+    "Range",
+    "aggregate_order_by",
+    "array_agg",
+    "insert",
+    "Insert",
+)
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/_psycopg_common.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/_psycopg_common.py
new file mode 100644
index 00000000..d827e054
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/_psycopg_common.py
@@ -0,0 +1,187 @@
+# dialects/postgresql/_psycopg_common.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+from __future__ import annotations
+
+import decimal
+
+from .array import ARRAY as PGARRAY
+from .base import _DECIMAL_TYPES
+from .base import _FLOAT_TYPES
+from .base import _INT_TYPES
+from .base import PGDialect
+from .base import PGExecutionContext
+from .hstore import HSTORE
+from .pg_catalog import _SpaceVector
+from .pg_catalog import INT2VECTOR
+from .pg_catalog import OIDVECTOR
+from ... import exc
+from ... import types as sqltypes
+from ... import util
+from ...engine import processors
+
+_server_side_id = util.counter()
+
+
+class _PsycopgNumeric(sqltypes.Numeric):
+    def bind_processor(self, dialect):
+        return None
+
+    def result_processor(self, dialect, coltype):
+        if self.asdecimal:
+            if coltype in _FLOAT_TYPES:
+                return processors.to_decimal_processor_factory(
+                    decimal.Decimal, self._effective_decimal_return_scale
+                )
+            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+                # psycopg returns Decimal natively for 1700
+                return None
+            else:
+                raise exc.InvalidRequestError(
+                    "Unknown PG numeric type: %d" % coltype
+                )
+        else:
+            if coltype in _FLOAT_TYPES:
+                # psycopg returns float natively for 701
+                return None
+            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+                return processors.to_float
+            else:
+                raise exc.InvalidRequestError(
+                    "Unknown PG numeric type: %d" % coltype
+                )
+
+
+class _PsycopgFloat(_PsycopgNumeric):
+    __visit_name__ = "float"
+
+
+class _PsycopgHStore(HSTORE):
+    def bind_processor(self, dialect):
+        if dialect._has_native_hstore:
+            return None
+        else:
+            return super().bind_processor(dialect)
+
+    def result_processor(self, dialect, coltype):
+        if dialect._has_native_hstore:
+            return None
+        else:
+            return super().result_processor(dialect, coltype)
+
+
+class _PsycopgARRAY(PGARRAY):
+    render_bind_cast = True
+
+
+class _PsycopgINT2VECTOR(_SpaceVector, INT2VECTOR):
+    pass
+
+
+class _PsycopgOIDVECTOR(_SpaceVector, OIDVECTOR):
+    pass
+
+
+class _PGExecutionContext_common_psycopg(PGExecutionContext):
+    def create_server_side_cursor(self):
+        # use server-side cursors:
+        # psycopg
+        # https://www.psycopg.org/psycopg3/docs/advanced/cursors.html#server-side-cursors
+        # psycopg2
+        # https://www.psycopg.org/docs/usage.html#server-side-cursors
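+        #
+        # a sketch of how this path is typically reached (assumes standard
+        # SQLAlchemy streaming usage; not an API defined in this module):
+        #
+        #     with engine.connect() as conn:
+        #         result = conn.execution_options(stream_results=True).execute(
+        #             text("SELECT * FROM big_table")
+        #         )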
+        ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
+        return self._dbapi_connection.cursor(ident)
+
+
+class _PGDialect_common_psycopg(PGDialect):
+    supports_statement_cache = True
+    supports_server_side_cursors = True
+
+    default_paramstyle = "pyformat"
+
+    _has_native_hstore = True
+
+    colspecs = util.update_copy(
+        PGDialect.colspecs,
+        {
+            sqltypes.Numeric: _PsycopgNumeric,
+            sqltypes.Float: _PsycopgFloat,
+            HSTORE: _PsycopgHStore,
+            sqltypes.ARRAY: _PsycopgARRAY,
+            INT2VECTOR: _PsycopgINT2VECTOR,
+            OIDVECTOR: _PsycopgOIDVECTOR,
+        },
+    )
+
+    def __init__(
+        self,
+        client_encoding=None,
+        use_native_hstore=True,
+        **kwargs,
+    ):
+        PGDialect.__init__(self, **kwargs)
+        if not use_native_hstore:
+            self._has_native_hstore = False
+        self.use_native_hstore = use_native_hstore
+        self.client_encoding = client_encoding
+
+    def create_connect_args(self, url):
+        opts = url.translate_connect_args(username="user", database="dbname")
+
+        multihosts, multiports = self._split_multihost_from_url(url)
+
+        if opts or url.query:
+            if not opts:
+                opts = {}
+            if "port" in opts:
+                opts["port"] = int(opts["port"])
+            opts.update(url.query)
+
+            if multihosts:
+                opts["host"] = ",".join(multihosts)
+                comma_ports = ",".join(str(p) if p else "" for p in multiports)
+                if comma_ports:
+                    opts["port"] = comma_ports
+            return ([], opts)
+        else:
+            # no connection arguments whatsoever; psycopg2.connect()
+            # requires that "dsn" be present as a blank string.
+            return ([""], opts)
+
+    def get_isolation_level_values(self, dbapi_connection):
+        return (
+            "AUTOCOMMIT",
+            "READ COMMITTED",
+            "READ UNCOMMITTED",
+            "REPEATABLE READ",
+            "SERIALIZABLE",
+        )
+
+    def set_deferrable(self, connection, value):
+        connection.deferrable = value
+
+    def get_deferrable(self, connection):
+        return connection.deferrable
+
+    def _do_autocommit(self, connection, value):
+        connection.autocommit = value
+
+    def do_ping(self, dbapi_connection):
+        cursor = None
+        before_autocommit = dbapi_connection.autocommit
+
+        if not before_autocommit:
+            dbapi_connection.autocommit = True
+        cursor = dbapi_connection.cursor()
+        try:
+            cursor.execute(self._dialect_specific_select_one)
+        finally:
+            cursor.close()
+            if not before_autocommit and not dbapi_connection.closed:
+                dbapi_connection.autocommit = before_autocommit
+
+        return True
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/array.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/array.py
new file mode 100644
index 00000000..7708769c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/array.py
@@ -0,0 +1,435 @@
+# dialects/postgresql/array.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+
+
+from __future__ import annotations
+
+import re
+from typing import Any
+from typing import Optional
+from typing import TypeVar
+
+from .operators import CONTAINED_BY
+from .operators import CONTAINS
+from .operators import OVERLAP
+from ... import types as sqltypes
+from ... import util
+from ...sql import expression
+from ...sql import operators
+from ...sql._typing import _TypeEngineArgument
+
+
+_T = TypeVar("_T", bound=Any)
+
+
+def Any(other, arrexpr, operator=operators.eq):
+    """A synonym for the ARRAY-level :meth:`.ARRAY.Comparator.any` method.
+    See that method for details.
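+
+    A minimal usage sketch (``mytable.c.data`` stands in for any
+    ARRAY-typed column; :func:`.All` is used the same way)::
+
+        from sqlalchemy import select
+        from sqlalchemy.dialects.postgresql import Any
+
+        # renders along the lines of:
+        #   SELECT ... WHERE %(param_1)s = ANY (mytable.data)
+        stmt = select(mytable).where(Any(7, mytable.c.data))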
+
+    """
+
+    return arrexpr.any(other, operator)
+
+
+def All(other, arrexpr, operator=operators.eq):
+    """A synonym for the ARRAY-level :meth:`.ARRAY.Comparator.all` method.
+    See that method for details.
+
+    """
+
+    return arrexpr.all(other, operator)
+
+
+class array(expression.ExpressionClauseList[_T]):
+    """A PostgreSQL ARRAY literal.
+
+    This is used to produce ARRAY literals in SQL expressions, e.g.::
+
+        from sqlalchemy.dialects.postgresql import array
+        from sqlalchemy.dialects import postgresql
+        from sqlalchemy import select, func
+
+        stmt = select(array([1, 2]) + array([3, 4, 5]))
+
+        print(stmt.compile(dialect=postgresql.dialect()))
+
+    Produces the SQL:
+
+    .. sourcecode:: sql
+
+        SELECT ARRAY[%(param_1)s, %(param_2)s] ||
+            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1
+
+    An instance of :class:`.array` will always have the datatype
+    :class:`_types.ARRAY`.  The "inner" type of the array is inferred from
+    the values present, unless the ``type_`` keyword argument is passed::
+
+        array(["foo", "bar"], type_=CHAR)
+
+    Multidimensional arrays are produced by nesting :class:`.array` constructs.
+    The dimensionality of the final :class:`_types.ARRAY`
+    type is calculated by
+    recursively adding the dimensions of the inner :class:`_types.ARRAY`
+    type::
+
+        stmt = select(
+            array(
+                [array([1, 2]), array([3, 4]), array([column("q"), column("x")])]
+            )
+        )
+        print(stmt.compile(dialect=postgresql.dialect()))
+
+    Produces:
+
+    .. sourcecode:: sql
+
+        SELECT ARRAY[
+            ARRAY[%(param_1)s, %(param_2)s],
+            ARRAY[%(param_3)s, %(param_4)s],
+            ARRAY[q, x]
+        ] AS anon_1
+
+    .. versionadded:: 1.3.6 added support for multidimensional array literals
+
+    .. seealso::
+
+        :class:`_postgresql.ARRAY`
+
+    """  # noqa: E501
+
+    __visit_name__ = "array"
+
+    stringify_dialect = "postgresql"
+    inherit_cache = True
+
+    def __init__(self, clauses, **kw):
+        type_arg = kw.pop("type_", None)
+        super().__init__(operators.comma_op, *clauses, **kw)
+
+        self._type_tuple = [arg.type for arg in self.clauses]
+
+        main_type = (
+            type_arg
+            if type_arg is not None
+            else self._type_tuple[0] if self._type_tuple else sqltypes.NULLTYPE
+        )
+
+        if isinstance(main_type, ARRAY):
+            self.type = ARRAY(
+                main_type.item_type,
+                dimensions=(
+                    main_type.dimensions + 1
+                    if main_type.dimensions is not None
+                    else 2
+                ),
+            )
+        else:
+            self.type = ARRAY(main_type)
+
+    @property
+    def _select_iterable(self):
+        return (self,)
+
+    def _bind_param(self, operator, obj, _assume_scalar=False, type_=None):
+        if _assume_scalar or operator is operators.getitem:
+            return expression.BindParameter(
+                None,
+                obj,
+                _compared_to_operator=operator,
+                type_=type_,
+                _compared_to_type=self.type,
+                unique=True,
+            )
+
+        else:
+            return array(
+                [
+                    self._bind_param(
+                        operator, o, _assume_scalar=True, type_=type_
+                    )
+                    for o in obj
+                ]
+            )
+
+    def self_group(self, against=None):
+        if against in (operators.any_op, operators.all_op, operators.getitem):
+            return expression.Grouping(self)
+        else:
+            return self
+
+
+class ARRAY(sqltypes.ARRAY):
+    """PostgreSQL ARRAY type.
+
+    The :class:`_postgresql.ARRAY` type is constructed in the same way
+    as the core :class:`_types.ARRAY` type; a member type is required, and a
+    number of dimensions is recommended if the type is to be used for more
+    than one dimension::
+
+        from sqlalchemy.dialects import postgresql
+
+        mytable = Table(
+            "mytable",
+            metadata,
+            Column("data", postgresql.ARRAY(Integer, dimensions=2)),
+        )
+
+    The :class:`_postgresql.ARRAY` type provides all operations defined on the
+    core :class:`_types.ARRAY` type, including support for "dimensions",
+    indexed access, and simple matching such as
+    :meth:`.types.ARRAY.Comparator.any` and
+    :meth:`.types.ARRAY.Comparator.all`.  The :class:`_postgresql.ARRAY`
+    class also
+    provides PostgreSQL-specific methods for containment operations, including
+    :meth:`.postgresql.ARRAY.Comparator.contains`,
+    :meth:`.postgresql.ARRAY.Comparator.contained_by`, and
+    :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::
+
+        mytable.c.data.contains([1, 2])
+
+    Indexed access is one-based by default, to match that of PostgreSQL;
+    for zero-based indexed access, set
+    :paramref:`_postgresql.ARRAY.zero_indexes`.
+
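+    For example, a sketch (``mytable`` as in the example above)::
+
+        Column("data", postgresql.ARRAY(Integer, zero_indexes=True))
+
+        # Python-style zero-based access; the rendered SQL uses data[1]
+        mytable.c.data[0]
+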
+    Additionally, the :class:`_postgresql.ARRAY`
+    type does not work directly in
+    conjunction with the :class:`.ENUM` type.  For a workaround, see the
+    special type at :ref:`postgresql_array_of_enum`.
+
+    .. container:: topic
+
+        **Detecting Changes in ARRAY columns when using the ORM**
+
+        The :class:`_postgresql.ARRAY` type, when used with the SQLAlchemy ORM,
+        does not detect in-place mutations to the array. In order to detect
+        these, the :mod:`sqlalchemy.ext.mutable` extension must be used, using
+        the :class:`.MutableList` class::
+
+            from sqlalchemy.dialects.postgresql import ARRAY
+            from sqlalchemy.ext.mutable import MutableList
+
+
+            class SomeOrmClass(Base):
+                # ...
+
+                data = Column(MutableList.as_mutable(ARRAY(Integer)))
+
+        This extension will allow "in-place" changes to the array,
+        such as ``.append()``, to produce events which will be detected by the
+        unit of work.  Note that changes to elements **inside** the array,
+        including subarrays that are mutated in place, are **not** detected.
+
+        Alternatively, assigning a new array value to an ORM element that
+        replaces the old one will always trigger a change event.
+
+    .. seealso::
+
+        :class:`_types.ARRAY` - base array type
+
+        :class:`_postgresql.array` - produces a literal array value.
+
+    """
+
+    def __init__(
+        self,
+        item_type: _TypeEngineArgument[Any],
+        as_tuple: bool = False,
+        dimensions: Optional[int] = None,
+        zero_indexes: bool = False,
+    ):
+        """Construct an ARRAY.
+
+        E.g.::
+
+          Column("myarray", ARRAY(Integer))
+
+        Arguments are:
+
+        :param item_type: The data type of items of this array. Note that
+          dimensionality is irrelevant here, so multi-dimensional arrays like
+          ``INTEGER[][]`` are constructed as ``ARRAY(Integer)``, not as
+          ``ARRAY(ARRAY(Integer))`` or such.
+
+        :param as_tuple=False: Specify whether return results
+          should be converted to tuples from lists. DBAPIs such
+          as psycopg2 return lists by default. When tuples are
+          returned, the results are hashable.
+
+        :param dimensions: if non-None, the ARRAY will assume a fixed
+         number of dimensions.  This will cause the DDL emitted for this
+         ARRAY to include the exact number of bracket clauses ``[]``,
+         and will also optimize the performance of the type overall.
+         Note that PG arrays are always implicitly "non-dimensioned",
+         meaning they can store any number of dimensions no matter how
+         they were declared.
+
+        :param zero_indexes=False: when True, index values will be converted
+         between Python zero-based and PostgreSQL one-based indexes, e.g.
+         a value of one will be added to all index values before passing
+         to the database.
+
+        """
+        if isinstance(item_type, ARRAY):
+            raise ValueError(
+                "Do not nest ARRAY types; ARRAY(basetype) "
+                "handles multi-dimensional arrays of basetype"
+            )
+        if isinstance(item_type, type):
+            item_type = item_type()
+        self.item_type = item_type
+        self.as_tuple = as_tuple
+        self.dimensions = dimensions
+        self.zero_indexes = zero_indexes
+
+    class Comparator(sqltypes.ARRAY.Comparator):
+        """Define comparison operations for :class:`_types.ARRAY`.
+
+        Note that these operations are in addition to those provided
+        by the base :class:`.types.ARRAY.Comparator` class, including
+        :meth:`.types.ARRAY.Comparator.any` and
+        :meth:`.types.ARRAY.Comparator.all`.
+
+        """
+
+        def contains(self, other, **kwargs):
+            """Boolean expression.  Test if elements are a superset of the
+            elements of the argument array expression.
+
+            kwargs may be ignored by this operator but are required for API
+            conformance.
+            """
+            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
+
+        def contained_by(self, other):
+            """Boolean expression.  Test if elements are a proper subset of the
+            elements of the argument array expression.
+            """
+            return self.operate(
+                CONTAINED_BY, other, result_type=sqltypes.Boolean
+            )
+
+        def overlap(self, other):
+            """Boolean expression.  Test if array has elements in common with
+            an argument array expression.
+            """
+            return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)
+
+    comparator_factory = Comparator
+
+    @property
+    def hashable(self):
+        return self.as_tuple
+
+    @property
+    def python_type(self):
+        return list
+
+    def compare_values(self, x, y):
+        return x == y
+
+    @util.memoized_property
+    def _against_native_enum(self):
+        return (
+            isinstance(self.item_type, sqltypes.Enum)
+            and self.item_type.native_enum
+        )
+
+    def literal_processor(self, dialect):
+        item_proc = self.item_type.dialect_impl(dialect).literal_processor(
+            dialect
+        )
+        if item_proc is None:
+            return None
+
+        def to_str(elements):
+            return f"ARRAY[{', '.join(elements)}]"
+
+        def process(value):
+            inner = self._apply_item_processor(
+                value, item_proc, self.dimensions, to_str
+            )
+            return inner
+
+        return process
+
+    def bind_processor(self, dialect):
+        item_proc = self.item_type.dialect_impl(dialect).bind_processor(
+            dialect
+        )
+
+        def process(value):
+            if value is None:
+                return value
+            else:
+                return self._apply_item_processor(
+                    value, item_proc, self.dimensions, list
+                )
+
+        return process
+
+    def result_processor(self, dialect, coltype):
+        item_proc = self.item_type.dialect_impl(dialect).result_processor(
+            dialect, coltype
+        )
+
+        def process(value):
+            if value is None:
+                return value
+            else:
+                return self._apply_item_processor(
+                    value,
+                    item_proc,
+                    self.dimensions,
+                    tuple if self.as_tuple else list,
+                )
+
+        if self._against_native_enum:
+            super_rp = process
+            pattern = re.compile(r"^{(.*)}$")
+
+            def handle_raw_string(value):
+                inner = pattern.match(value).group(1)
+                return _split_enum_values(inner)
+
+            def process(value):
+                if value is None:
+                    return value
+                # isinstance(value, str) is required to handle
+                # the case where a TypeDecorator for an ARRAY of ENUM is
+                # used, as was required in sa < 1.3.17
+                return super_rp(
+                    handle_raw_string(value)
+                    if isinstance(value, str)
+                    else value
+                )
+
+        return process
+
+
+def _split_enum_values(array_string):
+    if '"' not in array_string:
+        # no escape char is present so it can just split on the comma
+        return array_string.split(",") if array_string else []
+
+    # handles quoted strings from:
+    # r'abc,"quoted","also\\\\quoted", "quoted, comma", "esc \" quot", qpr'
+    # returns
+    # ['abc', 'quoted', 'also\\quoted', 'quoted, comma', 'esc " quot', 'qpr']
+    text = array_string.replace(r"\"", "_$ESC_QUOTE$_")
+    text = text.replace(r"\\", "\\")
+    result = []
+    on_quotes = re.split(r'(")', text)
+    in_quotes = False
+    for tok in on_quotes:
+        if tok == '"':
+            in_quotes = not in_quotes
+        elif in_quotes:
+            result.append(tok.replace("_$ESC_QUOTE$_", '"'))
+        else:
+            result.extend(re.findall(r"([^\s,]+),?", tok))
+    return result
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/asyncpg.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/asyncpg.py
new file mode 100644
index 00000000..09689212
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/asyncpg.py
@@ -0,0 +1,1287 @@
+# dialects/postgresql/asyncpg.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors <see AUTHORS
+# file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+
+r"""
+.. dialect:: postgresql+asyncpg
+    :name: asyncpg
+    :dbapi: asyncpg
+    :connectstring: postgresql+asyncpg://user:password@host:port/dbname[?key=value&key=value...]
+    :url: https://magicstack.github.io/asyncpg/
+
+The asyncpg dialect is SQLAlchemy's first Python asyncio dialect.
+
+Using a special asyncio mediation layer, the asyncpg dialect is usable
+as the backend for the :ref:`SQLAlchemy asyncio <asyncio_toplevel>`
+extension package.
+
+This dialect should normally be used only with the
+:func:`_asyncio.create_async_engine` engine creation function::
+
+    from sqlalchemy.ext.asyncio import create_async_engine
+
+    engine = create_async_engine(
+        "postgresql+asyncpg://user:pass@hostname/dbname"
+    )
+
+.. versionadded:: 1.4
+
+.. note::
+
+    By default asyncpg does not decode the ``json`` and ``jsonb`` types and
+    returns them as strings. SQLAlchemy sets a default type decoder for the
+    ``json`` and ``jsonb`` types using the Python builtin ``json.loads``
+    function.
+    The json implementation used can be changed by setting the attribute
+    ``json_deserializer`` when creating the engine with
+    :func:`create_engine` or :func:`create_async_engine`.
+
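+    For example, a sketch of swapping in a custom deserializer (the
+    ``parse_float`` choice here is illustrative)::
+
+        import decimal
+        import json
+
+        from sqlalchemy.ext.asyncio import create_async_engine
+
+        engine = create_async_engine(
+            "postgresql+asyncpg://user:pass@hostname/dbname",
+            json_deserializer=lambda s: json.loads(
+                s, parse_float=decimal.Decimal
+            ),
+        )
+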
+.. _asyncpg_multihost:
+
+Multihost Connections
+--------------------------
+
+The asyncpg dialect features support for multiple fallback hosts in the
+same way as that of the psycopg2 and psycopg dialects.  The
+syntax is the same,
+using ``host=<host>:<port>`` combinations as additional query string arguments;
+however, there is no default port, so all hosts must have a complete port number
+present, otherwise an exception is raised::
+
+    engine = create_async_engine(
+        "postgresql+asyncpg://user:password@/dbname?host=HostA:5432&host=HostB:5432&host=HostC:5432"
+    )
+
+For complete background on this syntax, see :ref:`psycopg2_multi_host`.
+
+.. versionadded:: 2.0.18
+
+.. seealso::
+
+    :ref:`psycopg2_multi_host`
+
+.. _asyncpg_prepared_statement_cache:
+
+Prepared Statement Cache
+--------------------------
+
+The asyncpg SQLAlchemy dialect makes use of ``asyncpg.connection.prepare()``
+for all statements.   The prepared statement objects are cached after
+construction which appears to grant a 10% or more performance improvement for
+statement invocation.   The cache is on a per-DBAPI connection basis, which
+means that the primary storage for prepared statements is within DBAPI
+connections pooled within the connection pool.   The size of this cache
+defaults to 100 statements per DBAPI connection and may be adjusted using the
+``prepared_statement_cache_size`` DBAPI argument (note that while this argument
+is implemented by SQLAlchemy, it is part of the DBAPI emulation portion of the
+asyncpg dialect, therefore is handled as a DBAPI argument, not a dialect
+argument)::
+
+
+    engine = create_async_engine(
+        "postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=500"
+    )
+
+To disable the prepared statement cache, use a value of zero::
+
+    engine = create_async_engine(
+        "postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=0"
+    )
+
+.. versionadded:: 1.4.0b2 Added ``prepared_statement_cache_size`` for asyncpg.
+
+
+.. warning::  The ``asyncpg`` database driver necessarily uses caches for
+   PostgreSQL type OIDs, which become stale when custom PostgreSQL datatypes
+   such as ``ENUM`` objects are changed via DDL operations.   Additionally,
+   prepared statements themselves which are optionally cached by SQLAlchemy's
+   driver as described above may also become "stale" when DDL has been emitted
+   to the PostgreSQL database which modifies the tables or other objects
+   involved in a particular prepared statement.
+
+   The SQLAlchemy asyncpg dialect will invalidate these caches within its local
+   process when statements that represent DDL are emitted on a local
+   connection, but this is only controllable within a single Python process /
+   database engine.     If DDL changes are made from other database engines
+   and/or processes, a running application may encounter asyncpg exceptions
+   ``InvalidCachedStatementError`` and/or ``InternalServerError("cache lookup
+   failed for type <oid>")`` if it refers to pooled database connections which
+   operated upon the previous structures. The SQLAlchemy asyncpg dialect will
+   recover from these error cases when the driver raises these exceptions by
+   clearing its internal caches as well as those of the asyncpg driver in
+   response to them, but cannot prevent them from being raised in the first
+   place if the cached prepared statement or asyncpg type caches have gone
+   stale, nor can it retry the statement as the PostgreSQL transaction is
+   invalidated when these errors occur.
+
+.. _asyncpg_prepared_statement_name:
+
+Prepared Statement Name with PGBouncer
+--------------------------------------
+
+By default, asyncpg enumerates prepared statements in numeric order, which
+can lead to errors if a name has already been taken for another prepared
+statement. This issue can arise if your application uses database proxies
+such as PgBouncer to handle connections. One possible workaround is to
+use dynamic prepared statement names, which asyncpg now supports through
+an optional ``name`` value for the statement name. This allows you to
+generate your own unique names that won't conflict with existing ones.
+To achieve this, you can provide a function that will be called every time
+a prepared statement is prepared::
+
+    from uuid import uuid4
+
+    engine = create_async_engine(
+        "postgresql+asyncpg://user:pass@somepgbouncer/dbname",
+        poolclass=NullPool,
+        connect_args={
+            "prepared_statement_name_func": lambda: f"__asyncpg_{uuid4()}__",
+        },
+    )
+
+.. seealso::
+
+   https://github.com/MagicStack/asyncpg/issues/837
+
+   https://github.com/sqlalchemy/sqlalchemy/issues/6467
+
+.. warning:: When using PGBouncer, to prevent a buildup of useless prepared statements in
+   your application, it's important to use the :class:`.NullPool` pool
+   class, and to configure PgBouncer to use `DISCARD <https://www.postgresql.org/docs/current/sql-discard.html>`_
+   when returning connections.  The DISCARD command is used to release resources held by the db connection,
+   including prepared statements. Without proper setup, prepared statements can
+   accumulate quickly and cause performance issues.
+
+Disabling the PostgreSQL JIT to improve ENUM datatype handling
+---------------------------------------------------------------
+
+Asyncpg has an `issue <https://github.com/MagicStack/asyncpg/issues/727>`_ when
+using PostgreSQL ENUM datatypes, where upon the creation of new database
+connections, an expensive query may be emitted in order to retrieve metadata
+regarding custom types, which has been shown to negatively affect performance.
+To mitigate this issue, the PostgreSQL "jit" setting may be disabled from the
+client using this setting passed to :func:`_asyncio.create_async_engine`::
+
+    engine = create_async_engine(
+        "postgresql+asyncpg://user:password@localhost/tmp",
+        connect_args={"server_settings": {"jit": "off"}},
+    )
+
+.. seealso::
+
+    https://github.com/MagicStack/asyncpg/issues/727
+
+"""  # noqa
+
+from __future__ import annotations
+
+from collections import deque
+import decimal
+import json as _py_json
+import re
+import time
+
+from . import json
+from . import ranges
+from .array import ARRAY as PGARRAY
+from .base import _DECIMAL_TYPES
+from .base import _FLOAT_TYPES
+from .base import _INT_TYPES
+from .base import ENUM
+from .base import INTERVAL
+from .base import OID
+from .base import PGCompiler
+from .base import PGDialect
+from .base import PGExecutionContext
+from .base import PGIdentifierPreparer
+from .base import REGCLASS
+from .base import REGCONFIG
+from .types import BIT
+from .types import BYTEA
+from .types import CITEXT
+from ... import exc
+from ... import pool
+from ... import util
+from ...engine import AdaptedConnection
+from ...engine import processors
+from ...sql import sqltypes
+from ...util.concurrency import asyncio
+from ...util.concurrency import await_fallback
+from ...util.concurrency import await_only
+
+
+class AsyncpgARRAY(PGARRAY):
+    render_bind_cast = True
+
+
+class AsyncpgString(sqltypes.String):
+    render_bind_cast = True
+
+
+class AsyncpgREGCONFIG(REGCONFIG):
+    render_bind_cast = True
+
+
+class AsyncpgTime(sqltypes.Time):
+    render_bind_cast = True
+
+
+class AsyncpgBit(BIT):
+    render_bind_cast = True
+
+
+class AsyncpgByteA(BYTEA):
+    render_bind_cast = True
+
+
+class AsyncpgDate(sqltypes.Date):
+    render_bind_cast = True
+
+
+class AsyncpgDateTime(sqltypes.DateTime):
+    render_bind_cast = True
+
+
+class AsyncpgBoolean(sqltypes.Boolean):
+    render_bind_cast = True
+
+
+class AsyncPgInterval(INTERVAL):
+    render_bind_cast = True
+
+    @classmethod
+    def adapt_emulated_to_native(cls, interval, **kw):
+        return AsyncPgInterval(precision=interval.second_precision)
+
+
+class AsyncPgEnum(ENUM):
+    render_bind_cast = True
+
+
+class AsyncpgInteger(sqltypes.Integer):
+    render_bind_cast = True
+
+
+class AsyncpgSmallInteger(sqltypes.SmallInteger):
+    render_bind_cast = True
+
+
+class AsyncpgBigInteger(sqltypes.BigInteger):
+    render_bind_cast = True
+
+
+class AsyncpgJSON(json.JSON):
+    def result_processor(self, dialect, coltype):
+        return None
+
+
+class AsyncpgJSONB(json.JSONB):
+    def result_processor(self, dialect, coltype):
+        return None
+
+
+class AsyncpgJSONIndexType(sqltypes.JSON.JSONIndexType):
+    pass
+
+
+class AsyncpgJSONIntIndexType(sqltypes.JSON.JSONIntIndexType):
+    __visit_name__ = "json_int_index"
+
+    render_bind_cast = True
+
+
+class AsyncpgJSONStrIndexType(sqltypes.JSON.JSONStrIndexType):
+    __visit_name__ = "json_str_index"
+
+    render_bind_cast = True
+
+
+class AsyncpgJSONPathType(json.JSONPathType):
+    def bind_processor(self, dialect):
+        def process(value):
+            if isinstance(value, str):
+                # If it's already a string assume that it's in json path
+                # format. This allows using cast with json paths literals
+                return value
+            elif value:
+                tokens = [str(elem) for elem in value]
+                return tokens
+            else:
+                return []
+
+        return process
+
+
+class AsyncpgNumeric(sqltypes.Numeric):
+    render_bind_cast = True
+
+    def bind_processor(self, dialect):
+        return None
+
+    def result_processor(self, dialect, coltype):
+        if self.asdecimal:
+            if coltype in _FLOAT_TYPES:
+                return processors.to_decimal_processor_factory(
+                    decimal.Decimal, self._effective_decimal_return_scale
+                )
+            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+                # asyncpg returns Decimal natively for 1700 (NUMERIC)
+                return None
+            else:
+                raise exc.InvalidRequestError(
+                    "Unknown PG numeric type: %d" % coltype
+                )
+        else:
+            if coltype in _FLOAT_TYPES:
+                # asyncpg returns float natively for 701 (FLOAT8)
+                return None
+            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+                return processors.to_float
+            else:
+                raise exc.InvalidRequestError(
+                    "Unknown PG numeric type: %d" % coltype
+                )
+
+
+class AsyncpgFloat(AsyncpgNumeric, sqltypes.Float):
+    __visit_name__ = "float"
+    render_bind_cast = True
+
+
+class AsyncpgREGCLASS(REGCLASS):
+    render_bind_cast = True
+
+
+class AsyncpgOID(OID):
+    render_bind_cast = True
+
+
+class AsyncpgCHAR(sqltypes.CHAR):
+    render_bind_cast = True
+
+
+class _AsyncpgRange(ranges.AbstractSingleRangeImpl):
+    def bind_processor(self, dialect):
+        asyncpg_Range = dialect.dbapi.asyncpg.Range
+
+        def to_range(value):
+            if isinstance(value, ranges.Range):
+                value = asyncpg_Range(
+                    value.lower,
+                    value.upper,
+                    lower_inc=value.bounds[0] == "[",
+                    upper_inc=value.bounds[1] == "]",
+                    empty=value.empty,
+                )
+            return value
+
+        return to_range
+
+    def result_processor(self, dialect, coltype):
+        def to_range(value):
+            if value is not None:
+                empty = value.isempty
+                value = ranges.Range(
+                    value.lower,
+                    value.upper,
+                    bounds=f"{'[' if empty or value.lower_inc else '('}"  # type: ignore  # noqa: E501
+                    f"{']' if not empty and value.upper_inc else ')'}",
+                    empty=empty,
+                )
+            return value
+
+        return to_range
+
+
+class _AsyncpgMultiRange(ranges.AbstractMultiRangeImpl):
+    def bind_processor(self, dialect):
+        asyncpg_Range = dialect.dbapi.asyncpg.Range
+
+        NoneType = type(None)
+
+        def to_range(value):
+            if isinstance(value, (str, NoneType)):
+                return value
+
+            def to_range(value):
+                if isinstance(value, ranges.Range):
+                    value = asyncpg_Range(
+                        value.lower,
+                        value.upper,
+                        lower_inc=value.bounds[0] == "[",
+                        upper_inc=value.bounds[1] == "]",
+                        empty=value.empty,
+                    )
+                return value
+
+            return [to_range(element) for element in value]
+
+        return to_range
+
+    def result_processor(self, dialect, coltype):
+        def to_range_array(value):
+            def to_range(rvalue):
+                if rvalue is not None:
+                    empty = rvalue.isempty
+                    rvalue = ranges.Range(
+                        rvalue.lower,
+                        rvalue.upper,
+                        bounds=f"{'[' if empty or rvalue.lower_inc else '('}"  # type: ignore  # noqa: E501
+                        f"{']' if not empty and rvalue.upper_inc else ')'}",
+                        empty=empty,
+                    )
+                return rvalue
+
+            if value is not None:
+                value = ranges.MultiRange(to_range(elem) for elem in value)
+
+            return value
+
+        return to_range_array
+
+
+class PGExecutionContext_asyncpg(PGExecutionContext):
+    def handle_dbapi_exception(self, e):
+        if isinstance(
+            e,
+            (
+                self.dialect.dbapi.InvalidCachedStatementError,
+                self.dialect.dbapi.InternalServerError,
+            ),
+        ):
+            self.dialect._invalidate_schema_cache()
+
+    def pre_exec(self):
+        if self.isddl:
+            self.dialect._invalidate_schema_cache()
+
+        self.cursor._invalidate_schema_cache_asof = (
+            self.dialect._invalidate_schema_cache_asof
+        )
+
+        if not self.compiled:
+            return
+
+    def create_server_side_cursor(self):
+        return self._dbapi_connection.cursor(server_side=True)
+
+
+class PGCompiler_asyncpg(PGCompiler):
+    pass
+
+
+class PGIdentifierPreparer_asyncpg(PGIdentifierPreparer):
+    pass
+
+
+class AsyncAdapt_asyncpg_cursor:
+    __slots__ = (
+        "_adapt_connection",
+        "_connection",
+        "_rows",
+        "description",
+        "arraysize",
+        "rowcount",
+        "_cursor",
+        "_invalidate_schema_cache_asof",
+    )
+
+    server_side = False
+
+    def __init__(self, adapt_connection):
+        self._adapt_connection = adapt_connection
+        self._connection = adapt_connection._connection
+        self._rows = deque()
+        self._cursor = None
+        self.description = None
+        self.arraysize = 1
+        self.rowcount = -1
+        self._invalidate_schema_cache_asof = 0
+
+    def close(self):
+        self._rows.clear()
+
+    def _handle_exception(self, error):
+        self._adapt_connection._handle_exception(error)
+
+    async def _prepare_and_execute(self, operation, parameters):
+        adapt_connection = self._adapt_connection
+
+        async with adapt_connection._execute_mutex:
+            if not adapt_connection._started:
+                await adapt_connection._start_transaction()
+
+            if parameters is None:
+                parameters = ()
+
+            try:
+                prepared_stmt, attributes = await adapt_connection._prepare(
+                    operation, self._invalidate_schema_cache_asof
+                )
+
+                if attributes:
+                    self.description = [
+                        (
+                            attr.name,
+                            attr.type.oid,
+                            None,
+                            None,
+                            None,
+                            None,
+                            None,
+                        )
+                        for attr in attributes
+                    ]
+                else:
+                    self.description = None
+
+                if self.server_side:
+                    self._cursor = await prepared_stmt.cursor(*parameters)
+                    self.rowcount = -1
+                else:
+                    self._rows = deque(await prepared_stmt.fetch(*parameters))
+                    status = prepared_stmt.get_statusmsg()
+
+                    reg = re.match(
+                        r"(?:SELECT|UPDATE|DELETE|INSERT \d+) (\d+)",
+                        status or "",
+                    )
+                    if reg:
+                        self.rowcount = int(reg.group(1))
+                    else:
+                        self.rowcount = -1
+
+            except Exception as error:
+                self._handle_exception(error)
+
+    async def _executemany(self, operation, seq_of_parameters):
+        adapt_connection = self._adapt_connection
+
+        self.description = None
+        async with adapt_connection._execute_mutex:
+            await adapt_connection._check_type_cache_invalidation(
+                self._invalidate_schema_cache_asof
+            )
+
+            if not adapt_connection._started:
+                await adapt_connection._start_transaction()
+
+            try:
+                return await self._connection.executemany(
+                    operation, seq_of_parameters
+                )
+            except Exception as error:
+                self._handle_exception(error)
+
+    def execute(self, operation, parameters=None):
+        self._adapt_connection.await_(
+            self._prepare_and_execute(operation, parameters)
+        )
+
+    def executemany(self, operation, seq_of_parameters):
+        return self._adapt_connection.await_(
+            self._executemany(operation, seq_of_parameters)
+        )
+
+    def setinputsizes(self, *inputsizes):
+        raise NotImplementedError()
+
+    def __iter__(self):
+        while self._rows:
+            yield self._rows.popleft()
+
+    def fetchone(self):
+        if self._rows:
+            return self._rows.popleft()
+        else:
+            return None
+
+    def fetchmany(self, size=None):
+        if size is None:
+            size = self.arraysize
+
+        rr = self._rows
+        return [rr.popleft() for _ in range(min(size, len(rr)))]
+
+    def fetchall(self):
+        retval = list(self._rows)
+        self._rows.clear()
+        return retval
+
+
+class AsyncAdapt_asyncpg_ss_cursor(AsyncAdapt_asyncpg_cursor):
+    server_side = True
+    __slots__ = ("_rowbuffer",)
+
+    def __init__(self, adapt_connection):
+        super().__init__(adapt_connection)
+        self._rowbuffer = deque()
+
+    def close(self):
+        self._cursor = None
+        self._rowbuffer.clear()
+
+    def _buffer_rows(self):
+        assert self._cursor is not None
+        new_rows = self._adapt_connection.await_(self._cursor.fetch(50))
+        self._rowbuffer.extend(new_rows)
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        # note: using ``yield`` here would turn __anext__ into an async
+        # generator rather than a coroutine returning a single row, which
+        # breaks ``async for``; fetch and return one row at a time instead.
+        if not self._rowbuffer:
+            self._buffer_rows()
+            if not self._rowbuffer:
+                raise StopAsyncIteration()
+        return self._rowbuffer.popleft()
+
+    def fetchone(self):
+        if not self._rowbuffer:
+            self._buffer_rows()
+            if not self._rowbuffer:
+                return None
+        return self._rowbuffer.popleft()
+
+    def fetchmany(self, size=None):
+        if size is None:
+            return self.fetchall()
+
+        if not self._rowbuffer:
+            self._buffer_rows()
+
+        assert self._cursor is not None
+        rb = self._rowbuffer
+        lb = len(rb)
+        if size > lb:
+            rb.extend(
+                self._adapt_connection.await_(self._cursor.fetch(size - lb))
+            )
+
+        return [rb.popleft() for _ in range(min(size, len(rb)))]
+
+    def fetchall(self):
+        ret = list(self._rowbuffer)
+        ret.extend(self._adapt_connection.await_(self._all()))
+        self._rowbuffer.clear()
+        return ret
+
+    async def _all(self):
+        rows = []
+
+        # TODO: looks like we have to hand-roll some kind of batching here.
+        # hardcoding for the moment but this should be improved.
+        while True:
+            batch = await self._cursor.fetch(1000)
+            if batch:
+                rows.extend(batch)
+                continue
+            else:
+                break
+        return rows
+
+    def executemany(self, operation, seq_of_parameters):
+        raise NotImplementedError(
+            "server side cursor doesn't support executemany yet"
+        )
+
+
+class AsyncAdapt_asyncpg_connection(AdaptedConnection):
+    __slots__ = (
+        "dbapi",
+        "isolation_level",
+        "_isolation_setting",
+        "readonly",
+        "deferrable",
+        "_transaction",
+        "_started",
+        "_prepared_statement_cache",
+        "_prepared_statement_name_func",
+        "_invalidate_schema_cache_asof",
+        "_execute_mutex",
+    )
+
+    await_ = staticmethod(await_only)
+
+    def __init__(
+        self,
+        dbapi,
+        connection,
+        prepared_statement_cache_size=100,
+        prepared_statement_name_func=None,
+    ):
+        self.dbapi = dbapi
+        self._connection = connection
+        self.isolation_level = self._isolation_setting = None
+        self.readonly = False
+        self.deferrable = False
+        self._transaction = None
+        self._started = False
+        self._invalidate_schema_cache_asof = time.time()
+        self._execute_mutex = asyncio.Lock()
+
+        if prepared_statement_cache_size:
+            self._prepared_statement_cache = util.LRUCache(
+                prepared_statement_cache_size
+            )
+        else:
+            self._prepared_statement_cache = None
+
+        if prepared_statement_name_func:
+            self._prepared_statement_name_func = prepared_statement_name_func
+        else:
+            self._prepared_statement_name_func = self._default_name_func
+
+    async def _check_type_cache_invalidation(self, invalidate_timestamp):
+        if invalidate_timestamp > self._invalidate_schema_cache_asof:
+            await self._connection.reload_schema_state()
+            self._invalidate_schema_cache_asof = invalidate_timestamp
+
+    async def _prepare(self, operation, invalidate_timestamp):
+        await self._check_type_cache_invalidation(invalidate_timestamp)
+
+        cache = self._prepared_statement_cache
+        if cache is None:
+            prepared_stmt = await self._connection.prepare(
+                operation, name=self._prepared_statement_name_func()
+            )
+            attributes = prepared_stmt.get_attributes()
+            return prepared_stmt, attributes
+
+        # asyncpg uses a type cache for the "attributes" which seems to go
+        # stale independently of the PreparedStatement itself, so place that
+        # collection in the cache as well.
+        if operation in cache:
+            prepared_stmt, attributes, cached_timestamp = cache[operation]
+
+            # preparedstatements themselves also go stale for certain DDL
+            # changes such as size of a VARCHAR changing, so there is also
+            # a cross-connection invalidation timestamp
+            if cached_timestamp > invalidate_timestamp:
+                return prepared_stmt, attributes
+
+        prepared_stmt = await self._connection.prepare(
+            operation, name=self._prepared_statement_name_func()
+        )
+        attributes = prepared_stmt.get_attributes()
+        cache[operation] = (prepared_stmt, attributes, time.time())
+
+        return prepared_stmt, attributes
+
+    def _handle_exception(self, error):
+        if self._connection.is_closed():
+            self._transaction = None
+            self._started = False
+
+        if not isinstance(error, AsyncAdapt_asyncpg_dbapi.Error):
+            exception_mapping = self.dbapi._asyncpg_error_translate
+
+            for super_ in type(error).__mro__:
+                if super_ in exception_mapping:
+                    translated_error = exception_mapping[super_](
+                        "%s: %s" % (type(error), error)
+                    )
+                    translated_error.pgcode = translated_error.sqlstate = (
+                        getattr(error, "sqlstate", None)
+                    )
+                    raise translated_error from error
+            else:
+                raise error
+        else:
+            raise error
+
+    @property
+    def autocommit(self):
+        return self.isolation_level == "autocommit"
+
+    @autocommit.setter
+    def autocommit(self, value):
+        if value:
+            self.isolation_level = "autocommit"
+        else:
+            self.isolation_level = self._isolation_setting
+
+    def ping(self):
+        try:
+            _ = self.await_(self._async_ping())
+        except Exception as error:
+            self._handle_exception(error)
+
+    async def _async_ping(self):
+        if self._transaction is None and self.isolation_level != "autocommit":
+            # create a transaction explicitly to support pgbouncer
+            # transaction mode.   See #10226
+            tr = self._connection.transaction()
+            await tr.start()
+            try:
+                await self._connection.fetchrow(";")
+            finally:
+                await tr.rollback()
+        else:
+            await self._connection.fetchrow(";")
+
+    def set_isolation_level(self, level):
+        if self._started:
+            self.rollback()
+        self.isolation_level = self._isolation_setting = level
+
+    async def _start_transaction(self):
+        if self.isolation_level == "autocommit":
+            return
+
+        try:
+            self._transaction = self._connection.transaction(
+                isolation=self.isolation_level,
+                readonly=self.readonly,
+                deferrable=self.deferrable,
+            )
+            await self._transaction.start()
+        except Exception as error:
+            self._handle_exception(error)
+        else:
+            self._started = True
+
+    def cursor(self, server_side=False):
+        if server_side:
+            return AsyncAdapt_asyncpg_ss_cursor(self)
+        else:
+            return AsyncAdapt_asyncpg_cursor(self)
+
+    async def _rollback_and_discard(self):
+        try:
+            await self._transaction.rollback()
+        finally:
+            # if asyncpg .rollback() was actually called, then whether or
+            # not it raised or succeeded, the transaction is done, discard it
+            self._transaction = None
+            self._started = False
+
+    async def _commit_and_discard(self):
+        try:
+            await self._transaction.commit()
+        finally:
+            # if asyncpg .commit() was actually called, then whether or
+            # not it raised or succeeded, the transaction is done, discard it
+            self._transaction = None
+            self._started = False
+
+    def rollback(self):
+        if self._started:
+            try:
+                self.await_(self._rollback_and_discard())
+                self._transaction = None
+                self._started = False
+            except Exception as error:
+                # don't dereference asyncpg transaction if we didn't
+                # actually try to call rollback() on it
+                self._handle_exception(error)
+
+    def commit(self):
+        if self._started:
+            try:
+                self.await_(self._commit_and_discard())
+                self._transaction = None
+                self._started = False
+            except Exception as error:
+                # don't dereference asyncpg transaction if we didn't
+                # actually try to call commit() on it
+                self._handle_exception(error)
+
+    def close(self):
+        self.rollback()
+
+        self.await_(self._connection.close())
+
+    def terminate(self):
+        if util.concurrency.in_greenlet():
+            # in a greenlet; this is the "connection was invalidated"
+            # case.
+            try:
+                # try to gracefully close; see #10717
+                # timeout added in asyncpg 0.14.0 December 2017
+                self.await_(asyncio.shield(self._connection.close(timeout=2)))
+            except (
+                asyncio.TimeoutError,
+                asyncio.CancelledError,
+                OSError,
+                self.dbapi.asyncpg.PostgresError,
+            ):
+                # in the case where we are recycling an old connection
+                # that may have already been disconnected, close() will
+                # fail with the above timeout.  in this case, terminate
+                # the connection without any further waiting.
+                # see issue #8419
+                self._connection.terminate()
+        else:
+            # not in a greenlet; this is the gc cleanup case
+            self._connection.terminate()
+        self._started = False
+
+    @staticmethod
+    def _default_name_func():
+        return None
+
+
+class AsyncAdaptFallback_asyncpg_connection(AsyncAdapt_asyncpg_connection):
+    __slots__ = ()
+
+    await_ = staticmethod(await_fallback)
+
+
+class AsyncAdapt_asyncpg_dbapi:
+    def __init__(self, asyncpg):
+        self.asyncpg = asyncpg
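+        # asyncpg natively uses positional parameters of the form $1, $2, ...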
+        self.paramstyle = "numeric_dollar"
+
+    def connect(self, *arg, **kw):
+        async_fallback = kw.pop("async_fallback", False)
+        creator_fn = kw.pop("async_creator_fn", self.asyncpg.connect)
+        prepared_statement_cache_size = kw.pop(
+            "prepared_statement_cache_size", 100
+        )
+        prepared_statement_name_func = kw.pop(
+            "prepared_statement_name_func", None
+        )
+
+        if util.asbool(async_fallback):
+            return AsyncAdaptFallback_asyncpg_connection(
+                self,
+                await_fallback(creator_fn(*arg, **kw)),
+                prepared_statement_cache_size=prepared_statement_cache_size,
+                prepared_statement_name_func=prepared_statement_name_func,
+            )
+        else:
+            return AsyncAdapt_asyncpg_connection(
+                self,
+                await_only(creator_fn(*arg, **kw)),
+                prepared_statement_cache_size=prepared_statement_cache_size,
+                prepared_statement_name_func=prepared_statement_name_func,
+            )
+
+    class Error(Exception):
+        pass
+
+    class Warning(Exception):  # noqa
+        pass
+
+    class InterfaceError(Error):
+        pass
+
+    class DatabaseError(Error):
+        pass
+
+    class InternalError(DatabaseError):
+        pass
+
+    class OperationalError(DatabaseError):
+        pass
+
+    class ProgrammingError(DatabaseError):
+        pass
+
+    class IntegrityError(DatabaseError):
+        pass
+
+    class DataError(DatabaseError):
+        pass
+
+    class NotSupportedError(DatabaseError):
+        pass
+
+    class InternalServerError(InternalError):
+        pass
+
+    class InvalidCachedStatementError(NotSupportedError):
+        def __init__(self, message):
+            super().__init__(
+                message + " (SQLAlchemy asyncpg dialect will now invalidate "
+                "all prepared caches in response to this exception)",
+            )
+
+    # pep-249 datatype placeholders.  As of SQLAlchemy 2.0 these aren't
+    # used, however the test suite looks for these in a few cases.
+    STRING = util.symbol("STRING")
+    NUMBER = util.symbol("NUMBER")
+    DATETIME = util.symbol("DATETIME")
+
+    @util.memoized_property
+    def _asyncpg_error_translate(self):
+        import asyncpg
+
+        return {
+            asyncpg.exceptions.IntegrityConstraintViolationError: self.IntegrityError,  # noqa: E501
+            asyncpg.exceptions.PostgresError: self.Error,
+            asyncpg.exceptions.SyntaxOrAccessError: self.ProgrammingError,
+            asyncpg.exceptions.InterfaceError: self.InterfaceError,
+            asyncpg.exceptions.InvalidCachedStatementError: self.InvalidCachedStatementError,  # noqa: E501
+            asyncpg.exceptions.InternalServerError: self.InternalServerError,
+        }
+
+    def Binary(self, value):
+        return value
+
+
+class PGDialect_asyncpg(PGDialect):
+    driver = "asyncpg"
+    supports_statement_cache = True
+
+    supports_server_side_cursors = True
+
+    render_bind_cast = True
+    has_terminate = True
+
+    default_paramstyle = "numeric_dollar"
+    supports_sane_multi_rowcount = False
+    execution_ctx_cls = PGExecutionContext_asyncpg
+    statement_compiler = PGCompiler_asyncpg
+    preparer = PGIdentifierPreparer_asyncpg
+
+    colspecs = util.update_copy(
+        PGDialect.colspecs,
+        {
+            sqltypes.String: AsyncpgString,
+            sqltypes.ARRAY: AsyncpgARRAY,
+            BIT: AsyncpgBit,
+            CITEXT: CITEXT,
+            REGCONFIG: AsyncpgREGCONFIG,
+            sqltypes.Time: AsyncpgTime,
+            sqltypes.Date: AsyncpgDate,
+            sqltypes.DateTime: AsyncpgDateTime,
+            sqltypes.Interval: AsyncPgInterval,
+            INTERVAL: AsyncPgInterval,
+            sqltypes.Boolean: AsyncpgBoolean,
+            sqltypes.Integer: AsyncpgInteger,
+            sqltypes.SmallInteger: AsyncpgSmallInteger,
+            sqltypes.BigInteger: AsyncpgBigInteger,
+            sqltypes.Numeric: AsyncpgNumeric,
+            sqltypes.Float: AsyncpgFloat,
+            sqltypes.JSON: AsyncpgJSON,
+            sqltypes.LargeBinary: AsyncpgByteA,
+            json.JSONB: AsyncpgJSONB,
+            sqltypes.JSON.JSONPathType: AsyncpgJSONPathType,
+            sqltypes.JSON.JSONIndexType: AsyncpgJSONIndexType,
+            sqltypes.JSON.JSONIntIndexType: AsyncpgJSONIntIndexType,
+            sqltypes.JSON.JSONStrIndexType: AsyncpgJSONStrIndexType,
+            sqltypes.Enum: AsyncPgEnum,
+            OID: AsyncpgOID,
+            REGCLASS: AsyncpgREGCLASS,
+            sqltypes.CHAR: AsyncpgCHAR,
+            ranges.AbstractSingleRange: _AsyncpgRange,
+            ranges.AbstractMultiRange: _AsyncpgMultiRange,
+        },
+    )
+    is_async = True
+    _invalidate_schema_cache_asof = 0
+
+    def _invalidate_schema_cache(self):
+        self._invalidate_schema_cache_asof = time.time()
+
+    @util.memoized_property
+    def _dbapi_version(self):
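+        # parse the version string, e.g. "0.29.0", into a tuple of ints;
+        # fall back to a very high version when it can't be determined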
+        if self.dbapi and hasattr(self.dbapi, "__version__"):
+            return tuple(
+                [
+                    int(x)
+                    for x in re.findall(
+                        r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__
+                    )
+                ]
+            )
+        else:
+            return (99, 99, 99)
+
+    @classmethod
+    def import_dbapi(cls):
+        return AsyncAdapt_asyncpg_dbapi(__import__("asyncpg"))
+
+    @util.memoized_property
+    def _isolation_lookup(self):
+        return {
+            "AUTOCOMMIT": "autocommit",
+            "READ COMMITTED": "read_committed",
+            "REPEATABLE READ": "repeatable_read",
+            "SERIALIZABLE": "serializable",
+        }
+
+    def get_isolation_level_values(self, dbapi_connection):
+        return list(self._isolation_lookup)
+
+    def set_isolation_level(self, dbapi_connection, level):
+        dbapi_connection.set_isolation_level(self._isolation_lookup[level])
+
+    def set_readonly(self, connection, value):
+        connection.readonly = value
+
+    def get_readonly(self, connection):
+        return connection.readonly
+
+    def set_deferrable(self, connection, value):
+        connection.deferrable = value
+
+    def get_deferrable(self, connection):
+        return connection.deferrable
+
+    def do_terminate(self, dbapi_connection) -> None:
+        dbapi_connection.terminate()
+
+    def create_connect_args(self, url):
+        opts = url.translate_connect_args(username="user")
+        multihosts, multiports = self._split_multihost_from_url(url)
+
+        opts.update(url.query)
+
+        if multihosts:
+            assert multiports
+            if len(multihosts) == 1:
+                opts["host"] = multihosts[0]
+                if multiports[0] is not None:
+                    opts["port"] = multiports[0]
+            elif not all(multihosts):
+                raise exc.ArgumentError(
+                    "All hosts are required to be present"
+                    " for asyncpg multiple host URL"
+                )
+            elif not all(multiports):
+                raise exc.ArgumentError(
+                    "All ports are required to be present"
+                    " for asyncpg multiple host URL"
+                )
+            else:
+                opts["host"] = list(multihosts)
+                opts["port"] = list(multiports)
+        else:
+            util.coerce_kw_type(opts, "port", int)
+        util.coerce_kw_type(opts, "prepared_statement_cache_size", int)
+        return ([], opts)
+
+    def do_ping(self, dbapi_connection):
+        dbapi_connection.ping()
+        return True
+
+    @classmethod
+    def get_pool_class(cls, url):
+        async_fallback = url.query.get("async_fallback", False)
+
+        if util.asbool(async_fallback):
+            return pool.FallbackAsyncAdaptedQueuePool
+        else:
+            return pool.AsyncAdaptedQueuePool
+
+    def is_disconnect(self, e, connection, cursor):
+        if connection:
+            return connection._connection.is_closed()
+        else:
+            return isinstance(
+                e, self.dbapi.InterfaceError
+            ) and "connection is closed" in str(e)
+
+    async def setup_asyncpg_json_codec(self, conn):
+        """set up JSON codec for asyncpg.
+
+        This occurs for all new connections and
+        can be overridden by third party dialects.
+
+        .. versionadded:: 1.4.27
+
+        """
+
+        asyncpg_connection = conn._connection
+        deserializer = self._json_deserializer or _py_json.loads
+
+        def _json_decoder(bin_value):
+            return deserializer(bin_value.decode())
+
+        await asyncpg_connection.set_type_codec(
+            "json",
+            encoder=str.encode,
+            decoder=_json_decoder,
+            schema="pg_catalog",
+            format="binary",
+        )
+
+    async def setup_asyncpg_jsonb_codec(self, conn):
+        """set up JSONB codec for asyncpg.
+
+        This occurs for all new connections and
+        can be overridden by third party dialects.
+
+        .. versionadded:: 1.4.27
+
+        """
+
+        asyncpg_connection = conn._connection
+        deserializer = self._json_deserializer or _py_json.loads
+
+        def _jsonb_encoder(str_value):
+            # \x01 is the prefix for jsonb used by PostgreSQL.
+            # asyncpg requires it when format='binary'
+            return b"\x01" + str_value.encode()
+
+        def _jsonb_decoder(bin_value):
+            # the byte is the \x01 prefix for jsonb used by PostgreSQL.
+            # asyncpg returns it when format='binary'
+            return deserializer(bin_value[1:].decode())
+
+        await asyncpg_connection.set_type_codec(
+            "jsonb",
+            encoder=_jsonb_encoder,
+            decoder=_jsonb_decoder,
+            schema="pg_catalog",
+            format="binary",
+        )
+
+    async def _disable_asyncpg_inet_codecs(self, conn):
+        asyncpg_connection = conn._connection
+
+        await asyncpg_connection.set_type_codec(
+            "inet",
+            encoder=lambda s: s,
+            decoder=lambda s: s,
+            schema="pg_catalog",
+            format="text",
+        )
+
+        await asyncpg_connection.set_type_codec(
+            "cidr",
+            encoder=lambda s: s,
+            decoder=lambda s: s,
+            schema="pg_catalog",
+            format="text",
+        )
+
+    def on_connect(self):
+        """on_connect for asyncpg
+
+        A major component of this for asyncpg is to set up type decoders at the
+        asyncpg level.
+
+        See https://github.com/MagicStack/asyncpg/issues/623 for
+        notes on JSON/JSONB implementation.
+
+        """
+
+        super_connect = super().on_connect()
+
+        def connect(conn):
+            conn.await_(self.setup_asyncpg_json_codec(conn))
+            conn.await_(self.setup_asyncpg_jsonb_codec(conn))
+
+            if self._native_inet_types is False:
+                conn.await_(self._disable_asyncpg_inet_codecs(conn))
+            if super_connect is not None:
+                super_connect(conn)
+
+        return connect
+
+    def get_driver_connection(self, connection):
+        return connection._connection
+
+
+dialect = PGDialect_asyncpg
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/base.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/base.py
new file mode 100644
index 00000000..ca2a3fa5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/base.py
@@ -0,0 +1,5041 @@
+# dialects/postgresql/base.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+
+r"""
+.. dialect:: postgresql
+    :name: PostgreSQL
+    :normal_support: 9.6+
+    :best_effort: 9+
+
+.. _postgresql_sequences:
+
+Sequences/SERIAL/IDENTITY
+-------------------------
+
+PostgreSQL supports sequences, and SQLAlchemy uses these as the default means
+of creating new primary key values for integer-based primary key columns. When
+creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for
+integer-based primary key columns, which generates a sequence and server side
+default corresponding to the column.
+
+To specify a specific named sequence to be used for primary key generation,
+use the :func:`~sqlalchemy.schema.Sequence` construct::
+
+    Table(
+        "sometable",
+        metadata,
+        Column(
+            "id", Integer, Sequence("some_id_seq", start=1), primary_key=True
+        ),
+    )
+
+When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
+having the "last insert identifier" available, a RETURNING clause is added to
+the INSERT statement which specifies the primary key columns should be
+returned after the statement completes. The RETURNING functionality only takes
+place if PostgreSQL 8.2 or later is in use. As a fallback approach, the
+sequence, whether specified explicitly or implicitly via ``SERIAL``, is
+executed independently beforehand, with the returned value used in the
+subsequent insert. Note that when an
+:func:`~sqlalchemy.sql.expression.insert()` construct is executed using
+"executemany" semantics, the "last inserted identifier" functionality does not
+apply; no RETURNING clause is emitted nor is the sequence pre-executed in this
+case.
+
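+As a brief illustration of the single-row case (a hedged sketch; the
+``engine`` and a ``data`` column on ``sometable`` are assumed here)::
+
+    with engine.begin() as conn:
+        # single-row INSERT: the primary key is fetched via RETURNING
+        result = conn.execute(sometable.insert(), {"data": "value"})
+        print(result.inserted_primary_key)
+
+        # "executemany": no RETURNING is emitted and no newly generated
+        # primary key values are reported back
+        conn.execute(sometable.insert(), [{"data": "a"}, {"data": "b"}])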
+
+PostgreSQL 10 and above IDENTITY columns
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+PostgreSQL 10 and above have a new IDENTITY feature that supersedes the use
+of SERIAL. The :class:`_schema.Identity` construct in a
+:class:`_schema.Column` can be used to control its behavior::
+
+    from sqlalchemy import Table, Column, MetaData, Integer, String, Identity
+
+    metadata = MetaData()
+
+    data = Table(
+        "data",
+        metadata,
+        Column(
+            "id", Integer, Identity(start=42, cycle=True), primary_key=True
+        ),
+        Column("data", String),
+    )
+
+The CREATE TABLE for the above :class:`_schema.Table` object would be:
+
+.. sourcecode:: sql
+
+    CREATE TABLE data (
+        id INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 42 CYCLE),
+        data VARCHAR,
+        PRIMARY KEY (id)
+    )
+
+.. versionchanged::  1.4   Added :class:`_schema.Identity` construct
+   in a :class:`_schema.Column` to specify the option of an autoincrementing
+   column.
+
+.. note::
+
+   Previous versions of SQLAlchemy did not have built-in support for rendering
+   of IDENTITY, and could use the following compilation hook to replace
+   occurrences of SERIAL with IDENTITY::
+
+       from sqlalchemy.schema import CreateColumn
+       from sqlalchemy.ext.compiler import compiles
+
+
+       @compiles(CreateColumn, "postgresql")
+       def use_identity(element, compiler, **kw):
+           text = compiler.visit_create_column(element, **kw)
+           text = text.replace("SERIAL", "INT GENERATED BY DEFAULT AS IDENTITY")
+           return text
+
+   Using the above, a table such as::
+
+       t = Table(
+           "t", m, Column("id", Integer, primary_key=True), Column("data", String)
+       )
+
+   Will generate on the backing database as:
+
+   .. sourcecode:: sql
+
+       CREATE TABLE t (
+           id INT GENERATED BY DEFAULT AS IDENTITY,
+           data VARCHAR,
+           PRIMARY KEY (id)
+       )
+
+.. _postgresql_ss_cursors:
+
+Server Side Cursors
+-------------------
+
+Server-side cursor support is available for the psycopg2 and asyncpg
+dialects, and may also be available in others.
+
+Server side cursors are enabled on a per-statement basis by using the
+:paramref:`.Connection.execution_options.stream_results` connection execution
+option::
+
+    with engine.connect() as conn:
+        result = conn.execution_options(stream_results=True).execute(
+            text("select * from table")
+        )
+
+Note that some kinds of SQL statements may not be supported with
+server side cursors; generally, only SQL statements that return rows should be
+used with this option.
+
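+For example, a streamed result may be consumed in batches; a hedged sketch
+using :meth:`_engine.Result.partitions` (the batch size of 100 is
+arbitrary)::
+
+    with engine.connect() as conn:
+        result = conn.execution_options(stream_results=True).execute(
+            text("select * from table")
+        )
+        for partition in result.partitions(100):
+            for row in partition:
+                print(row)
+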
+.. deprecated:: 1.4  The dialect-level server_side_cursors flag is deprecated
+   and will be removed in a future release.  Please use the
+   :paramref:`_engine.Connection.execution_options.stream_results` execution
+   option for
+   unbuffered cursor support.
+
+.. seealso::
+
+    :ref:`engine_stream_results`
+
+.. _postgresql_isolation_level:
+
+Transaction Isolation Level
+---------------------------
+
+Most SQLAlchemy dialects support setting of transaction isolation level
+using the :paramref:`_sa.create_engine.isolation_level` parameter
+at the :func:`_sa.create_engine` level, and at the :class:`_engine.Connection`
+level via the :paramref:`.Connection.execution_options.isolation_level`
+parameter.
+
+For PostgreSQL dialects, this feature works either by making use of the
+DBAPI-specific features, such as psycopg2's isolation level flags which will
+embed the isolation level setting inline with the ``"BEGIN"`` statement, or for
+DBAPIs with no direct support by emitting ``SET SESSION CHARACTERISTICS AS
+TRANSACTION ISOLATION LEVEL <level>`` ahead of the ``"BEGIN"`` statement
+emitted by the DBAPI.   For the special AUTOCOMMIT isolation level,
+DBAPI-specific techniques are used, typically an ``.autocommit``
+flag on the DBAPI connection object.
+
+To set isolation level using :func:`_sa.create_engine`::
+
+    engine = create_engine(
+        "postgresql+pg8000://scott:tiger@localhost/test",
+        isolation_level="REPEATABLE READ",
+    )
+
+To set using per-connection execution options::
+
+    with engine.connect() as conn:
+        conn = conn.execution_options(isolation_level="REPEATABLE READ")
+        with conn.begin():
+            ...  # work with transaction
+
+There are also more options for isolation level configurations, such as
+"sub-engine" objects linked to a main :class:`_engine.Engine` which each apply
+different isolation level settings.  See the discussion at
+:ref:`dbapi_autocommit` for background.
+
+Valid values for ``isolation_level`` on most PostgreSQL dialects include:
+
+* ``READ COMMITTED``
+* ``READ UNCOMMITTED``
+* ``REPEATABLE READ``
+* ``SERIALIZABLE``
+* ``AUTOCOMMIT``
+
+.. seealso::
+
+    :ref:`dbapi_autocommit`
+
+    :ref:`postgresql_readonly_deferrable`
+
+    :ref:`psycopg2_isolation_level`
+
+    :ref:`pg8000_isolation_level`
+
+.. _postgresql_readonly_deferrable:
+
+Setting READ ONLY / DEFERRABLE
+------------------------------
+
+Most PostgreSQL dialects support setting the "READ ONLY" and "DEFERRABLE"
+characteristics of the transaction, which is in addition to the isolation level
+setting. These two attributes can be established either in conjunction with or
+independently of the isolation level by passing the ``postgresql_readonly`` and
+``postgresql_deferrable`` flags with
+:meth:`_engine.Connection.execution_options`.  The example below illustrates
+passing the ``"SERIALIZABLE"`` isolation level at the same time as setting
+"READ ONLY" and "DEFERRABLE"::
+
+    with engine.connect() as conn:
+        conn = conn.execution_options(
+            isolation_level="SERIALIZABLE",
+            postgresql_readonly=True,
+            postgresql_deferrable=True,
+        )
+        with conn.begin():
+            ...  # work with transaction
+
+Note that some DBAPIs such as asyncpg only support "readonly" with
+SERIALIZABLE isolation.
+
+.. versionadded:: 1.4 added support for the ``postgresql_readonly``
+   and ``postgresql_deferrable`` execution options.
+
+.. _postgresql_reset_on_return:
+
+Temporary Table / Resource Reset for Connection Pooling
+-------------------------------------------------------
+
+The :class:`.QueuePool` connection pool implementation used
+by the SQLAlchemy :class:`.Engine` object includes
+:ref:`reset on return <pool_reset_on_return>` behavior that will invoke
+the DBAPI ``.rollback()`` method when connections are returned to the pool.
+While this rollback will clear out the immediate state used by the previous
+transaction, it does not cover a wider range of session-level state, including
+temporary tables as well as other server state such as prepared statement
+handles and statement caches.   The PostgreSQL database includes a variety
+of commands which may be used to reset this state, including
+``DISCARD``, ``RESET``, ``DEALLOCATE``, and ``UNLISTEN``.
+
+
+To install
+one or more of these commands as the means of performing reset-on-return,
+the :meth:`.PoolEvents.reset` event hook may be used, as demonstrated
+in the example below. The implementation
+will end transactions in progress as well as discard temporary tables
+using the ``CLOSE``, ``RESET`` and ``DISCARD`` commands; see the PostgreSQL
+documentation for background on what each of these statements do.
+
+The :paramref:`_sa.create_engine.pool_reset_on_return` parameter
+is set to ``None`` so that the custom scheme can replace the default behavior
+completely.   The custom hook implementation calls ``.rollback()`` in any case,
+as it's usually important that the DBAPI's own tracking of commit/rollback
+will remain consistent with the state of the transaction::
+
+
+    from sqlalchemy import create_engine
+    from sqlalchemy import event
+
+    postgresql_engine = create_engine(
+        "postgresql+pyscopg2://scott:tiger@hostname/dbname",
+        # disable default reset-on-return scheme
+        pool_reset_on_return=None,
+    )
+
+
+    @event.listens_for(postgresql_engine, "reset")
+    def _reset_postgresql(dbapi_connection, connection_record, reset_state):
+        if not reset_state.terminate_only:
+            dbapi_connection.execute("CLOSE ALL")
+            dbapi_connection.execute("RESET ALL")
+            dbapi_connection.execute("DISCARD TEMP")
+
+        # so that the DBAPI itself knows that the connection has been
+        # reset
+        dbapi_connection.rollback()
+
+.. versionchanged:: 2.0.0b3  Added additional state arguments to
+   the :meth:`.PoolEvents.reset` event and additionally ensured the event
+   is invoked for all "reset" occurrences, so that it's appropriate
+   as a place for custom "reset" handlers.   Previous schemes which
+   use the :meth:`.PoolEvents.checkin` handler remain usable as well.
+
+.. seealso::
+
+    :ref:`pool_reset_on_return` - in the :ref:`pooling_toplevel` documentation
+
+.. _postgresql_alternate_search_path:
+
+Setting Alternate Search Paths on Connect
+------------------------------------------
+
+The PostgreSQL ``search_path`` variable refers to the list of schema names
+that will be implicitly referenced when a particular table or other
+object is referenced in a SQL statement.  As detailed in the next section
+:ref:`postgresql_schema_reflection`, SQLAlchemy is generally organized around
+the concept of keeping this variable at its default value of ``public``,
+however, in order to have it set to any arbitrary name or names when connections
+are used automatically, the "SET SESSION search_path" command may be invoked
+for all connections in a pool using the following event handler, as discussed
+at :ref:`schema_set_default_connections`::
+
+    from sqlalchemy import event
+    from sqlalchemy import create_engine
+
+    engine = create_engine("postgresql+psycopg2://scott:tiger@host/dbname")
+
+
+    @event.listens_for(engine, "connect", insert=True)
+    def set_search_path(dbapi_connection, connection_record):
+        existing_autocommit = dbapi_connection.autocommit
+        dbapi_connection.autocommit = True
+        cursor = dbapi_connection.cursor()
+        cursor.execute("SET SESSION search_path='%s'" % schema_name)
+        cursor.close()
+        dbapi_connection.autocommit = existing_autocommit
+
+The reason the recipe is complicated by use of the ``.autocommit`` DBAPI
+attribute is so that when the ``SET SESSION search_path`` directive is invoked,
+it is invoked outside of the scope of any transaction and therefore will not
+be reverted when the DBAPI connection has a rollback.
+
+.. seealso::
+
+  :ref:`schema_set_default_connections` - in the :ref:`metadata_toplevel` documentation
+
+.. _postgresql_schema_reflection:
+
+Remote-Schema Table Introspection and PostgreSQL search_path
+------------------------------------------------------------
+
+.. admonition:: Section Best Practices Summarized
+
+    keep the ``search_path`` variable set to its default of ``public``, without
+    any other schema names. Ensure the username used to connect **does not**
+    match remote schemas, or ensure the ``"$user"`` token is **removed** from
+    ``search_path``.  For other schema names, name these explicitly
+    within :class:`_schema.Table` definitions. Alternatively, the
+    ``postgresql_ignore_search_path`` option will cause all reflected
+    :class:`_schema.Table` objects to have a :attr:`_schema.Table.schema`
+    attribute set up.
+
+The PostgreSQL dialect can reflect tables from any schema, as outlined in
+:ref:`metadata_reflection_schemas`.
+
+In all cases, the first thing SQLAlchemy does when reflecting tables is
+to **determine the default schema for the current database connection**.
+It does this using the PostgreSQL ``current_schema()``
+function, illustrated below using a PostgreSQL client session (i.e. using
+the ``psql`` tool):
+
+.. sourcecode:: sql
+
+    test=> select current_schema();
+    current_schema
+    ----------------
+    public
+    (1 row)
+
+Above we see that on a plain install of PostgreSQL, the default schema name
+is the name ``public``.
+
+However, if your database username **matches the name of a schema**, PostgreSQL's
+default is to then **use that name as the default schema**.  Below, we log in
+using the username ``scott``.  When we create a schema named ``scott``, **it
+implicitly changes the default schema**:
+
+.. sourcecode:: sql
+
+    test=> select current_schema();
+    current_schema
+    ----------------
+    public
+    (1 row)
+
+    test=> create schema scott;
+    CREATE SCHEMA
+    test=> select current_schema();
+    current_schema
+    ----------------
+    scott
+    (1 row)
+
+The behavior of ``current_schema()`` is derived from the
+`PostgreSQL search path
+<https://www.postgresql.org/docs/current/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_
+variable ``search_path``, which in modern PostgreSQL versions defaults to this:
+
+.. sourcecode:: sql
+
+    test=> show search_path;
+    search_path
+    -----------------
+    "$user", public
+    (1 row)
+
+Where above, the ``"$user"`` variable will inject the current username as the
+default schema, if one exists.   Otherwise, ``public`` is used.
+
+When a :class:`_schema.Table` object is reflected, if it is present in the
+schema indicated by the ``current_schema()`` function, **the schema name assigned
+to the ".schema" attribute of the Table is the Python "None" value**.  Otherwise, the
+".schema" attribute will be assigned the string name of that schema.
+
+With regards to tables which these :class:`_schema.Table`
+objects refer to via foreign key constraint, a decision must be made as to how
+the ``.schema`` is represented in those remote tables, in the case where that
+remote schema name is also a member of the current ``search_path``.
+
+By default, the PostgreSQL dialect mimics the behavior encouraged by
+PostgreSQL's own ``pg_get_constraintdef()`` builtin procedure.  This function
+returns a sample definition for a particular foreign key constraint,
+omitting the referenced schema name from that definition when the name is
+also in the PostgreSQL schema search path.  The interaction below
+illustrates this behavior:
+
+.. sourcecode:: sql
+
+    test=> CREATE TABLE test_schema.referred(id INTEGER PRIMARY KEY);
+    CREATE TABLE
+    test=> CREATE TABLE referring(
+    test(>         id INTEGER PRIMARY KEY,
+    test(>         referred_id INTEGER REFERENCES test_schema.referred(id));
+    CREATE TABLE
+    test=> SET search_path TO public, test_schema;
+    test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
+    test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
+    test-> ON n.oid = c.relnamespace
+    test-> JOIN pg_catalog.pg_constraint r  ON c.oid = r.conrelid
+    test-> WHERE c.relname='referring' AND r.contype = 'f'
+    test-> ;
+                   pg_get_constraintdef
+    ---------------------------------------------------
+     FOREIGN KEY (referred_id) REFERENCES referred(id)
+    (1 row)
+
+Above, we created a table ``referred`` as a member of the remote schema
+``test_schema``, however when we added ``test_schema`` to the
+PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the
+``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of
+the function.
+
+On the other hand, if we set the search path back to the typical default
+of ``public``:
+
+.. sourcecode:: sql
+
+    test=> SET search_path TO public;
+    SET
+
+The same query against ``pg_get_constraintdef()`` now returns the fully
+schema-qualified name for us:
+
+.. sourcecode:: sql
+
+    test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
+    test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
+    test-> ON n.oid = c.relnamespace
+    test-> JOIN pg_catalog.pg_constraint r  ON c.oid = r.conrelid
+    test-> WHERE c.relname='referring' AND r.contype = 'f';
+                         pg_get_constraintdef
+    ---------------------------------------------------------------
+     FOREIGN KEY (referred_id) REFERENCES test_schema.referred(id)
+    (1 row)
+
+SQLAlchemy will by default use the return value of ``pg_get_constraintdef()``
+in order to determine the remote schema name.  That is, if our ``search_path``
+were set to include ``test_schema``, and we invoked a table
+reflection process as follows::
+
+    >>> from sqlalchemy import Table, MetaData, create_engine, text
+    >>> engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
+    >>> with engine.connect() as conn:
+    ...     conn.execute(text("SET search_path TO test_schema, public"))
+    ...     metadata_obj = MetaData()
+    ...     referring = Table("referring", metadata_obj, autoload_with=conn)
+    <sqlalchemy.engine.result.CursorResult object at 0x101612ed0>
+
+The above process would deliver to the :attr:`_schema.MetaData.tables`
+collection a ``referred`` table named **without** the schema::
+
+    >>> metadata_obj.tables["referred"].schema is None
+    True
+
+To alter the behavior of reflection such that the referred schema is
+maintained regardless of the ``search_path`` setting, use the
+``postgresql_ignore_search_path`` option, which can be specified as a
+dialect-specific argument to both :class:`_schema.Table` as well as
+:meth:`_schema.MetaData.reflect`::
+
+    >>> with engine.connect() as conn:
+    ...     conn.execute(text("SET search_path TO test_schema, public"))
+    ...     metadata_obj = MetaData()
+    ...     referring = Table(
+    ...         "referring",
+    ...         metadata_obj,
+    ...         autoload_with=conn,
+    ...         postgresql_ignore_search_path=True,
+    ...     )
+    <sqlalchemy.engine.result.CursorResult object at 0x1016126d0>
+
+We will now have ``test_schema.referred`` stored as schema-qualified::
+
+    >>> metadata_obj.tables["test_schema.referred"].schema
+    'test_schema'
+
+.. sidebar:: Best Practices for PostgreSQL Schema reflection
+
+    The description of PostgreSQL schema reflection behavior is complex, and
+    is the product of many years of dealing with widely varied use cases and
+    user preferences. But in fact, there's no need to understand any of it if
+    you just stick to the simplest use pattern: leave the ``search_path`` set
+    to its default of ``public`` only, never refer to the name ``public`` as
+    an explicit schema name otherwise, and refer to all other schema names
+    explicitly when building up a :class:`_schema.Table` object.  The options
+    described here are only for those users who can't, or prefer not to, stay
+    within these guidelines.
+
+.. seealso::
+
+    :ref:`reflection_schema_qualified_interaction` - discussion of the issue
+    from a backend-agnostic perspective
+
+    `The Schema Search Path
+    <https://www.postgresql.org/docs/current/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_
+    - on the PostgreSQL website.
+
+INSERT/UPDATE...RETURNING
+-------------------------
+
+The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
+``DELETE..RETURNING`` syntaxes.   ``INSERT..RETURNING`` is used by default
+for single-row INSERT statements in order to fetch newly generated
+primary key identifiers.   To specify an explicit ``RETURNING`` clause,
+use the :meth:`._UpdateBase.returning` method on a per-statement basis::
+
+    # INSERT..RETURNING
+    result = (
+        table.insert().returning(table.c.col1, table.c.col2).values(name="foo")
+    )
+    print(result.fetchall())
+
+    # UPDATE..RETURNING
+    result = (
+        table.update()
+        .returning(table.c.col1, table.c.col2)
+        .where(table.c.name == "foo")
+        .values(name="bar")
+    )
+    print(result.fetchall())
+
+    # DELETE..RETURNING
+    result = (
+        table.delete()
+        .returning(table.c.col1, table.c.col2)
+        .where(table.c.name == "foo")
+    )
+    print(result.fetchall())
+
+.. _postgresql_insert_on_conflict:
+
+INSERT...ON CONFLICT (Upsert)
+------------------------------
+
+Starting with version 9.5, PostgreSQL allows "upserts" (update or insert) of
+rows into a table via the ``ON CONFLICT`` clause of the ``INSERT`` statement. A
+candidate row will only be inserted if that row does not violate any unique
+constraints.  In the case of a unique constraint violation, a secondary action
+can occur which can be either "DO UPDATE", indicating that the data in the
+target row should be updated, or "DO NOTHING", which indicates to silently skip
+this row.
+
+Conflicts are determined using existing unique constraints and indexes.  These
+constraints may be identified either using their name as stated in DDL,
+or they may be inferred by stating the columns and conditions that comprise
+the indexes.
+
+SQLAlchemy provides ``ON CONFLICT`` support via the PostgreSQL-specific
+:func:`_postgresql.insert()` function, which provides
+the generative methods :meth:`_postgresql.Insert.on_conflict_do_update`
+and :meth:`~.postgresql.Insert.on_conflict_do_nothing`:
+
+.. sourcecode:: pycon+sql
+
+    >>> from sqlalchemy.dialects.postgresql import insert
+    >>> insert_stmt = insert(my_table).values(
+    ...     id="some_existing_id", data="inserted value"
+    ... )
+    >>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["id"])
+    >>> print(do_nothing_stmt)
+    {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
+    ON CONFLICT (id) DO NOTHING
+    {stop}
+
+    >>> do_update_stmt = insert_stmt.on_conflict_do_update(
+    ...     constraint="pk_my_table", set_=dict(data="updated value")
+    ... )
+    >>> print(do_update_stmt)
+    {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
+    ON CONFLICT ON CONSTRAINT pk_my_table DO UPDATE SET data = %(param_1)s
+
+.. seealso::
+
+    `INSERT .. ON CONFLICT
+    <https://www.postgresql.org/docs/current/static/sql-insert.html#SQL-ON-CONFLICT>`_
+    - in the PostgreSQL documentation.
+
+Specifying the Target
+^^^^^^^^^^^^^^^^^^^^^
+
+Both methods supply the "target" of the conflict using either a
+named constraint or column inference:
+
+* The :paramref:`_postgresql.Insert.on_conflict_do_update.index_elements` argument
+  specifies a sequence containing string column names, :class:`_schema.Column`
+  objects, and/or SQL expression elements, which would identify a unique
+  index:
+
+  .. sourcecode:: pycon+sql
+
+    >>> do_update_stmt = insert_stmt.on_conflict_do_update(
+    ...     index_elements=["id"], set_=dict(data="updated value")
+    ... )
+    >>> print(do_update_stmt)
+    {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
+    ON CONFLICT (id) DO UPDATE SET data = %(param_1)s
+    {stop}
+
+    >>> do_update_stmt = insert_stmt.on_conflict_do_update(
+    ...     index_elements=[my_table.c.id], set_=dict(data="updated value")
+    ... )
+    >>> print(do_update_stmt)
+    {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
+    ON CONFLICT (id) DO UPDATE SET data = %(param_1)s
+
+* When using :paramref:`_postgresql.Insert.on_conflict_do_update.index_elements` to
+  infer an index, a partial index can be inferred by also specifying the
+  :paramref:`_postgresql.Insert.on_conflict_do_update.index_where` parameter:
+
+  .. sourcecode:: pycon+sql
+
+    >>> stmt = insert(my_table).values(user_email="a@b.com", data="inserted data")
+    >>> stmt = stmt.on_conflict_do_update(
+    ...     index_elements=[my_table.c.user_email],
+    ...     index_where=my_table.c.user_email.like("%@gmail.com"),
+    ...     set_=dict(data=stmt.excluded.data),
+    ... )
+    >>> print(stmt)
+    {printsql}INSERT INTO my_table (data, user_email)
+    VALUES (%(data)s, %(user_email)s) ON CONFLICT (user_email)
+    WHERE user_email LIKE %(user_email_1)s DO UPDATE SET data = excluded.data
+
+* The :paramref:`_postgresql.Insert.on_conflict_do_update.constraint` argument is
+  used to specify an index directly rather than inferring it.  This can be
+  the name of a UNIQUE constraint, a PRIMARY KEY constraint, or an INDEX:
+
+  .. sourcecode:: pycon+sql
+
+    >>> do_update_stmt = insert_stmt.on_conflict_do_update(
+    ...     constraint="my_table_idx_1", set_=dict(data="updated value")
+    ... )
+    >>> print(do_update_stmt)
+    {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
+    ON CONFLICT ON CONSTRAINT my_table_idx_1 DO UPDATE SET data = %(param_1)s
+    {stop}
+
+    >>> do_update_stmt = insert_stmt.on_conflict_do_update(
+    ...     constraint="my_table_pk", set_=dict(data="updated value")
+    ... )
+    >>> print(do_update_stmt)
+    {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
+    ON CONFLICT ON CONSTRAINT my_table_pk DO UPDATE SET data = %(param_1)s
+    {stop}
+
+* The :paramref:`_postgresql.Insert.on_conflict_do_update.constraint` argument may
+  also refer to a SQLAlchemy construct representing a constraint,
+  e.g. :class:`.UniqueConstraint`, :class:`.PrimaryKeyConstraint`,
+  :class:`.Index`, or :class:`.ExcludeConstraint`.   In this use,
+  if the constraint has a name, it is used directly.  Otherwise, if the
+  constraint is unnamed, then inference will be used, where the expressions
+  and optional WHERE clause of the constraint will be spelled out in the
+  construct.  This use is especially convenient
+  to refer to the named or unnamed primary key of a :class:`_schema.Table`
+  using the
+  :attr:`_schema.Table.primary_key` attribute:
+
+  .. sourcecode:: pycon+sql
+
+    >>> do_update_stmt = insert_stmt.on_conflict_do_update(
+    ...     constraint=my_table.primary_key, set_=dict(data="updated value")
+    ... )
+    >>> print(do_update_stmt)
+    {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
+    ON CONFLICT (id) DO UPDATE SET data = %(param_1)s
+
+The SET Clause
+^^^^^^^^^^^^^^^
+
+``ON CONFLICT...DO UPDATE`` is used to perform an update of the already
+existing row, using any combination of new values as well as values
+from the proposed insertion.   These values are specified using the
+:paramref:`_postgresql.Insert.on_conflict_do_update.set_` parameter.  This
+parameter accepts a dictionary which consists of direct values
+for UPDATE:
+
+.. sourcecode:: pycon+sql
+
+    >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
+    >>> do_update_stmt = stmt.on_conflict_do_update(
+    ...     index_elements=["id"], set_=dict(data="updated value")
+    ... )
+    >>> print(do_update_stmt)
+    {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
+    ON CONFLICT (id) DO UPDATE SET data = %(param_1)s
+
+.. warning::
+
+    The :meth:`_expression.Insert.on_conflict_do_update`
+    method does **not** take into
+    account Python-side default UPDATE values or generation functions, e.g.
+    those specified using :paramref:`_schema.Column.onupdate`.
+    These values will not be exercised for an ON CONFLICT style of UPDATE,
+    unless they are manually specified in the
+    :paramref:`_postgresql.Insert.on_conflict_do_update.set_` dictionary.
+
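+A hedged workaround sketch: supply the would-be ``onupdate`` value
+explicitly in the ``set_`` dictionary (the ``updated_at`` column and the
+``datetime`` value here are assumptions for illustration)::
+
+    import datetime
+
+    stmt = insert(my_table).values(id="some_id", data="inserted value")
+    do_update_stmt = stmt.on_conflict_do_update(
+        index_elements=["id"],
+        set_=dict(
+            data="updated value",
+            updated_at=datetime.datetime.now(datetime.timezone.utc),
+        ),
+    )
+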
+Updating using the Excluded INSERT Values
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In order to refer to the proposed insertion row, the special alias
+:attr:`~.postgresql.Insert.excluded` is available as an attribute on
+the :class:`_postgresql.Insert` object; this object is a
+:class:`_expression.ColumnCollection` alias that contains all columns of
+the target table:
+
+.. sourcecode:: pycon+sql
+
+    >>> stmt = insert(my_table).values(
+    ...     id="some_id", data="inserted value", author="jlh"
+    ... )
+    >>> do_update_stmt = stmt.on_conflict_do_update(
+    ...     index_elements=["id"],
+    ...     set_=dict(data="updated value", author=stmt.excluded.author),
+    ... )
+    >>> print(do_update_stmt)
+    {printsql}INSERT INTO my_table (id, data, author)
+    VALUES (%(id)s, %(data)s, %(author)s)
+    ON CONFLICT (id) DO UPDATE SET data = %(param_1)s, author = excluded.author
+
+Additional WHERE Criteria
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :meth:`_expression.Insert.on_conflict_do_update` method also accepts
+a WHERE clause using the :paramref:`_postgresql.Insert.on_conflict_do_update.where`
+parameter, which will limit those rows which receive an UPDATE:
+
+.. sourcecode:: pycon+sql
+
+    >>> stmt = insert(my_table).values(
+    ...     id="some_id", data="inserted value", author="jlh"
+    ... )
+    >>> on_update_stmt = stmt.on_conflict_do_update(
+    ...     index_elements=["id"],
+    ...     set_=dict(data="updated value", author=stmt.excluded.author),
+    ...     where=(my_table.c.status == 2),
+    ... )
+    >>> print(on_update_stmt)
+    {printsql}INSERT INTO my_table (id, data, author)
+    VALUES (%(id)s, %(data)s, %(author)s)
+    ON CONFLICT (id) DO UPDATE SET data = %(param_1)s, author = excluded.author
+    WHERE my_table.status = %(status_1)s
+
+Skipping Rows with DO NOTHING
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``ON CONFLICT`` may be used to skip inserting a row entirely
+if any conflict with a unique or exclusion constraint occurs; below
+this is illustrated using the
+:meth:`~.postgresql.Insert.on_conflict_do_nothing` method:
+
+.. sourcecode:: pycon+sql
+
+    >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
+    >>> stmt = stmt.on_conflict_do_nothing(index_elements=["id"])
+    >>> print(stmt)
+    {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
+    ON CONFLICT (id) DO NOTHING
+
+If ``DO NOTHING`` is used without specifying any columns or constraint,
+it has the effect of skipping the INSERT for any unique or exclusion
+constraint violation which occurs:
+
+.. sourcecode:: pycon+sql
+
+    >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
+    >>> stmt = stmt.on_conflict_do_nothing()
+    >>> print(stmt)
+    {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
+    ON CONFLICT DO NOTHING
+
+.. _postgresql_match:
+
+Full Text Search
+----------------
+
+PostgreSQL's full text search system is available through the use of the
+:data:`.func` namespace, combined with the use of custom operators
+via the :meth:`.Operators.bool_op` method.    For simple cases with some
+degree of cross-backend compatibility, the :meth:`.Operators.match` operator
+may also be used.
+
+.. _postgresql_simple_match:
+
+Simple plain text matching with ``match()``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :meth:`.Operators.match` operator provides for cross-compatible simple
+text matching.   For the PostgreSQL backend, it's hardcoded to generate
+an expression using the ``@@`` operator in conjunction with the
+``plainto_tsquery()`` PostgreSQL function.
+
+On the PostgreSQL dialect, an expression like the following::
+
+    select(sometable.c.text.match("search string"))
+
+would emit to the database:
+
+.. sourcecode:: sql
+
+    SELECT text @@ plainto_tsquery('search string') FROM table
+
+Above, passing a plain string to :meth:`.Operators.match` will automatically
+make use of ``plainto_tsquery()`` to specify the type of tsquery.  This
+establishes basic database cross-compatibility for :meth:`.Operators.match`
+with other backends.
+
+.. versionchanged:: 2.0 The default tsquery generation function used by the
+   PostgreSQL dialect with :meth:`.Operators.match` is ``plainto_tsquery()``.
+
+   To render exactly what was rendered in 1.4, use the following form::
+
+        from sqlalchemy import func
+
+        select(sometable.c.text.bool_op("@@")(func.to_tsquery("search string")))
+
+   Which would emit:
+
+   .. sourcecode:: sql
+
+        SELECT text @@ to_tsquery('search string') FROM table
+
+Using PostgreSQL full text functions and operators directly
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Text search operations beyond the simple use of :meth:`.Operators.match`
+may make use of the :data:`.func` namespace to generate PostgreSQL full-text
+functions, in combination with :meth:`.Operators.bool_op` to generate
+any boolean operator.
+
+For example, the query::
+
+    select(func.to_tsquery("cat").bool_op("@>")(func.to_tsquery("cat & rat")))
+
+would generate:
+
+.. sourcecode:: sql
+
+    SELECT to_tsquery('cat') @> to_tsquery('cat & rat')
+
+
+The :class:`_postgresql.TSVECTOR` type can provide for explicit CAST::
+
+    from sqlalchemy.dialects.postgresql import TSVECTOR
+    from sqlalchemy import select, cast
+
+    select(cast("some text", TSVECTOR))
+
+produces a statement equivalent to:
+
+.. sourcecode:: sql
+
+    SELECT CAST('some text' AS TSVECTOR) AS anon_1
+
+The ``func`` namespace is augmented by the PostgreSQL dialect to set up
+correct argument and return types for most full text search functions.
+These functions are used automatically by the :attr:`_sql.func` namespace
+assuming the ``sqlalchemy.dialects.postgresql`` package has been imported,
+or :func:`_sa.create_engine` has been invoked using a ``postgresql``
+dialect.  These functions are documented at:
+
+* :class:`_postgresql.to_tsvector`
+* :class:`_postgresql.to_tsquery`
+* :class:`_postgresql.plainto_tsquery`
+* :class:`_postgresql.phraseto_tsquery`
+* :class:`_postgresql.websearch_to_tsquery`
+* :class:`_postgresql.ts_headline`
+
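+As one example from this list, a hedged sketch using
+:class:`_postgresql.ts_headline` to produce highlighted excerpts (the
+``mytable.c.title`` column is an assumption)::
+
+    from sqlalchemy import func, select
+
+    stmt = select(
+        func.ts_headline(
+            "english",
+            mytable.c.title,
+            func.plainto_tsquery("english", "somestring"),
+        )
+    )
+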
+Specifying the "regconfig" with ``match()`` or custom operators
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+PostgreSQL's ``plainto_tsquery()`` function accepts an optional
+"regconfig" argument that is used to instruct PostgreSQL to use a
+particular pre-computed GIN or GiST index in order to perform the search.
+When using :meth:`.Operators.match`, this additional parameter may be
+specified using the ``postgresql_regconfig`` parameter, such as::
+
+    select(mytable.c.id).where(
+        mytable.c.title.match("somestring", postgresql_regconfig="english")
+    )
+
+Which would emit:
+
+.. sourcecode:: sql
+
+    SELECT mytable.id FROM mytable
+    WHERE mytable.title @@ plainto_tsquery('english', 'somestring')
+
+When using other PostgreSQL search functions with :data:`.func`, the
+"regconfig" parameter may be passed directly as the initial argument::
+
+    select(mytable.c.id).where(
+        func.to_tsvector("english", mytable.c.title).bool_op("@@")(
+            func.to_tsquery("english", "somestring")
+        )
+    )
+
+produces a statement equivalent to:
+
+.. sourcecode:: sql
+
+    SELECT mytable.id FROM mytable
+    WHERE to_tsvector('english', mytable.title) @@
+        to_tsquery('english', 'somestring')
+
+It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from
+PostgreSQL to ensure that you are generating queries with SQLAlchemy that
+take full advantage of any indexes you may have created for full text search.
+
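+A hedged sketch of checking a plan this way (the table, column, and query
+text are assumptions)::
+
+    from sqlalchemy import text
+
+    with engine.connect() as conn:
+        rows = conn.execute(
+            text(
+                "EXPLAIN ANALYZE SELECT * FROM mytable "
+                "WHERE to_tsvector('english', title) "
+                "@@ plainto_tsquery('english', 'somestring')"
+            )
+        )
+        for row in rows:
+            print(row[0])
+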
+.. seealso::
+
+    `Full Text Search <https://www.postgresql.org/docs/current/textsearch-controls.html>`_ - in the PostgreSQL documentation
+
+
+FROM ONLY ...
+-------------
+
+The dialect supports PostgreSQL's ONLY keyword for targeting only a particular
+table in an inheritance hierarchy. This can be used to produce the
+``SELECT ... FROM ONLY``, ``UPDATE ONLY ...``, and ``DELETE FROM ONLY ...``
+syntaxes. It uses SQLAlchemy's hints mechanism::
+
+    # SELECT ... FROM ONLY ...
+    stmt = table.select().with_hint(table, "ONLY", "postgresql")
+    with engine.connect() as conn:
+        print(conn.execute(stmt).fetchall())
+
+    # UPDATE ONLY ...
+    table.update().values(foo="bar").with_hint(
+        "ONLY", dialect_name="postgresql"
+    )
+
+    # DELETE FROM ONLY ...
+    table.delete().with_hint("ONLY", dialect_name="postgresql")
+
+.. _postgresql_indexes:
+
+PostgreSQL-Specific Index Options
+---------------------------------
+
+Several extensions to the :class:`.Index` construct are available, specific
+to the PostgreSQL dialect.
+
+Covering Indexes
+^^^^^^^^^^^^^^^^
+
+The ``postgresql_include`` option renders INCLUDE(colname) for the given
+string names::
+
+    Index("my_index", table.c.x, postgresql_include=["y"])
+
+would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``.
+
+Note that this feature requires PostgreSQL 11 or later.
+
+.. versionadded:: 1.4
+
+.. _postgresql_partial_indexes:
+
+Partial Indexes
+^^^^^^^^^^^^^^^
+
+Partial indexes add a criterion to the index definition so that the index is
+applied to a subset of rows.   These can be specified on :class:`.Index`
+using the ``postgresql_where`` keyword argument::
+
+  Index("my_index", my_table.c.id, postgresql_where=my_table.c.value > 10)
+
+.. _postgresql_operator_classes:
+
+Operator Classes
+^^^^^^^^^^^^^^^^
+
+PostgreSQL allows the specification of an *operator class* for each column of
+an index (see
+https://www.postgresql.org/docs/current/interactive/indexes-opclass.html).
+The :class:`.Index` construct allows these to be specified via the
+``postgresql_ops`` keyword argument::
+
+    Index(
+        "my_index",
+        my_table.c.id,
+        my_table.c.data,
+        postgresql_ops={"data": "text_pattern_ops", "id": "int4_ops"},
+    )
+
+Note that the keys in the ``postgresql_ops`` dictionaries are the
+"key" name of the :class:`_schema.Column`, i.e. the name used to access it from
+the ``.c`` collection of :class:`_schema.Table`, which can be configured to be
+different than the actual name of the column as expressed in the database.
+
+If ``postgresql_ops`` is to be used against a complex SQL expression such
+as a function call, then the expression must be given a label that is
+identified in the dictionary by name, e.g.::
+
+    Index(
+        "my_index",
+        my_table.c.id,
+        func.lower(my_table.c.data).label("data_lower"),
+        postgresql_ops={"data_lower": "text_pattern_ops", "id": "int4_ops"},
+    )
+
+Operator classes are also supported by the
+:class:`_postgresql.ExcludeConstraint` construct using the
+:paramref:`_postgresql.ExcludeConstraint.ops` parameter. See that parameter for
+details.
+
+.. versionadded:: 1.3.21 added support for operator classes with
+   :class:`_postgresql.ExcludeConstraint`.
+
+
+Index Types
+^^^^^^^^^^^
+
+PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well
+as the ability for users to create their own (see
+https://www.postgresql.org/docs/current/static/indexes-types.html). These can be
+specified on :class:`.Index` using the ``postgresql_using`` keyword argument::
+
+    Index("my_index", my_table.c.data, postgresql_using="gin")
+
+The value passed to the keyword argument will be simply passed through to the
+underlying CREATE INDEX command, so it *must* be a valid index type for your
+version of PostgreSQL.
+
+.. _postgresql_index_storage:
+
+Index Storage Parameters
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+PostgreSQL allows storage parameters to be set on indexes. The storage
+parameters available depend on the index method used by the index. Storage
+parameters can be specified on :class:`.Index` using the ``postgresql_with``
+keyword argument::
+
+    Index("my_index", my_table.c.data, postgresql_with={"fillfactor": 50})
+
+PostgreSQL allows defining the tablespace in which to create the index.
+The tablespace can be specified on :class:`.Index` using the
+``postgresql_tablespace`` keyword argument::
+
+    Index("my_index", my_table.c.data, postgresql_tablespace="my_tablespace")
+
+Note that the same option is available on :class:`_schema.Table` as well.
+
+.. _postgresql_index_concurrently:
+
+Indexes with CONCURRENTLY
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The PostgreSQL index option CONCURRENTLY is supported by passing the
+flag ``postgresql_concurrently`` to the :class:`.Index` construct::
+
+    tbl = Table("testtbl", m, Column("data", Integer))
+
+    idx1 = Index("test_idx1", tbl.c.data, postgresql_concurrently=True)
+
+The above index construct will render DDL for CREATE INDEX, assuming
+PostgreSQL 8.2 or higher is detected or for a connection-less dialect, as:
+
+.. sourcecode:: sql
+
+    CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)
+
+For DROP INDEX, assuming PostgreSQL 9.2 or higher is detected or for
+a connection-less dialect, it will emit:
+
+.. sourcecode:: sql
+
+    DROP INDEX CONCURRENTLY test_idx1
+
+When using CONCURRENTLY, the PostgreSQL database requires that the statement
+be invoked outside of a transaction block.   The Python DBAPI enforces that
+even for a single statement, a transaction is present, so to use this
+construct, the DBAPI's "autocommit" mode must be used::
+
+    metadata = MetaData()
+    table = Table("foo", metadata, Column("id", String))
+    index = Index("foo_idx", table.c.id, postgresql_concurrently=True)
+
+    with engine.connect() as conn:
+        with conn.execution_options(isolation_level="AUTOCOMMIT"):
+            table.create(conn)
+
+.. seealso::
+
+    :ref:`postgresql_isolation_level`
+
+.. _postgresql_index_reflection:
+
+PostgreSQL Index Reflection
+---------------------------
+
+The PostgreSQL database creates a UNIQUE INDEX implicitly whenever the
+UNIQUE CONSTRAINT construct is used.  When inspecting a table using
+:class:`_reflection.Inspector`, the :meth:`_reflection.Inspector.get_indexes`
+and the :meth:`_reflection.Inspector.get_unique_constraints` methods will
+report on these two constructs distinctly; in the case of the index, the key
+``duplicates_constraint`` will be present in the index entry if it is
+detected as mirroring a constraint.  When performing reflection using
+``Table(..., autoload_with=engine)``, the UNIQUE INDEX is **not** returned
+in :attr:`_schema.Table.indexes` when it is detected as mirroring a
+:class:`.UniqueConstraint` in the :attr:`_schema.Table.constraints`
+collection.
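+
+For example, a minimal sketch that inspects this key, assuming a
+hypothetical table named ``some_table`` containing a UNIQUE CONSTRAINT::
+
+    from sqlalchemy import create_engine, inspect
+
+    engine = create_engine("postgresql+psycopg2://localhost/test")
+    insp = inspect(engine)
+
+    for idx in insp.get_indexes("some_table"):
+        if "duplicates_constraint" in idx:
+            print(f"{idx['name']} mirrors {idx['duplicates_constraint']}")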
+
+Special Reflection Options
+--------------------------
+
+The :class:`_reflection.Inspector`
+used for the PostgreSQL backend is an instance
+of :class:`.PGInspector`, which offers additional methods::
+
+    from sqlalchemy import create_engine, inspect
+
+    engine = create_engine("postgresql+psycopg2://localhost/test")
+    insp = inspect(engine)  # will be a PGInspector
+
+    print(insp.get_enums())
+
+.. autoclass:: PGInspector
+    :members:
+
+.. _postgresql_table_options:
+
+PostgreSQL Table Options
+------------------------
+
+Several options for CREATE TABLE are supported directly by the PostgreSQL
+dialect in conjunction with the :class:`_schema.Table` construct:
+
+* ``INHERITS``::
+
+    Table("some_table", metadata, ..., postgresql_inherits="some_supertable")
+
+    Table("some_table", metadata, ..., postgresql_inherits=("t1", "t2", ...))
+
+* ``ON COMMIT``::
+
+    Table("some_table", metadata, ..., postgresql_on_commit="PRESERVE ROWS")
+
+*
+  ``PARTITION BY``::
+
+    Table(
+        "some_table",
+        metadata,
+        ...,
+        postgresql_partition_by="LIST (part_column)",
+    )
+
+  .. versionadded:: 1.2.6
+
+*
+  ``TABLESPACE``::
+
+    Table("some_table", metadata, ..., postgresql_tablespace="some_tablespace")
+
+  The above option is also available on the :class:`.Index` construct.
+
+*
+  ``USING``::
+
+    Table("some_table", metadata, ..., postgresql_using="heap")
+
+  .. versionadded:: 2.0.26
+
+* ``WITH OIDS``::
+
+    Table("some_table", metadata, ..., postgresql_with_oids=True)
+
+* ``WITHOUT OIDS``::
+
+    Table("some_table", metadata, ..., postgresql_with_oids=False)
+
+.. seealso::
+
+    `PostgreSQL CREATE TABLE options
+    <https://www.postgresql.org/docs/current/static/sql-createtable.html>`_ -
+    in the PostgreSQL documentation.
+
+.. _postgresql_constraint_options:
+
+PostgreSQL Constraint Options
+-----------------------------
+
+The following option(s) are supported by the PostgreSQL dialect in conjunction
+with selected constraint constructs:
+
+* ``NOT VALID``:  This option applies towards CHECK and FOREIGN KEY constraints
+  when the constraint is being added to an existing table via ALTER TABLE,
+  and has the effect that existing rows are not scanned during the ALTER
+  operation against the constraint being added.
+
+  When using a SQL migration tool such as `Alembic <https://alembic.sqlalchemy.org>`_
+  that renders ALTER TABLE constructs, the ``postgresql_not_valid`` argument
+  may be specified as an additional keyword argument within the operation
+  that creates the constraint, as in the following Alembic example::
+
+        def upgrade():
+            op.create_foreign_key(
+                "fk_user_address",
+                "address",
+                "user",
+                ["user_id"],
+                ["id"],
+                postgresql_not_valid=True,
+            )
+
+  The keyword is ultimately accepted directly by the
+  :class:`_schema.CheckConstraint`, :class:`_schema.ForeignKeyConstraint`
+  and :class:`_schema.ForeignKey` constructs; when using a tool like
+  Alembic, dialect-specific keyword arguments are passed through to
+  these constructs from the migration operation directives::
+
+       CheckConstraint("some_field IS NOT NULL", postgresql_not_valid=True)
+
+       ForeignKeyConstraint(
+           ["some_id"], ["some_table.some_id"], postgresql_not_valid=True
+       )
+
+  .. versionadded:: 1.4.32
+
+  .. seealso::
+
+      `PostgreSQL ALTER TABLE options
+      <https://www.postgresql.org/docs/current/static/sql-altertable.html>`_ -
+      in the PostgreSQL documentation.
+
+.. _postgresql_table_valued_overview:
+
+Table values, Table and Column valued functions, Row and Tuple objects
+-----------------------------------------------------------------------
+
+PostgreSQL makes great use of modern SQL forms such as table-valued functions,
+tables and rows as values.   These constructs are commonly used as part
+of PostgreSQL's support for complex datatypes such as JSON, ARRAY, and other
+datatypes.  SQLAlchemy's SQL expression language has native support for
+most table-valued and row-valued forms.
+
+.. _postgresql_table_valued:
+
+Table-Valued Functions
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Many PostgreSQL built-in functions are intended to be used in the FROM clause
+of a SELECT statement, and are capable of returning table rows or sets of table
+rows. A large portion of PostgreSQL's JSON functions, such as
+``json_array_elements()``, ``json_object_keys()``, ``json_each_text()``,
+``json_each()``, ``json_to_record()``, and ``json_populate_recordset()``, use such
+forms. These classes of SQL function calling forms in SQLAlchemy are available
+using the :meth:`_functions.FunctionElement.table_valued` method in conjunction
+with :class:`_functions.Function` objects generated from the :data:`_sql.func`
+namespace.
+
+Examples from PostgreSQL's reference documentation follow below:
+
+* ``json_each()``:
+
+  .. sourcecode:: pycon+sql
+
+    >>> from sqlalchemy import select, func
+    >>> stmt = select(
+    ...     func.json_each('{"a":"foo", "b":"bar"}').table_valued("key", "value")
+    ... )
+    >>> print(stmt)
+    {printsql}SELECT anon_1.key, anon_1.value
+    FROM json_each(:json_each_1) AS anon_1
+
+* ``json_populate_record()``:
+
+  .. sourcecode:: pycon+sql
+
+    >>> from sqlalchemy import select, func, literal_column
+    >>> stmt = select(
+    ...     func.json_populate_record(
+    ...         literal_column("null::myrowtype"), '{"a":1,"b":2}'
+    ...     ).table_valued("a", "b", name="x")
+    ... )
+    >>> print(stmt)
+    {printsql}SELECT x.a, x.b
+    FROM json_populate_record(null::myrowtype, :json_populate_record_1) AS x
+
+* ``json_to_record()`` - this form uses a PostgreSQL-specific form of derived
+  columns in the alias, where we may make use of :func:`_sql.column` elements
+  with types to produce them.  The :meth:`_functions.FunctionElement.table_valued`
+  method produces a :class:`_sql.TableValuedAlias` construct, and the
+  :meth:`_sql.TableValuedAlias.render_derived` method sets up the derived
+  columns specification:
+
+  .. sourcecode:: pycon+sql
+
+    >>> from sqlalchemy import select, func, column, Integer, Text
+    >>> stmt = select(
+    ...     func.json_to_record('{"a":1,"b":[1,2,3],"c":"bar"}')
+    ...     .table_valued(
+    ...         column("a", Integer),
+    ...         column("b", Text),
+    ...         column("d", Text),
+    ...     )
+    ...     .render_derived(name="x", with_types=True)
+    ... )
+    >>> print(stmt)
+    {printsql}SELECT x.a, x.b, x.d
+    FROM json_to_record(:json_to_record_1) AS x(a INTEGER, b TEXT, d TEXT)
+
+* ``WITH ORDINALITY`` - part of the SQL standard, ``WITH ORDINALITY`` adds an
+  ordinal counter to the output of a function and is accepted by a limited set
+  of PostgreSQL functions including ``unnest()`` and ``generate_series()``. The
+  :meth:`_functions.FunctionElement.table_valued` method accepts a keyword
+  parameter ``with_ordinality`` for this purpose, which accepts the string name
+  that will be applied to the "ordinality" column:
+
+  .. sourcecode:: pycon+sql
+
+    >>> from sqlalchemy import select, func
+    >>> stmt = select(
+    ...     func.generate_series(4, 1, -1)
+    ...     .table_valued("value", with_ordinality="ordinality")
+    ...     .render_derived()
+    ... )
+    >>> print(stmt)
+    {printsql}SELECT anon_1.value, anon_1.ordinality
+    FROM generate_series(:generate_series_1, :generate_series_2, :generate_series_3)
+    WITH ORDINALITY AS anon_1(value, ordinality)
+
+.. versionadded:: 1.4.0b2
+
+.. seealso::
+
+    :ref:`tutorial_functions_table_valued` - in the :ref:`unified_tutorial`
+
+.. _postgresql_column_valued:
+
+Column Valued Functions
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Similar to the table valued function, a column valued function is present
+in the FROM clause, but delivers itself to the columns clause as a single
+scalar value.  PostgreSQL functions such as ``json_array_elements()``,
+``unnest()`` and ``generate_series()`` may use this form.  Column valued
+functions are available using the
+:meth:`_functions.FunctionElement.column_valued` method of
+:class:`_functions.FunctionElement`:
+
+* ``json_array_elements()``:
+
+  .. sourcecode:: pycon+sql
+
+    >>> from sqlalchemy import select, func
+    >>> stmt = select(
+    ...     func.json_array_elements('["one", "two"]').column_valued("x")
+    ... )
+    >>> print(stmt)
+    {printsql}SELECT x
+    FROM json_array_elements(:json_array_elements_1) AS x
+
+* ``unnest()`` - in order to generate a PostgreSQL ARRAY literal, the
+  :func:`_postgresql.array` construct may be used:
+
+  .. sourcecode:: pycon+sql
+
+    >>> from sqlalchemy.dialects.postgresql import array
+    >>> from sqlalchemy import select, func
+    >>> stmt = select(func.unnest(array([1, 2])).column_valued())
+    >>> print(stmt)
+    {printsql}SELECT anon_1
+    FROM unnest(ARRAY[%(param_1)s, %(param_2)s]) AS anon_1
+
+  The function can of course be used against an existing table-bound column
+  that's of type :class:`_types.ARRAY`:
+
+  .. sourcecode:: pycon+sql
+
+    >>> from sqlalchemy import table, column, ARRAY, Integer
+    >>> from sqlalchemy import select, func
+    >>> t = table("t", column("value", ARRAY(Integer)))
+    >>> stmt = select(func.unnest(t.c.value).column_valued("unnested_value"))
+    >>> print(stmt)
+    {printsql}SELECT unnested_value
+    FROM unnest(t.value) AS unnested_value
+
+.. seealso::
+
+    :ref:`tutorial_functions_column_valued` - in the :ref:`unified_tutorial`
+
+
+Row Types
+^^^^^^^^^
+
+There is no dedicated construct for rendering a ``ROW``; it may be
+approximated using ``func.ROW`` with the :attr:`_sa.func` namespace, or by
+using the :func:`_sql.tuple_` construct:
+
+.. sourcecode:: pycon+sql
+
+    >>> from sqlalchemy import table, column, func, tuple_
+    >>> t = table("t", column("id"), column("fk"))
+    >>> stmt = (
+    ...     t.select()
+    ...     .where(tuple_(t.c.id, t.c.fk) > (1, 2))
+    ...     .where(func.ROW(t.c.id, t.c.fk) < func.ROW(3, 7))
+    ... )
+    >>> print(stmt)
+    {printsql}SELECT t.id, t.fk
+    FROM t
+    WHERE (t.id, t.fk) > (:param_1, :param_2) AND ROW(t.id, t.fk) < ROW(:ROW_1, :ROW_2)
+
+.. seealso::
+
+    `PostgreSQL Row Constructors
+    <https://www.postgresql.org/docs/current/sql-expressions.html#SQL-SYNTAX-ROW-CONSTRUCTORS>`_
+
+    `PostgreSQL Row Constructor Comparison
+    <https://www.postgresql.org/docs/current/functions-comparisons.html#ROW-WISE-COMPARISON>`_
+
+Table Types passed to Functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+PostgreSQL supports passing a table as an argument to a function, which is
+known as a "record" type. SQLAlchemy :class:`_sql.FromClause` objects
+such as :class:`_schema.Table` support this special form using the
+:meth:`_sql.FromClause.table_valued` method, which is comparable to the
+:meth:`_functions.FunctionElement.table_valued` method except that the collection
+of columns is already established by that of the :class:`_sql.FromClause`
+itself:
+
+.. sourcecode:: pycon+sql
+
+    >>> from sqlalchemy import table, column, func, select
+    >>> a = table("a", column("id"), column("x"), column("y"))
+    >>> stmt = select(func.row_to_json(a.table_valued()))
+    >>> print(stmt)
+    {printsql}SELECT row_to_json(a) AS row_to_json_1
+    FROM a
+
+.. versionadded:: 1.4.0b2
+
+
+
+"""  # noqa: E501
+
+from __future__ import annotations
+
+from collections import defaultdict
+from functools import lru_cache
+import re
+from typing import Any
+from typing import cast
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from . import arraylib as _array
+from . import json as _json
+from . import pg_catalog
+from . import ranges as _ranges
+from .ext import _regconfig_fn
+from .ext import aggregate_order_by
+from .hstore import HSTORE
+from .named_types import CreateDomainType as CreateDomainType  # noqa: F401
+from .named_types import CreateEnumType as CreateEnumType  # noqa: F401
+from .named_types import DOMAIN as DOMAIN  # noqa: F401
+from .named_types import DropDomainType as DropDomainType  # noqa: F401
+from .named_types import DropEnumType as DropEnumType  # noqa: F401
+from .named_types import ENUM as ENUM  # noqa: F401
+from .named_types import NamedType as NamedType  # noqa: F401
+from .types import _DECIMAL_TYPES  # noqa: F401
+from .types import _FLOAT_TYPES  # noqa: F401
+from .types import _INT_TYPES  # noqa: F401
+from .types import BIT as BIT
+from .types import BYTEA as BYTEA
+from .types import CIDR as CIDR
+from .types import CITEXT as CITEXT
+from .types import INET as INET
+from .types import INTERVAL as INTERVAL
+from .types import MACADDR as MACADDR
+from .types import MACADDR8 as MACADDR8
+from .types import MONEY as MONEY
+from .types import OID as OID
+from .types import PGBit as PGBit  # noqa: F401
+from .types import PGCidr as PGCidr  # noqa: F401
+from .types import PGInet as PGInet  # noqa: F401
+from .types import PGInterval as PGInterval  # noqa: F401
+from .types import PGMacAddr as PGMacAddr  # noqa: F401
+from .types import PGMacAddr8 as PGMacAddr8  # noqa: F401
+from .types import PGUuid as PGUuid
+from .types import REGCLASS as REGCLASS
+from .types import REGCONFIG as REGCONFIG  # noqa: F401
+from .types import TIME as TIME
+from .types import TIMESTAMP as TIMESTAMP
+from .types import TSVECTOR as TSVECTOR
+from ... import exc
+from ... import schema
+from ... import select
+from ... import sql
+from ... import util
+from ...engine import characteristics
+from ...engine import default
+from ...engine import interfaces
+from ...engine import ObjectKind
+from ...engine import ObjectScope
+from ...engine import reflection
+from ...engine import URL
+from ...engine.reflection import ReflectionDefaults
+from ...sql import bindparam
+from ...sql import coercions
+from ...sql import compiler
+from ...sql import elements
+from ...sql import expression
+from ...sql import roles
+from ...sql import sqltypes
+from ...sql import util as sql_util
+from ...sql.compiler import InsertmanyvaluesSentinelOpts
+from ...sql.visitors import InternalTraversal
+from ...types import BIGINT
+from ...types import BOOLEAN
+from ...types import CHAR
+from ...types import DATE
+from ...types import DOUBLE_PRECISION
+from ...types import FLOAT
+from ...types import INTEGER
+from ...types import NUMERIC
+from ...types import REAL
+from ...types import SMALLINT
+from ...types import TEXT
+from ...types import UUID as UUID
+from ...types import VARCHAR
+from ...util.typing import TypedDict
+
+IDX_USING = re.compile(r"^(?:btree|hash|gist|gin|[\w_]+)$", re.I)
+
+RESERVED_WORDS = {
+    "all",
+    "analyse",
+    "analyze",
+    "and",
+    "any",
+    "array",
+    "as",
+    "asc",
+    "asymmetric",
+    "both",
+    "case",
+    "cast",
+    "check",
+    "collate",
+    "column",
+    "constraint",
+    "create",
+    "current_catalog",
+    "current_date",
+    "current_role",
+    "current_time",
+    "current_timestamp",
+    "current_user",
+    "default",
+    "deferrable",
+    "desc",
+    "distinct",
+    "do",
+    "else",
+    "end",
+    "except",
+    "false",
+    "fetch",
+    "for",
+    "foreign",
+    "from",
+    "grant",
+    "group",
+    "having",
+    "in",
+    "initially",
+    "intersect",
+    "into",
+    "leading",
+    "limit",
+    "localtime",
+    "localtimestamp",
+    "new",
+    "not",
+    "null",
+    "of",
+    "off",
+    "offset",
+    "old",
+    "on",
+    "only",
+    "or",
+    "order",
+    "placing",
+    "primary",
+    "references",
+    "returning",
+    "select",
+    "session_user",
+    "some",
+    "symmetric",
+    "table",
+    "then",
+    "to",
+    "trailing",
+    "true",
+    "union",
+    "unique",
+    "user",
+    "using",
+    "variadic",
+    "when",
+    "where",
+    "window",
+    "with",
+    "authorization",
+    "between",
+    "binary",
+    "cross",
+    "current_schema",
+    "freeze",
+    "full",
+    "ilike",
+    "inner",
+    "is",
+    "isnull",
+    "join",
+    "left",
+    "like",
+    "natural",
+    "notnull",
+    "outer",
+    "over",
+    "overlaps",
+    "right",
+    "similar",
+    "verbose",
+}
+
+colspecs = {
+    sqltypes.ARRAY: _array.ARRAY,
+    sqltypes.Interval: INTERVAL,
+    sqltypes.Enum: ENUM,
+    sqltypes.JSON.JSONPathType: _json.JSONPATH,
+    sqltypes.JSON: _json.JSON,
+    sqltypes.Uuid: PGUuid,
+}
+
+
+ischema_names = {
+    "_array": _array.ARRAY,
+    "hstore": HSTORE,
+    "json": _json.JSON,
+    "jsonb": _json.JSONB,
+    "int4range": _ranges.INT4RANGE,
+    "int8range": _ranges.INT8RANGE,
+    "numrange": _ranges.NUMRANGE,
+    "daterange": _ranges.DATERANGE,
+    "tsrange": _ranges.TSRANGE,
+    "tstzrange": _ranges.TSTZRANGE,
+    "int4multirange": _ranges.INT4MULTIRANGE,
+    "int8multirange": _ranges.INT8MULTIRANGE,
+    "nummultirange": _ranges.NUMMULTIRANGE,
+    "datemultirange": _ranges.DATEMULTIRANGE,
+    "tsmultirange": _ranges.TSMULTIRANGE,
+    "tstzmultirange": _ranges.TSTZMULTIRANGE,
+    "integer": INTEGER,
+    "bigint": BIGINT,
+    "smallint": SMALLINT,
+    "character varying": VARCHAR,
+    "character": CHAR,
+    '"char"': sqltypes.String,
+    "name": sqltypes.String,
+    "text": TEXT,
+    "numeric": NUMERIC,
+    "float": FLOAT,
+    "real": REAL,
+    "inet": INET,
+    "cidr": CIDR,
+    "citext": CITEXT,
+    "uuid": UUID,
+    "bit": BIT,
+    "bit varying": BIT,
+    "macaddr": MACADDR,
+    "macaddr8": MACADDR8,
+    "money": MONEY,
+    "oid": OID,
+    "regclass": REGCLASS,
+    "double precision": DOUBLE_PRECISION,
+    "timestamp": TIMESTAMP,
+    "timestamp with time zone": TIMESTAMP,
+    "timestamp without time zone": TIMESTAMP,
+    "time with time zone": TIME,
+    "time without time zone": TIME,
+    "date": DATE,
+    "time": TIME,
+    "bytea": BYTEA,
+    "boolean": BOOLEAN,
+    "interval": INTERVAL,
+    "tsvector": TSVECTOR,
+}
+
+
+class PGCompiler(compiler.SQLCompiler):
+    def visit_to_tsvector_func(self, element, **kw):
+        return self._assert_pg_ts_ext(element, **kw)
+
+    def visit_to_tsquery_func(self, element, **kw):
+        return self._assert_pg_ts_ext(element, **kw)
+
+    def visit_plainto_tsquery_func(self, element, **kw):
+        return self._assert_pg_ts_ext(element, **kw)
+
+    def visit_phraseto_tsquery_func(self, element, **kw):
+        return self._assert_pg_ts_ext(element, **kw)
+
+    def visit_websearch_to_tsquery_func(self, element, **kw):
+        return self._assert_pg_ts_ext(element, **kw)
+
+    def visit_ts_headline_func(self, element, **kw):
+        return self._assert_pg_ts_ext(element, **kw)
+
+    def _assert_pg_ts_ext(self, element, **kw):
+        if not isinstance(element, _regconfig_fn):
+            # other options here include trying to rewrite the function
+            # with the correct types.  however, that means we have to
+            # "un-SQL-ize" the first argument, which can't work in a
+            # generalized way. Also, parent compiler class has already added
+            # the incorrect return type to the result map.   So let's just
+            # make sure the function we want is used up front.
+
+            raise exc.CompileError(
+                f'Can\'t compile "{element.name}()" full text search '
+                f"function construct that does not originate from the "
+                f'"sqlalchemy.dialects.postgresql" package.  '
+                f'Please ensure "import sqlalchemy.dialects.postgresql" is '
+                f"called before constructing "
+                f'"sqlalchemy.func.{element.name}()" to ensure registration '
+                f"of the correct argument and return types."
+            )
+
+        return f"{element.name}{self.function_argspec(element, **kw)}"
+
+    def render_bind_cast(self, type_, dbapi_type, sqltext):
+        if dbapi_type._type_affinity is sqltypes.String and dbapi_type.length:
+            # use VARCHAR with no length for VARCHAR cast.
+            # see #9511
+            dbapi_type = sqltypes.STRINGTYPE
+        return f"""{sqltext}::{
+            self.dialect.type_compiler_instance.process(
+                dbapi_type, identifier_preparer=self.preparer
+            )
+        }"""
+
+    def visit_array(self, element, **kw):
+        return "ARRAY[%s]" % self.visit_clauselist(element, **kw)
+
+    def visit_slice(self, element, **kw):
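+        # render the "start:stop" portion of an ARRAY slice; the
+        # surrounding brackets are supplied by visit_getitem_binary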
+        return "%s:%s" % (
+            self.process(element.start, **kw),
+            self.process(element.stop, **kw),
+        )
+
+    def visit_bitwise_xor_op_binary(self, binary, operator, **kw):
+        return self._generate_generic_binary(binary, " # ", **kw)
+
+    def visit_json_getitem_op_binary(
+        self, binary, operator, _cast_applied=False, **kw
+    ):
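+        # render "->" while the expression's type remains JSON; when the
+        # expression is cast to a non-JSON type, re-dispatch through the
+        # cast and render the text-extraction operator "->>" within it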
+        if (
+            not _cast_applied
+            and binary.type._type_affinity is not sqltypes.JSON
+        ):
+            kw["_cast_applied"] = True
+            return self.process(sql.cast(binary, binary.type), **kw)
+
+        kw["eager_grouping"] = True
+
+        return self._generate_generic_binary(
+            binary, " -> " if not _cast_applied else " ->> ", **kw
+        )
+
+    def visit_json_path_getitem_op_binary(
+        self, binary, operator, _cast_applied=False, **kw
+    ):
+        if (
+            not _cast_applied
+            and binary.type._type_affinity is not sqltypes.JSON
+        ):
+            kw["_cast_applied"] = True
+            return self.process(sql.cast(binary, binary.type), **kw)
+
+        kw["eager_grouping"] = True
+        return self._generate_generic_binary(
+            binary, " #> " if not _cast_applied else " #>> ", **kw
+        )
+
+    def visit_getitem_binary(self, binary, operator, **kw):
+        return "%s[%s]" % (
+            self.process(binary.left, **kw),
+            self.process(binary.right, **kw),
+        )
+
+    def visit_aggregate_order_by(self, element, **kw):
+        return "%s ORDER BY %s" % (
+            self.process(element.target, **kw),
+            self.process(element.order_by, **kw),
+        )
+
+    def visit_match_op_binary(self, binary, operator, **kw):
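+        # the generic "match" operator renders as PostgreSQL full text
+        # search: <left> @@ plainto_tsquery([<regconfig>, ] <right>)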
+        if "postgresql_regconfig" in binary.modifiers:
+            regconfig = self.render_literal_value(
+                binary.modifiers["postgresql_regconfig"], sqltypes.STRINGTYPE
+            )
+            if regconfig:
+                return "%s @@ plainto_tsquery(%s, %s)" % (
+                    self.process(binary.left, **kw),
+                    regconfig,
+                    self.process(binary.right, **kw),
+                )
+        return "%s @@ plainto_tsquery(%s)" % (
+            self.process(binary.left, **kw),
+            self.process(binary.right, **kw),
+        )
+
+    def visit_ilike_case_insensitive_operand(self, element, **kw):
+        return element.element._compiler_dispatch(self, **kw)
+
+    def visit_ilike_op_binary(self, binary, operator, **kw):
+        escape = binary.modifiers.get("escape", None)
+
+        return "%s ILIKE %s" % (
+            self.process(binary.left, **kw),
+            self.process(binary.right, **kw),
+        ) + (
+            " ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
+            if escape is not None
+            else ""
+        )
+
+    def visit_not_ilike_op_binary(self, binary, operator, **kw):
+        escape = binary.modifiers.get("escape", None)
+        return "%s NOT ILIKE %s" % (
+            self.process(binary.left, **kw),
+            self.process(binary.right, **kw),
+        ) + (
+            " ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
+            if escape is not None
+            else ""
+        )
+
+    def _regexp_match(self, base_op, binary, operator, kw):
+        flags = binary.modifiers["flags"]
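+        # the "i" flag alone selects the case-insensitive operator
+        # variants (~* / !~*); any other flags are embedded into the
+        # pattern as an inline "(?flags)" group via CONCAT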
+        if flags is None:
+            return self._generate_generic_binary(
+                binary, " %s " % base_op, **kw
+            )
+        if flags == "i":
+            return self._generate_generic_binary(
+                binary, " %s* " % base_op, **kw
+            )
+        return "%s %s CONCAT('(?', %s, ')', %s)" % (
+            self.process(binary.left, **kw),
+            base_op,
+            self.render_literal_value(flags, sqltypes.STRINGTYPE),
+            self.process(binary.right, **kw),
+        )
+
+    def visit_regexp_match_op_binary(self, binary, operator, **kw):
+        return self._regexp_match("~", binary, operator, kw)
+
+    def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
+        return self._regexp_match("!~", binary, operator, kw)
+
+    def visit_regexp_replace_op_binary(self, binary, operator, **kw):
+        string = self.process(binary.left, **kw)
+        pattern_replace = self.process(binary.right, **kw)
+        flags = binary.modifiers["flags"]
+        if flags is None:
+            return "REGEXP_REPLACE(%s, %s)" % (
+                string,
+                pattern_replace,
+            )
+        else:
+            return "REGEXP_REPLACE(%s, %s, %s)" % (
+                string,
+                pattern_replace,
+                self.render_literal_value(flags, sqltypes.STRINGTYPE),
+            )
+
+    def visit_empty_set_expr(self, element_types, **kw):
+        # cast the empty set to the type we are comparing against.  if
+        # we are comparing against the null type, pick an arbitrary
+        # datatype for the empty set
+        return "SELECT %s WHERE 1!=1" % (
+            ", ".join(
+                "CAST(NULL AS %s)"
+                % self.dialect.type_compiler_instance.process(
+                    INTEGER() if type_._isnull else type_
+                )
+                for type_ in element_types or [INTEGER()]
+            ),
+        )
+
+    def render_literal_value(self, value, type_):
+        value = super().render_literal_value(value, type_)
+
+        if self.dialect._backslash_escapes:
+            value = value.replace("\\", "\\\\")
+        return value
+
+    def visit_aggregate_strings_func(self, fn, **kw):
+        return "string_agg%s" % self.function_argspec(fn)
+
+    def visit_sequence(self, seq, **kw):
+        return "nextval('%s')" % self.preparer.format_sequence(seq)
+
+    def limit_clause(self, select, **kw):
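+        # PostgreSQL permits OFFSET without LIMIT; "LIMIT ALL" is
+        # rendered explicitly when only an offset is present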
+        text = ""
+        if select._limit_clause is not None:
+            text += " \n LIMIT " + self.process(select._limit_clause, **kw)
+        if select._offset_clause is not None:
+            if select._limit_clause is None:
+                text += "\n LIMIT ALL"
+            text += " OFFSET " + self.process(select._offset_clause, **kw)
+        return text
+
+    def format_from_hint_text(self, sqltext, table, hint, iscrud):
+        if hint.upper() != "ONLY":
+            raise exc.CompileError("Unrecognized hint: %r" % hint)
+        return "ONLY " + sqltext
+
+    def get_select_precolumns(self, select, **kw):
+        # Do not call super().get_select_precolumns because
+        # it will warn/raise when distinct on is present
+        if select._distinct or select._distinct_on:
+            if select._distinct_on:
+                return (
+                    "DISTINCT ON ("
+                    + ", ".join(
+                        [
+                            self.process(col, **kw)
+                            for col in select._distinct_on
+                        ]
+                    )
+                    + ") "
+                )
+            else:
+                return "DISTINCT "
+        else:
+            return ""
+
+    def for_update_clause(self, select, **kw):
+        if select._for_update_arg.read:
+            if select._for_update_arg.key_share:
+                tmp = " FOR KEY SHARE"
+            else:
+                tmp = " FOR SHARE"
+        elif select._for_update_arg.key_share:
+            tmp = " FOR NO KEY UPDATE"
+        else:
+            tmp = " FOR UPDATE"
+
+        if select._for_update_arg.of:
+            tables = util.OrderedSet()
+            for c in select._for_update_arg.of:
+                tables.update(sql_util.surface_selectables_only(c))
+
+            of_kw = dict(kw)
+            of_kw.update(ashint=True, use_schema=False)
+            tmp += " OF " + ", ".join(
+                self.process(table, **of_kw) for table in tables
+            )
+
+        if select._for_update_arg.nowait:
+            tmp += " NOWAIT"
+        if select._for_update_arg.skip_locked:
+            tmp += " SKIP LOCKED"
+
+        return tmp
+
+    def visit_substring_func(self, func, **kw):
+        s = self.process(func.clauses.clauses[0], **kw)
+        start = self.process(func.clauses.clauses[1], **kw)
+        if len(func.clauses.clauses) > 2:
+            length = self.process(func.clauses.clauses[2], **kw)
+            return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
+        else:
+            return "SUBSTRING(%s FROM %s)" % (s, start)
+
+    def _on_conflict_target(self, clause, **kw):
+        if clause.constraint_target is not None:
+            # target may be a name of an Index, UniqueConstraint or
+            # ExcludeConstraint.  While there is a separate
+            # "max_identifier_length" for indexes, PostgreSQL uses the same
+            # length for all objects so we can use
+            # truncate_and_render_constraint_name
+            target_text = (
+                "ON CONSTRAINT %s"
+                % self.preparer.truncate_and_render_constraint_name(
+                    clause.constraint_target
+                )
+            )
+        elif clause.inferred_target_elements is not None:
+            target_text = "(%s)" % ", ".join(
+                (
+                    self.preparer.quote(c)
+                    if isinstance(c, str)
+                    else self.process(c, include_table=False, use_schema=False)
+                )
+                for c in clause.inferred_target_elements
+            )
+            if clause.inferred_target_whereclause is not None:
+                target_text += " WHERE %s" % self.process(
+                    clause.inferred_target_whereclause,
+                    include_table=False,
+                    use_schema=False,
+                )
+        else:
+            target_text = ""
+
+        return target_text
+
+    def visit_on_conflict_do_nothing(self, on_conflict, **kw):
+        target_text = self._on_conflict_target(on_conflict, **kw)
+
+        if target_text:
+            return "ON CONFLICT %s DO NOTHING" % target_text
+        else:
+            return "ON CONFLICT DO NOTHING"
+
+    def visit_on_conflict_do_update(self, on_conflict, **kw):
+        clause = on_conflict
+
+        target_text = self._on_conflict_target(on_conflict, **kw)
+
+        action_set_ops = []
+
+        set_parameters = dict(clause.update_values_to_set)
+        # create a list of column assignment clauses as tuples
+
+        insert_statement = self.stack[-1]["selectable"]
+        cols = insert_statement.table.c
+        for c in cols:
+            col_key = c.key
+
+            if col_key in set_parameters:
+                value = set_parameters.pop(col_key)
+            elif c in set_parameters:
+                value = set_parameters.pop(c)
+            else:
+                continue
+
+            # TODO: this coercion should be up front.  we can't cache
+            # SQL constructs with non-bound literals buried in them
+            if coercions._is_literal(value):
+                value = elements.BindParameter(None, value, type_=c.type)
+
+            else:
+                if (
+                    isinstance(value, elements.BindParameter)
+                    and value.type._isnull
+                ):
+                    value = value._clone()
+                    value.type = c.type
+            value_text = self.process(value.self_group(), use_schema=False)
+
+            key_text = self.preparer.quote(c.name)
+            action_set_ops.append("%s = %s" % (key_text, value_text))
+
+        # check for names that don't match columns
+        if set_parameters:
+            util.warn(
+                "Additional column names not matching "
+                "any column keys in table '%s': %s"
+                % (
+                    self.current_executable.table.name,
+                    (", ".join("'%s'" % c for c in set_parameters)),
+                )
+            )
+            for k, v in set_parameters.items():
+                key_text = (
+                    self.preparer.quote(k)
+                    if isinstance(k, str)
+                    else self.process(k, use_schema=False)
+                )
+                value_text = self.process(
+                    coercions.expect(roles.ExpressionElementRole, v),
+                    use_schema=False,
+                )
+                action_set_ops.append("%s = %s" % (key_text, value_text))
+
+        action_text = ", ".join(action_set_ops)
+        if clause.update_whereclause is not None:
+            action_text += " WHERE %s" % self.process(
+                clause.update_whereclause, include_table=True, use_schema=False
+            )
+
+        return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text)
+
+    def update_from_clause(
+        self, update_stmt, from_table, extra_froms, from_hints, **kw
+    ):
+        kw["asfrom"] = True
+        return "FROM " + ", ".join(
+            t._compiler_dispatch(self, fromhints=from_hints, **kw)
+            for t in extra_froms
+        )
+
+    def delete_extra_from_clause(
+        self, delete_stmt, from_table, extra_froms, from_hints, **kw
+    ):
+        """Render the DELETE .. USING clause specific to PostgreSQL."""
+        kw["asfrom"] = True
+        return "USING " + ", ".join(
+            t._compiler_dispatch(self, fromhints=from_hints, **kw)
+            for t in extra_froms
+        )
+
+    def fetch_clause(self, select, **kw):
+        # pg requires parens for non literal clauses. It's also required for
+        # bind parameters if a ::type casts is used by the driver (asyncpg),
+        # so it's easiest to just always add it
+        text = ""
+        if select._offset_clause is not None:
+            text += "\n OFFSET (%s) ROWS" % self.process(
+                select._offset_clause, **kw
+            )
+        if select._fetch_clause is not None:
+            text += "\n FETCH FIRST (%s)%s ROWS %s" % (
+                self.process(select._fetch_clause, **kw),
+                " PERCENT" if select._fetch_clause_options["percent"] else "",
+                (
+                    "WITH TIES"
+                    if select._fetch_clause_options["with_ties"]
+                    else "ONLY"
+                ),
+            )
+        return text
+
+
+class PGDDLCompiler(compiler.DDLCompiler):
+    def get_column_specification(self, column, **kwargs):
+        colspec = self.preparer.format_column(column)
+        impl_type = column.type.dialect_impl(self.dialect)
+        if isinstance(impl_type, sqltypes.TypeDecorator):
+            impl_type = impl_type.impl
+
+        has_identity = (
+            column.identity is not None
+            and self.dialect.supports_identity_columns
+        )
+
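+        # render SERIAL / SMALLSERIAL / BIGSERIAL in place of the integer
+        # datatype when the column is the table's autoincrement column,
+        # has no Identity construct, and has no default other than an
+        # optional Sequence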
+        if (
+            column.primary_key
+            and column is column.table._autoincrement_column
+            and (
+                self.dialect.supports_smallserial
+                or not isinstance(impl_type, sqltypes.SmallInteger)
+            )
+            and not has_identity
+            and (
+                column.default is None
+                or (
+                    isinstance(column.default, schema.Sequence)
+                    and column.default.optional
+                )
+            )
+        ):
+            if isinstance(impl_type, sqltypes.BigInteger):
+                colspec += " BIGSERIAL"
+            elif isinstance(impl_type, sqltypes.SmallInteger):
+                colspec += " SMALLSERIAL"
+            else:
+                colspec += " SERIAL"
+        else:
+            colspec += " " + self.dialect.type_compiler_instance.process(
+                column.type,
+                type_expression=column,
+                identifier_preparer=self.preparer,
+            )
+            default = self.get_column_default_string(column)
+            if default is not None:
+                colspec += " DEFAULT " + default
+
+        if column.computed is not None:
+            colspec += " " + self.process(column.computed)
+        if has_identity:
+            colspec += " " + self.process(column.identity)
+
+        if not column.nullable and not has_identity:
+            colspec += " NOT NULL"
+        elif column.nullable and has_identity:
+            colspec += " NULL"
+        return colspec
+
+    def _define_constraint_validity(self, constraint):
+        not_valid = constraint.dialect_options["postgresql"]["not_valid"]
+        return " NOT VALID" if not_valid else ""
+
+    def visit_check_constraint(self, constraint, **kw):
+        if constraint._type_bound:
+            typ = list(constraint.columns)[0].type
+            if (
+                isinstance(typ, sqltypes.ARRAY)
+                and isinstance(typ.item_type, sqltypes.Enum)
+                and not typ.item_type.native_enum
+            ):
+                raise exc.CompileError(
+                    "PostgreSQL dialect cannot produce the CHECK constraint "
+                    "for ARRAY of non-native ENUM; please specify "
+                    "create_constraint=False on this Enum datatype."
+                )
+
+        text = super().visit_check_constraint(constraint)
+        text += self._define_constraint_validity(constraint)
+        return text
+
+    def visit_foreign_key_constraint(self, constraint, **kw):
+        text = super().visit_foreign_key_constraint(constraint)
+        text += self._define_constraint_validity(constraint)
+        return text
+
+    def visit_create_enum_type(self, create, **kw):
+        type_ = create.element
+
+        return "CREATE TYPE %s AS ENUM (%s)" % (
+            self.preparer.format_type(type_),
+            ", ".join(
+                self.sql_compiler.process(sql.literal(e), literal_binds=True)
+                for e in type_.enums
+            ),
+        )
+
+    def visit_drop_enum_type(self, drop, **kw):
+        type_ = drop.element
+
+        return "DROP TYPE %s" % (self.preparer.format_type(type_))
+
+    def visit_create_domain_type(self, create, **kw):
+        domain: DOMAIN = create.element
+
+        options = []
+        if domain.collation is not None:
+            options.append(f"COLLATE {self.preparer.quote(domain.collation)}")
+        if domain.default is not None:
+            default = self.render_default_string(domain.default)
+            options.append(f"DEFAULT {default}")
+        if domain.constraint_name is not None:
+            name = self.preparer.truncate_and_render_constraint_name(
+                domain.constraint_name
+            )
+            options.append(f"CONSTRAINT {name}")
+        if domain.not_null:
+            options.append("NOT NULL")
+        if domain.check is not None:
+            check = self.sql_compiler.process(
+                domain.check, include_table=False, literal_binds=True
+            )
+            options.append(f"CHECK ({check})")
+
+        return (
+            f"CREATE DOMAIN {self.preparer.format_type(domain)} AS "
+            f"{self.type_compiler.process(domain.data_type)} "
+            f"{' '.join(options)}"
+        )
+
+    def visit_drop_domain_type(self, drop, **kw):
+        domain = drop.element
+        return f"DROP DOMAIN {self.preparer.format_type(domain)}"
+
+    def visit_create_index(self, create, **kw):
+        preparer = self.preparer
+        index = create.element
+        self._verify_index_table(index)
+        text = "CREATE "
+        if index.unique:
+            text += "UNIQUE "
+
+        text += "INDEX "
+
+        if self.dialect._supports_create_index_concurrently:
+            concurrently = index.dialect_options["postgresql"]["concurrently"]
+            if concurrently:
+                text += "CONCURRENTLY "
+
+        if create.if_not_exists:
+            text += "IF NOT EXISTS "
+
+        text += "%s ON %s " % (
+            self._prepared_index_name(index, include_schema=False),
+            preparer.format_table(index.table),
+        )
+
+        using = index.dialect_options["postgresql"]["using"]
+        if using:
+            text += (
+                "USING %s "
+                % self.preparer.validate_sql_phrase(using, IDX_USING).lower()
+            )
+
+        ops = index.dialect_options["postgresql"]["ops"]
+        text += "(%s)" % (
+            ", ".join(
+                [
+                    self.sql_compiler.process(
+                        (
+                            expr.self_group()
+                            if not isinstance(expr, expression.ColumnClause)
+                            else expr
+                        ),
+                        include_table=False,
+                        literal_binds=True,
+                    )
+                    + (
+                        (" " + ops[expr.key])
+                        if hasattr(expr, "key") and expr.key in ops
+                        else ""
+                    )
+                    for expr in index.expressions
+                ]
+            )
+        )
+
+        includeclause = index.dialect_options["postgresql"]["include"]
+        if includeclause:
+            inclusions = [
+                index.table.c[col] if isinstance(col, str) else col
+                for col in includeclause
+            ]
+            text += " INCLUDE (%s)" % ", ".join(
+                [preparer.quote(c.name) for c in inclusions]
+            )
+
+        nulls_not_distinct = index.dialect_options["postgresql"][
+            "nulls_not_distinct"
+        ]
+        if nulls_not_distinct is True:
+            text += " NULLS NOT DISTINCT"
+        elif nulls_not_distinct is False:
+            text += " NULLS DISTINCT"
+
+        withclause = index.dialect_options["postgresql"]["with"]
+        if withclause:
+            text += " WITH (%s)" % (
+                ", ".join(
+                    [
+                        "%s = %s" % storage_parameter
+                        for storage_parameter in withclause.items()
+                    ]
+                )
+            )
+
+        tablespace_name = index.dialect_options["postgresql"]["tablespace"]
+        if tablespace_name:
+            text += " TABLESPACE %s" % preparer.quote(tablespace_name)
+
+        whereclause = index.dialect_options["postgresql"]["where"]
+        if whereclause is not None:
+            whereclause = coercions.expect(
+                roles.DDLExpressionRole, whereclause
+            )
+
+            where_compiled = self.sql_compiler.process(
+                whereclause, include_table=False, literal_binds=True
+            )
+            text += " WHERE " + where_compiled
+
+        return text
+
+    def define_unique_constraint_distinct(self, constraint, **kw):
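+        # render the PostgreSQL 15 "NULLS [NOT] DISTINCT" phrase for a
+        # UNIQUE constraint when the option is set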
+        nulls_not_distinct = constraint.dialect_options["postgresql"][
+            "nulls_not_distinct"
+        ]
+        if nulls_not_distinct is True:
+            nulls_not_distinct_param = "NULLS NOT DISTINCT "
+        elif nulls_not_distinct is False:
+            nulls_not_distinct_param = "NULLS DISTINCT "
+        else:
+            nulls_not_distinct_param = ""
+        return nulls_not_distinct_param
+
+    def visit_drop_index(self, drop, **kw):
+        index = drop.element
+
+        text = "\nDROP INDEX "
+
+        if self.dialect._supports_drop_index_concurrently:
+            concurrently = index.dialect_options["postgresql"]["concurrently"]
+            if concurrently:
+                text += "CONCURRENTLY "
+
+        if drop.if_exists:
+            text += "IF EXISTS "
+
+        text += self._prepared_index_name(index, include_schema=True)
+        return text
+
+    def visit_exclude_constraint(self, constraint, **kw):
+        text = ""
+        if constraint.name is not None:
+            text += "CONSTRAINT %s " % self.preparer.format_constraint(
+                constraint
+            )
+        elements = []
+        kw["include_table"] = False
+        kw["literal_binds"] = True
+        for expr, name, op in constraint._render_exprs:
+            exclude_element = self.sql_compiler.process(expr, **kw) + (
+                (" " + constraint.ops[expr.key])
+                if hasattr(expr, "key") and expr.key in constraint.ops
+                else ""
+            )
+
+            elements.append("%s WITH %s" % (exclude_element, op))
+        text += "EXCLUDE USING %s (%s)" % (
+            self.preparer.validate_sql_phrase(
+                constraint.using, IDX_USING
+            ).lower(),
+            ", ".join(elements),
+        )
+        if constraint.where is not None:
+            text += " WHERE (%s)" % self.sql_compiler.process(
+                constraint.where, literal_binds=True
+            )
+        text += self.define_constraint_deferrability(constraint)
+        return text
+
+    def post_create_table(self, table):
+        table_opts = []
+        pg_opts = table.dialect_options["postgresql"]
+
+        inherits = pg_opts.get("inherits")
+        if inherits is not None:
+            if not isinstance(inherits, (list, tuple)):
+                inherits = (inherits,)
+            table_opts.append(
+                "\n INHERITS ( "
+                + ", ".join(self.preparer.quote(name) for name in inherits)
+                + " )"
+            )
+
+        if pg_opts["partition_by"]:
+            table_opts.append("\n PARTITION BY %s" % pg_opts["partition_by"])
+
+        if pg_opts["using"]:
+            table_opts.append("\n USING %s" % pg_opts["using"])
+
+        if pg_opts["with_oids"] is True:
+            table_opts.append("\n WITH OIDS")
+        elif pg_opts["with_oids"] is False:
+            table_opts.append("\n WITHOUT OIDS")
+
+        if pg_opts["on_commit"]:
+            on_commit_options = pg_opts["on_commit"].replace("_", " ").upper()
+            table_opts.append("\n ON COMMIT %s" % on_commit_options)
+
+        if pg_opts["tablespace"]:
+            tablespace_name = pg_opts["tablespace"]
+            table_opts.append(
+                "\n TABLESPACE %s" % self.preparer.quote(tablespace_name)
+            )
+
+        return "".join(table_opts)
+
+    def visit_computed_column(self, generated, **kw):
+        if generated.persisted is False:
+            raise exc.CompileError(
+                "PostrgreSQL computed columns do not support 'virtual' "
+                "persistence; set the 'persisted' flag to None or True for "
+                "PostgreSQL support."
+            )
+
+        return "GENERATED ALWAYS AS (%s) STORED" % self.sql_compiler.process(
+            generated.sqltext, include_table=False, literal_binds=True
+        )
+
+    def visit_create_sequence(self, create, **kw):
+        prefix = None
+        if create.element.data_type is not None:
+            prefix = " AS %s" % self.type_compiler.process(
+                create.element.data_type
+            )
+
+        return super().visit_create_sequence(create, prefix=prefix, **kw)
+
+    def _can_comment_on_constraint(self, ddl_instance):
+        constraint = ddl_instance.element
+        if constraint.name is None:
+            raise exc.CompileError(
+                f"Can't emit COMMENT ON for constraint {constraint!r}: "
+                "it has no name"
+            )
+        if constraint.table is None:
+            raise exc.CompileError(
+                f"Can't emit COMMENT ON for constraint {constraint!r}: "
+                "it has no associated table"
+            )
+
+    def visit_set_constraint_comment(self, create, **kw):
+        self._can_comment_on_constraint(create)
+        return "COMMENT ON CONSTRAINT %s ON %s IS %s" % (
+            self.preparer.format_constraint(create.element),
+            self.preparer.format_table(create.element.table),
+            self.sql_compiler.render_literal_value(
+                create.element.comment, sqltypes.String()
+            ),
+        )
+
+    def visit_drop_constraint_comment(self, drop, **kw):
+        self._can_comment_on_constraint(drop)
+        return "COMMENT ON CONSTRAINT %s ON %s IS NULL" % (
+            self.preparer.format_constraint(drop.element),
+            self.preparer.format_table(drop.element.table),
+        )
+
+
+class PGTypeCompiler(compiler.GenericTypeCompiler):
+    def visit_TSVECTOR(self, type_, **kw):
+        return "TSVECTOR"
+
+    def visit_TSQUERY(self, type_, **kw):
+        return "TSQUERY"
+
+    def visit_INET(self, type_, **kw):
+        return "INET"
+
+    def visit_CIDR(self, type_, **kw):
+        return "CIDR"
+
+    def visit_CITEXT(self, type_, **kw):
+        return "CITEXT"
+
+    def visit_MACADDR(self, type_, **kw):
+        return "MACADDR"
+
+    def visit_MACADDR8(self, type_, **kw):
+        return "MACADDR8"
+
+    def visit_MONEY(self, type_, **kw):
+        return "MONEY"
+
+    def visit_OID(self, type_, **kw):
+        return "OID"
+
+    def visit_REGCONFIG(self, type_, **kw):
+        return "REGCONFIG"
+
+    def visit_REGCLASS(self, type_, **kw):
+        return "REGCLASS"
+
+    def visit_FLOAT(self, type_, **kw):
+        if not type_.precision:
+            return "FLOAT"
+        else:
+            return "FLOAT(%(precision)s)" % {"precision": type_.precision}
+
+    def visit_double(self, type_, **kw):
+        return self.visit_DOUBLE_PRECISION(type_, **kw)
+
+    def visit_BIGINT(self, type_, **kw):
+        return "BIGINT"
+
+    def visit_HSTORE(self, type_, **kw):
+        return "HSTORE"
+
+    def visit_JSON(self, type_, **kw):
+        return "JSON"
+
+    def visit_JSONB(self, type_, **kw):
+        return "JSONB"
+
+    def visit_INT4MULTIRANGE(self, type_, **kw):
+        return "INT4MULTIRANGE"
+
+    def visit_INT8MULTIRANGE(self, type_, **kw):
+        return "INT8MULTIRANGE"
+
+    def visit_NUMMULTIRANGE(self, type_, **kw):
+        return "NUMMULTIRANGE"
+
+    def visit_DATEMULTIRANGE(self, type_, **kw):
+        return "DATEMULTIRANGE"
+
+    def visit_TSMULTIRANGE(self, type_, **kw):
+        return "TSMULTIRANGE"
+
+    def visit_TSTZMULTIRANGE(self, type_, **kw):
+        return "TSTZMULTIRANGE"
+
+    def visit_INT4RANGE(self, type_, **kw):
+        return "INT4RANGE"
+
+    def visit_INT8RANGE(self, type_, **kw):
+        return "INT8RANGE"
+
+    def visit_NUMRANGE(self, type_, **kw):
+        return "NUMRANGE"
+
+    def visit_DATERANGE(self, type_, **kw):
+        return "DATERANGE"
+
+    def visit_TSRANGE(self, type_, **kw):
+        return "TSRANGE"
+
+    def visit_TSTZRANGE(self, type_, **kw):
+        return "TSTZRANGE"
+
+    def visit_json_int_index(self, type_, **kw):
+        return "INT"
+
+    def visit_json_str_index(self, type_, **kw):
+        return "TEXT"
+
+    def visit_datetime(self, type_, **kw):
+        return self.visit_TIMESTAMP(type_, **kw)
+
+    def visit_enum(self, type_, **kw):
+        if not type_.native_enum or not self.dialect.supports_native_enum:
+            return super().visit_enum(type_, **kw)
+        else:
+            return self.visit_ENUM(type_, **kw)
+
+    def visit_ENUM(self, type_, identifier_preparer=None, **kw):
+        if identifier_preparer is None:
+            identifier_preparer = self.dialect.identifier_preparer
+        return identifier_preparer.format_type(type_)
+
+    def visit_DOMAIN(self, type_, identifier_preparer=None, **kw):
+        if identifier_preparer is None:
+            identifier_preparer = self.dialect.identifier_preparer
+        return identifier_preparer.format_type(type_)
+
+    def visit_TIMESTAMP(self, type_, **kw):
+        return "TIMESTAMP%s %s" % (
+            (
+                "(%d)" % type_.precision
+                if getattr(type_, "precision", None) is not None
+                else ""
+            ),
+            (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE",
+        )
+
+    def visit_TIME(self, type_, **kw):
+        return "TIME%s %s" % (
+            (
+                "(%d)" % type_.precision
+                if getattr(type_, "precision", None) is not None
+                else ""
+            ),
+            (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE",
+        )
+
+    def visit_INTERVAL(self, type_, **kw):
+        text = "INTERVAL"
+        if type_.fields is not None:
+            text += " " + type_.fields
+        if type_.precision is not None:
+            text += " (%d)" % type_.precision
+        return text
+
+    def visit_BIT(self, type_, **kw):
+        if type_.varying:
+            compiled = "BIT VARYING"
+            if type_.length is not None:
+                compiled += "(%d)" % type_.length
+        else:
+            compiled = "BIT(%d)" % type_.length
+        return compiled
+
+    def visit_uuid(self, type_, **kw):
+        if type_.native_uuid:
+            return self.visit_UUID(type_, **kw)
+        else:
+            return super().visit_uuid(type_, **kw)
+
+    def visit_UUID(self, type_, **kw):
+        return "UUID"
+
+    def visit_large_binary(self, type_, **kw):
+        return self.visit_BYTEA(type_, **kw)
+
+    def visit_BYTEA(self, type_, **kw):
+        return "BYTEA"
+
+    def visit_ARRAY(self, type_, **kw):
+        inner = self.process(type_.item_type, **kw)
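+        # append "[]" once per dimension to the inner type, placing the
+        # brackets ahead of any trailing COLLATE clause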
+        return re.sub(
+            r"((?: COLLATE.*)?)$",
+            (
+                r"%s\1"
+                % (
+                    "[]"
+                    * (type_.dimensions if type_.dimensions is not None else 1)
+                )
+            ),
+            inner,
+            count=1,
+        )
+
+    def visit_json_path(self, type_, **kw):
+        return self.visit_JSONPATH(type_, **kw)
+
+    def visit_JSONPATH(self, type_, **kw):
+        return "JSONPATH"
+
+
+class PGIdentifierPreparer(compiler.IdentifierPreparer):
+    reserved_words = RESERVED_WORDS
+
+    def _unquote_identifier(self, value):
+        if value[0] == self.initial_quote:
+            value = value[1:-1].replace(
+                self.escape_to_quote, self.escape_quote
+            )
+        return value
+
+    def format_type(self, type_, use_schema=True):
+        if not type_.name:
+            raise exc.CompileError(
+                f"PostgreSQL {type_.__class__.__name__} type requires a name."
+            )
+
+        name = self.quote(type_.name)
+        effective_schema = self.schema_for_object(type_)
+
+        if (
+            not self.omit_schema
+            and use_schema
+            and effective_schema is not None
+        ):
+            name = f"{self.quote_schema(effective_schema)}.{name}"
+        return name
+
+
+class ReflectedNamedType(TypedDict):
+    """Represents a reflected named type."""
+
+    name: str
+    """Name of the type."""
+    schema: str
+    """The schema of the type."""
+    visible: bool
+    """Indicates if this type is in the current search path."""
+
+
+class ReflectedDomainConstraint(TypedDict):
+    """Represents a reflect check constraint of a domain."""
+
+    name: str
+    """Name of the constraint."""
+    check: str
+    """The check constraint text."""
+
+
+class ReflectedDomain(ReflectedNamedType):
+    """Represents a reflected enum."""
+
+    type: str
+    """The string name of the underlying data type of the domain."""
+    nullable: bool
+    """Indicates if the domain allows null or not."""
+    default: Optional[str]
+    """The string representation of the default value of this domain
+    or ``None`` if none present.
+    """
+    constraints: List[ReflectedDomainConstraint]
+    """The constraints defined in the domain, if any.
+    The constraints are listed in order of evaluation by PostgreSQL.
+    """
+    collation: Optional[str]
+    """The collation for the domain."""
+
+
+class ReflectedEnum(ReflectedNamedType):
+    """Represents a reflected enum."""
+
+    labels: List[str]
+    """The labels that compose the enum."""
+
+
+class PGInspector(reflection.Inspector):
+    dialect: PGDialect
+
+    def get_table_oid(
+        self, table_name: str, schema: Optional[str] = None
+    ) -> int:
+        """Return the OID for the given table name.
+
+        :param table_name: string name of the table.  For special quoting,
+         use :class:`.quoted_name`.
+
+        :param schema: string schema name; if omitted, uses the default schema
+         of the database connection.  For special quoting,
+         use :class:`.quoted_name`.
+
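+        E.g., a usage sketch, where ``engine`` is an assumed
+        :class:`.Engine` connected to a PostgreSQL database::
+
+            from sqlalchemy import inspect
+
+            insp = inspect(engine)
+            oid = insp.get_table_oid("my_table", schema="public")
+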
+        """
+
+        with self._operation_context() as conn:
+            return self.dialect.get_table_oid(
+                conn, table_name, schema, info_cache=self.info_cache
+            )
+
+    def get_domains(
+        self, schema: Optional[str] = None
+    ) -> List[ReflectedDomain]:
+        """Return a list of DOMAIN objects.
+
+        Each member is a dictionary containing these fields:
+
+            * name - name of the domain
+            * schema - the schema name for the domain.
+            * visible - boolean, whether or not this domain is visible
+              in the default search path.
+            * type - the type defined by this domain.
+            * nullable - Indicates if this domain can be ``NULL``.
+            * default - The default value of the domain or ``None`` if the
+              domain has no default.
+            * constraints - A list of dicts with the constraints defined by
+              this domain. Each element contains two keys: ``name`` of the
+              constraint and ``check`` with the constraint text.
+
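+        E.g., a usage sketch (``engine`` is an assumed PostgreSQL
+        :class:`.Engine`)::
+
+            from sqlalchemy import inspect
+
+            insp = inspect(engine)
+            for domain in insp.get_domains(schema="public"):
+                print(domain["name"], domain["type"])
+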
+        :param schema: schema name.  If None, the default schema
+         (typically 'public') is used.  May also be set to ``'*'`` to
+         load domains for all schemas.
+
+        .. versionadded:: 2.0
+
+        """
+        with self._operation_context() as conn:
+            return self.dialect._load_domains(
+                conn, schema, info_cache=self.info_cache
+            )
+
+    def get_enums(self, schema: Optional[str] = None) -> List[ReflectedEnum]:
+        """Return a list of ENUM objects.
+
+        Each member is a dictionary containing these fields:
+
+            * name - name of the enum
+            * schema - the schema name for the enum.
+            * visible - boolean, whether or not this enum is visible
+              in the default search path.
+            * labels - a list of string labels that apply to the enum.
+
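+        E.g., iterating the reflected enums (a sketch against an assumed
+        PostgreSQL ``engine``)::
+
+            from sqlalchemy import inspect
+
+            for enum in inspect(engine).get_enums():
+                print(enum["name"], enum["labels"])
+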
+        :param schema: schema name.  If None, the default schema
+         (typically 'public') is used.  May also be set to ``'*'`` to
+         load enums for all schemas.
+
+        """
+        with self._operation_context() as conn:
+            return self.dialect._load_enums(
+                conn, schema, info_cache=self.info_cache
+            )
+
+    def get_foreign_table_names(
+        self, schema: Optional[str] = None
+    ) -> List[str]:
+        """Return a list of FOREIGN TABLE names.
+
+        Behavior is similar to that of
+        :meth:`_reflection.Inspector.get_table_names`,
+        except that the list is limited to those tables that report a
+        ``relkind`` value of ``f``.
+
+        """
+        with self._operation_context() as conn:
+            return self.dialect._get_foreign_table_names(
+                conn, schema, info_cache=self.info_cache
+            )
+
+    def has_type(
+        self, type_name: str, schema: Optional[str] = None, **kw: Any
+    ) -> bool:
+        """Return if the database has the specified type in the provided
+        schema.
+
+        :param type_name: the type to check.
+        :param schema: schema name.  If None, the default schema
+         (typically 'public') is used.  May also be set to ``'*'`` to
+         check in all schemas.
+
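+        E.g., a sketch using a hypothetical type name ``mood`` against an
+        assumed PostgreSQL ``engine``::
+
+            from sqlalchemy import inspect
+
+            if inspect(engine).has_type("mood", schema="public"):
+                ...
+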
+        .. versionadded:: 2.0
+
+        """
+        with self._operation_context() as conn:
+            return self.dialect.has_type(
+                conn, type_name, schema, info_cache=self.info_cache
+            )
+
+
+class PGExecutionContext(default.DefaultExecutionContext):
+    def fire_sequence(self, seq, type_):
+        return self._execute_scalar(
+            (
+                "select nextval('%s')"
+                % self.identifier_preparer.format_sequence(seq)
+            ),
+            type_,
+        )
+
+    def get_insert_default(self, column):
+        if column.primary_key and column is column.table._autoincrement_column:
+            if column.server_default and column.server_default.has_argument:
+                # pre-execute passive defaults on primary key columns
+                return self._execute_scalar(
+                    "select %s" % column.server_default.arg, column.type
+                )
+
+            elif column.default is None or (
+                column.default.is_sequence and column.default.optional
+            ):
+                # execute the sequence associated with a SERIAL primary
+                # key column. for non-primary-key SERIAL, the ID just
+                # generates server side.
+
+                try:
+                    seq_name = column._postgresql_seq_name
+                except AttributeError:
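+                    # derive the implicit SERIAL sequence name
+                    # "<table>_<column>_seq", truncating each part the way
+                    # PostgreSQL does so the full name fits the 63-character
+                    # identifier limit (29 + 29 + "_" + "_seq" = 63)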
+                    tab = column.table.name
+                    col = column.name
+                    tab = tab[0 : 29 + max(0, (29 - len(col)))]
+                    col = col[0 : 29 + max(0, (29 - len(tab)))]
+                    name = "%s_%s_seq" % (tab, col)
+                    column._postgresql_seq_name = seq_name = name
+
+                if column.table is not None:
+                    effective_schema = self.connection.schema_for_object(
+                        column.table
+                    )
+                else:
+                    effective_schema = None
+
+                if effective_schema is not None:
+                    exc = 'select nextval(\'"%s"."%s"\')' % (
+                        effective_schema,
+                        seq_name,
+                    )
+                else:
+                    exc = "select nextval('\"%s\"')" % (seq_name,)
+
+                return self._execute_scalar(exc, column.type)
+
+        return super().get_insert_default(column)
+
+
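+# the two characteristics below back the "postgresql_readonly" and
+# "postgresql_deferrable" execution options, e.g. (a usage sketch, where
+# ``engine`` is an assumed PostgreSQL Engine):
+#
+#     with engine.connect().execution_options(
+#         postgresql_readonly=True
+#     ) as conn:
+#         ...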
+class PGReadOnlyConnectionCharacteristic(
+    characteristics.ConnectionCharacteristic
+):
+    transactional = True
+
+    def reset_characteristic(self, dialect, dbapi_conn):
+        dialect.set_readonly(dbapi_conn, False)
+
+    def set_characteristic(self, dialect, dbapi_conn, value):
+        dialect.set_readonly(dbapi_conn, value)
+
+    def get_characteristic(self, dialect, dbapi_conn):
+        return dialect.get_readonly(dbapi_conn)
+
+
+class PGDeferrableConnectionCharacteristic(
+    characteristics.ConnectionCharacteristic
+):
+    transactional = True
+
+    def reset_characteristic(self, dialect, dbapi_conn):
+        dialect.set_deferrable(dbapi_conn, False)
+
+    def set_characteristic(self, dialect, dbapi_conn, value):
+        dialect.set_deferrable(dbapi_conn, value)
+
+    def get_characteristic(self, dialect, dbapi_conn):
+        return dialect.get_deferrable(dbapi_conn)
+
+
+class PGDialect(default.DefaultDialect):
+    name = "postgresql"
+    supports_statement_cache = True
+    supports_alter = True
+    max_identifier_length = 63
+    supports_sane_rowcount = True
+
+    bind_typing = interfaces.BindTyping.RENDER_CASTS
+
+    supports_native_enum = True
+    supports_native_boolean = True
+    supports_native_uuid = True
+    supports_smallserial = True
+
+    supports_sequences = True
+    sequences_optional = True
+    preexecute_autoincrement_sequences = True
+    postfetch_lastrowid = False
+    use_insertmanyvalues = True
+
+    returns_native_bytes = True
+
+    insertmanyvalues_implicit_sentinel = (
+        InsertmanyvaluesSentinelOpts.ANY_AUTOINCREMENT
+        | InsertmanyvaluesSentinelOpts.USE_INSERT_FROM_SELECT
+        | InsertmanyvaluesSentinelOpts.RENDER_SELECT_COL_CASTS
+    )
+
+    supports_comments = True
+    supports_constraint_comments = True
+    supports_default_values = True
+
+    supports_default_metavalue = True
+
+    supports_empty_insert = False
+    supports_multivalues_insert = True
+
+    supports_identity_columns = True
+
+    default_paramstyle = "pyformat"
+    ischema_names = ischema_names
+    colspecs = colspecs
+
+    statement_compiler = PGCompiler
+    ddl_compiler = PGDDLCompiler
+    type_compiler_cls = PGTypeCompiler
+    preparer = PGIdentifierPreparer
+    execution_ctx_cls = PGExecutionContext
+    inspector = PGInspector
+
+    update_returning = True
+    delete_returning = True
+    insert_returning = True
+    update_returning_multifrom = True
+    delete_returning_multifrom = True
+
+    connection_characteristics = (
+        default.DefaultDialect.connection_characteristics
+    )
+    connection_characteristics = connection_characteristics.union(
+        {
+            "postgresql_readonly": PGReadOnlyConnectionCharacteristic(),
+            "postgresql_deferrable": PGDeferrableConnectionCharacteristic(),
+        }
+    )
+
+    construct_arguments = [
+        (
+            schema.Index,
+            {
+                "using": False,
+                "include": None,
+                "where": None,
+                "ops": {},
+                "concurrently": False,
+                "with": {},
+                "tablespace": None,
+                "nulls_not_distinct": None,
+            },
+        ),
+        (
+            schema.Table,
+            {
+                "ignore_search_path": False,
+                "tablespace": None,
+                "partition_by": None,
+                "with_oids": None,
+                "on_commit": None,
+                "inherits": None,
+                "using": None,
+            },
+        ),
+        (
+            schema.CheckConstraint,
+            {
+                "not_valid": False,
+            },
+        ),
+        (
+            schema.ForeignKeyConstraint,
+            {
+                "not_valid": False,
+            },
+        ),
+        (
+            schema.UniqueConstraint,
+            {"nulls_not_distinct": None},
+        ),
+    ]
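+    # the entries above enable DDL keyword arguments such as
+    # ``postgresql_using="gin"`` on Index or
+    # ``postgresql_partition_by="RANGE (id)"`` on Table (representative
+    # examples of the documented "postgresql_*" construct arguments)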
+
+    reflection_options = ("postgresql_ignore_search_path",)
+
+    _backslash_escapes = True
+    _supports_create_index_concurrently = True
+    _supports_drop_index_concurrently = True
+
+    def __init__(
+        self,
+        native_inet_types=None,
+        json_serializer=None,
+        json_deserializer=None,
+        **kwargs,
+    ):
+        default.DefaultDialect.__init__(self, **kwargs)
+
+        self._native_inet_types = native_inet_types
+        self._json_deserializer = json_deserializer
+        self._json_serializer = json_serializer
+
+    def initialize(self, connection):
+        super().initialize(connection)
+
+        # https://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689
+        self.supports_smallserial = self.server_version_info >= (9, 2)
+
+        self._set_backslash_escapes(connection)
+
+        self._supports_drop_index_concurrently = self.server_version_info >= (
+            9,
+            2,
+        )
+        self.supports_identity_columns = self.server_version_info >= (10,)
+
+    def get_isolation_level_values(self, dbapi_conn):
+        # note the generic dialect doesn't have AUTOCOMMIT; however,
+        # all postgresql dialects should include AUTOCOMMIT.
+        return (
+            "SERIALIZABLE",
+            "READ UNCOMMITTED",
+            "READ COMMITTED",
+            "REPEATABLE READ",
+        )
+
+    def set_isolation_level(self, dbapi_connection, level):
+        cursor = dbapi_connection.cursor()
+        cursor.execute(
+            "SET SESSION CHARACTERISTICS AS TRANSACTION "
+            f"ISOLATION LEVEL {level}"
+        )
+        cursor.execute("COMMIT")
+        cursor.close()
+
+    def get_isolation_level(self, dbapi_connection):
+        cursor = dbapi_connection.cursor()
+        cursor.execute("show transaction isolation level")
+        val = cursor.fetchone()[0]
+        cursor.close()
+        return val.upper()
+
+    def set_readonly(self, connection, value):
+        raise NotImplementedError()
+
+    def get_readonly(self, connection):
+        raise NotImplementedError()
+
+    def set_deferrable(self, connection, value):
+        raise NotImplementedError()
+
+    def get_deferrable(self, connection):
+        raise NotImplementedError()
+
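+    # accepts either of two multihost query-string forms, which may not
+    # be mixed (hypothetical DSNs for illustration):
+    #
+    #   postgresql://user:pw@/dbname?host=h1:5432&host=h2:5433
+    #   postgresql://user:pw@/dbname?host=h1,h2&port=5432,5433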
+    def _split_multihost_from_url(self, url: URL) -> Union[
+        Tuple[None, None],
+        Tuple[Tuple[Optional[str], ...], Tuple[Optional[int], ...]],
+    ]:
+        hosts: Optional[Tuple[Optional[str], ...]] = None
+        ports_str: Union[str, Tuple[Optional[str], ...], None] = None
+
+        integrated_multihost = False
+
+        if "host" in url.query:
+            if isinstance(url.query["host"], (list, tuple)):
+                integrated_multihost = True
+                hosts, ports_str = zip(
+                    *[
+                        token.split(":") if ":" in token else (token, None)
+                        for token in url.query["host"]
+                    ]
+                )
+
+            elif isinstance(url.query["host"], str):
+                hosts = tuple(url.query["host"].split(","))
+
+                if (
+                    "port" not in url.query
+                    and len(hosts) == 1
+                    and ":" in hosts[0]
+                ):
+                    # internet host is alphanumeric plus dots or hyphens.
+                    # this is essentially rfc1123, which refers to rfc952.
+                    # https://stackoverflow.com/questions/3523028/
+                    # valid-characters-of-a-hostname
+                    host_port_match = re.match(
+                        r"^([a-zA-Z0-9\-\.]*)(?:\:(\d*))?$", hosts[0]
+                    )
+                    if host_port_match:
+                        integrated_multihost = True
+                        h, p = host_port_match.group(1, 2)
+                        if TYPE_CHECKING:
+                            assert isinstance(h, str)
+                            assert isinstance(p, str)
+                        hosts = (h,)
+                        ports_str = cast(
+                            "Tuple[Optional[str], ...]", (p,) if p else (None,)
+                        )
+
+        if "port" in url.query:
+            if integrated_multihost:
+                raise exc.ArgumentError(
+                    "Can't mix 'multihost' formats together; use "
+                    '"host=h1,h2,h3&port=p1,p2,p3" or '
+                    '"host=h1:p1&host=h2:p2&host=h3:p3" separately'
+                )
+            if isinstance(url.query["port"], (list, tuple)):
+                ports_str = url.query["port"]
+            elif isinstance(url.query["port"], str):
+                ports_str = tuple(url.query["port"].split(","))
+
+        ports: Optional[Tuple[Optional[int], ...]] = None
+
+        if ports_str:
+            try:
+                ports = tuple(int(x) if x else None for x in ports_str)
+            except ValueError:
+                raise exc.ArgumentError(
+                    f"Received non-integer port arguments: {ports_str}"
+                ) from None
+
+        if ports and (
+            (not hosts and len(ports) > 1)
+            or (
+                hosts
+                and ports
+                and len(hosts) != len(ports)
+                and (len(hosts) > 1 or len(ports) > 1)
+            )
+        ):
+            raise exc.ArgumentError("number of hosts and ports don't match")
+
+        if hosts is not None:
+            if ports is None:
+                ports = tuple(None for _ in hosts)
+
+        return hosts, ports  # type: ignore
+
+    def do_begin_twophase(self, connection, xid):
+        self.do_begin(connection.connection)
+
+    def do_prepare_twophase(self, connection, xid):
+        connection.exec_driver_sql("PREPARE TRANSACTION '%s'" % xid)
+
+    def do_rollback_twophase(
+        self, connection, xid, is_prepared=True, recover=False
+    ):
+        if is_prepared:
+            if recover:
+                # FIXME: ugly hack to get out of transaction
+                # context when committing recoverable transactions.
+                # Must find a way to make the dbapi not
+                # open a transaction.
+                connection.exec_driver_sql("ROLLBACK")
+            connection.exec_driver_sql("ROLLBACK PREPARED '%s'" % xid)
+            connection.exec_driver_sql("BEGIN")
+            self.do_rollback(connection.connection)
+        else:
+            self.do_rollback(connection.connection)
+
+    def do_commit_twophase(
+        self, connection, xid, is_prepared=True, recover=False
+    ):
+        if is_prepared:
+            if recover:
+                connection.exec_driver_sql("ROLLBACK")
+            connection.exec_driver_sql("COMMIT PREPARED '%s'" % xid)
+            connection.exec_driver_sql("BEGIN")
+            self.do_rollback(connection.connection)
+        else:
+            self.do_commit(connection.connection)
+
+    def do_recover_twophase(self, connection):
+        return connection.scalars(
+            sql.text("SELECT gid FROM pg_prepared_xacts")
+        ).all()
+
+    def _get_default_schema_name(self, connection):
+        return connection.exec_driver_sql("select current_schema()").scalar()
+
+    @reflection.cache
+    def has_schema(self, connection, schema, **kw):
+        query = select(pg_catalog.pg_namespace.c.nspname).where(
+            pg_catalog.pg_namespace.c.nspname == schema
+        )
+        return bool(connection.scalar(query))
+
+    def _pg_class_filter_scope_schema(
+        self, query, schema, scope, pg_class_table=None
+    ):
+        if pg_class_table is None:
+            pg_class_table = pg_catalog.pg_class
+        query = query.join(
+            pg_catalog.pg_namespace,
+            pg_catalog.pg_namespace.c.oid == pg_class_table.c.relnamespace,
+        )
+
+        if scope is ObjectScope.DEFAULT:
+            query = query.where(pg_class_table.c.relpersistence != "t")
+        elif scope is ObjectScope.TEMPORARY:
+            query = query.where(pg_class_table.c.relpersistence == "t")
+
+        if schema is None:
+            query = query.where(
+                pg_catalog.pg_table_is_visible(pg_class_table.c.oid),
+                # ignore pg_catalog schema
+                pg_catalog.pg_namespace.c.nspname != "pg_catalog",
+            )
+        else:
+            query = query.where(pg_catalog.pg_namespace.c.nspname == schema)
+        return query
+
+    def _pg_class_relkind_condition(self, relkinds, pg_class_table=None):
+        if pg_class_table is None:
+            pg_class_table = pg_catalog.pg_class
+        # use the ANY form instead of IN, since otherwise postgresql
+        # complains that 'IN could not convert type character to "char"'
+        return pg_class_table.c.relkind == sql.any_(_array.array(relkinds))
+
+    @lru_cache()
+    def _has_table_query(self, schema):
+        query = select(pg_catalog.pg_class.c.relname).where(
+            pg_catalog.pg_class.c.relname == bindparam("table_name"),
+            self._pg_class_relkind_condition(
+                pg_catalog.RELKINDS_ALL_TABLE_LIKE
+            ),
+        )
+        return self._pg_class_filter_scope_schema(
+            query, schema, scope=ObjectScope.ANY
+        )
+
+    @reflection.cache
+    def has_table(self, connection, table_name, schema=None, **kw):
+        self._ensure_has_table_connection(connection)
+        query = self._has_table_query(schema)
+        return bool(connection.scalar(query, {"table_name": table_name}))
+
+    @reflection.cache
+    def has_sequence(self, connection, sequence_name, schema=None, **kw):
+        query = select(pg_catalog.pg_class.c.relname).where(
+            pg_catalog.pg_class.c.relkind == "S",
+            pg_catalog.pg_class.c.relname == sequence_name,
+        )
+        query = self._pg_class_filter_scope_schema(
+            query, schema, scope=ObjectScope.ANY
+        )
+        return bool(connection.scalar(query))
+
+    @reflection.cache
+    def has_type(self, connection, type_name, schema=None, **kw):
+        query = (
+            select(pg_catalog.pg_type.c.typname)
+            .join(
+                pg_catalog.pg_namespace,
+                pg_catalog.pg_namespace.c.oid
+                == pg_catalog.pg_type.c.typnamespace,
+            )
+            .where(pg_catalog.pg_type.c.typname == type_name)
+        )
+        if schema is None:
+            query = query.where(
+                pg_catalog.pg_type_is_visible(pg_catalog.pg_type.c.oid),
+                # ignore pg_catalog schema
+                pg_catalog.pg_namespace.c.nspname != "pg_catalog",
+            )
+        elif schema != "*":
+            query = query.where(pg_catalog.pg_namespace.c.nspname == schema)
+
+        return bool(connection.scalar(query))
+
+    def _get_server_version_info(self, connection):
+        v = connection.exec_driver_sql("select pg_catalog.version()").scalar()
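+        # typical strings resemble "PostgreSQL 14.5 on x86_64-pc-linux-gnu,
+        # compiled by gcc ..." or "EnterpriseDB 9.6.2.7 ..."
+        # (representative samples)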
+        m = re.match(
+            r".*(?:PostgreSQL|EnterpriseDB) "
+            r"(\d+)\.?(\d+)?(?:\.(\d+))?(?:\.\d+)?(?:devel|beta)?",
+            v,
+        )
+        if not m:
+            raise AssertionError(
+                "Could not determine version from string '%s'" % v
+            )
+        return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
+
+    @reflection.cache
+    def get_table_oid(self, connection, table_name, schema=None, **kw):
+        """Fetch the oid for schema.table_name."""
+        query = select(pg_catalog.pg_class.c.oid).where(
+            pg_catalog.pg_class.c.relname == table_name,
+            self._pg_class_relkind_condition(
+                pg_catalog.RELKINDS_ALL_TABLE_LIKE
+            ),
+        )
+        query = self._pg_class_filter_scope_schema(
+            query, schema, scope=ObjectScope.ANY
+        )
+        table_oid = connection.scalar(query)
+        if table_oid is None:
+            raise exc.NoSuchTableError(
+                f"{schema}.{table_name}" if schema else table_name
+            )
+        return table_oid
+
+    @reflection.cache
+    def get_schema_names(self, connection, **kw):
+        query = (
+            select(pg_catalog.pg_namespace.c.nspname)
+            .where(pg_catalog.pg_namespace.c.nspname.not_like("pg_%"))
+            .order_by(pg_catalog.pg_namespace.c.nspname)
+        )
+        return connection.scalars(query).all()
+
+    def _get_relnames_for_relkinds(self, connection, schema, relkinds, scope):
+        query = select(pg_catalog.pg_class.c.relname).where(
+            self._pg_class_relkind_condition(relkinds)
+        )
+        query = self._pg_class_filter_scope_schema(query, schema, scope=scope)
+        return connection.scalars(query).all()
+
+    @reflection.cache
+    def get_table_names(self, connection, schema=None, **kw):
+        return self._get_relnames_for_relkinds(
+            connection,
+            schema,
+            pg_catalog.RELKINDS_TABLE_NO_FOREIGN,
+            scope=ObjectScope.DEFAULT,
+        )
+
+    @reflection.cache
+    def get_temp_table_names(self, connection, **kw):
+        return self._get_relnames_for_relkinds(
+            connection,
+            schema=None,
+            relkinds=pg_catalog.RELKINDS_TABLE_NO_FOREIGN,
+            scope=ObjectScope.TEMPORARY,
+        )
+
+    @reflection.cache
+    def _get_foreign_table_names(self, connection, schema=None, **kw):
+        return self._get_relnames_for_relkinds(
+            connection, schema, relkinds=("f",), scope=ObjectScope.ANY
+        )
+
+    @reflection.cache
+    def get_view_names(self, connection, schema=None, **kw):
+        return self._get_relnames_for_relkinds(
+            connection,
+            schema,
+            pg_catalog.RELKINDS_VIEW,
+            scope=ObjectScope.DEFAULT,
+        )
+
+    @reflection.cache
+    def get_materialized_view_names(self, connection, schema=None, **kw):
+        return self._get_relnames_for_relkinds(
+            connection,
+            schema,
+            pg_catalog.RELKINDS_MAT_VIEW,
+            scope=ObjectScope.DEFAULT,
+        )
+
+    @reflection.cache
+    def get_temp_view_names(self, connection, schema=None, **kw):
+        return self._get_relnames_for_relkinds(
+            connection,
+            schema,
+            # NOTE: do not include temp materialized views (they do not
+            # seem to be a thing, at least up to version 14)
+            pg_catalog.RELKINDS_VIEW,
+            scope=ObjectScope.TEMPORARY,
+        )
+
+    @reflection.cache
+    def get_sequence_names(self, connection, schema=None, **kw):
+        return self._get_relnames_for_relkinds(
+            connection, schema, relkinds=("S",), scope=ObjectScope.ANY
+        )
+
+    @reflection.cache
+    def get_view_definition(self, connection, view_name, schema=None, **kw):
+        query = (
+            select(pg_catalog.pg_get_viewdef(pg_catalog.pg_class.c.oid))
+            .select_from(pg_catalog.pg_class)
+            .where(
+                pg_catalog.pg_class.c.relname == view_name,
+                self._pg_class_relkind_condition(
+                    pg_catalog.RELKINDS_VIEW + pg_catalog.RELKINDS_MAT_VIEW
+                ),
+            )
+        )
+        query = self._pg_class_filter_scope_schema(
+            query, schema, scope=ObjectScope.ANY
+        )
+        res = connection.scalar(query)
+        if res is None:
+            raise exc.NoSuchTableError(
+                f"{schema}.{view_name}" if schema else view_name
+            )
+        else:
+            return res
+
+    def _value_or_raise(self, data, table, schema):
+        try:
+            return dict(data)[(schema, table)]
+        except KeyError:
+            raise exc.NoSuchTableError(
+                f"{schema}.{table}" if schema else table
+            ) from None
+
+    def _prepare_filter_names(self, filter_names):
+        if filter_names:
+            return True, {"filter_names": filter_names}
+        else:
+            return False, {}
+
+    def _kind_to_relkinds(self, kind: ObjectKind) -> Tuple[str, ...]:
+        if kind is ObjectKind.ANY:
+            return pg_catalog.RELKINDS_ALL_TABLE_LIKE
+        relkinds = ()
+        if ObjectKind.TABLE in kind:
+            relkinds += pg_catalog.RELKINDS_TABLE
+        if ObjectKind.VIEW in kind:
+            relkinds += pg_catalog.RELKINDS_VIEW
+        if ObjectKind.MATERIALIZED_VIEW in kind:
+            relkinds += pg_catalog.RELKINDS_MAT_VIEW
+        return relkinds
+
+    @reflection.cache
+    def get_columns(self, connection, table_name, schema=None, **kw):
+        data = self.get_multi_columns(
+            connection,
+            schema=schema,
+            filter_names=[table_name],
+            scope=ObjectScope.ANY,
+            kind=ObjectKind.ANY,
+            **kw,
+        )
+        return self._value_or_raise(data, table_name, schema)
+
+    @lru_cache()
+    def _columns_query(self, schema, has_filter_names, scope, kind):
+        # NOTE: the query with the default and identity options scalar
+        # subquery is faster than trying to use outer joins for them
+        generated = (
+            pg_catalog.pg_attribute.c.attgenerated.label("generated")
+            if self.server_version_info >= (12,)
+            else sql.null().label("generated")
+        )
+        if self.server_version_info >= (10,):
+            # join lateral performs worse (~2x slower) than a scalar_subquery
+            identity = (
+                select(
+                    sql.func.json_build_object(
+                        "always",
+                        pg_catalog.pg_attribute.c.attidentity == "a",
+                        "start",
+                        pg_catalog.pg_sequence.c.seqstart,
+                        "increment",
+                        pg_catalog.pg_sequence.c.seqincrement,
+                        "minvalue",
+                        pg_catalog.pg_sequence.c.seqmin,
+                        "maxvalue",
+                        pg_catalog.pg_sequence.c.seqmax,
+                        "cache",
+                        pg_catalog.pg_sequence.c.seqcache,
+                        "cycle",
+                        pg_catalog.pg_sequence.c.seqcycle,
+                        type_=sqltypes.JSON(),
+                    )
+                )
+                .select_from(pg_catalog.pg_sequence)
+                .where(
+                    # attidentity != '' is required or it will reflect also
+                    # serial columns as identity.
+                    pg_catalog.pg_attribute.c.attidentity != "",
+                    pg_catalog.pg_sequence.c.seqrelid
+                    == sql.cast(
+                        sql.cast(
+                            pg_catalog.pg_get_serial_sequence(
+                                sql.cast(
+                                    sql.cast(
+                                        pg_catalog.pg_attribute.c.attrelid,
+                                        REGCLASS,
+                                    ),
+                                    TEXT,
+                                ),
+                                pg_catalog.pg_attribute.c.attname,
+                            ),
+                            REGCLASS,
+                        ),
+                        OID,
+                    ),
+                )
+                .correlate(pg_catalog.pg_attribute)
+                .scalar_subquery()
+                .label("identity_options")
+            )
+        else:
+            identity = sql.null().label("identity_options")
+
+        # join lateral performs the same as scalar_subquery here
+        default = (
+            select(
+                pg_catalog.pg_get_expr(
+                    pg_catalog.pg_attrdef.c.adbin,
+                    pg_catalog.pg_attrdef.c.adrelid,
+                )
+            )
+            .select_from(pg_catalog.pg_attrdef)
+            .where(
+                pg_catalog.pg_attrdef.c.adrelid
+                == pg_catalog.pg_attribute.c.attrelid,
+                pg_catalog.pg_attrdef.c.adnum
+                == pg_catalog.pg_attribute.c.attnum,
+                pg_catalog.pg_attribute.c.atthasdef,
+            )
+            .correlate(pg_catalog.pg_attribute)
+            .scalar_subquery()
+            .label("default")
+        )
+        relkinds = self._kind_to_relkinds(kind)
+        query = (
+            select(
+                pg_catalog.pg_attribute.c.attname.label("name"),
+                pg_catalog.format_type(
+                    pg_catalog.pg_attribute.c.atttypid,
+                    pg_catalog.pg_attribute.c.atttypmod,
+                ).label("format_type"),
+                default,
+                pg_catalog.pg_attribute.c.attnotnull.label("not_null"),
+                pg_catalog.pg_class.c.relname.label("table_name"),
+                pg_catalog.pg_description.c.description.label("comment"),
+                generated,
+                identity,
+            )
+            .select_from(pg_catalog.pg_class)
+            # NOTE: postgresql supports tables with no user columns, meaning
+            # there is no row with pg_attribute.attnum > 0. use a left outer
+            # join to avoid filtering out these tables.
+            .outerjoin(
+                pg_catalog.pg_attribute,
+                sql.and_(
+                    pg_catalog.pg_class.c.oid
+                    == pg_catalog.pg_attribute.c.attrelid,
+                    pg_catalog.pg_attribute.c.attnum > 0,
+                    ~pg_catalog.pg_attribute.c.attisdropped,
+                ),
+            )
+            .outerjoin(
+                pg_catalog.pg_description,
+                sql.and_(
+                    pg_catalog.pg_description.c.objoid
+                    == pg_catalog.pg_attribute.c.attrelid,
+                    pg_catalog.pg_description.c.objsubid
+                    == pg_catalog.pg_attribute.c.attnum,
+                ),
+            )
+            .where(self._pg_class_relkind_condition(relkinds))
+            .order_by(
+                pg_catalog.pg_class.c.relname, pg_catalog.pg_attribute.c.attnum
+            )
+        )
+        query = self._pg_class_filter_scope_schema(query, schema, scope=scope)
+        if has_filter_names:
+            query = query.where(
+                pg_catalog.pg_class.c.relname.in_(bindparam("filter_names"))
+            )
+        return query
+
+    def get_multi_columns(
+        self, connection, schema, filter_names, scope, kind, **kw
+    ):
+        has_filter_names, params = self._prepare_filter_names(filter_names)
+        query = self._columns_query(schema, has_filter_names, scope, kind)
+        rows = connection.execute(query, params).mappings()
+
+        # dictionary keyed by (name, ) when the domain is in the default
+        # search path, else by (schema, name)
+        domains = {
+            ((d["schema"], d["name"]) if not d["visible"] else (d["name"],)): d
+            for d in self._load_domains(
+                connection, schema="*", info_cache=kw.get("info_cache")
+            )
+        }
+
+        # dictionary keyed by (name, ) when the enum is in the default
+        # search path, else by (schema, name)
+        enums = {
+            (
+                (rec["name"],)
+                if rec["visible"]
+                else (rec["schema"], rec["name"])
+            ): rec
+            for rec in self._load_enums(
+                connection, schema="*", info_cache=kw.get("info_cache")
+            )
+        }
+
+        columns = self._get_columns_info(rows, domains, enums, schema)
+
+        return columns.items()
+
+    _format_type_args_pattern = re.compile(r"\((.*)\)")
+    _format_type_args_delim = re.compile(r"\s*,\s*")
+    _format_array_spec_pattern = re.compile(r"((?:\[\])*)$")
+
+    def _reflect_type(
+        self,
+        format_type: Optional[str],
+        domains: dict[str, ReflectedDomain],
+        enums: dict[str, ReflectedEnum],
+        type_description: str,
+    ) -> sqltypes.TypeEngine[Any]:
+        """
+        Attempts to reconstruct a column type defined in ischema_names based
+        on the information available in the format_type.
+
+        If the `format_type` cannot be associated with a known
+        `ischema_names` entry, it is treated as a reference to a known
+        PostgreSQL named `ENUM` or `DOMAIN` type.
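+
+        A sketch of representative mappings (not exhaustive)::
+
+            "numeric(10,2)"            -> NUMERIC(10, 2)
+            "character varying(30)[]"  -> ARRAY(VARCHAR(30))
+            "timestamp with time zone" -> TIMESTAMP(timezone=True)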
+        """
+        type_description = type_description or "unknown type"
+        if format_type is None:
+            util.warn(
+                "PostgreSQL format_type() returned NULL for %s"
+                % type_description
+            )
+            return sqltypes.NULLTYPE
+
+        attype_args_match = self._format_type_args_pattern.search(format_type)
+        if attype_args_match and attype_args_match.group(1):
+            attype_args = self._format_type_args_delim.split(
+                attype_args_match.group(1)
+            )
+        else:
+            attype_args = ()
+
+        match_array_dim = self._format_array_spec_pattern.search(format_type)
+        # Each "[]" in array specs corresponds to an array dimension
+        array_dim = len(match_array_dim.group(1) or "") // 2
+
+        # Remove all parameters and array specs from format_type to obtain an
+        # ischema_name candidate
+        attype = self._format_type_args_pattern.sub("", format_type)
+        attype = self._format_array_spec_pattern.sub("", attype)
+
+        schema_type = self.ischema_names.get(attype.lower(), None)
+        args, kwargs = (), {}
+
+        if attype == "numeric":
+            if len(attype_args) == 2:
+                precision, scale = map(int, attype_args)
+                args = (precision, scale)
+
+        elif attype == "double precision":
+            args = (53,)
+
+        elif attype == "integer":
+            args = ()
+
+        elif attype in ("timestamp with time zone", "time with time zone"):
+            kwargs["timezone"] = True
+            if len(attype_args) == 1:
+                kwargs["precision"] = int(attype_args[0])
+
+        elif attype in (
+            "timestamp without time zone",
+            "time without time zone",
+            "time",
+        ):
+            kwargs["timezone"] = False
+            if len(attype_args) == 1:
+                kwargs["precision"] = int(attype_args[0])
+
+        elif attype == "bit varying":
+            kwargs["varying"] = True
+            if len(attype_args) == 1:
+                charlen = int(attype_args[0])
+                args = (charlen,)
+
+        elif attype.startswith("interval"):
+            schema_type = INTERVAL
+
+            field_match = re.match(r"interval (.+)", attype)
+            if field_match:
+                kwargs["fields"] = field_match.group(1)
+
+            if len(attype_args) == 1:
+                kwargs["precision"] = int(attype_args[0])
+
+        else:
+            enum_or_domain_key = tuple(util.quoted_token_parser(attype))
+
+            if enum_or_domain_key in enums:
+                schema_type = ENUM
+                enum = enums[enum_or_domain_key]
+
+                args = tuple(enum["labels"])
+                kwargs["name"] = enum["name"]
+
+                if not enum["visible"]:
+                    kwargs["schema"] = enum["schema"]
+            elif enum_or_domain_key in domains:
+                schema_type = DOMAIN
+                domain = domains[enum_or_domain_key]
+
+                data_type = self._reflect_type(
+                    domain["type"],
+                    domains,
+                    enums,
+                    type_description="DOMAIN '%s'" % domain["name"],
+                )
+                args = (domain["name"], data_type)
+
+                kwargs["collation"] = domain["collation"]
+                kwargs["default"] = domain["default"]
+                kwargs["not_null"] = not domain["nullable"]
+                kwargs["create_type"] = False
+
+                if domain["constraints"]:
+                    # We only support a single constraint
+                    check_constraint = domain["constraints"][0]
+
+                    kwargs["constraint_name"] = check_constraint["name"]
+                    kwargs["check"] = check_constraint["check"]
+
+                if not domain["visible"]:
+                    kwargs["schema"] = domain["schema"]
+
+            else:
+                try:
+                    charlen = int(attype_args[0])
+                    args = (charlen, *attype_args[1:])
+                except (ValueError, IndexError):
+                    args = attype_args
+
+        if not schema_type:
+            util.warn(
+                "Did not recognize type '%s' of %s"
+                % (attype, type_description)
+            )
+            return sqltypes.NULLTYPE
+
+        data_type = schema_type(*args, **kwargs)
+        if array_dim >= 1:
+            # postgres does not preserve dimensionality or size of array types.
+            data_type = _array.ARRAY(data_type)
+
+        return data_type
+
+    def _get_columns_info(self, rows, domains, enums, schema):
+        columns = defaultdict(list)
+        for row_dict in rows:
+            # ensure that each table has an entry, even if it has no columns
+            if row_dict["name"] is None:
+                columns[(schema, row_dict["table_name"])] = (
+                    ReflectionDefaults.columns()
+                )
+                continue
+            table_cols = columns[(schema, row_dict["table_name"])]
+
+            coltype = self._reflect_type(
+                row_dict["format_type"],
+                domains,
+                enums,
+                type_description="column '%s'" % row_dict["name"],
+            )
+
+            default = row_dict["default"]
+            name = row_dict["name"]
+            generated = row_dict["generated"]
+            nullable = not row_dict["not_null"]
+
+            if isinstance(coltype, DOMAIN):
+                if not default:
+                    # the domain can override the default value but
+                    # can't set it to None
+                    if coltype.default is not None:
+                        default = coltype.default
+
+                nullable = nullable and not coltype.not_null
+
+            identity = row_dict["identity_options"]
+
+            # the value is a zero byte or blank string, depending on the
+            # driver (and is absent entirely on older PG versions), when the
+            # column is not generated; otherwise "s" means stored. (Other
+            # values might be added in the future.)
+            if generated not in (None, "", b"\x00"):
+                computed = dict(
+                    sqltext=default, persisted=generated in ("s", b"s")
+                )
+                default = None
+            else:
+                computed = None
+
+            # adjust the default value
+            autoincrement = False
+            if default is not None:
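+                # a SERIAL-style default looks like
+                # "nextval('my_table_id_seq'::regclass)"; when the sequence
+                # name is unqualified and a schema was given, qualify it
+                # with the (quoted) schema name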
+                match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
+                if match is not None:
+                    if issubclass(coltype._type_affinity, sqltypes.Integer):
+                        autoincrement = True
+                    # the default is related to a Sequence
+                    if "." not in match.group(2) and schema is not None:
+                        # unconditionally quote the schema name.  this could
+                        # later be enhanced to obey quoting rules /
+                        # "quote schema"
+                        default = (
+                            match.group(1)
+                            + ('"%s"' % schema)
+                            + "."
+                            + match.group(2)
+                            + match.group(3)
+                        )
+
+            column_info = {
+                "name": name,
+                "type": coltype,
+                "nullable": nullable,
+                "default": default,
+                "autoincrement": autoincrement or identity is not None,
+                "comment": row_dict["comment"],
+            }
+            if computed is not None:
+                column_info["computed"] = computed
+            if identity is not None:
+                column_info["identity"] = identity
+
+            table_cols.append(column_info)
+
+        return columns
+
+    @lru_cache()
+    def _table_oids_query(self, schema, has_filter_names, scope, kind):
+        relkinds = self._kind_to_relkinds(kind)
+        oid_q = select(
+            pg_catalog.pg_class.c.oid, pg_catalog.pg_class.c.relname
+        ).where(self._pg_class_relkind_condition(relkinds))
+        oid_q = self._pg_class_filter_scope_schema(oid_q, schema, scope=scope)
+
+        if has_filter_names:
+            oid_q = oid_q.where(
+                pg_catalog.pg_class.c.relname.in_(bindparam("filter_names"))
+            )
+        return oid_q
+
+    @reflection.flexi_cache(
+        ("schema", InternalTraversal.dp_string),
+        ("filter_names", InternalTraversal.dp_string_list),
+        ("kind", InternalTraversal.dp_plain_obj),
+        ("scope", InternalTraversal.dp_plain_obj),
+    )
+    def _get_table_oids(
+        self, connection, schema, filter_names, scope, kind, **kw
+    ):
+        has_filter_names, params = self._prepare_filter_names(filter_names)
+        oid_q = self._table_oids_query(schema, has_filter_names, scope, kind)
+        result = connection.execute(oid_q, params)
+        return result.all()
+
+    @lru_cache()
+    def _constraint_query(self, is_unique):
+        con_sq = (
+            select(
+                pg_catalog.pg_constraint.c.conrelid,
+                pg_catalog.pg_constraint.c.conname,
+                pg_catalog.pg_constraint.c.conindid,
+                sql.func.unnest(pg_catalog.pg_constraint.c.conkey).label(
+                    "attnum"
+                ),
+                sql.func.generate_subscripts(
+                    pg_catalog.pg_constraint.c.conkey, 1
+                ).label("ord"),
+                pg_catalog.pg_description.c.description,
+            )
+            .outerjoin(
+                pg_catalog.pg_description,
+                pg_catalog.pg_description.c.objoid
+                == pg_catalog.pg_constraint.c.oid,
+            )
+            .where(
+                pg_catalog.pg_constraint.c.contype == bindparam("contype"),
+                pg_catalog.pg_constraint.c.conrelid.in_(bindparam("oids")),
+            )
+            .subquery("con")
+        )
+
+        attr_sq = (
+            select(
+                con_sq.c.conrelid,
+                con_sq.c.conname,
+                con_sq.c.conindid,
+                con_sq.c.description,
+                con_sq.c.ord,
+                pg_catalog.pg_attribute.c.attname,
+            )
+            .select_from(pg_catalog.pg_attribute)
+            .join(
+                con_sq,
+                sql.and_(
+                    pg_catalog.pg_attribute.c.attnum == con_sq.c.attnum,
+                    pg_catalog.pg_attribute.c.attrelid == con_sq.c.conrelid,
+                ),
+            )
+            .where(
+                # NOTE: restate the condition here, since pg15 otherwise
+                # seems to get confused on psycopg2 sometimes, doing
+                # a sequential scan of pg_attribute.
+                # The condition in the con_sq subquery is not actually needed
+                # in pg15, but it may be needed in older versions. Keeping it
+                # does not seem to have any impact in any case.
+                con_sq.c.conrelid.in_(bindparam("oids"))
+            )
+            .subquery("attr")
+        )
+
+        constraint_query = (
+            select(
+                attr_sq.c.conrelid,
+                sql.func.array_agg(
+                    # NOTE: cast since some postgresql derivatives may
+                    # not support array_agg on the name type
+                    aggregate_order_by(
+                        attr_sq.c.attname.cast(TEXT), attr_sq.c.ord
+                    )
+                ).label("cols"),
+                attr_sq.c.conname,
+                sql.func.min(attr_sq.c.description).label("description"),
+            )
+            .group_by(attr_sq.c.conrelid, attr_sq.c.conname)
+            .order_by(attr_sq.c.conrelid, attr_sq.c.conname)
+        )
+
+        if is_unique:
+            if self.server_version_info >= (15,):
+                constraint_query = constraint_query.join(
+                    pg_catalog.pg_index,
+                    attr_sq.c.conindid == pg_catalog.pg_index.c.indexrelid,
+                ).add_columns(
+                    sql.func.bool_and(
+                        pg_catalog.pg_index.c.indnullsnotdistinct
+                    ).label("indnullsnotdistinct")
+                )
+            else:
+                constraint_query = constraint_query.add_columns(
+                    sql.false().label("indnullsnotdistinct")
+                )
+        else:
+            constraint_query = constraint_query.add_columns(
+                sql.null().label("extra")
+            )
+        return constraint_query
+
+    def _reflect_constraint(
+        self, connection, contype, schema, filter_names, scope, kind, **kw
+    ):
+        # used to reflect primary and unique constraint
+        table_oids = self._get_table_oids(
+            connection, schema, filter_names, scope, kind, **kw
+        )
+        batches = list(table_oids)
+        is_unique = contype == "u"
+
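+        # consume the table oids in batches of 3000 so that the bound
+        # "oids" parameter list stays a manageable size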
+        while batches:
+            batch = batches[0:3000]
+            batches[0:3000] = []
+
+            result = connection.execute(
+                self._constraint_query(is_unique),
+                {"oids": [r[0] for r in batch], "contype": contype},
+            )
+
+            result_by_oid = defaultdict(list)
+            for oid, cols, constraint_name, comment, extra in result:
+                result_by_oid[oid].append(
+                    (cols, constraint_name, comment, extra)
+                )
+
+            for oid, tablename in batch:
+                for_oid = result_by_oid.get(oid, ())
+                if for_oid:
+                    for cols, constraint, comment, extra in for_oid:
+                        if is_unique:
+                            yield tablename, cols, constraint, comment, {
+                                "nullsnotdistinct": extra
+                            }
+                        else:
+                            yield tablename, cols, constraint, comment, None
+                else:
+                    yield tablename, None, None, None, None
+
+    @reflection.cache
+    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
+        data = self.get_multi_pk_constraint(
+            connection,
+            schema=schema,
+            filter_names=[table_name],
+            scope=ObjectScope.ANY,
+            kind=ObjectKind.ANY,
+            **kw,
+        )
+        return self._value_or_raise(data, table_name, schema)
+
+    def get_multi_pk_constraint(
+        self, connection, schema, filter_names, scope, kind, **kw
+    ):
+        result = self._reflect_constraint(
+            connection, "p", schema, filter_names, scope, kind, **kw
+        )
+
+        # only a single pk can be present for each table. Return an entry
+        # even if a table has no primary key
+        default = ReflectionDefaults.pk_constraint
+        return (
+            (
+                (schema, table_name),
+                (
+                    {
+                        "constrained_columns": [] if cols is None else cols,
+                        "name": pk_name,
+                        "comment": comment,
+                    }
+                    if pk_name is not None
+                    else default()
+                ),
+            )
+            for table_name, cols, pk_name, comment, _ in result
+        )
+
+    @reflection.cache
+    def get_foreign_keys(
+        self,
+        connection,
+        table_name,
+        schema=None,
+        postgresql_ignore_search_path=False,
+        **kw,
+    ):
+        data = self.get_multi_foreign_keys(
+            connection,
+            schema=schema,
+            filter_names=[table_name],
+            postgresql_ignore_search_path=postgresql_ignore_search_path,
+            scope=ObjectScope.ANY,
+            kind=ObjectKind.ANY,
+            **kw,
+        )
+        return self._value_or_raise(data, table_name, schema)
+
+    @lru_cache()
+    def _foreign_key_query(self, schema, has_filter_names, scope, kind):
+        pg_class_ref = pg_catalog.pg_class.alias("cls_ref")
+        pg_namespace_ref = pg_catalog.pg_namespace.alias("nsp_ref")
+        relkinds = self._kind_to_relkinds(kind)
+        query = (
+            select(
+                pg_catalog.pg_class.c.relname,
+                pg_catalog.pg_constraint.c.conname,
+                # NOTE: avoid calling pg_get_constraintdef when not needed
+                # to speed up the query
+                sql.case(
+                    (
+                        pg_catalog.pg_constraint.c.oid.is_not(None),
+                        pg_catalog.pg_get_constraintdef(
+                            pg_catalog.pg_constraint.c.oid, True
+                        ),
+                    ),
+                    else_=None,
+                ),
+                pg_namespace_ref.c.nspname,
+                pg_catalog.pg_description.c.description,
+            )
+            .select_from(pg_catalog.pg_class)
+            .outerjoin(
+                pg_catalog.pg_constraint,
+                sql.and_(
+                    pg_catalog.pg_class.c.oid
+                    == pg_catalog.pg_constraint.c.conrelid,
+                    pg_catalog.pg_constraint.c.contype == "f",
+                ),
+            )
+            .outerjoin(
+                pg_class_ref,
+                pg_class_ref.c.oid == pg_catalog.pg_constraint.c.confrelid,
+            )
+            .outerjoin(
+                pg_namespace_ref,
+                pg_class_ref.c.relnamespace == pg_namespace_ref.c.oid,
+            )
+            .outerjoin(
+                pg_catalog.pg_description,
+                pg_catalog.pg_description.c.objoid
+                == pg_catalog.pg_constraint.c.oid,
+            )
+            .order_by(
+                pg_catalog.pg_class.c.relname,
+                pg_catalog.pg_constraint.c.conname,
+            )
+            .where(self._pg_class_relkind_condition(relkinds))
+        )
+        query = self._pg_class_filter_scope_schema(query, schema, scope)
+        if has_filter_names:
+            query = query.where(
+                pg_catalog.pg_class.c.relname.in_(bindparam("filter_names"))
+            )
+        return query
+
+    @util.memoized_property
+    def _fk_regex_pattern(self):
+        # optionally quoted token
+        qtoken = '(?:"[^"]+"|[A-Za-z0-9_]+?)'
+
+        # https://www.postgresql.org/docs/current/static/sql-createtable.html
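+        # matches FOREIGN KEY clauses as rendered by
+        # pg_get_constraintdef(), e.g. (a representative, hypothetical
+        # single-line sample):
+        #   FOREIGN KEY (a, b) REFERENCES ref_schema."Ref"(x, y)
+        #   ON UPDATE CASCADE ON DELETE SET NULL DEFERRABLE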
+        return re.compile(
+            r"FOREIGN KEY \((.*?)\) "
+            rf"REFERENCES (?:({qtoken})\.)?({qtoken})\(((?:{qtoken}(?: *, *)?)+)\)"  # noqa: E501
+            r"[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?"
+            r"[\s]?(ON UPDATE "
+            r"(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?"
+            r"[\s]?(ON DELETE "
+            r"(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?"
+            r"[\s]?(DEFERRABLE|NOT DEFERRABLE)?"
+            r"[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?"
+        )
+
+    def get_multi_foreign_keys(
+        self,
+        connection,
+        schema,
+        filter_names,
+        scope,
+        kind,
+        postgresql_ignore_search_path=False,
+        **kw,
+    ):
+        preparer = self.identifier_preparer
+
+        has_filter_names, params = self._prepare_filter_names(filter_names)
+        query = self._foreign_key_query(schema, has_filter_names, scope, kind)
+        result = connection.execute(query, params)
+
+        FK_REGEX = self._fk_regex_pattern
+
+        fkeys = defaultdict(list)
+        default = ReflectionDefaults.foreign_keys
+        for table_name, conname, condef, conschema, comment in result:
+            # ensure that each table has an entry, even if it has
+            # no foreign keys
+            if conname is None:
+                fkeys[(schema, table_name)] = default()
+                continue
+            table_fks = fkeys[(schema, table_name)]
+            m = re.search(FK_REGEX, condef).groups()
+
+            (
+                constrained_columns,
+                referred_schema,
+                referred_table,
+                referred_columns,
+                _,
+                match,
+                _,
+                onupdate,
+                _,
+                ondelete,
+                deferrable,
+                _,
+                initially,
+            ) = m
+
+            if deferrable is not None:
+                deferrable = deferrable == "DEFERRABLE"
+            constrained_columns = [
+                preparer._unquote_identifier(x)
+                for x in re.split(r"\s*,\s*", constrained_columns)
+            ]
+
+            if postgresql_ignore_search_path:
+                # when ignoring search path, we use the actual schema
+                # provided it isn't the "default" schema
+                if conschema != self.default_schema_name:
+                    referred_schema = conschema
+                else:
+                    referred_schema = schema
+            elif referred_schema:
+                # referred_schema is the schema that we regexp'ed from
+                # pg_get_constraintdef().  If the schema is in the search
+                # path, pg_get_constraintdef() will give us None.
+                referred_schema = preparer._unquote_identifier(referred_schema)
+            elif schema is not None and schema == conschema:
+                # If the actual schema matches the schema of the table
+                # we're reflecting, then we will use that.
+                referred_schema = schema
+
+            referred_table = preparer._unquote_identifier(referred_table)
+            referred_columns = [
+                preparer._unquote_identifier(x)
+                for x in re.split(r"\s*,\s*", referred_columns)
+            ]
+            options = {
+                k: v
+                for k, v in [
+                    ("onupdate", onupdate),
+                    ("ondelete", ondelete),
+                    ("initially", initially),
+                    ("deferrable", deferrable),
+                    ("match", match),
+                ]
+                if v is not None and v != "NO ACTION"
+            }
+            fkey_d = {
+                "name": conname,
+                "constrained_columns": constrained_columns,
+                "referred_schema": referred_schema,
+                "referred_table": referred_table,
+                "referred_columns": referred_columns,
+                "options": options,
+                "comment": comment,
+            }
+            table_fks.append(fkey_d)
+        return fkeys.items()
+
+    @reflection.cache
+    def get_indexes(self, connection, table_name, schema=None, **kw):
+        data = self.get_multi_indexes(
+            connection,
+            schema=schema,
+            filter_names=[table_name],
+            scope=ObjectScope.ANY,
+            kind=ObjectKind.ANY,
+            **kw,
+        )
+        return self._value_or_raise(data, table_name, schema)
+
+    @util.memoized_property
+    def _index_query(self):
+        pg_class_index = pg_catalog.pg_class.alias("cls_idx")
+        # NOTE: repeating the oids clause improves query performance
+
+        # subquery to get the columns
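+        # unnest() expands indkey (an int2vector of column numbers) into
+        # one row per indexed column, while generate_subscripts() pairs
+        # each attnum with its 1-based position within the index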
+        idx_sq = (
+            select(
+                pg_catalog.pg_index.c.indexrelid,
+                pg_catalog.pg_index.c.indrelid,
+                sql.func.unnest(pg_catalog.pg_index.c.indkey).label("attnum"),
+                sql.func.generate_subscripts(
+                    pg_catalog.pg_index.c.indkey, 1
+                ).label("ord"),
+            )
+            .where(
+                ~pg_catalog.pg_index.c.indisprimary,
+                pg_catalog.pg_index.c.indrelid.in_(bindparam("oids")),
+            )
+            .subquery("idx")
+        )
+
+        attr_sq = (
+            select(
+                idx_sq.c.indexrelid,
+                idx_sq.c.indrelid,
+                idx_sq.c.ord,
+                # NOTE: always using pg_get_indexdef is too slow, so only
+                # invoke it when the element is an expression
+                sql.case(
+                    (
+                        idx_sq.c.attnum == 0,
+                        pg_catalog.pg_get_indexdef(
+                            idx_sq.c.indexrelid, idx_sq.c.ord + 1, True
+                        ),
+                    ),
+                    # NOTE: need to cast this since attname is of type "name",
+                    # which is limited to 63 bytes, while pg_get_indexdef
+                    # returns "text", so its output may get truncated
+                    else_=pg_catalog.pg_attribute.c.attname.cast(TEXT),
+                ).label("element"),
+                (idx_sq.c.attnum == 0).label("is_expr"),
+            )
+            .select_from(idx_sq)
+            .outerjoin(
+                # do not remove rows where idx_sq.c.attnum is 0
+                pg_catalog.pg_attribute,
+                sql.and_(
+                    pg_catalog.pg_attribute.c.attnum == idx_sq.c.attnum,
+                    pg_catalog.pg_attribute.c.attrelid == idx_sq.c.indrelid,
+                ),
+            )
+            .where(idx_sq.c.indrelid.in_(bindparam("oids")))
+            .subquery("idx_attr")
+        )
+
+        cols_sq = (
+            select(
+                attr_sq.c.indexrelid,
+                sql.func.min(attr_sq.c.indrelid),
+                sql.func.array_agg(
+                    aggregate_order_by(attr_sq.c.element, attr_sq.c.ord)
+                ).label("elements"),
+                sql.func.array_agg(
+                    aggregate_order_by(attr_sq.c.is_expr, attr_sq.c.ord)
+                ).label("elements_is_expr"),
+            )
+            .group_by(attr_sq.c.indexrelid)
+            .subquery("idx_cols")
+        )
+
+        if self.server_version_info >= (11, 0):
+            indnkeyatts = pg_catalog.pg_index.c.indnkeyatts
+        else:
+            indnkeyatts = sql.null().label("indnkeyatts")
+
+        if self.server_version_info >= (15,):
+            nulls_not_distinct = pg_catalog.pg_index.c.indnullsnotdistinct
+        else:
+            nulls_not_distinct = sql.false().label("indnullsnotdistinct")
+
+        return (
+            select(
+                pg_catalog.pg_index.c.indrelid,
+                pg_class_index.c.relname.label("relname_index"),
+                pg_catalog.pg_index.c.indisunique,
+                pg_catalog.pg_constraint.c.conrelid.is_not(None).label(
+                    "has_constraint"
+                ),
+                pg_catalog.pg_index.c.indoption,
+                pg_class_index.c.reloptions,
+                pg_catalog.pg_am.c.amname,
+                # NOTE: pg_get_expr is very fast so this case has almost no
+                # performance impact
+                sql.case(
+                    (
+                        pg_catalog.pg_index.c.indpred.is_not(None),
+                        pg_catalog.pg_get_expr(
+                            pg_catalog.pg_index.c.indpred,
+                            pg_catalog.pg_index.c.indrelid,
+                        ),
+                    ),
+                    else_=None,
+                ).label("filter_definition"),
+                indnkeyatts,
+                nulls_not_distinct,
+                cols_sq.c.elements,
+                cols_sq.c.elements_is_expr,
+            )
+            .select_from(pg_catalog.pg_index)
+            .where(
+                pg_catalog.pg_index.c.indrelid.in_(bindparam("oids")),
+                ~pg_catalog.pg_index.c.indisprimary,
+            )
+            .join(
+                pg_class_index,
+                pg_catalog.pg_index.c.indexrelid == pg_class_index.c.oid,
+            )
+            .join(
+                pg_catalog.pg_am,
+                pg_class_index.c.relam == pg_catalog.pg_am.c.oid,
+            )
+            .outerjoin(
+                cols_sq,
+                pg_catalog.pg_index.c.indexrelid == cols_sq.c.indexrelid,
+            )
+            .outerjoin(
+                pg_catalog.pg_constraint,
+                sql.and_(
+                    pg_catalog.pg_index.c.indrelid
+                    == pg_catalog.pg_constraint.c.conrelid,
+                    pg_catalog.pg_index.c.indexrelid
+                    == pg_catalog.pg_constraint.c.conindid,
+                    pg_catalog.pg_constraint.c.contype
+                    == sql.any_(_array.array(("p", "u", "x"))),
+                ),
+            )
+            .order_by(pg_catalog.pg_index.c.indrelid, pg_class_index.c.relname)
+        )
+
+    def get_multi_indexes(
+        self, connection, schema, filter_names, scope, kind, **kw
+    ):
+        table_oids = self._get_table_oids(
+            connection, schema, filter_names, scope, kind, **kw
+        )
+
+        indexes = defaultdict(list)
+        default = ReflectionDefaults.indexes
+
+        batches = list(table_oids)
+
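+        # consume the oid list in chunks of 3000 to bound the size of the
+        # "oids" bind parameter array sent with each query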
+        while batches:
+            batch = batches[0:3000]
+            batches[0:3000] = []
+
+            result = connection.execute(
+                self._index_query, {"oids": [r[0] for r in batch]}
+            ).mappings()
+
+            result_by_oid = defaultdict(list)
+            for row_dict in result:
+                result_by_oid[row_dict["indrelid"]].append(row_dict)
+
+            for oid, table_name in batch:
+                if oid not in result_by_oid:
+                    # ensure that each table has an entry, even if reflection
+                    # is skipped because it is not supported
+                    indexes[(schema, table_name)] = default()
+                    continue
+
+                for row in result_by_oid[oid]:
+                    index_name = row["relname_index"]
+
+                    table_indexes = indexes[(schema, table_name)]
+
+                    all_elements = row["elements"]
+                    all_elements_is_expr = row["elements_is_expr"]
+                    indnkeyatts = row["indnkeyatts"]
+                    # "The number of key columns in the index, not counting any
+                    # included columns, which are merely stored and do not
+                    # participate in the index semantics"
+                    if indnkeyatts and len(all_elements) > indnkeyatts:
+                        # this is a "covering index" which has INCLUDE columns
+                        # as well as regular index columns
+                        inc_cols = all_elements[indnkeyatts:]
+                        idx_elements = all_elements[:indnkeyatts]
+                        idx_elements_is_expr = all_elements_is_expr[
+                            :indnkeyatts
+                        ]
+                        # postgresql does not support expression on included
+                        # columns as of v14: "ERROR: expressions are not
+                        # supported in included columns".
+                        assert all(
+                            not is_expr
+                            for is_expr in all_elements_is_expr[indnkeyatts:]
+                        )
+                    else:
+                        idx_elements = all_elements
+                        idx_elements_is_expr = all_elements_is_expr
+                        inc_cols = []
+
+                    index = {"name": index_name, "unique": row["indisunique"]}
+                    if any(idx_elements_is_expr):
+                        index["column_names"] = [
+                            None if is_expr else expr
+                            for expr, is_expr in zip(
+                                idx_elements, idx_elements_is_expr
+                            )
+                        ]
+                        index["expressions"] = idx_elements
+                    else:
+                        index["column_names"] = idx_elements
+
+                    sorting = {}
+                    for col_index, col_flags in enumerate(row["indoption"]):
+                        col_sorting = ()
+                        # try to set flags only if they differ from PG
+                        # defaults...
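+                        # (indoption bit 0x01 = DESC, bit 0x02 = NULLS FIRST)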
+                        if col_flags & 0x01:
+                            col_sorting += ("desc",)
+                            if not (col_flags & 0x02):
+                                col_sorting += ("nulls_last",)
+                        else:
+                            if col_flags & 0x02:
+                                col_sorting += ("nulls_first",)
+                        if col_sorting:
+                            sorting[idx_elements[col_index]] = col_sorting
+                    if sorting:
+                        index["column_sorting"] = sorting
+                    if row["has_constraint"]:
+                        index["duplicates_constraint"] = index_name
+
+                    dialect_options = {}
+                    if row["reloptions"]:
+                        dialect_options["postgresql_with"] = dict(
+                            [
+                                option.split("=", 1)
+                                for option in row["reloptions"]
+                            ]
+                        )
+                    # it *might* be nice to include that this is 'btree' in the
+                    # reflection info.  But we don't want an Index object
+                    # to have a ``postgresql_using`` in it that is just the
+                    # default, so for the moment leaving this out.
+                    amname = row["amname"]
+                    if amname != "btree":
+                        dialect_options["postgresql_using"] = row["amname"]
+                    if row["filter_definition"]:
+                        dialect_options["postgresql_where"] = row[
+                            "filter_definition"
+                        ]
+                    if self.server_version_info >= (11,):
+                        # NOTE: this is legacy, this is part of
+                        # dialect_options now as of #7382
+                        index["include_columns"] = inc_cols
+                        dialect_options["postgresql_include"] = inc_cols
+                    if row["indnullsnotdistinct"]:
+                        # the default is False, so only include it when True
+                        dialect_options["postgresql_nulls_not_distinct"] = row[
+                            "indnullsnotdistinct"
+                        ]
+
+                    if dialect_options:
+                        index["dialect_options"] = dialect_options
+
+                    table_indexes.append(index)
+        return indexes.items()
+
+    @reflection.cache
+    def get_unique_constraints(
+        self, connection, table_name, schema=None, **kw
+    ):
+        data = self.get_multi_unique_constraints(
+            connection,
+            schema=schema,
+            filter_names=[table_name],
+            scope=ObjectScope.ANY,
+            kind=ObjectKind.ANY,
+            **kw,
+        )
+        return self._value_or_raise(data, table_name, schema)
+
+    def get_multi_unique_constraints(
+        self,
+        connection,
+        schema,
+        filter_names,
+        scope,
+        kind,
+        **kw,
+    ):
+        result = self._reflect_constraint(
+            connection, "u", schema, filter_names, scope, kind, **kw
+        )
+
+        # each table can have multiple unique constraints
+        uniques = defaultdict(list)
+        default = ReflectionDefaults.unique_constraints
+        for table_name, cols, con_name, comment, options in result:
+            # ensure a list is created for each table; leave it empty if
+            # the table has no unique constraint
+            if con_name is None:
+                uniques[(schema, table_name)] = default()
+                continue
+
+            uc_dict = {
+                "column_names": cols,
+                "name": con_name,
+                "comment": comment,
+            }
+            if options:
+                if options["nullsnotdistinct"]:
+                    uc_dict["dialect_options"] = {
+                        "postgresql_nulls_not_distinct": options[
+                            "nullsnotdistinct"
+                        ]
+                    }
+
+            uniques[(schema, table_name)].append(uc_dict)
+        return uniques.items()
+
+    @reflection.cache
+    def get_table_comment(self, connection, table_name, schema=None, **kw):
+        data = self.get_multi_table_comment(
+            connection,
+            schema,
+            [table_name],
+            scope=ObjectScope.ANY,
+            kind=ObjectKind.ANY,
+            **kw,
+        )
+        return self._value_or_raise(data, table_name, schema)
+
+    @lru_cache()
+    def _comment_query(self, schema, has_filter_names, scope, kind):
+        relkinds = self._kind_to_relkinds(kind)
+        query = (
+            select(
+                pg_catalog.pg_class.c.relname,
+                pg_catalog.pg_description.c.description,
+            )
+            .select_from(pg_catalog.pg_class)
+            .outerjoin(
+                pg_catalog.pg_description,
+                sql.and_(
+                    pg_catalog.pg_class.c.oid
+                    == pg_catalog.pg_description.c.objoid,
+                    pg_catalog.pg_description.c.objsubid == 0,
+                    pg_catalog.pg_description.c.classoid
+                    == sql.func.cast("pg_catalog.pg_class", REGCLASS),
+                ),
+            )
+            .where(self._pg_class_relkind_condition(relkinds))
+        )
+        query = self._pg_class_filter_scope_schema(query, schema, scope)
+        if has_filter_names:
+            query = query.where(
+                pg_catalog.pg_class.c.relname.in_(bindparam("filter_names"))
+            )
+        return query
+
+    def get_multi_table_comment(
+        self, connection, schema, filter_names, scope, kind, **kw
+    ):
+        has_filter_names, params = self._prepare_filter_names(filter_names)
+        query = self._comment_query(schema, has_filter_names, scope, kind)
+        result = connection.execute(query, params)
+
+        default = ReflectionDefaults.table_comment
+        return (
+            (
+                (schema, table),
+                {"text": comment} if comment is not None else default(),
+            )
+            for table, comment in result
+        )
+
+    @reflection.cache
+    def get_check_constraints(self, connection, table_name, schema=None, **kw):
+        data = self.get_multi_check_constraints(
+            connection,
+            schema,
+            [table_name],
+            scope=ObjectScope.ANY,
+            kind=ObjectKind.ANY,
+            **kw,
+        )
+        return self._value_or_raise(data, table_name, schema)
+
+    @lru_cache()
+    def _check_constraint_query(self, schema, has_filter_names, scope, kind):
+        relkinds = self._kind_to_relkinds(kind)
+        query = (
+            select(
+                pg_catalog.pg_class.c.relname,
+                pg_catalog.pg_constraint.c.conname,
+                # NOTE: avoid calling pg_get_constraintdef when not needed
+                # to speed up the query
+                sql.case(
+                    (
+                        pg_catalog.pg_constraint.c.oid.is_not(None),
+                        pg_catalog.pg_get_constraintdef(
+                            pg_catalog.pg_constraint.c.oid, True
+                        ),
+                    ),
+                    else_=None,
+                ),
+                pg_catalog.pg_description.c.description,
+            )
+            .select_from(pg_catalog.pg_class)
+            .outerjoin(
+                pg_catalog.pg_constraint,
+                sql.and_(
+                    pg_catalog.pg_class.c.oid
+                    == pg_catalog.pg_constraint.c.conrelid,
+                    pg_catalog.pg_constraint.c.contype == "c",
+                ),
+            )
+            .outerjoin(
+                pg_catalog.pg_description,
+                pg_catalog.pg_description.c.objoid
+                == pg_catalog.pg_constraint.c.oid,
+            )
+            .order_by(
+                pg_catalog.pg_class.c.relname,
+                pg_catalog.pg_constraint.c.conname,
+            )
+            .where(self._pg_class_relkind_condition(relkinds))
+        )
+        query = self._pg_class_filter_scope_schema(query, schema, scope)
+        if has_filter_names:
+            query = query.where(
+                pg_catalog.pg_class.c.relname.in_(bindparam("filter_names"))
+            )
+        return query
+
+    def get_multi_check_constraints(
+        self, connection, schema, filter_names, scope, kind, **kw
+    ):
+        has_filter_names, params = self._prepare_filter_names(filter_names)
+        query = self._check_constraint_query(
+            schema, has_filter_names, scope, kind
+        )
+        result = connection.execute(query, params)
+
+        check_constraints = defaultdict(list)
+        default = ReflectionDefaults.check_constraints
+        for table_name, check_name, src, comment in result:
+            # only two cases for check_name and src: both null or both defined
+            if check_name is None and src is None:
+                check_constraints[(schema, table_name)] = default()
+                continue
+            # samples:
+            # "CHECK (((a > 1) AND (a < 5)))"
+            # "CHECK (((a = 1) OR ((a > 2) AND (a < 5))))"
+            # "CHECK (((a > 1) AND (a < 5))) NOT VALID"
+            # "CHECK (some_boolean_function(a))"
+            # "CHECK (((a\n < 1)\n OR\n (a\n >= 5))\n)"
+            # "CHECK (a NOT NULL) NO INHERIT"
+            # "CHECK (a NOT NULL) NO INHERIT NOT VALID"
+
+            m = re.match(
+                r"^CHECK *\((.+)\)( NO INHERIT)?( NOT VALID)?$",
+                src,
+                flags=re.DOTALL,
+            )
+            if not m:
+                util.warn("Could not parse CHECK constraint text: %r" % src)
+                sqltext = ""
+            else:
+                sqltext = re.compile(
+                    r"^[\s\n]*\((.+)\)[\s\n]*$", flags=re.DOTALL
+                ).sub(r"\1", m.group(1))
+            entry = {
+                "name": check_name,
+                "sqltext": sqltext,
+                "comment": comment,
+            }
+            if m:
+                do = {}
+                if " NOT VALID" in m.groups():
+                    do["not_valid"] = True
+                if " NO INHERIT" in m.groups():
+                    do["no_inherit"] = True
+                if do:
+                    entry["dialect_options"] = do
+
+            check_constraints[(schema, table_name)].append(entry)
+        return check_constraints.items()
+
+    def _pg_type_filter_schema(self, query, schema):
+        if schema is None:
+            query = query.where(
+                pg_catalog.pg_type_is_visible(pg_catalog.pg_type.c.oid),
+                # ignore pg_catalog schema
+                pg_catalog.pg_namespace.c.nspname != "pg_catalog",
+            )
+        elif schema != "*":
+            query = query.where(pg_catalog.pg_namespace.c.nspname == schema)
+        return query
+
+    @lru_cache()
+    def _enum_query(self, schema):
+        lbl_agg_sq = (
+            select(
+                pg_catalog.pg_enum.c.enumtypid,
+                sql.func.array_agg(
+                    aggregate_order_by(
+                        # NOTE: cast since some postgresql derivatives may
+                        # not support array_agg on the name type
+                        pg_catalog.pg_enum.c.enumlabel.cast(TEXT),
+                        pg_catalog.pg_enum.c.enumsortorder,
+                    )
+                ).label("labels"),
+            )
+            .group_by(pg_catalog.pg_enum.c.enumtypid)
+            .subquery("lbl_agg")
+        )
+
+        query = (
+            select(
+                pg_catalog.pg_type.c.typname.label("name"),
+                pg_catalog.pg_type_is_visible(pg_catalog.pg_type.c.oid).label(
+                    "visible"
+                ),
+                pg_catalog.pg_namespace.c.nspname.label("schema"),
+                lbl_agg_sq.c.labels.label("labels"),
+            )
+            .join(
+                pg_catalog.pg_namespace,
+                pg_catalog.pg_namespace.c.oid
+                == pg_catalog.pg_type.c.typnamespace,
+            )
+            .outerjoin(
+                lbl_agg_sq, pg_catalog.pg_type.c.oid == lbl_agg_sq.c.enumtypid
+            )
+            .where(pg_catalog.pg_type.c.typtype == "e")
+            .order_by(
+                pg_catalog.pg_namespace.c.nspname, pg_catalog.pg_type.c.typname
+            )
+        )
+
+        return self._pg_type_filter_schema(query, schema)
+
+    @reflection.cache
+    def _load_enums(self, connection, schema=None, **kw):
+        if not self.supports_native_enum:
+            return []
+
+        result = connection.execute(self._enum_query(schema))
+
+        enums = []
+        for name, visible, schema, labels in result:
+            enums.append(
+                {
+                    "name": name,
+                    "schema": schema,
+                    "visible": visible,
+                    "labels": [] if labels is None else labels,
+                }
+            )
+        return enums
+
+    @lru_cache()
+    def _domain_query(self, schema):
+        con_sq = (
+            select(
+                pg_catalog.pg_constraint.c.contypid,
+                sql.func.array_agg(
+                    pg_catalog.pg_get_constraintdef(
+                        pg_catalog.pg_constraint.c.oid, True
+                    )
+                ).label("condefs"),
+                sql.func.array_agg(
+                    # NOTE: cast since some postgresql derivatives may
+                    # not support array_agg on the name type
+                    pg_catalog.pg_constraint.c.conname.cast(TEXT)
+                ).label("connames"),
+            )
+            # The domain this constraint is on; zero if not a domain constraint
+            .where(pg_catalog.pg_constraint.c.contypid != 0)
+            .group_by(pg_catalog.pg_constraint.c.contypid)
+            .subquery("domain_constraints")
+        )
+
+        query = (
+            select(
+                pg_catalog.pg_type.c.typname.label("name"),
+                pg_catalog.format_type(
+                    pg_catalog.pg_type.c.typbasetype,
+                    pg_catalog.pg_type.c.typtypmod,
+                ).label("attype"),
+                (~pg_catalog.pg_type.c.typnotnull).label("nullable"),
+                pg_catalog.pg_type.c.typdefault.label("default"),
+                pg_catalog.pg_type_is_visible(pg_catalog.pg_type.c.oid).label(
+                    "visible"
+                ),
+                pg_catalog.pg_namespace.c.nspname.label("schema"),
+                con_sq.c.condefs,
+                con_sq.c.connames,
+                pg_catalog.pg_collation.c.collname,
+            )
+            .join(
+                pg_catalog.pg_namespace,
+                pg_catalog.pg_namespace.c.oid
+                == pg_catalog.pg_type.c.typnamespace,
+            )
+            .outerjoin(
+                pg_catalog.pg_collation,
+                pg_catalog.pg_type.c.typcollation
+                == pg_catalog.pg_collation.c.oid,
+            )
+            .outerjoin(
+                con_sq,
+                pg_catalog.pg_type.c.oid == con_sq.c.contypid,
+            )
+            .where(pg_catalog.pg_type.c.typtype == "d")
+            .order_by(
+                pg_catalog.pg_namespace.c.nspname, pg_catalog.pg_type.c.typname
+            )
+        )
+        return self._pg_type_filter_schema(query, schema)
+
+    @reflection.cache
+    def _load_domains(self, connection, schema=None, **kw):
+        result = connection.execute(self._domain_query(schema))
+
+        domains: List[ReflectedDomain] = []
+        for domain in result.mappings():
+            # strip (30) from character varying(30)
+            attype = re.search(r"([^\(]+)", domain["attype"]).group(1)
+            constraints: List[ReflectedDomainConstraint] = []
+            if domain["connames"]:
+                # When a domain has multiple CHECK constraints, they will
+                # be tested in alphabetical order by name.
+                sorted_constraints = sorted(
+                    zip(domain["connames"], domain["condefs"]),
+                    key=lambda t: t[0],
+                )
+                for name, def_ in sorted_constraints:
+                    # constraint is in the form "CHECK (expression)"
+                    # or "NOT NULL". Ignore the "NOT NULL" and
+                    # remove "CHECK (" and the tailing ")".
+                    if def_.casefold().startswith("check"):
+                        check = def_[7:-1]
+                        constraints.append({"name": name, "check": check})
+            domain_rec: ReflectedDomain = {
+                "name": domain["name"],
+                "schema": domain["schema"],
+                "visible": domain["visible"],
+                "type": attype,
+                "nullable": domain["nullable"],
+                "default": domain["default"],
+                "constraints": constraints,
+                "collation": domain["collname"],
+            }
+            domains.append(domain_rec)
+
+        return domains
+
+    def _set_backslash_escapes(self, connection):
+        # this method is provided as an override hook for descendant
+        # dialects (e.g. Redshift), so removing it may break them
+        std_string = connection.exec_driver_sql(
+            "show standard_conforming_strings"
+        ).scalar()
+        self._backslash_escapes = std_string == "off"
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/dml.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/dml.py
new file mode 100644
index 00000000..1187b6bf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/dml.py
@@ -0,0 +1,339 @@
+# dialects/postgresql/dml.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+from __future__ import annotations
+
+from typing import Any
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+from . import ext
+from .._typing import _OnConflictConstraintT
+from .._typing import _OnConflictIndexElementsT
+from .._typing import _OnConflictIndexWhereT
+from .._typing import _OnConflictSetT
+from .._typing import _OnConflictWhereT
+from ... import util
+from ...sql import coercions
+from ...sql import roles
+from ...sql import schema
+from ...sql._typing import _DMLTableArgument
+from ...sql.base import _exclusive_against
+from ...sql.base import _generative
+from ...sql.base import ColumnCollection
+from ...sql.base import ReadOnlyColumnCollection
+from ...sql.dml import Insert as StandardInsert
+from ...sql.elements import ClauseElement
+from ...sql.elements import ColumnElement
+from ...sql.elements import KeyedColumnElement
+from ...sql.elements import TextClause
+from ...sql.expression import alias
+from ...util.typing import Self
+
+
+__all__ = ("Insert", "insert")
+
+
+def insert(table: _DMLTableArgument) -> Insert:
+    """Construct a PostgreSQL-specific variant :class:`_postgresql.Insert`
+    construct.
+
+    .. container:: inherited_member
+
+        The :func:`sqlalchemy.dialects.postgresql.insert` function creates
+        a :class:`sqlalchemy.dialects.postgresql.Insert`.  This class is based
+        on the dialect-agnostic :class:`_sql.Insert` construct which may
+        be constructed using the :func:`_sql.insert` function in
+        SQLAlchemy Core.
+
+    The :class:`_postgresql.Insert` construct includes additional methods
+    :meth:`_postgresql.Insert.on_conflict_do_update`,
+    :meth:`_postgresql.Insert.on_conflict_do_nothing`.
+
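+    A minimal usage sketch, assuming a hypothetical ``my_table`` with a
+    unique ``id`` column::
+
+        from sqlalchemy.dialects.postgresql import insert
+
+        stmt = insert(my_table).values(id=1, data="some data")
+        stmt = stmt.on_conflict_do_nothing(index_elements=["id"])
+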
+    """
+    return Insert(table)
+
+
+class Insert(StandardInsert):
+    """PostgreSQL-specific implementation of INSERT.
+
+    Adds methods for PG-specific syntaxes such as ON CONFLICT.
+
+    The :class:`_postgresql.Insert` object is created using the
+    :func:`sqlalchemy.dialects.postgresql.insert` function.
+
+    """
+
+    stringify_dialect = "postgresql"
+    inherit_cache = False
+
+    @util.memoized_property
+    def excluded(
+        self,
+    ) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]:
+        """Provide the ``excluded`` namespace for an ON CONFLICT statement
+
+        PG's ON CONFLICT clause allows reference to the row that would
+        be inserted, known as ``excluded``.  This attribute provides
+        all columns in this row to be referenceable.
+
+        .. tip::  The :attr:`_postgresql.Insert.excluded` attribute is an
+            instance of :class:`_expression.ColumnCollection`, which provides
+            an interface the same as that of the :attr:`_schema.Table.c`
+            collection described at :ref:`metadata_tables_and_columns`.
+            With this collection, ordinary names are accessible like attributes
+            (e.g. ``stmt.excluded.some_column``), but special names and
+            dictionary method names should be accessed using indexed access,
+            such as ``stmt.excluded["column name"]`` or
+            ``stmt.excluded["values"]``.   See the docstring for
+            :class:`_expression.ColumnCollection` for further examples.
+
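+        A minimal sketch, assuming a hypothetical ``my_table``, referring
+        to the would-be-inserted value of ``data`` in a DO UPDATE clause::
+
+            stmt = insert(my_table).values(id=1, data="new data")
+            stmt = stmt.on_conflict_do_update(
+                index_elements=["id"],
+                set_={"data": stmt.excluded.data},
+            )
+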
+        .. seealso::
+
+            :ref:`postgresql_insert_on_conflict` - example of how
+            to use :attr:`_expression.Insert.excluded`
+
+        """
+        return alias(self.table, name="excluded").columns
+
+    _on_conflict_exclusive = _exclusive_against(
+        "_post_values_clause",
+        msgs={
+            "_post_values_clause": "This Insert construct already has "
+            "an ON CONFLICT clause established"
+        },
+    )
+
+    @_generative
+    @_on_conflict_exclusive
+    def on_conflict_do_update(
+        self,
+        constraint: _OnConflictConstraintT = None,
+        index_elements: _OnConflictIndexElementsT = None,
+        index_where: _OnConflictIndexWhereT = None,
+        set_: _OnConflictSetT = None,
+        where: _OnConflictWhereT = None,
+    ) -> Self:
+        r"""
+        Specifies a DO UPDATE SET action for the ON CONFLICT clause.
+
+        Either the ``constraint`` or ``index_elements`` argument is
+        required, but only one of these can be specified.
+
+        :param constraint:
+         The name of a unique or exclusion constraint on the table,
+         or the constraint object itself if it has a .name attribute.
+
+        :param index_elements:
+         A sequence consisting of string column names, :class:`_schema.Column`
+         objects, or other column expression objects that will be used
+         to infer a target index.
+
+        :param index_where:
+         Additional WHERE criterion that can be used to infer a
+         conditional target index.
+
+        :param set\_:
+         A dictionary or other mapping object
+         where the keys are either names of columns in the target table,
+         or :class:`_schema.Column` objects or other ORM-mapped columns
+         matching that of the target table, and expressions or literals
+         as values, specifying the ``SET`` actions to take.
+
+         .. versionadded:: 1.4 The
+            :paramref:`_postgresql.Insert.on_conflict_do_update.set_`
+            parameter supports :class:`_schema.Column` objects from the target
+            :class:`_schema.Table` as keys.
+
+         .. warning:: This dictionary does **not** take into account
+            Python-specified default UPDATE values or generation functions,
+            e.g. those specified using :paramref:`_schema.Column.onupdate`.
+            These values will not be exercised for an ON CONFLICT style of
+            UPDATE, unless they are manually specified in the
+            :paramref:`.Insert.on_conflict_do_update.set_` dictionary.
+
+        :param where:
+         Optional argument. An expression object representing a ``WHERE``
+         clause that restricts the rows affected by ``DO UPDATE SET``. Rows not
+         meeting the ``WHERE`` condition will not be updated (effectively a
+         ``DO NOTHING`` for those rows).
+
+
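+        A minimal sketch, assuming a hypothetical ``my_table`` with a
+        unique ``id`` column::
+
+            stmt = insert(my_table).values(id=1, data="new data")
+            stmt = stmt.on_conflict_do_update(
+                index_elements=[my_table.c.id],
+                set_={"data": stmt.excluded.data},
+                where=(my_table.c.data != stmt.excluded.data),
+            )
+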
+        .. seealso::
+
+            :ref:`postgresql_insert_on_conflict`
+
+        """
+        self._post_values_clause = OnConflictDoUpdate(
+            constraint, index_elements, index_where, set_, where
+        )
+        return self
+
+    @_generative
+    @_on_conflict_exclusive
+    def on_conflict_do_nothing(
+        self,
+        constraint: _OnConflictConstraintT = None,
+        index_elements: _OnConflictIndexElementsT = None,
+        index_where: _OnConflictIndexWhereT = None,
+    ) -> Self:
+        """
+        Specifies a DO NOTHING action for the ON CONFLICT clause.
+
+        The ``constraint`` and ``index_elements`` arguments
+        are optional, but only one of these can be specified.
+
+        :param constraint:
+         The name of a unique or exclusion constraint on the table,
+         or the constraint object itself if it has a .name attribute.
+
+        :param index_elements:
+         A sequence consisting of string column names, :class:`_schema.Column`
+         objects, or other column expression objects that will be used
+         to infer a target index.
+
+        :param index_where:
+         Additional WHERE criterion that can be used to infer a
+         conditional target index.
+
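+        A minimal sketch, assuming a hypothetical ``my_table``; with no
+        target specified, DO NOTHING applies to any conflicting row::
+
+            stmt = insert(my_table).values(id=1, data="some data")
+            stmt = stmt.on_conflict_do_nothing()
+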
+        .. seealso::
+
+            :ref:`postgresql_insert_on_conflict`
+
+        """
+        self._post_values_clause = OnConflictDoNothing(
+            constraint, index_elements, index_where
+        )
+        return self
+
+
+class OnConflictClause(ClauseElement):
+    stringify_dialect = "postgresql"
+
+    constraint_target: Optional[str]
+    inferred_target_elements: Optional[List[Union[str, schema.Column[Any]]]]
+    inferred_target_whereclause: Optional[
+        Union[ColumnElement[Any], TextClause]
+    ]
+
+    def __init__(
+        self,
+        constraint: _OnConflictConstraintT = None,
+        index_elements: _OnConflictIndexElementsT = None,
+        index_where: _OnConflictIndexWhereT = None,
+    ):
+        if constraint is not None:
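+            # a named Constraint or ExcludeConstraint collapses to its
+            # string name; an unnamed one is kept as the object so its
+            # columns/expressions can be extracted below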
+            if not isinstance(constraint, str) and isinstance(
+                constraint,
+                (schema.Constraint, ext.ExcludeConstraint),
+            ):
+                constraint = getattr(constraint, "name") or constraint
+
+        if constraint is not None:
+            if index_elements is not None:
+                raise ValueError(
+                    "'constraint' and 'index_elements' are mutually exclusive"
+                )
+
+            if isinstance(constraint, str):
+                self.constraint_target = constraint
+                self.inferred_target_elements = None
+                self.inferred_target_whereclause = None
+            elif isinstance(constraint, schema.Index):
+                index_elements = constraint.expressions
+                index_where = constraint.dialect_options["postgresql"].get(
+                    "where"
+                )
+            elif isinstance(constraint, ext.ExcludeConstraint):
+                index_elements = constraint.columns
+                index_where = constraint.where
+            else:
+                index_elements = constraint.columns
+                index_where = constraint.dialect_options["postgresql"].get(
+                    "where"
+                )
+
+        if index_elements is not None:
+            self.constraint_target = None
+            self.inferred_target_elements = [
+                coercions.expect(roles.DDLConstraintColumnRole, column)
+                for column in index_elements
+            ]
+
+            self.inferred_target_whereclause = (
+                coercions.expect(
+                    (
+                        roles.StatementOptionRole
+                        if isinstance(constraint, ext.ExcludeConstraint)
+                        else roles.WhereHavingRole
+                    ),
+                    index_where,
+                )
+                if index_where is not None
+                else None
+            )
+
+        elif constraint is None:
+            self.constraint_target = self.inferred_target_elements = (
+                self.inferred_target_whereclause
+            ) = None
+
+
+class OnConflictDoNothing(OnConflictClause):
+    __visit_name__ = "on_conflict_do_nothing"
+
+
+class OnConflictDoUpdate(OnConflictClause):
+    __visit_name__ = "on_conflict_do_update"
+
+    update_values_to_set: List[Tuple[Union[schema.Column[Any], str], Any]]
+    update_whereclause: Optional[ColumnElement[Any]]
+
+    def __init__(
+        self,
+        constraint: _OnConflictConstraintT = None,
+        index_elements: _OnConflictIndexElementsT = None,
+        index_where: _OnConflictIndexWhereT = None,
+        set_: _OnConflictSetT = None,
+        where: _OnConflictWhereT = None,
+    ):
+        super().__init__(
+            constraint=constraint,
+            index_elements=index_elements,
+            index_where=index_where,
+        )
+
+        if (
+            self.inferred_target_elements is None
+            and self.constraint_target is None
+        ):
+            raise ValueError(
+                "Either constraint or index_elements, "
+                "but not both, must be specified unless DO NOTHING"
+            )
+
+        if isinstance(set_, dict):
+            if not set_:
+                raise ValueError("set parameter dictionary must not be empty")
+        elif isinstance(set_, ColumnCollection):
+            set_ = dict(set_)
+        else:
+            raise ValueError(
+                "set parameter must be a non-empty dictionary "
+                "or a ColumnCollection such as the `.c.` collection "
+                "of a Table object"
+            )
+        self.update_values_to_set = [
+            (coercions.expect(roles.DMLColumnRole, key), value)
+            for key, value in set_.items()
+        ]
+        self.update_whereclause = (
+            coercions.expect(roles.WhereHavingRole, where)
+            if where is not None
+            else None
+        )
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/ext.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/ext.py
new file mode 100644
index 00000000..94466ae0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/ext.py
@@ -0,0 +1,501 @@
+# dialects/postgresql/ext.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+from __future__ import annotations
+
+from typing import Any
+from typing import TYPE_CHECKING
+from typing import TypeVar
+
+from . import types
+from .array import ARRAY
+from ...sql import coercions
+from ...sql import elements
+from ...sql import expression
+from ...sql import functions
+from ...sql import roles
+from ...sql import schema
+from ...sql.schema import ColumnCollectionConstraint
+from ...sql.sqltypes import TEXT
+from ...sql.visitors import InternalTraversal
+
+_T = TypeVar("_T", bound=Any)
+
+if TYPE_CHECKING:
+    from ...sql.visitors import _TraverseInternalsType
+
+
+class aggregate_order_by(expression.ColumnElement):
+    """Represent a PostgreSQL aggregate order by expression.
+
+    E.g.::
+
+        from sqlalchemy.dialects.postgresql import aggregate_order_by
+
+        expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
+        stmt = select(expr)
+
+    would represent the expression:
+
+    .. sourcecode:: sql
+
+        SELECT array_agg(a ORDER BY b DESC) FROM table;
+
+    Similarly::
+
+        expr = func.string_agg(
+            table.c.a, aggregate_order_by(literal_column("','"), table.c.a)
+        )
+        stmt = select(expr)
+
+    Would represent:
+
+    .. sourcecode:: sql
+
+        SELECT string_agg(a, ',' ORDER BY a) FROM table;
+
+    .. versionchanged:: 1.2.13 - the ORDER BY argument may be multiple terms
+
+    .. seealso::
+
+        :class:`_functions.array_agg`
+
+    """
+
+    __visit_name__ = "aggregate_order_by"
+
+    stringify_dialect = "postgresql"
+    _traverse_internals: _TraverseInternalsType = [
+        ("target", InternalTraversal.dp_clauseelement),
+        ("type", InternalTraversal.dp_type),
+        ("order_by", InternalTraversal.dp_clauseelement),
+    ]
+
+    def __init__(self, target, *order_by):
+        self.target = coercions.expect(roles.ExpressionElementRole, target)
+        self.type = self.target.type
+
+        _lob = len(order_by)
+        if _lob == 0:
+            raise TypeError("at least one ORDER BY element is required")
+        elif _lob == 1:
+            self.order_by = coercions.expect(
+                roles.ExpressionElementRole, order_by[0]
+            )
+        else:
+            self.order_by = elements.ClauseList(
+                *order_by, _literal_as_text_role=roles.ExpressionElementRole
+            )
+
+    def self_group(self, against=None):
+        return self
+
+    def get_children(self, **kwargs):
+        return self.target, self.order_by
+
+    def _copy_internals(self, clone=elements._clone, **kw):
+        self.target = clone(self.target, **kw)
+        self.order_by = clone(self.order_by, **kw)
+
+    @property
+    def _from_objects(self):
+        return self.target._from_objects + self.order_by._from_objects
+
+
+class ExcludeConstraint(ColumnCollectionConstraint):
+    """A table-level EXCLUDE constraint.
+
+    Defines an EXCLUDE constraint as described in the `PostgreSQL
+    documentation`__.
+
+    __ https://www.postgresql.org/docs/current/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
+
+    """  # noqa
+
+    __visit_name__ = "exclude_constraint"
+
+    where = None
+    inherit_cache = False
+
+    create_drop_stringify_dialect = "postgresql"
+
+    @elements._document_text_coercion(
+        "where",
+        ":class:`.ExcludeConstraint`",
+        ":paramref:`.ExcludeConstraint.where`",
+    )
+    def __init__(self, *elements, **kw):
+        r"""
+        Create an :class:`.ExcludeConstraint` object.
+
+        E.g.::
+
+            const = ExcludeConstraint(
+                (Column("period"), "&&"),
+                (Column("group"), "="),
+                where=(Column("group") != "some group"),
+                ops={"group": "my_operator_class"},
+            )
+
+        The constraint is normally embedded into the :class:`_schema.Table`
+        construct
+        directly, or added later using :meth:`.append_constraint`::
+
+            some_table = Table(
+                "some_table",
+                metadata,
+                Column("id", Integer, primary_key=True),
+                Column("period", TSRANGE()),
+                Column("group", String),
+            )
+
+            some_table.append_constraint(
+                ExcludeConstraint(
+                    (some_table.c.period, "&&"),
+                    (some_table.c.group, "="),
+                    where=some_table.c.group != "some group",
+                    name="some_table_excl_const",
+                    ops={"group": "my_operator_class"},
+                )
+            )
+
+        The exclude constraint defined in this example requires the
+        ``btree_gist`` extension, which can be installed using the
+        command ``CREATE EXTENSION btree_gist;``.
+
+        :param \*elements:
+
+          A sequence of two-tuples of the form ``(column, operator)`` where
+          "column" is either a :class:`_schema.Column` object, or a SQL
+          expression element (e.g. ``func.int8range(table.from, table.to)``)
+          or the name of a column as a string, and "operator" is a string
+          containing the operator to use (e.g. ``"&&"`` or ``"="``).
+
+          In order to specify a column name when a :class:`_schema.Column`
+          object is not available, while ensuring
+          that any necessary quoting rules take effect, an ad-hoc
+          :class:`_schema.Column` or :func:`_expression.column`
+          object should be used.
+          The ``column`` may also be a string SQL expression when
+          passed as :func:`_expression.literal_column` or
+          :func:`_expression.text`
+
+        :param name:
+          Optional, the in-database name of this constraint.
+
+        :param deferrable:
+          Optional bool.  If set, emit DEFERRABLE or NOT DEFERRABLE when
+          issuing DDL for this constraint.
+
+        :param initially:
+          Optional string.  If set, emit INITIALLY <value> when issuing DDL
+          for this constraint.
+
+        :param using:
+          Optional string.  If set, emit USING <index_method> when issuing DDL
+          for this constraint. Defaults to 'gist'.
+
+        :param where:
+          Optional SQL expression construct or literal SQL string.
+          If set, emit WHERE <predicate> when issuing DDL
+          for this constraint.
+
+        :param ops:
+          Optional dictionary.  Used to define operator classes for the
+          elements; works the same way as that of the
+          :ref:`postgresql_ops <postgresql_operator_classes>`
+          parameter specified to the :class:`_schema.Index` construct.
+
+          .. versionadded:: 1.3.21
+
+          .. seealso::
+
+            :ref:`postgresql_operator_classes` - general description of how
+            PostgreSQL operator classes are specified.
+
+        """
+        columns = []
+        render_exprs = []
+        self.operators = {}
+
+        expressions, operators = zip(*elements)
+
+        for (expr, column, strname, add_element), operator in zip(
+            coercions.expect_col_expression_collection(
+                roles.DDLConstraintColumnRole, expressions
+            ),
+            operators,
+        ):
+            if add_element is not None:
+                columns.append(add_element)
+
+            name = column.name if column is not None else strname
+
+            if name is not None:
+                # backwards compat
+                self.operators[name] = operator
+
+            render_exprs.append((expr, name, operator))
+
+        self._render_exprs = render_exprs
+
+        ColumnCollectionConstraint.__init__(
+            self,
+            *columns,
+            name=kw.get("name"),
+            deferrable=kw.get("deferrable"),
+            initially=kw.get("initially"),
+        )
+        self.using = kw.get("using", "gist")
+        where = kw.get("where")
+        if where is not None:
+            self.where = coercions.expect(roles.StatementOptionRole, where)
+
+        self.ops = kw.get("ops", {})
+
+    def _set_parent(self, table, **kw):
+        super()._set_parent(table)
+
+        self._render_exprs = [
+            (
+                expr if not isinstance(expr, str) else table.c[expr],
+                name,
+                operator,
+            )
+            for expr, name, operator in (self._render_exprs)
+        ]
+
+    def _copy(self, target_table=None, **kw):
+        elements = [
+            (
+                schema._copy_expression(expr, self.parent, target_table),
+                operator,
+            )
+            for expr, _, operator in self._render_exprs
+        ]
+        c = self.__class__(
+            *elements,
+            name=self.name,
+            deferrable=self.deferrable,
+            initially=self.initially,
+            where=self.where,
+            using=self.using,
+        )
+        c.dispatch._update(self.dispatch)
+        return c
+
+
+def array_agg(*arg, **kw):
+    """PostgreSQL-specific form of :class:`_functions.array_agg`, ensures
+    return type is :class:`_postgresql.ARRAY` and not
+    the plain :class:`_types.ARRAY`, unless an explicit ``type_``
+    is passed.
+
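+    A minimal sketch, assuming a hypothetical ``my_table``; the result
+    type is :class:`_postgresql.ARRAY`, so PG-specific array operators
+    such as ``.contains()`` are available on the expression::
+
+        from sqlalchemy import select
+        from sqlalchemy.dialects.postgresql import array_agg
+
+        stmt = select(array_agg(my_table.c.data))
+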
+    """
+    kw["_default_array_type"] = ARRAY
+    return functions.func.array_agg(*arg, **kw)
+
+
+class _regconfig_fn(functions.GenericFunction[_T]):
+    inherit_cache = True
+
+    def __init__(self, *args, **kwargs):
+        args = list(args)
+        if len(args) > 1:
+            initial_arg = coercions.expect(
+                roles.ExpressionElementRole,
+                args.pop(0),
+                name=getattr(self, "name", None),
+                apply_propagate_attrs=self,
+                type_=types.REGCONFIG,
+            )
+            initial_arg = [initial_arg]
+        else:
+            initial_arg = []
+
+        addtl_args = [
+            coercions.expect(
+                roles.ExpressionElementRole,
+                c,
+                name=getattr(self, "name", None),
+                apply_propagate_attrs=self,
+            )
+            for c in args
+        ]
+        super().__init__(*(initial_arg + addtl_args), **kwargs)
+
+
+class to_tsvector(_regconfig_fn):
+    """The PostgreSQL ``to_tsvector`` SQL function.
+
+    This function applies automatic casting of the REGCONFIG argument
+    to use the :class:`_postgresql.REGCONFIG` datatype automatically,
+    and applies a return type of :class:`_postgresql.TSVECTOR`.
+
+    Assuming the PostgreSQL dialect has been imported, either by invoking
+    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
+    engine using ``create_engine("postgresql...")``,
+    :class:`_postgresql.to_tsvector` will be used automatically when invoking
+    ``sqlalchemy.func.to_tsvector()``, ensuring the correct argument and return
+    type handlers are used at compile and execution time.
+
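+    A minimal sketch, assuming a hypothetical ``document_table``::
+
+        from sqlalchemy import func, select
+
+        # "english" is bound using the REGCONFIG datatype here
+        stmt = select(func.to_tsvector("english", document_table.c.body))
+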
+    .. versionadded:: 2.0.0rc1
+
+    """
+
+    inherit_cache = True
+    type = types.TSVECTOR
+
+
+class to_tsquery(_regconfig_fn):
+    """The PostgreSQL ``to_tsquery`` SQL function.
+
+    This function applies automatic casting of the REGCONFIG argument
+    to use the :class:`_postgresql.REGCONFIG` datatype automatically,
+    and applies a return type of :class:`_postgresql.TSQUERY`.
+
+    Assuming the PostgreSQL dialect has been imported, either by invoking
+    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
+    engine using ``create_engine("postgresql...")``,
+    :class:`_postgresql.to_tsquery` will be used automatically when invoking
+    ``sqlalchemy.func.to_tsquery()``, ensuring the correct argument and return
+    type handlers are used at compile and execution time.
+
+    .. versionadded:: 2.0.0rc1
+
+    """
+
+    inherit_cache = True
+    type = types.TSQUERY
+
+
+class plainto_tsquery(_regconfig_fn):
+    """The PostgreSQL ``plainto_tsquery`` SQL function.
+
+    This function applies automatic casting of the REGCONFIG argument
+    to use the :class:`_postgresql.REGCONFIG` datatype automatically,
+    and applies a return type of :class:`_postgresql.TSQUERY`.
+
+    Assuming the PostgreSQL dialect has been imported, either by invoking
+    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
+    engine using ``create_engine("postgresql...")``,
+    :class:`_postgresql.plainto_tsquery` will be used automatically when
+    invoking ``sqlalchemy.func.plainto_tsquery()``, ensuring the correct
+    argument and return type handlers are used at compile and execution time.
+
+    .. versionadded:: 2.0.0rc1
+
+    """
+
+    inherit_cache = True
+    type = types.TSQUERY
+
+
+class phraseto_tsquery(_regconfig_fn):
+    """The PostgreSQL ``phraseto_tsquery`` SQL function.
+
+    This function applies automatic casting of the REGCONFIG argument
+    to use the :class:`_postgresql.REGCONFIG` datatype automatically,
+    and applies a return type of :class:`_postgresql.TSQUERY`.
+
+    Assuming the PostgreSQL dialect has been imported, either by invoking
+    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
+    engine using ``create_engine("postgresql...")``,
+    :class:`_postgresql.phraseto_tsquery` will be used automatically when
+    invoking ``sqlalchemy.func.phraseto_tsquery()``, ensuring the correct
+    argument and return type handlers are used at compile and execution time.
+
+    .. versionadded:: 2.0.0rc1
+
+    """
+
+    inherit_cache = True
+    type = types.TSQUERY
+
+
+class websearch_to_tsquery(_regconfig_fn):
+    """The PostgreSQL ``websearch_to_tsquery`` SQL function.
+
+    This function applies automatic casting of the REGCONFIG argument
+    to use the :class:`_postgresql.REGCONFIG` datatype automatically,
+    and applies a return type of :class:`_postgresql.TSQUERY`.
+
+    Assuming the PostgreSQL dialect has been imported, either by invoking
+    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
+    engine using ``create_engine("postgresql...")``,
+    :class:`_postgresql.websearch_to_tsquery` will be used automatically when
+    invoking ``sqlalchemy.func.websearch_to_tsquery()``, ensuring the correct
+    argument and return type handlers are used at compile and execution time.
+
+    .. versionadded:: 2.0.0rc1
+
+    """
+
+    inherit_cache = True
+    type = types.TSQUERY
+
+
+class ts_headline(_regconfig_fn):
+    """The PostgreSQL ``ts_headline`` SQL function.
+
+    This function applies automatic casting of the REGCONFIG argument
+    to use the :class:`_postgresql.REGCONFIG` datatype automatically,
+    and applies a return type of :class:`_types.TEXT`.
+
+    Assuming the PostgreSQL dialect has been imported, either by invoking
+    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
+    engine using ``create_engine("postgresql...")``,
+    :class:`_postgresql.ts_headline` will be used automatically when invoking
+    ``sqlalchemy.func.ts_headline()``, ensuring the correct argument and return
+    type handlers are used at compile and execution time.
+
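+    A minimal sketch, assuming a hypothetical ``document_table``; the
+    leading REGCONFIG argument is optional::
+
+        from sqlalchemy import func
+
+        # with an explicit text search configuration
+        expr = func.ts_headline(
+            "english", document_table.c.body, func.to_tsquery("sql")
+        )
+
+        # without one, when the tsquery is the second argument
+        expr = func.ts_headline(
+            document_table.c.body, func.to_tsquery("sql")
+        )
+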
+    .. versionadded:: 2.0.0rc1
+
+    """
+
+    inherit_cache = True
+    type = TEXT
+
+    def __init__(self, *args, **kwargs):
+        args = list(args)
+
+        # parse types according to
+        # https://www.postgresql.org/docs/current/textsearch-controls.html#TEXTSEARCH-HEADLINE
+        if len(args) < 2:
+            # invalid args; don't do anything
+            has_regconfig = False
+        elif (
+            isinstance(args[1], elements.ColumnElement)
+            and args[1].type._type_affinity is types.TSQUERY
+        ):
+            # tsquery is second argument, no regconfig argument
+            has_regconfig = False
+        else:
+            has_regconfig = True
+
+        if has_regconfig:
+            initial_arg = coercions.expect(
+                roles.ExpressionElementRole,
+                args.pop(0),
+                apply_propagate_attrs=self,
+                name=getattr(self, "name", None),
+                type_=types.REGCONFIG,
+            )
+            initial_arg = [initial_arg]
+        else:
+            initial_arg = []
+
+        addtl_args = [
+            coercions.expect(
+                roles.ExpressionElementRole,
+                c,
+                name=getattr(self, "name", None),
+                apply_propagate_attrs=self,
+            )
+            for c in args
+        ]
+        super().__init__(*(initial_arg + addtl_args), **kwargs)
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/hstore.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/hstore.py
new file mode 100644
index 00000000..0a915b17
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/hstore.py
@@ -0,0 +1,406 @@
+# dialects/postgresql/hstore.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+
+
+import re
+
+from .array import ARRAY
+from .operators import CONTAINED_BY
+from .operators import CONTAINS
+from .operators import GETITEM
+from .operators import HAS_ALL
+from .operators import HAS_ANY
+from .operators import HAS_KEY
+from ... import types as sqltypes
+from ...sql import functions as sqlfunc
+
+
+__all__ = ("HSTORE", "hstore")
+
+
+class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
+    """Represent the PostgreSQL HSTORE type.
+
+    The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::
+
+        data_table = Table(
+            "data_table",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("data", HSTORE),
+        )
+
+        with engine.connect() as conn:
+            conn.execute(
+                data_table.insert(),
+                {"data": {"key1": "value1", "key2": "value2"}},
+            )
+
+    :class:`.HSTORE` provides for a wide range of operations, including:
+
+    * Index operations::
+
+        data_table.c.data["some key"] == "some value"
+
+    * Containment operations::
+
+        data_table.c.data.has_key("some key")
+
+        data_table.c.data.has_all(["one", "two", "three"])
+
+    * Concatenation::
+
+        data_table.c.data + {"k1": "v1"}
+
+    For a full list of special methods see
+    :class:`.HSTORE.comparator_factory`.
+
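+    E.g., a hedged query sketch using the ``data_table`` above::
+
+        from sqlalchemy import select
+
+        stmt = select(data_table).where(
+            data_table.c.data["some key"] == "some value"
+        )
+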
+    .. container:: topic
+
+        **Detecting Changes in HSTORE columns when using the ORM**
+
+        For usage with the SQLAlchemy ORM, it may be desirable to combine
+        the usage of :class:`.HSTORE` with the :class:`.MutableDict`
+        dictionary provided by the :mod:`sqlalchemy.ext.mutable`
+        extension. This extension
+        will allow "in-place" changes to the dictionary, e.g. addition of new
+        keys or replacement/removal of existing keys to/from the current
+        dictionary, to produce events which will be detected by the unit of
+        work::
+
+            from sqlalchemy.ext.mutable import MutableDict
+
+
+            class MyClass(Base):
+                __tablename__ = "data_table"
+
+                id = Column(Integer, primary_key=True)
+                data = Column(MutableDict.as_mutable(HSTORE))
+
+
+            my_object = session.query(MyClass).one()
+
+            # in-place mutation, requires Mutable extension
+            # in order for the ORM to detect
+            my_object.data["some_key"] = "some value"
+
+            session.commit()
+
+        When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
+        will not be alerted to any changes to the contents of an existing
+        dictionary, unless that dictionary value is re-assigned to the
+        HSTORE-attribute itself, thus generating a change event.
+
+    .. seealso::
+
+        :class:`.hstore` - render the PostgreSQL ``hstore()`` function.
+
+
+    """  # noqa: E501
+
+    __visit_name__ = "HSTORE"
+    hashable = False
+    text_type = sqltypes.Text()
+
+    def __init__(self, text_type=None):
+        """Construct a new :class:`.HSTORE`.
+
+        :param text_type: the type that should be used for indexed values.
+         Defaults to :class:`_types.Text`.
+
+        """
+        if text_type is not None:
+            self.text_type = text_type
+
+    class Comparator(
+        sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator
+    ):
+        """Define comparison operations for :class:`.HSTORE`."""
+
+        def has_key(self, other):
+            """Boolean expression.  Test for presence of a key.  Note that the
+            key may be a SQLA expression.
+            """
+            return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
+
+        def has_all(self, other):
+            """Boolean expression.  Test for presence of all keys in jsonb"""
+            return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
+
+        def has_any(self, other):
+            """Boolean expression.  Test for presence of any key in jsonb"""
+            return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
+
+        def contains(self, other, **kwargs):
+            """Boolean expression.  Test if keys (or array) are a superset
+            of/contained the keys of the argument jsonb expression.
+
+            kwargs may be ignored by this operator but are required for API
+            conformance.
+            """
+            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
+
+        def contained_by(self, other):
+            """Boolean expression.  Test if keys are a proper subset of the
+            keys of the argument hstore expression.
+            """
+            return self.operate(
+                CONTAINED_BY, other, result_type=sqltypes.Boolean
+            )
+
+        def _setup_getitem(self, index):
+            return GETITEM, index, self.type.text_type
+
+        def defined(self, key):
+            """Boolean expression.  Test for presence of a non-NULL value for
+            the key.  Note that the key may be a SQLA expression.
+            """
+            return _HStoreDefinedFunction(self.expr, key)
+
+        def delete(self, key):
+            """HStore expression.  Returns the contents of this hstore with the
+            given key deleted.  Note that the key may be a SQLA expression.
+            """
+            if isinstance(key, dict):
+                key = _serialize_hstore(key)
+            return _HStoreDeleteFunction(self.expr, key)
+
+        def slice(self, array):
+            """HStore expression.  Returns a subset of an hstore defined by
+            array of keys.
+            """
+            return _HStoreSliceFunction(self.expr, array)
+
+        def keys(self):
+            """Text array expression.  Returns array of keys."""
+            return _HStoreKeysFunction(self.expr)
+
+        def vals(self):
+            """Text array expression.  Returns array of values."""
+            return _HStoreValsFunction(self.expr)
+
+        def array(self):
+            """Text array expression.  Returns array of alternating keys and
+            values.
+            """
+            return _HStoreArrayFunction(self.expr)
+
+        def matrix(self):
+            """Text array expression.  Returns array of [key, value] pairs."""
+            return _HStoreMatrixFunction(self.expr)
+
+    comparator_factory = Comparator
+
+    def bind_processor(self, dialect):
+        # note that dialect-specific types like that of psycopg and
+        # psycopg2 will override this method to allow driver-level conversion
+        # instead, see _PsycopgHStore
+        def process(value):
+            if isinstance(value, dict):
+                return _serialize_hstore(value)
+            else:
+                return value
+
+        return process
+
+    def result_processor(self, dialect, coltype):
+        # note that dialect-specific types like that of psycopg and
+        # psycopg2 will override this method to allow driver-level conversion
+        # instead, see _PsycopgHStore
+        def process(value):
+            if value is not None:
+                return _parse_hstore(value)
+            else:
+                return value
+
+        return process
+
+
+class hstore(sqlfunc.GenericFunction):
+    """Construct an hstore value within a SQL expression using the
+    PostgreSQL ``hstore()`` function.
+
+    The :class:`.hstore` function accepts one or two arguments as described
+    in the PostgreSQL documentation.
+
+    E.g.::
+
+        from sqlalchemy.dialects.postgresql import array, hstore
+
+        select(hstore("key1", "value1"))
+
+        select(
+            hstore(
+                array(["key1", "key2", "key3"]),
+                array(["value1", "value2", "value3"]),
+            )
+        )
+
+    .. seealso::
+
+        :class:`.HSTORE` - the PostgreSQL ``HSTORE`` datatype.
+
+    """
+
+    type = HSTORE
+    name = "hstore"
+    inherit_cache = True
+
+
+class _HStoreDefinedFunction(sqlfunc.GenericFunction):
+    type = sqltypes.Boolean
+    name = "defined"
+    inherit_cache = True
+
+
+class _HStoreDeleteFunction(sqlfunc.GenericFunction):
+    type = HSTORE
+    name = "delete"
+    inherit_cache = True
+
+
+class _HStoreSliceFunction(sqlfunc.GenericFunction):
+    type = HSTORE
+    name = "slice"
+    inherit_cache = True
+
+
+class _HStoreKeysFunction(sqlfunc.GenericFunction):
+    type = ARRAY(sqltypes.Text)
+    name = "akeys"
+    inherit_cache = True
+
+
+class _HStoreValsFunction(sqlfunc.GenericFunction):
+    type = ARRAY(sqltypes.Text)
+    name = "avals"
+    inherit_cache = True
+
+
+class _HStoreArrayFunction(sqlfunc.GenericFunction):
+    type = ARRAY(sqltypes.Text)
+    name = "hstore_to_array"
+    inherit_cache = True
+
+
+class _HStoreMatrixFunction(sqlfunc.GenericFunction):
+    type = ARRAY(sqltypes.Text)
+    name = "hstore_to_matrix"
+    inherit_cache = True
+
+
+#
+# parsing.  note that none of this is used with the psycopg2 backend,
+# which provides its own native extensions.
+#
+
+# My best guess at the parsing rules of hstore literals, since no formal
+# grammar is given.  This is mostly reverse engineered from PG's input parser
+# behavior.
+HSTORE_PAIR_RE = re.compile(
+    r"""
+(
+  "(?P<key> (\\ . | [^"])* )"       # Quoted key
+)
+[ ]* => [ ]*    # Pair operator, optional adjoining whitespace
+(
+    (?P<value_null> NULL )          # NULL value
+  | "(?P<value> (\\ . | [^"])* )"   # Quoted value
+)
+""",
+    re.VERBOSE,
+)
+
+HSTORE_DELIMITER_RE = re.compile(
+    r"""
+[ ]* , [ ]*
+""",
+    re.VERBOSE,
+)
+
+
+def _parse_error(hstore_str, pos):
+    """format an unmarshalling error."""
+
+    ctx = 20
+    hslen = len(hstore_str)
+
+    parsed_tail = hstore_str[max(pos - ctx - 1, 0) : min(pos, hslen)]
+    residual = hstore_str[min(pos, hslen) : min(pos + ctx + 1, hslen)]
+
+    if len(parsed_tail) > ctx:
+        parsed_tail = "[...]" + parsed_tail[1:]
+    if len(residual) > ctx:
+        residual = residual[:-1] + "[...]"
+
+    return "After %r, could not parse residual at position %d: %r" % (
+        parsed_tail,
+        pos,
+        residual,
+    )
+
+
+def _parse_hstore(hstore_str):
+    """Parse an hstore from its literal string representation.
+
+    Attempts to approximate PG's hstore input parsing rules as closely as
+    possible. Although currently this is not strictly necessary, since the
+    current implementation of hstore's output syntax is stricter than what it
+    accepts as input, the documentation makes no guarantee that this will
+    always be the case.
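+
+    For example (an illustrative value, not from the original source), the
+    literal ``'"a"=>"1", "b"=>NULL'`` parses to ``{"a": "1", "b": None}``.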
+
+    """
+    result = {}
+    pos = 0
+    pair_match = HSTORE_PAIR_RE.match(hstore_str)
+
+    while pair_match is not None:
+        key = pair_match.group("key").replace(r"\"", '"').replace("\\\\", "\\")
+        if pair_match.group("value_null"):
+            value = None
+        else:
+            value = (
+                pair_match.group("value")
+                .replace(r"\"", '"')
+                .replace("\\\\", "\\")
+            )
+        result[key] = value
+
+        pos += pair_match.end()
+
+        delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
+        if delim_match is not None:
+            pos += delim_match.end()
+
+        pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])
+
+    if pos != len(hstore_str):
+        raise ValueError(_parse_error(hstore_str, pos))
+
+    return result
+
+
+def _serialize_hstore(val):
+    """Serialize a dictionary into an hstore literal.  Keys and values must
+    both be strings (except None for values).
+
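+    For example (an illustrative value, not from the original source),
+    ``{"a": "1", "b": None}`` serializes to ``'"a"=>"1", "b"=>NULL'``.
+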
+    """
+
+    def esc(s, position):
+        if position == "value" and s is None:
+            return "NULL"
+        elif isinstance(s, str):
+            return '"%s"' % s.replace("\\", "\\\\").replace('"', r"\"")
+        else:
+            raise ValueError(
+                "%r in %s position is not a string." % (s, position)
+            )
+
+    return ", ".join(
+        "%s=>%s" % (esc(k, "key"), esc(v, "value")) for k, v in val.items()
+    )
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/json.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/json.py
new file mode 100644
index 00000000..663be8b7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/json.py
@@ -0,0 +1,367 @@
+# dialects/postgresql/json.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+from __future__ import annotations
+
+from typing import Any
+from typing import Callable
+from typing import List
+from typing import Optional
+from typing import TYPE_CHECKING
+from typing import Union
+
+from .array import ARRAY
+from .array import array as _pg_array
+from .operators import ASTEXT
+from .operators import CONTAINED_BY
+from .operators import CONTAINS
+from .operators import DELETE_PATH
+from .operators import HAS_ALL
+from .operators import HAS_ANY
+from .operators import HAS_KEY
+from .operators import JSONPATH_ASTEXT
+from .operators import PATH_EXISTS
+from .operators import PATH_MATCH
+from ... import types as sqltypes
+from ...sql import cast
+from ...sql._typing import _T
+
+if TYPE_CHECKING:
+    from ...engine.interfaces import Dialect
+    from ...sql.elements import ColumnElement
+    from ...sql.type_api import _BindProcessorType
+    from ...sql.type_api import _LiteralProcessorType
+    from ...sql.type_api import TypeEngine
+
+__all__ = ("JSON", "JSONB")
+
+
+class JSONPathType(sqltypes.JSON.JSONPathType):
+    def _processor(
+        self, dialect: Dialect, super_proc: Optional[Callable[[Any], Any]]
+    ) -> Callable[[Any], Any]:
+        def process(value: Any) -> Any:
+            if isinstance(value, str):
+                # if it's already a string, assume it's in json path
+                # format; this allows using cast with json path literals
+                return value
+            elif value:
+                # otherwise, convert a sequence of path elements into the
+                # PostgreSQL path syntax, e.g. ["a", "b"] -> "{a, b}"
+                value = "{%s}" % (", ".join(map(str, value)))
+            else:
+                value = "{}"
+            if super_proc:
+                value = super_proc(value)
+            return value
+
+        return process
+
+    def bind_processor(self, dialect: Dialect) -> _BindProcessorType[Any]:
+        return self._processor(dialect, self.string_bind_processor(dialect))  # type: ignore[return-value]  # noqa: E501
+
+    def literal_processor(
+        self, dialect: Dialect
+    ) -> _LiteralProcessorType[Any]:
+        return self._processor(dialect, self.string_literal_processor(dialect))  # type: ignore[return-value]  # noqa: E501
+
+
+class JSONPATH(JSONPathType):
+    """JSON Path Type.
+
+    This is usually required to cast literal values to json path when using
+    json search like function, such as ``jsonb_path_query_array`` or
+    ``jsonb_path_exists``::
+
+        stmt = sa.select(
+            sa.func.jsonb_path_query_array(
+                table.c.jsonb_col, cast("$.address.id", JSONPATH)
+            )
+        )
+
+    """
+
+    __visit_name__ = "JSONPATH"
+
+
+class JSON(sqltypes.JSON):
+    """Represent the PostgreSQL JSON type.
+
+    :class:`_postgresql.JSON` is used automatically whenever the base
+    :class:`_types.JSON` datatype is used against a PostgreSQL backend,
+    however the base :class:`_types.JSON` datatype does not provide Python
+    accessors for PostgreSQL-specific comparison methods such as
+    :meth:`_postgresql.JSON.Comparator.astext`; additionally, to use
+    PostgreSQL ``JSONB``, the :class:`_postgresql.JSONB` datatype should
+    be used explicitly.
+
+    .. seealso::
+
+        :class:`_types.JSON` - main documentation for the generic
+        cross-platform JSON datatype.
+
+    The operators provided by the PostgreSQL version of :class:`_types.JSON`
+    include:
+
+    * Index operations (the ``->`` operator)::
+
+        data_table.c.data["some key"]
+
+        data_table.c.data[5]
+
+    * Index operations returning text
+      (the ``->>`` operator)::
+
+        data_table.c.data["some key"].astext == "some value"
+
+      Note that equivalent functionality is available via the
+      :attr:`.JSON.Comparator.as_string` accessor.
+
+    * Index operations with CAST
+      (equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::
+
+        data_table.c.data["some key"].astext.cast(Integer) == 5
+
+      Note that equivalent functionality is available via the
+      :attr:`.JSON.Comparator.as_integer` and similar accessors.
+
+    * Path index operations (the ``#>`` operator)::
+
+        data_table.c.data[("key_1", "key_2", 5, ..., "key_n")]
+
+    * Path index operations returning text (the ``#>>`` operator)::
+
+        data_table.c.data[
+            ("key_1", "key_2", 5, ..., "key_n")
+        ].astext == "some value"
+
+    Index operations return an expression object whose type defaults to
+    :class:`_types.JSON`, so that further JSON-oriented instructions
+    may be called upon the result type.
+
+    Custom serializers and deserializers are specified at the dialect level,
+    that is, using :func:`_sa.create_engine`.  The reason for this is that
+    when using psycopg2, the DBAPI only allows serializers at the per-cursor
+    or per-connection level.  E.g.::
+
+        engine = create_engine(
+            "postgresql+psycopg2://scott:tiger@localhost/test",
+            json_serializer=my_serialize_fn,
+            json_deserializer=my_deserialize_fn,
+        )
+
+    When using the psycopg2 dialect, the ``json_deserializer`` is registered
+    against the database using ``psycopg2.extras.register_default_json``.
+
+    .. seealso::
+
+        :class:`_types.JSON` - Core level JSON type
+
+        :class:`_postgresql.JSONB`
+
+    """  # noqa
+
+    render_bind_cast = True
+    astext_type: TypeEngine[str] = sqltypes.Text()
+
+    def __init__(
+        self,
+        none_as_null: bool = False,
+        astext_type: Optional[TypeEngine[str]] = None,
+    ):
+        """Construct a :class:`_types.JSON` type.
+
+        :param none_as_null: if True, persist the value ``None`` as a
+         SQL NULL value, not the JSON encoding of ``null``.   Note that
+         when this flag is False, the :func:`.null` construct can still
+         be used to persist a NULL value::
+
+             from sqlalchemy import null
+
+             conn.execute(table.insert(), {"data": null()})
+
+         .. seealso::
+
+              :attr:`_types.JSON.NULL`
+
+        :param astext_type: the type to use for the
+         :attr:`.JSON.Comparator.astext`
+         accessor on indexed attributes.  Defaults to :class:`_types.Text`.
+
+        """
+        super().__init__(none_as_null=none_as_null)
+        if astext_type is not None:
+            self.astext_type = astext_type
+
+    class Comparator(sqltypes.JSON.Comparator[_T]):
+        """Define comparison operations for :class:`_types.JSON`."""
+
+        type: JSON
+
+        @property
+        def astext(self) -> ColumnElement[str]:
+            """On an indexed expression, use the "astext" (e.g. "->>")
+            conversion when rendered in SQL.
+
+            E.g.::
+
+                select(data_table.c.data["some key"].astext)
+
+            .. seealso::
+
+                :meth:`_expression.ColumnElement.cast`
+
+            """
+            if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType):
+                return self.expr.left.operate(  # type: ignore[no-any-return]
+                    JSONPATH_ASTEXT,
+                    self.expr.right,
+                    result_type=self.type.astext_type,
+                )
+            else:
+                return self.expr.left.operate(  # type: ignore[no-any-return]
+                    ASTEXT, self.expr.right, result_type=self.type.astext_type
+                )
+
+    comparator_factory = Comparator
+
+
+class JSONB(JSON):
+    """Represent the PostgreSQL JSONB type.
+
+    The :class:`_postgresql.JSONB` type stores arbitrary JSONB format data,
+    e.g.::
+
+        data_table = Table(
+            "data_table",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("data", JSONB),
+        )
+
+        with engine.connect() as conn:
+            conn.execute(
+                data_table.insert(),
+                {"data": {"key1": "value1", "key2": "value2"}},
+            )
+
+    The :class:`_postgresql.JSONB` type includes all operations provided by
+    :class:`_types.JSON`, including the same behaviors for indexing
+    operations.
+    It also adds additional operators specific to JSONB, including
+    :meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`,
+    :meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`,
+    :meth:`.JSONB.Comparator.contained_by`,
+    :meth:`.JSONB.Comparator.delete_path`,
+    :meth:`.JSONB.Comparator.path_exists` and
+    :meth:`.JSONB.Comparator.path_match`.
+
+    Like the :class:`_types.JSON` type, the :class:`_postgresql.JSONB`
+    type does not detect
+    in-place changes when used with the ORM, unless the
+    :mod:`sqlalchemy.ext.mutable` extension is used.
+
+    Custom serializers and deserializers
+    are shared with the :class:`_types.JSON` class,
+    using the ``json_serializer``
+    and ``json_deserializer`` keyword arguments.  These must be specified
+    at the dialect level using :func:`_sa.create_engine`.  When using
+    psycopg2, the serializers are associated with the jsonb type using
+    ``psycopg2.extras.register_default_jsonb`` on a per-connection basis,
+    in the same way that ``psycopg2.extras.register_default_json`` is used
+    to register these handlers with the json type.
+
+    .. seealso::
+
+        :class:`_types.JSON`
+
+    """
+
+    __visit_name__ = "JSONB"
+
+    class Comparator(JSON.Comparator[_T]):
+        """Define comparison operations for :class:`_types.JSON`."""
+
+        type: JSONB
+
+        def has_key(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression.  Test for presence of a key (equivalent of
+            the ``?`` operator).  Note that the key may be a SQLA expression.
+            """
+            return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
+
+        def has_all(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression.  Test for presence of all keys in jsonb
+            (equivalent of the ``?&`` operator)
+            """
+            return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
+
+        def has_any(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression.  Test for presence of any key in jsonb
+            (equivalent of the ``?|`` operator)
+            """
+            return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
+
+        def contains(self, other: Any, **kwargs: Any) -> ColumnElement[bool]:
+            """Boolean expression.  Test if keys (or array) are a superset
+            of / contain the keys of the argument jsonb expression
+            (equivalent of the ``@>`` operator).
+
+            kwargs may be ignored by this operator but are required for API
+            conformance.
+            """
+            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
+
+        def contained_by(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression.  Test if keys are a proper subset of the
+            keys of the argument jsonb expression
+            (equivalent of the ``<@`` operator).
+            """
+            return self.operate(
+                CONTAINED_BY, other, result_type=sqltypes.Boolean
+            )
+
+        def delete_path(
+            self, array: Union[List[str], _pg_array[str]]
+        ) -> ColumnElement[JSONB]:
+            """JSONB expression. Deletes field or array element specified in
+            the argument array (equivalent of the ``#-`` operator).
+
+            The input may be a list of strings that will be coerced to an
+            ``ARRAY`` or an instance of :class:`_postgresql.array`.
+
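+            E.g., a hedged sketch (column name illustrative)::
+
+                data_table.c.data.delete_path(["key1", "key2"])
+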
+            .. versionadded:: 2.0
+            """
+            if not isinstance(array, _pg_array):
+                array = _pg_array(array)  # type: ignore[no-untyped-call]
+            right_side = cast(array, ARRAY(sqltypes.TEXT))
+            return self.operate(DELETE_PATH, right_side, result_type=JSONB)
+
+        def path_exists(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression. Test for presence of item given by the
+            argument JSONPath expression (equivalent of the ``@?`` operator).
+
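+            E.g., a hedged sketch (column name illustrative)::
+
+                data_table.c.data.path_exists("$.address.id")
+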
+            .. versionadded:: 2.0
+            """
+            return self.operate(
+                PATH_EXISTS, other, result_type=sqltypes.Boolean
+            )
+
+        def path_match(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression. Test if JSONPath predicate given by the
+            argument JSONPath expression matches
+            (equivalent of the ``@@`` operator).
+
+            Only the first item of the result is taken into account.
+
+            .. versionadded:: 2.0
+            """
+            return self.operate(
+                PATH_MATCH, other, result_type=sqltypes.Boolean
+            )
+
+    comparator_factory = Comparator
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/named_types.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/named_types.py
new file mode 100644
index 00000000..e1b8e84c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/named_types.py
@@ -0,0 +1,505 @@
+# dialects/postgresql/named_types.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+from __future__ import annotations
+
+from typing import Any
+from typing import Optional
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import Union
+
+from ... import schema
+from ... import util
+from ...sql import coercions
+from ...sql import elements
+from ...sql import roles
+from ...sql import sqltypes
+from ...sql import type_api
+from ...sql.base import _NoArg
+from ...sql.ddl import InvokeCreateDDLBase
+from ...sql.ddl import InvokeDropDDLBase
+
+if TYPE_CHECKING:
+    from ...sql._typing import _TypeEngineArgument
+
+
+class NamedType(sqltypes.TypeEngine):
+    """Base for named types."""
+
+    __abstract__ = True
+    DDLGenerator: Type[NamedTypeGenerator]
+    DDLDropper: Type[NamedTypeDropper]
+    create_type: bool
+
+    def create(self, bind, checkfirst=True, **kw):
+        """Emit ``CREATE`` DDL for this type.
+
+        :param bind: a connectable :class:`_engine.Engine`,
+         :class:`_engine.Connection`, or similar object to emit
+         SQL.
+        :param checkfirst: if ``True``, a query against
+         the PG catalog will first be performed to see
+         if the type already exists, before
+         creating.
+
+        """
+        bind._run_ddl_visitor(self.DDLGenerator, self, checkfirst=checkfirst)
+
+    def drop(self, bind, checkfirst=True, **kw):
+        """Emit ``DROP`` DDL for this type.
+
+        :param bind: a connectable :class:`_engine.Engine`,
+         :class:`_engine.Connection`, or similar object to emit
+         SQL.
+        :param checkfirst: if ``True``, a query against
+         the PG catalog will first be performed to see
+         if the type actually exists before dropping.
+
+        """
+        bind._run_ddl_visitor(self.DDLDropper, self, checkfirst=checkfirst)
+
+    def _check_for_name_in_memos(self, checkfirst, kw):
+        """Look in the 'ddl runner' for 'memos', then
+        note our name in that collection.
+
+        This is to ensure a particular named type is operated
+        upon only once within any kind of create/drop
+        sequence without relying upon "checkfirst".
+
+        """
+        if not self.create_type:
+            return True
+        if "_ddl_runner" in kw:
+            ddl_runner = kw["_ddl_runner"]
+            type_name = f"pg_{self.__visit_name__}"
+            if type_name in ddl_runner.memo:
+                existing = ddl_runner.memo[type_name]
+            else:
+                existing = ddl_runner.memo[type_name] = set()
+            present = (self.schema, self.name) in existing
+            existing.add((self.schema, self.name))
+            return present
+        else:
+            return False
+
+    def _on_table_create(self, target, bind, checkfirst=False, **kw):
+        if (
+            checkfirst
+            or (
+                not self.metadata
+                and not kw.get("_is_metadata_operation", False)
+            )
+        ) and not self._check_for_name_in_memos(checkfirst, kw):
+            self.create(bind=bind, checkfirst=checkfirst)
+
+    def _on_table_drop(self, target, bind, checkfirst=False, **kw):
+        if (
+            not self.metadata
+            and not kw.get("_is_metadata_operation", False)
+            and not self._check_for_name_in_memos(checkfirst, kw)
+        ):
+            self.drop(bind=bind, checkfirst=checkfirst)
+
+    def _on_metadata_create(self, target, bind, checkfirst=False, **kw):
+        if not self._check_for_name_in_memos(checkfirst, kw):
+            self.create(bind=bind, checkfirst=checkfirst)
+
+    def _on_metadata_drop(self, target, bind, checkfirst=False, **kw):
+        if not self._check_for_name_in_memos(checkfirst, kw):
+            self.drop(bind=bind, checkfirst=checkfirst)
+
+
+class NamedTypeGenerator(InvokeCreateDDLBase):
+    def __init__(self, dialect, connection, checkfirst=False, **kwargs):
+        super().__init__(connection, **kwargs)
+        self.checkfirst = checkfirst
+
+    def _can_create_type(self, type_):
+        if not self.checkfirst:
+            return True
+
+        effective_schema = self.connection.schema_for_object(type_)
+        return not self.connection.dialect.has_type(
+            self.connection, type_.name, schema=effective_schema
+        )
+
+
+class NamedTypeDropper(InvokeDropDDLBase):
+    def __init__(self, dialect, connection, checkfirst=False, **kwargs):
+        super().__init__(connection, **kwargs)
+        self.checkfirst = checkfirst
+
+    def _can_drop_type(self, type_):
+        if not self.checkfirst:
+            return True
+
+        effective_schema = self.connection.schema_for_object(type_)
+        return self.connection.dialect.has_type(
+            self.connection, type_.name, schema=effective_schema
+        )
+
+
+class EnumGenerator(NamedTypeGenerator):
+    def visit_enum(self, enum):
+        if not self._can_create_type(enum):
+            return
+
+        with self.with_ddl_events(enum):
+            self.connection.execute(CreateEnumType(enum))
+
+
+class EnumDropper(NamedTypeDropper):
+    def visit_enum(self, enum):
+        if not self._can_drop_type(enum):
+            return
+
+        with self.with_ddl_events(enum):
+            self.connection.execute(DropEnumType(enum))
+
+
+class ENUM(NamedType, type_api.NativeForEmulated, sqltypes.Enum):
+    """PostgreSQL ENUM type.
+
+    This is a subclass of :class:`_types.Enum` which includes
+    support for PG's ``CREATE TYPE`` and ``DROP TYPE``.
+
+    When the builtin type :class:`_types.Enum` is used and the
+    :paramref:`.Enum.native_enum` flag is left at its default of
+    True, the PostgreSQL backend will use a :class:`_postgresql.ENUM`
+    type as the implementation, so the special create/drop rules
+    will be used.
+
+    The create/drop behavior of ENUM is necessarily intricate, due to the
+    awkward relationship the ENUM type has with the
+    parent table, in that it may be "owned" by just a single table, or
+    may be shared among many tables.
+
+    When using :class:`_types.Enum` or :class:`_postgresql.ENUM`
+    in an "inline" fashion, the ``CREATE TYPE`` and ``DROP TYPE`` is emitted
+    corresponding to when the :meth:`_schema.Table.create` and
+    :meth:`_schema.Table.drop`
+    methods are called::
+
+        table = Table(
+            "sometable",
+            metadata,
+            Column("some_enum", ENUM("a", "b", "c", name="myenum")),
+        )
+
+        table.create(engine)  # will emit CREATE ENUM and CREATE TABLE
+        table.drop(engine)  # will emit DROP TABLE and DROP ENUM
+
+    To use a common enumerated type between multiple tables, the best
+    practice is to declare the :class:`_types.Enum` or
+    :class:`_postgresql.ENUM` independently, and associate it with the
+    :class:`_schema.MetaData` object itself::
+
+        my_enum = ENUM("a", "b", "c", name="myenum", metadata=metadata)
+
+        t1 = Table("sometable_one", metadata, Column("some_enum", myenum))
+
+        t2 = Table("sometable_two", metadata, Column("some_enum", myenum))
+
+    When this pattern is used, care must still be taken at the level
+    of individual table creates.  Emitting CREATE TABLE without also
+    specifying ``checkfirst=True`` will still cause issues::
+
+        t1.create(engine)  # will fail: no such type 'myenum'
+
+    If we specify ``checkfirst=True``, the individual table-level create
+    operation will check for the ``ENUM`` and create if not exists::
+
+        # will check if enum exists, and emit CREATE TYPE if not
+        t1.create(engine, checkfirst=True)
+
+    When using a metadata-level ENUM type, the type will always be created
+    and dropped when the metadata-wide create or drop is called::
+
+        metadata.create_all(engine)  # will emit CREATE TYPE
+        metadata.drop_all(engine)  # will emit DROP TYPE
+
+    The type can also be created and dropped directly::
+
+        my_enum.create(engine)
+        my_enum.drop(engine)
+
+    """
+
+    native_enum = True
+    DDLGenerator = EnumGenerator
+    DDLDropper = EnumDropper
+
+    def __init__(
+        self,
+        *enums,
+        name: Union[str, _NoArg, None] = _NoArg.NO_ARG,
+        create_type: bool = True,
+        **kw,
+    ):
+        """Construct an :class:`_postgresql.ENUM`.
+
+        Arguments are the same as that of
+        :class:`_types.Enum`, but also including
+        the following parameters.
+
+        :param create_type: Defaults to True.
+         Indicates that ``CREATE TYPE`` should be
+         emitted, after optionally checking for the
+         presence of the type, when the parent
+         table is being created; and additionally
+         that ``DROP TYPE`` is called when the table
+         is dropped.    When ``False``, no check
+         will be performed and no ``CREATE TYPE``
+         or ``DROP TYPE`` is emitted, unless
+         :meth:`~.postgresql.ENUM.create`
+         or :meth:`~.postgresql.ENUM.drop`
+         are called directly.
+         Setting to ``False`` is helpful
+         when invoking a creation scheme to a SQL file
+         without access to the actual database -
+         the :meth:`~.postgresql.ENUM.create` and
+         :meth:`~.postgresql.ENUM.drop` methods can
+         be used to emit SQL to a target bind.
+
+        """
+        native_enum = kw.pop("native_enum", None)
+        if native_enum is False:
+            util.warn(
+                "the native_enum flag does not apply to the "
+                "sqlalchemy.dialects.postgresql.ENUM datatype; this type "
+                "always refers to ENUM.   Use sqlalchemy.types.Enum for "
+                "non-native enum."
+            )
+        self.create_type = create_type
+        if name is not _NoArg.NO_ARG:
+            kw["name"] = name
+        super().__init__(*enums, **kw)
+
+    def coerce_compared_value(self, op, value):
+        super_coerced_type = super().coerce_compared_value(op, value)
+        if (
+            super_coerced_type._type_affinity
+            is type_api.STRINGTYPE._type_affinity
+        ):
+            return self
+        else:
+            return super_coerced_type
+
+    @classmethod
+    def __test_init__(cls):
+        return cls(name="name")
+
+    @classmethod
+    def adapt_emulated_to_native(cls, impl, **kw):
+        """Produce a PostgreSQL native :class:`_postgresql.ENUM` from plain
+        :class:`.Enum`.
+
+        """
+        kw.setdefault("validate_strings", impl.validate_strings)
+        kw.setdefault("name", impl.name)
+        kw.setdefault("schema", impl.schema)
+        kw.setdefault("inherit_schema", impl.inherit_schema)
+        kw.setdefault("metadata", impl.metadata)
+        kw.setdefault("_create_events", False)
+        kw.setdefault("values_callable", impl.values_callable)
+        kw.setdefault("omit_aliases", impl._omit_aliases)
+        kw.setdefault("_adapted_from", impl)
+        if type_api._is_native_for_emulated(impl.__class__):
+            kw.setdefault("create_type", impl.create_type)
+
+        return cls(**kw)
+
+    def create(self, bind=None, checkfirst=True):
+        """Emit ``CREATE TYPE`` for this
+        :class:`_postgresql.ENUM`.
+
+        If the underlying dialect does not support
+        PostgreSQL CREATE TYPE, no action is taken.
+
+        :param bind: a connectable :class:`_engine.Engine`,
+         :class:`_engine.Connection`, or similar object to emit
+         SQL.
+        :param checkfirst: if ``True``, a query against
+         the PG catalog will first be performed to see
+         if the type already exists, before
+         creating.
+
+        """
+        if not bind.dialect.supports_native_enum:
+            return
+
+        super().create(bind, checkfirst=checkfirst)
+
+    def drop(self, bind=None, checkfirst=True):
+        """Emit ``DROP TYPE`` for this
+        :class:`_postgresql.ENUM`.
+
+        If the underlying dialect does not support
+        PostgreSQL DROP TYPE, no action is taken.
+
+        :param bind: a connectable :class:`_engine.Engine`,
+         :class:`_engine.Connection`, or similar object to emit
+         SQL.
+        :param checkfirst: if ``True``, a query against
+         the PG catalog will first be performed to see
+         if the type actually exists before dropping.
+
+        """
+        if not bind.dialect.supports_native_enum:
+            return
+
+        super().drop(bind, checkfirst=checkfirst)
+
+    def get_dbapi_type(self, dbapi):
+        """dont return dbapi.STRING for ENUM in PostgreSQL, since that's
+        a different type"""
+
+        return None
+
+
+class DomainGenerator(NamedTypeGenerator):
+    def visit_DOMAIN(self, domain):
+        if not self._can_create_type(domain):
+            return
+        with self.with_ddl_events(domain):
+            self.connection.execute(CreateDomainType(domain))
+
+
+class DomainDropper(NamedTypeDropper):
+    def visit_DOMAIN(self, domain):
+        if not self._can_drop_type(domain):
+            return
+
+        with self.with_ddl_events(domain):
+            self.connection.execute(DropDomainType(domain))
+
+
+class DOMAIN(NamedType, sqltypes.SchemaType):
+    r"""Represent the DOMAIN PostgreSQL type.
+
+    A domain is essentially a data type with optional constraints
+    that restrict the allowed set of values. E.g.::
+
+        PositiveInt = DOMAIN("pos_int", Integer, check="VALUE > 0", not_null=True)
+
+        UsPostalCode = DOMAIN(
+            "us_postal_code",
+            Text,
+            check="VALUE ~ '^\d{5}$' OR VALUE ~ '^\d{5}-\d{4}$'",
+        )
+
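+    The domain can then be used like any other type when defining a table
+    (a hedged sketch; table and column names are illustrative)::
+
+        order_table = Table(
+            "orders", metadata, Column("quantity", PositiveInt)
+        )
+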
+    See the `PostgreSQL documentation`__ for additional details
+
+    __ https://www.postgresql.org/docs/current/sql-createdomain.html
+
+    .. versionadded:: 2.0
+
+    """  # noqa: E501
+
+    DDLGenerator = DomainGenerator
+    DDLDropper = DomainDropper
+
+    __visit_name__ = "DOMAIN"
+
+    def __init__(
+        self,
+        name: str,
+        data_type: _TypeEngineArgument[Any],
+        *,
+        collation: Optional[str] = None,
+        default: Union[elements.TextClause, str, None] = None,
+        constraint_name: Optional[str] = None,
+        not_null: Optional[bool] = None,
+        check: Union[elements.TextClause, str, None] = None,
+        create_type: bool = True,
+        **kw: Any,
+    ):
+        """
+        Construct a DOMAIN.
+
+        :param name: the name of the domain
+        :param data_type: The underlying data type of the domain.
+          This can include array specifiers.
+        :param collation: An optional collation for the domain.
+          If no collation is specified, the underlying data type's default
+          collation is used. The underlying type must be collatable if
+          ``collation`` is specified.
+        :param default: The DEFAULT clause specifies a default value for
+          columns of the domain data type. The default should be a string
+          or a :func:`_expression.text` value.
+          If no default value is specified, then the default value is
+          the null value.
+        :param constraint_name: An optional name for a constraint.
+          If not specified, the backend generates a name.
+        :param not_null: Values of this domain are prevented from being null.
+          By default domains are allowed to be null. If not specified,
+          no nullability clause will be emitted.
+        :param check: a CHECK clause specifying an integrity constraint or
+          test which values of the domain must satisfy. A constraint must
+          be an expression producing a Boolean result that can use the
+          keyword VALUE to refer to the value being tested.
+          Unlike PostgreSQL, only a single check clause is
+          currently allowed in SQLAlchemy.
+        :param schema: optional schema name
+        :param metadata: optional :class:`_schema.MetaData` object with
+         which this :class:`_postgresql.DOMAIN` will be directly associated
+        :param create_type: Defaults to True.
+         Indicates that ``CREATE TYPE`` should be emitted, after optionally
+         checking for the presence of the type, when the parent table is
+         being created; and additionally that ``DROP TYPE`` is called
+         when the table is dropped.
+
+        """
+        self.data_type = type_api.to_instance(data_type)
+        self.default = default
+        self.collation = collation
+        self.constraint_name = constraint_name
+        self.not_null = bool(not_null)
+        if check is not None:
+            check = coercions.expect(roles.DDLExpressionRole, check)
+        self.check = check
+        self.create_type = create_type
+        super().__init__(name=name, **kw)
+
+    @classmethod
+    def __test_init__(cls):
+        return cls("name", sqltypes.Integer)
+
+    def adapt(self, impl, **kw):
+        if self.default:
+            kw["default"] = self.default
+        if self.constraint_name is not None:
+            kw["constraint_name"] = self.constraint_name
+        if self.not_null:
+            kw["not_null"] = self.not_null
+        if self.check is not None:
+            kw["check"] = str(self.check)
+        if self.create_type:
+            kw["create_type"] = self.create_type
+
+        return super().adapt(impl, **kw)
+
+
+class CreateEnumType(schema._CreateDropBase):
+    __visit_name__ = "create_enum_type"
+
+
+class DropEnumType(schema._CreateDropBase):
+    __visit_name__ = "drop_enum_type"
+
+
+class CreateDomainType(schema._CreateDropBase):
+    """Represent a CREATE DOMAIN statement."""
+
+    __visit_name__ = "create_domain_type"
+
+
+class DropDomainType(schema._CreateDropBase):
+    """Represent a DROP DOMAIN statement."""
+
+    __visit_name__ = "drop_domain_type"
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/operators.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/operators.py
new file mode 100644
index 00000000..ebcafcba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/operators.py
@@ -0,0 +1,129 @@
+# dialects/postgresql/operators.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+from ...sql import operators
+
+
+_getitem_precedence = operators._PRECEDENCE[operators.json_getitem_op]
+_eq_precedence = operators._PRECEDENCE[operators.eq]
+
+# JSON + JSONB
+ASTEXT = operators.custom_op(
+    "->>",
+    precedence=_getitem_precedence,
+    natural_self_precedent=True,
+    eager_grouping=True,
+)
+
+JSONPATH_ASTEXT = operators.custom_op(
+    "#>>",
+    precedence=_getitem_precedence,
+    natural_self_precedent=True,
+    eager_grouping=True,
+)
+
+# JSONB + HSTORE
+HAS_KEY = operators.custom_op(
+    "?",
+    precedence=_eq_precedence,
+    natural_self_precedent=True,
+    eager_grouping=True,
+    is_comparison=True,
+)
+
+HAS_ALL = operators.custom_op(
+    "?&",
+    precedence=_eq_precedence,
+    natural_self_precedent=True,
+    eager_grouping=True,
+    is_comparison=True,
+)
+
+HAS_ANY = operators.custom_op(
+    "?|",
+    precedence=_eq_precedence,
+    natural_self_precedent=True,
+    eager_grouping=True,
+    is_comparison=True,
+)
+
+# JSONB
+DELETE_PATH = operators.custom_op(
+    "#-",
+    precedence=_getitem_precedence,
+    natural_self_precedent=True,
+    eager_grouping=True,
+)
+
+PATH_EXISTS = operators.custom_op(
+    "@?",
+    precedence=_eq_precedence,
+    natural_self_precedent=True,
+    eager_grouping=True,
+    is_comparison=True,
+)
+
+PATH_MATCH = operators.custom_op(
+    "@@",
+    precedence=_eq_precedence,
+    natural_self_precedent=True,
+    eager_grouping=True,
+    is_comparison=True,
+)
+
+# JSONB + ARRAY + HSTORE + RANGE
+CONTAINS = operators.custom_op(
+    "@>",
+    precedence=_eq_precedence,
+    natural_self_precedent=True,
+    eager_grouping=True,
+    is_comparison=True,
+)
+
+CONTAINED_BY = operators.custom_op(
+    "<@",
+    precedence=_eq_precedence,
+    natural_self_precedent=True,
+    eager_grouping=True,
+    is_comparison=True,
+)
+
+# ARRAY + RANGE
+OVERLAP = operators.custom_op(
+    "&&",
+    precedence=_eq_precedence,
+    is_comparison=True,
+)
+
+# RANGE
+STRICTLY_LEFT_OF = operators.custom_op(
+    "<<", precedence=_eq_precedence, is_comparison=True
+)
+
+STRICTLY_RIGHT_OF = operators.custom_op(
+    ">>", precedence=_eq_precedence, is_comparison=True
+)
+
+NOT_EXTEND_RIGHT_OF = operators.custom_op(
+    "&<", precedence=_eq_precedence, is_comparison=True
+)
+
+NOT_EXTEND_LEFT_OF = operators.custom_op(
+    "&>", precedence=_eq_precedence, is_comparison=True
+)
+
+ADJACENT_TO = operators.custom_op(
+    "-|-", precedence=_eq_precedence, is_comparison=True
+)
+
+# HSTORE
+GETITEM = operators.custom_op(
+    "->",
+    precedence=_getitem_precedence,
+    natural_self_precedent=True,
+    eager_grouping=True,
+)
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/pg8000.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/pg8000.py
new file mode 100644
index 00000000..bf113230
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/pg8000.py
@@ -0,0 +1,666 @@
+# dialects/postgresql/pg8000.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors <see AUTHORS
+# file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+
+r"""
+.. dialect:: postgresql+pg8000
+    :name: pg8000
+    :dbapi: pg8000
+    :connectstring: postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
+    :url: https://pypi.org/project/pg8000/
+
+.. versionchanged:: 1.4  The pg8000 dialect has been updated for version
+   1.16.6 and higher, and is again part of SQLAlchemy's continuous integration
+   with full feature support.
+
+.. _pg8000_unicode:
+
+Unicode
+-------
+
+pg8000 will encode / decode string values between it and the server using the
+PostgreSQL ``client_encoding`` parameter; by default this is the value in
+the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
+Typically, this can be changed to ``utf-8``, as a more useful default::
+
+    # client_encoding = sql_ascii # actually, defaults to database encoding
+    client_encoding = utf8
+
+The ``client_encoding`` can be overridden for a session by executing the SQL:
+
+.. sourcecode:: sql
+
+    SET CLIENT_ENCODING TO 'utf8';
+
+SQLAlchemy will execute this SQL on all new connections based on the value
+passed to :func:`_sa.create_engine` using the ``client_encoding`` parameter::
+
+    engine = create_engine(
+        "postgresql+pg8000://user:pass@host/dbname", client_encoding="utf8"
+    )
+
+.. _pg8000_ssl:
+
+SSL Connections
+---------------
+
+pg8000 accepts a Python ``SSLContext`` object which may be specified using the
+:paramref:`_sa.create_engine.connect_args` dictionary::
+
+    import ssl
+
+    ssl_context = ssl.create_default_context()
+    engine = sa.create_engine(
+        "postgresql+pg8000://scott:tiger@192.168.0.199/test",
+        connect_args={"ssl_context": ssl_context},
+    )
+
+If the server uses an automatically-generated certificate that is self-signed
+or does not match the host name (as seen from the client), it may also be
+necessary to disable hostname checking::
+
+    import ssl
+
+    ssl_context = ssl.create_default_context()
+    ssl_context.check_hostname = False
+    ssl_context.verify_mode = ssl.CERT_NONE
+    engine = sa.create_engine(
+        "postgresql+pg8000://scott:tiger@192.168.0.199/test",
+        connect_args={"ssl_context": ssl_context},
+    )
+
+.. _pg8000_isolation_level:
+
+pg8000 Transaction Isolation Level
+-------------------------------------
+
+The pg8000 dialect offers the same isolation level settings as that
+of the :ref:`psycopg2 <psycopg2_isolation_level>` dialect:
+
+* ``READ COMMITTED``
+* ``READ UNCOMMITTED``
+* ``REPEATABLE READ``
+* ``SERIALIZABLE``
+* ``AUTOCOMMIT``
+
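+For example, a hedged sketch of setting the level via
+:paramref:`_sa.create_engine.isolation_level`::
+
+    engine = create_engine(
+        "postgresql+pg8000://scott:tiger@localhost/test",
+        isolation_level="REPEATABLE READ",
+    )
+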
+.. seealso::
+
+    :ref:`postgresql_isolation_level`
+
+    :ref:`psycopg2_isolation_level`
+
+
+"""  # noqa
+import decimal
+import re
+
+from . import ranges
+from .array import ARRAY as PGARRAY
+from .base import _DECIMAL_TYPES
+from .base import _FLOAT_TYPES
+from .base import _INT_TYPES
+from .base import ENUM
+from .base import INTERVAL
+from .base import PGCompiler
+from .base import PGDialect
+from .base import PGExecutionContext
+from .base import PGIdentifierPreparer
+from .json import JSON
+from .json import JSONB
+from .json import JSONPathType
+from .pg_catalog import _SpaceVector
+from .pg_catalog import OIDVECTOR
+from .types import CITEXT
+from ... import exc
+from ... import util
+from ...engine import processors
+from ...sql import sqltypes
+from ...sql.elements import quoted_name
+
+
+class _PGString(sqltypes.String):
+    render_bind_cast = True
+
+
+class _PGNumeric(sqltypes.Numeric):
+    render_bind_cast = True
+
+    def result_processor(self, dialect, coltype):
+        if self.asdecimal:
+            if coltype in _FLOAT_TYPES:
+                return processors.to_decimal_processor_factory(
+                    decimal.Decimal, self._effective_decimal_return_scale
+                )
+            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+                # pg8000 returns Decimal natively for 1700
+                return None
+            else:
+                raise exc.InvalidRequestError(
+                    "Unknown PG numeric type: %d" % coltype
+                )
+        else:
+            if coltype in _FLOAT_TYPES:
+                # pg8000 returns float natively for 701
+                return None
+            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+                return processors.to_float
+            else:
+                raise exc.InvalidRequestError(
+                    "Unknown PG numeric type: %d" % coltype
+                )
+
+
+class _PGFloat(_PGNumeric, sqltypes.Float):
+    __visit_name__ = "float"
+    render_bind_cast = True
+
+
+class _PGNumericNoBind(_PGNumeric):
+    def bind_processor(self, dialect):
+        return None
+
+
+class _PGJSON(JSON):
+    render_bind_cast = True
+
+    def result_processor(self, dialect, coltype):
+        return None
+
+
+class _PGJSONB(JSONB):
+    render_bind_cast = True
+
+    def result_processor(self, dialect, coltype):
+        return None
+
+
+class _PGJSONIndexType(sqltypes.JSON.JSONIndexType):
+    def get_dbapi_type(self, dbapi):
+        raise NotImplementedError("should not be here")
+
+
+class _PGJSONIntIndexType(sqltypes.JSON.JSONIntIndexType):
+    __visit_name__ = "json_int_index"
+
+    render_bind_cast = True
+
+
+class _PGJSONStrIndexType(sqltypes.JSON.JSONStrIndexType):
+    __visit_name__ = "json_str_index"
+
+    render_bind_cast = True
+
+
+class _PGJSONPathType(JSONPathType):
+    # DBAPI type 1009
+    pass
+
+
+class _PGEnum(ENUM):
+    def get_dbapi_type(self, dbapi):
+        return dbapi.UNKNOWN
+
+
+class _PGInterval(INTERVAL):
+    render_bind_cast = True
+
+    def get_dbapi_type(self, dbapi):
+        return dbapi.INTERVAL
+
+    @classmethod
+    def adapt_emulated_to_native(cls, interval, **kw):
+        return _PGInterval(precision=interval.second_precision)
+
+
+class _PGTimeStamp(sqltypes.DateTime):
+    render_bind_cast = True
+
+
+class _PGDate(sqltypes.Date):
+    render_bind_cast = True
+
+
+class _PGTime(sqltypes.Time):
+    render_bind_cast = True
+
+
+class _PGInteger(sqltypes.Integer):
+    render_bind_cast = True
+
+
+class _PGSmallInteger(sqltypes.SmallInteger):
+    render_bind_cast = True
+
+
+class _PGNullType(sqltypes.NullType):
+    pass
+
+
+class _PGBigInteger(sqltypes.BigInteger):
+    render_bind_cast = True
+
+
+class _PGBoolean(sqltypes.Boolean):
+    render_bind_cast = True
+
+
+class _PGARRAY(PGARRAY):
+    render_bind_cast = True
+
+
+class _PGOIDVECTOR(_SpaceVector, OIDVECTOR):
+    pass
+
+
+class _Pg8000Range(ranges.AbstractSingleRangeImpl):
+    def bind_processor(self, dialect):
+        pg8000_Range = dialect.dbapi.Range
+
+        def to_range(value):
+            if isinstance(value, ranges.Range):
+                value = pg8000_Range(
+                    value.lower, value.upper, value.bounds, value.empty
+                )
+            return value
+
+        return to_range
+
+    def result_processor(self, dialect, coltype):
+        def to_range(value):
+            if value is not None:
+                value = ranges.Range(
+                    value.lower,
+                    value.upper,
+                    bounds=value.bounds,
+                    empty=value.is_empty,
+                )
+            return value
+
+        return to_range
+
+
+class _Pg8000MultiRange(ranges.AbstractMultiRangeImpl):
+    def bind_processor(self, dialect):
+        pg8000_Range = dialect.dbapi.Range
+
+        def to_multirange(value):
+            if isinstance(value, list):
+                mr = []
+                for v in value:
+                    if isinstance(v, ranges.Range):
+                        mr.append(
+                            pg8000_Range(v.lower, v.upper, v.bounds, v.empty)
+                        )
+                    else:
+                        mr.append(v)
+                return mr
+            else:
+                return value
+
+        return to_multirange
+
+    def result_processor(self, dialect, coltype):
+        def to_multirange(value):
+            if value is None:
+                return None
+            else:
+                return ranges.MultiRange(
+                    ranges.Range(
+                        v.lower, v.upper, bounds=v.bounds, empty=v.is_empty
+                    )
+                    for v in value
+                )
+
+        return to_multirange
+
+
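+# process-local counter used to generate unique server-side cursor names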
+_server_side_id = util.counter()
+
+
+class PGExecutionContext_pg8000(PGExecutionContext):
+    def create_server_side_cursor(self):
+        ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
+        return ServerSideCursor(self._dbapi_connection.cursor(), ident)
+
+    def pre_exec(self):
+        if not self.compiled:
+            return
+
+
+class ServerSideCursor:
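+    # emulates a server-side (named) cursor on top of pg8000's regular
+    # cursor by issuing DECLARE ... NO SCROLL CURSOR / FETCH / CLOSE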
+    server_side = True
+
+    def __init__(self, cursor, ident):
+        self.ident = ident
+        self.cursor = cursor
+
+    @property
+    def connection(self):
+        return self.cursor.connection
+
+    @property
+    def rowcount(self):
+        return self.cursor.rowcount
+
+    @property
+    def description(self):
+        return self.cursor.description
+
+    def execute(self, operation, args=(), stream=None):
+        op = "DECLARE " + self.ident + " NO SCROLL CURSOR FOR " + operation
+        self.cursor.execute(op, args, stream=stream)
+        return self
+
+    def executemany(self, operation, param_sets):
+        self.cursor.executemany(operation, param_sets)
+        return self
+
+    def fetchone(self):
+        self.cursor.execute("FETCH FORWARD 1 FROM " + self.ident)
+        return self.cursor.fetchone()
+
+    def fetchmany(self, num=None):
+        if num is None:
+            return self.fetchall()
+        else:
+            self.cursor.execute(
+                "FETCH FORWARD " + str(int(num)) + " FROM " + self.ident
+            )
+            return self.cursor.fetchall()
+
+    def fetchall(self):
+        self.cursor.execute("FETCH FORWARD ALL FROM " + self.ident)
+        return self.cursor.fetchall()
+
+    def close(self):
+        self.cursor.execute("CLOSE " + self.ident)
+        self.cursor.close()
+
+    def setinputsizes(self, *sizes):
+        self.cursor.setinputsizes(*sizes)
+
+    def setoutputsize(self, size, column=None):
+        pass
+
+
+class PGCompiler_pg8000(PGCompiler):
+    def visit_mod_binary(self, binary, operator, **kw):
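+        # pg8000 uses the "format" paramstyle, so a literal % for the
+        # modulo operator must be escaped as %%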
+        return (
+            self.process(binary.left, **kw)
+            + " %% "
+            + self.process(binary.right, **kw)
+        )
+
+
+class PGIdentifierPreparer_pg8000(PGIdentifierPreparer):
+    def __init__(self, *args, **kwargs):
+        PGIdentifierPreparer.__init__(self, *args, **kwargs)
+        self._double_percents = False
+
+
+class PGDialect_pg8000(PGDialect):
+    driver = "pg8000"
+    supports_statement_cache = True
+
+    supports_unicode_statements = True
+
+    supports_unicode_binds = True
+
+    default_paramstyle = "format"
+    supports_sane_multi_rowcount = True
+    execution_ctx_cls = PGExecutionContext_pg8000
+    statement_compiler = PGCompiler_pg8000
+    preparer = PGIdentifierPreparer_pg8000
+    supports_server_side_cursors = True
+
+    render_bind_cast = True
+
+    # reversed as of pg8000 1.16.6.  1.16.5 and lower
+    # are no longer compatible
+    description_encoding = None
+    # description_encoding = "use_encoding"
+
+    colspecs = util.update_copy(
+        PGDialect.colspecs,
+        {
+            sqltypes.String: _PGString,
+            sqltypes.Numeric: _PGNumericNoBind,
+            sqltypes.Float: _PGFloat,
+            sqltypes.JSON: _PGJSON,
+            sqltypes.Boolean: _PGBoolean,
+            sqltypes.NullType: _PGNullType,
+            JSONB: _PGJSONB,
+            CITEXT: CITEXT,
+            sqltypes.JSON.JSONPathType: _PGJSONPathType,
+            sqltypes.JSON.JSONIndexType: _PGJSONIndexType,
+            sqltypes.JSON.JSONIntIndexType: _PGJSONIntIndexType,
+            sqltypes.JSON.JSONStrIndexType: _PGJSONStrIndexType,
+            sqltypes.Interval: _PGInterval,
+            INTERVAL: _PGInterval,
+            sqltypes.DateTime: _PGTimeStamp,
+            sqltypes.Date: _PGDate,
+            sqltypes.Time: _PGTime,
+            sqltypes.Integer: _PGInteger,
+            sqltypes.SmallInteger: _PGSmallInteger,
+            sqltypes.BigInteger: _PGBigInteger,
+            sqltypes.Enum: _PGEnum,
+            sqltypes.ARRAY: _PGARRAY,
+            OIDVECTOR: _PGOIDVECTOR,
+            ranges.INT4RANGE: _Pg8000Range,
+            ranges.INT8RANGE: _Pg8000Range,
+            ranges.NUMRANGE: _Pg8000Range,
+            ranges.DATERANGE: _Pg8000Range,
+            ranges.TSRANGE: _Pg8000Range,
+            ranges.TSTZRANGE: _Pg8000Range,
+            ranges.INT4MULTIRANGE: _Pg8000MultiRange,
+            ranges.INT8MULTIRANGE: _Pg8000MultiRange,
+            ranges.NUMMULTIRANGE: _Pg8000MultiRange,
+            ranges.DATEMULTIRANGE: _Pg8000MultiRange,
+            ranges.TSMULTIRANGE: _Pg8000MultiRange,
+            ranges.TSTZMULTIRANGE: _Pg8000MultiRange,
+        },
+    )
+
+    def __init__(self, client_encoding=None, **kwargs):
+        PGDialect.__init__(self, **kwargs)
+        self.client_encoding = client_encoding
+
+        if self._dbapi_version < (1, 16, 6):
+            raise NotImplementedError("pg8000 1.16.6 or greater is required")
+
+        if self._native_inet_types:
+            raise NotImplementedError(
+                "The pg8000 dialect does not fully implement "
+                "ipaddress type handling; INET is supported by default, "
+                "CIDR is not"
+            )
+
+    @util.memoized_property
+    def _dbapi_version(self):
+        if self.dbapi and hasattr(self.dbapi, "__version__"):
+            return tuple(
+                [
+                    int(x)
+                    for x in re.findall(
+                        r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__
+                    )
+                ]
+            )
+        else:
+            return (99, 99, 99)
+
+    @classmethod
+    def import_dbapi(cls):
+        return __import__("pg8000")
+
+    def create_connect_args(self, url):
+        opts = url.translate_connect_args(username="user")
+        if "port" in opts:
+            opts["port"] = int(opts["port"])
+        opts.update(url.query)
+        return ([], opts)
+
+    def is_disconnect(self, e, connection, cursor):
+        if isinstance(e, self.dbapi.InterfaceError) and "network error" in str(
+            e
+        ):
+            # new as of pg8000 1.19.0 for broken connections
+            return True
+
+        # connection was closed normally
+        return "connection is closed" in str(e)
+
+    def get_isolation_level_values(self, dbapi_connection):
+        return (
+            "AUTOCOMMIT",
+            "READ COMMITTED",
+            "READ UNCOMMITTED",
+            "REPEATABLE READ",
+            "SERIALIZABLE",
+        )
+
+    def set_isolation_level(self, dbapi_connection, level):
+        level = level.replace("_", " ")
+
+        if level == "AUTOCOMMIT":
+            dbapi_connection.autocommit = True
+        else:
+            dbapi_connection.autocommit = False
+            cursor = dbapi_connection.cursor()
+            cursor.execute(
+                "SET SESSION CHARACTERISTICS AS TRANSACTION "
+                f"ISOLATION LEVEL {level}"
+            )
+            cursor.execute("COMMIT")
+            cursor.close()
+
+    def set_readonly(self, connection, value):
+        cursor = connection.cursor()
+        try:
+            cursor.execute(
+                "SET SESSION CHARACTERISTICS AS TRANSACTION %s"
+                % ("READ ONLY" if value else "READ WRITE")
+            )
+            cursor.execute("COMMIT")
+        finally:
+            cursor.close()
+
+    def get_readonly(self, connection):
+        cursor = connection.cursor()
+        try:
+            cursor.execute("show transaction_read_only")
+            val = cursor.fetchone()[0]
+        finally:
+            cursor.close()
+
+        return val == "on"
+
+    def set_deferrable(self, connection, value):
+        cursor = connection.cursor()
+        try:
+            cursor.execute(
+                "SET SESSION CHARACTERISTICS AS TRANSACTION %s"
+                % ("DEFERRABLE" if value else "NOT DEFERRABLE")
+            )
+            cursor.execute("COMMIT")
+        finally:
+            cursor.close()
+
+    def get_deferrable(self, connection):
+        cursor = connection.cursor()
+        try:
+            cursor.execute("show transaction_deferrable")
+            val = cursor.fetchone()[0]
+        finally:
+            cursor.close()
+
+        return val == "on"
+
+    def _set_client_encoding(self, dbapi_connection, client_encoding):
+        cursor = dbapi_connection.cursor()
+        cursor.execute(
+            f"""SET CLIENT_ENCODING TO '{
+                client_encoding.replace("'", "''")
+            }'"""
+        )
+        cursor.execute("COMMIT")
+        cursor.close()
+
+    def do_begin_twophase(self, connection, xid):
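+        # pg8000 expects a DBAPI xid tuple of
+        # (format_id, global_transaction_id, branch_qualifier)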
+        connection.connection.tpc_begin((0, xid, ""))
+
+    def do_prepare_twophase(self, connection, xid):
+        connection.connection.tpc_prepare()
+
+    def do_rollback_twophase(
+        self, connection, xid, is_prepared=True, recover=False
+    ):
+        connection.connection.tpc_rollback((0, xid, ""))
+
+    def do_commit_twophase(
+        self, connection, xid, is_prepared=True, recover=False
+    ):
+        connection.connection.tpc_commit((0, xid, ""))
+
+    def do_recover_twophase(self, connection):
+        return [row[1] for row in connection.connection.tpc_recover()]
+
+    def on_connect(self):
+        fns = []
+
+        def on_connect(conn):
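+            # have pg8000 adapt quoted_name objects the same way as str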
+            conn.py_types[quoted_name] = conn.py_types[str]
+
+        fns.append(on_connect)
+
+        if self.client_encoding is not None:
+
+            def on_connect(conn):
+                self._set_client_encoding(conn, self.client_encoding)
+
+            fns.append(on_connect)
+
+        if self._native_inet_types is False:
+
+            def on_connect(conn):
+                # inet
+                conn.register_in_adapter(869, lambda s: s)
+
+                # cidr
+                conn.register_in_adapter(650, lambda s: s)
+
+            fns.append(on_connect)
+
+        if self._json_deserializer:
+
+            def on_connect(conn):
+                # json
+                conn.register_in_adapter(114, self._json_deserializer)
+
+                # jsonb
+                conn.register_in_adapter(3802, self._json_deserializer)
+
+            fns.append(on_connect)
+
+        if len(fns) > 0:
+
+            def on_connect(conn):
+                for fn in fns:
+                    fn(conn)
+
+            return on_connect
+        else:
+            return None
+
+    @util.memoized_property
+    def _dialect_specific_select_one(self):
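+        # a bare ";" (empty statement) is assumed sufficient as the
+        # connection liveness check for this driver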
+        return ";"
+
+
+dialect = PGDialect_pg8000
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/pg_catalog.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/pg_catalog.py
new file mode 100644
index 00000000..78f390a2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/pg_catalog.py
@@ -0,0 +1,300 @@
+# dialects/postgresql/pg_catalog.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+
+from .array import ARRAY
+from .types import OID
+from .types import REGCLASS
+from ... import Column
+from ... import func
+from ... import MetaData
+from ... import Table
+from ...types import BigInteger
+from ...types import Boolean
+from ...types import CHAR
+from ...types import Float
+from ...types import Integer
+from ...types import SmallInteger
+from ...types import String
+from ...types import Text
+from ...types import TypeDecorator
+
+
+# types
+class NAME(TypeDecorator):
+    impl = String(64, collation="C")
+    cache_ok = True
+
+
+class PG_NODE_TREE(TypeDecorator):
+    impl = Text(collation="C")
+    cache_ok = True
+
+
+class INT2VECTOR(TypeDecorator):
+    impl = ARRAY(SmallInteger)
+    cache_ok = True
+
+
+class OIDVECTOR(TypeDecorator):
+    impl = ARRAY(OID)
+    cache_ok = True
+
+
+class _SpaceVector:
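+    # int2vector / oidvector values arrive in text output as a
+    # space-separated string, e.g. '1 2 3'; parse into a list of ints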
+    def result_processor(self, dialect, coltype):
+        def process(value):
+            if value is None:
+                return value
+            return [int(p) for p in value.split(" ")]
+
+        return process
+
+
+REGPROC = REGCLASS  # treated here as an alias of REGCLASS
+
+# functions
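+# func.pg_catalog yields schema-qualified functions, e.g.
+# _pg_cat.quote_ident(...) renders as pg_catalog.quote_ident(...)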
+_pg_cat = func.pg_catalog
+quote_ident = _pg_cat.quote_ident
+pg_table_is_visible = _pg_cat.pg_table_is_visible
+pg_type_is_visible = _pg_cat.pg_type_is_visible
+pg_get_viewdef = _pg_cat.pg_get_viewdef
+pg_get_serial_sequence = _pg_cat.pg_get_serial_sequence
+format_type = _pg_cat.format_type
+pg_get_expr = _pg_cat.pg_get_expr
+pg_get_constraintdef = _pg_cat.pg_get_constraintdef
+pg_get_indexdef = _pg_cat.pg_get_indexdef
+
+# constants
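+# pg_class.relkind codes: r = ordinary table, p = partitioned table,
+# f = foreign table, v = view, m = materialized view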
+RELKINDS_TABLE_NO_FOREIGN = ("r", "p")
+RELKINDS_TABLE = RELKINDS_TABLE_NO_FOREIGN + ("f",)
+RELKINDS_VIEW = ("v",)
+RELKINDS_MAT_VIEW = ("m",)
+RELKINDS_ALL_TABLE_LIKE = RELKINDS_TABLE + RELKINDS_VIEW + RELKINDS_MAT_VIEW
+
+# tables
+pg_catalog_meta = MetaData(schema="pg_catalog")
+
+pg_namespace = Table(
+    "pg_namespace",
+    pg_catalog_meta,
+    Column("oid", OID),
+    Column("nspname", NAME),
+    Column("nspowner", OID),
+)
+
+pg_class = Table(
+    "pg_class",
+    pg_catalog_meta,
+    Column("oid", OID, info={"server_version": (9, 3)}),
+    Column("relname", NAME),
+    Column("relnamespace", OID),
+    Column("reltype", OID),
+    Column("reloftype", OID),
+    Column("relowner", OID),
+    Column("relam", OID),
+    Column("relfilenode", OID),
+    Column("reltablespace", OID),
+    Column("relpages", Integer),
+    Column("reltuples", Float),
+    Column("relallvisible", Integer, info={"server_version": (9, 2)}),
+    Column("reltoastrelid", OID),
+    Column("relhasindex", Boolean),
+    Column("relisshared", Boolean),
+    Column("relpersistence", CHAR, info={"server_version": (9, 1)}),
+    Column("relkind", CHAR),
+    Column("relnatts", SmallInteger),
+    Column("relchecks", SmallInteger),
+    Column("relhasrules", Boolean),
+    Column("relhastriggers", Boolean),
+    Column("relhassubclass", Boolean),
+    Column("relrowsecurity", Boolean),
+    Column("relforcerowsecurity", Boolean, info={"server_version": (9, 5)}),
+    Column("relispopulated", Boolean, info={"server_version": (9, 3)}),
+    Column("relreplident", CHAR, info={"server_version": (9, 4)}),
+    Column("relispartition", Boolean, info={"server_version": (10,)}),
+    Column("relrewrite", OID, info={"server_version": (11,)}),
+    Column("reloptions", ARRAY(Text)),
+)
+
+pg_type = Table(
+    "pg_type",
+    pg_catalog_meta,
+    Column("oid", OID, info={"server_version": (9, 3)}),
+    Column("typname", NAME),
+    Column("typnamespace", OID),
+    Column("typowner", OID),
+    Column("typlen", SmallInteger),
+    Column("typbyval", Boolean),
+    Column("typtype", CHAR),
+    Column("typcategory", CHAR),
+    Column("typispreferred", Boolean),
+    Column("typisdefined", Boolean),
+    Column("typdelim", CHAR),
+    Column("typrelid", OID),
+    Column("typelem", OID),
+    Column("typarray", OID),
+    Column("typinput", REGPROC),
+    Column("typoutput", REGPROC),
+    Column("typreceive", REGPROC),
+    Column("typsend", REGPROC),
+    Column("typmodin", REGPROC),
+    Column("typmodout", REGPROC),
+    Column("typanalyze", REGPROC),
+    Column("typalign", CHAR),
+    Column("typstorage", CHAR),
+    Column("typnotnull", Boolean),
+    Column("typbasetype", OID),
+    Column("typtypmod", Integer),
+    Column("typndims", Integer),
+    Column("typcollation", OID, info={"server_version": (9, 1)}),
+    Column("typdefault", Text),
+)
+
+pg_index = Table(
+    "pg_index",
+    pg_catalog_meta,
+    Column("indexrelid", OID),
+    Column("indrelid", OID),
+    Column("indnatts", SmallInteger),
+    Column("indnkeyatts", SmallInteger, info={"server_version": (11,)}),
+    Column("indisunique", Boolean),
+    Column("indnullsnotdistinct", Boolean, info={"server_version": (15,)}),
+    Column("indisprimary", Boolean),
+    Column("indisexclusion", Boolean, info={"server_version": (9, 1)}),
+    Column("indimmediate", Boolean),
+    Column("indisclustered", Boolean),
+    Column("indisvalid", Boolean),
+    Column("indcheckxmin", Boolean),
+    Column("indisready", Boolean),
+    Column("indislive", Boolean, info={"server_version": (9, 3)}),  # 9.3
+    Column("indisreplident", Boolean),
+    Column("indkey", INT2VECTOR),
+    Column("indcollation", OIDVECTOR, info={"server_version": (9, 1)}),  # 9.1
+    Column("indclass", OIDVECTOR),
+    Column("indoption", INT2VECTOR),
+    Column("indexprs", PG_NODE_TREE),
+    Column("indpred", PG_NODE_TREE),
+)
+
+pg_attribute = Table(
+    "pg_attribute",
+    pg_catalog_meta,
+    Column("attrelid", OID),
+    Column("attname", NAME),
+    Column("atttypid", OID),
+    Column("attstattarget", Integer),
+    Column("attlen", SmallInteger),
+    Column("attnum", SmallInteger),
+    Column("attndims", Integer),
+    Column("attcacheoff", Integer),
+    Column("atttypmod", Integer),
+    Column("attbyval", Boolean),
+    Column("attstorage", CHAR),
+    Column("attalign", CHAR),
+    Column("attnotnull", Boolean),
+    Column("atthasdef", Boolean),
+    Column("atthasmissing", Boolean, info={"server_version": (11,)}),
+    Column("attidentity", CHAR, info={"server_version": (10,)}),
+    Column("attgenerated", CHAR, info={"server_version": (12,)}),
+    Column("attisdropped", Boolean),
+    Column("attislocal", Boolean),
+    Column("attinhcount", Integer),
+    Column("attcollation", OID, info={"server_version": (9, 1)}),
+)
+
+pg_constraint = Table(
+    "pg_constraint",
+    pg_catalog_meta,
+    Column("oid", OID),  # 9.3
+    Column("conname", NAME),
+    Column("connamespace", OID),
+    Column("contype", CHAR),
+    Column("condeferrable", Boolean),
+    Column("condeferred", Boolean),
+    Column("convalidated", Boolean, info={"server_version": (9, 1)}),
+    Column("conrelid", OID),
+    Column("contypid", OID),
+    Column("conindid", OID),
+    Column("conparentid", OID, info={"server_version": (11,)}),
+    Column("confrelid", OID),
+    Column("confupdtype", CHAR),
+    Column("confdeltype", CHAR),
+    Column("confmatchtype", CHAR),
+    Column("conislocal", Boolean),
+    Column("coninhcount", Integer),
+    Column("connoinherit", Boolean, info={"server_version": (9, 2)}),
+    Column("conkey", ARRAY(SmallInteger)),
+    Column("confkey", ARRAY(SmallInteger)),
+)
+
+pg_sequence = Table(
+    "pg_sequence",
+    pg_catalog_meta,
+    Column("seqrelid", OID),
+    Column("seqtypid", OID),
+    Column("seqstart", BigInteger),
+    Column("seqincrement", BigInteger),
+    Column("seqmax", BigInteger),
+    Column("seqmin", BigInteger),
+    Column("seqcache", BigInteger),
+    Column("seqcycle", Boolean),
+    info={"server_version": (10,)},
+)
+
+pg_attrdef = Table(
+    "pg_attrdef",
+    pg_catalog_meta,
+    Column("oid", OID, info={"server_version": (9, 3)}),
+    Column("adrelid", OID),
+    Column("adnum", SmallInteger),
+    Column("adbin", PG_NODE_TREE),
+)
+
+pg_description = Table(
+    "pg_description",
+    pg_catalog_meta,
+    Column("objoid", OID),
+    Column("classoid", OID),
+    Column("objsubid", Integer),
+    Column("description", Text(collation="C")),
+)
+
+pg_enum = Table(
+    "pg_enum",
+    pg_catalog_meta,
+    Column("oid", OID, info={"server_version": (9, 3)}),
+    Column("enumtypid", OID),
+    Column("enumsortorder", Float(), info={"server_version": (9, 1)}),
+    Column("enumlabel", NAME),
+)
+
+pg_am = Table(
+    "pg_am",
+    pg_catalog_meta,
+    Column("oid", OID, info={"server_version": (9, 3)}),
+    Column("amname", NAME),
+    Column("amhandler", REGPROC, info={"server_version": (9, 6)}),
+    Column("amtype", CHAR, info={"server_version": (9, 6)}),
+)
+
+pg_collation = Table(
+    "pg_collation",
+    pg_catalog_meta,
+    Column("oid", OID, info={"server_version": (9, 3)}),
+    Column("collname", NAME),
+    Column("collnamespace", OID),
+    Column("collowner", OID),
+    Column("collprovider", CHAR, info={"server_version": (10,)}),
+    Column("collisdeterministic", Boolean, info={"server_version": (12,)}),
+    Column("collencoding", Integer),
+    Column("collcollate", Text),
+    Column("collctype", Text),
+    Column("colliculocale", Text),
+    Column("collicurules", Text, info={"server_version": (16,)}),
+    Column("collversion", Text, info={"server_version": (10,)}),
+)
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/provision.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/provision.py
new file mode 100644
index 00000000..c76f5f51
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/provision.py
@@ -0,0 +1,175 @@
+# dialects/postgresql/provision.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+
+import time
+
+from ... import exc
+from ... import inspect
+from ... import text
+from ...testing import warn_test_suite
+from ...testing.provision import create_db
+from ...testing.provision import drop_all_schema_objects_post_tables
+from ...testing.provision import drop_all_schema_objects_pre_tables
+from ...testing.provision import drop_db
+from ...testing.provision import log
+from ...testing.provision import post_configure_engine
+from ...testing.provision import prepare_for_drop_tables
+from ...testing.provision import set_default_schema_on_connection
+from ...testing.provision import temp_table_keyword_args
+from ...testing.provision import upsert
+
+
+@create_db.for_db("postgresql")
+def _pg_create_db(cfg, eng, ident):
+    template_db = cfg.options.postgresql_templatedb
+
+    with eng.execution_options(isolation_level="AUTOCOMMIT").begin() as conn:
+        if not template_db:
+            template_db = conn.exec_driver_sql(
+                "select current_database()"
+            ).scalar()
+
+        attempt = 0
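+        # retry CREATE DATABASE up to three times if the template
+        # database is momentarily in use by another connection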
+        while True:
+            try:
+                conn.exec_driver_sql(
+                    "CREATE DATABASE %s TEMPLATE %s" % (ident, template_db)
+                )
+            except exc.OperationalError as err:
+                attempt += 1
+                if attempt >= 3:
+                    raise
+                if "accessed by other users" in str(err):
+                    log.info(
+                        "Waiting to create %s, URI %r, "
+                        "template DB %s is in use sleeping for .5",
+                        ident,
+                        eng.url,
+                        template_db,
+                    )
+                    time.sleep(0.5)
+            else:
+                break
+
+
+@drop_db.for_db("postgresql")
+def _pg_drop_db(cfg, eng, ident):
+    with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
+        with conn.begin():
+            conn.execute(
+                text(
+                    "select pg_terminate_backend(pid) from pg_stat_activity "
+                    "where usename=current_user and pid != pg_backend_pid() "
+                    "and datname=:dname"
+                ),
+                dict(dname=ident),
+            )
+            conn.exec_driver_sql("DROP DATABASE %s" % ident)
+
+
+@temp_table_keyword_args.for_db("postgresql")
+def _postgresql_temp_table_keyword_args(cfg, eng):
+    return {"prefixes": ["TEMPORARY"]}
+
+
+@set_default_schema_on_connection.for_db("postgresql")
+def _postgresql_set_default_schema_on_connection(
+    cfg, dbapi_connection, schema_name
+):
+    existing_autocommit = dbapi_connection.autocommit
+    dbapi_connection.autocommit = True
+    cursor = dbapi_connection.cursor()
+    cursor.execute("SET SESSION search_path='%s'" % schema_name)
+    cursor.close()
+    dbapi_connection.autocommit = existing_autocommit
+
+
+@drop_all_schema_objects_pre_tables.for_db("postgresql")
+def drop_all_schema_objects_pre_tables(cfg, eng):
+    with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
+        for xid in conn.exec_driver_sql(
+            "select gid from pg_prepared_xacts"
+        ).scalars():
+            conn.exec_driver_sql("ROLLBACK PREPARED '%s'" % xid)
+
+
+@drop_all_schema_objects_post_tables.for_db("postgresql")
+def drop_all_schema_objects_post_tables(cfg, eng):
+    from sqlalchemy.dialects import postgresql
+
+    inspector = inspect(eng)
+    with eng.begin() as conn:
+        for enum in inspector.get_enums("*"):
+            conn.execute(
+                postgresql.DropEnumType(
+                    postgresql.ENUM(name=enum["name"], schema=enum["schema"])
+                )
+            )
+
+
+@prepare_for_drop_tables.for_db("postgresql")
+def prepare_for_drop_tables(config, connection):
+    """Ensure there are no locks on the current username/database."""
+
+    result = connection.exec_driver_sql(
+        "select pid, state, wait_event_type, query "
+        # "select pg_terminate_backend(pid), state, wait_event_type "
+        "from pg_stat_activity where "
+        "usename=current_user "
+        "and datname=current_database() and state='idle in transaction' "
+        "and pid != pg_backend_pid()"
+    )
+    rows = result.all()  # noqa
+    if rows:
+        warn_test_suite(
+            "PostgreSQL may not be able to DROP tables due to "
+            "idle in transaction: %s"
+            % ("; ".join(row._mapping["query"] for row in rows))
+        )
+
+
+@upsert.for_db("postgresql")
+def _upsert(
+    cfg, table, returning, *, set_lambda=None, sort_by_parameter_order=False
+):
+    from sqlalchemy.dialects.postgresql import insert
+
+    stmt = insert(table)
+
+    table_pk = inspect(table).selectable
+
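+    # the table's primary key columns serve as the ON CONFLICT target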
+    if set_lambda:
+        stmt = stmt.on_conflict_do_update(
+            index_elements=table_pk.primary_key, set_=set_lambda(stmt.excluded)
+        )
+    else:
+        stmt = stmt.on_conflict_do_nothing()
+
+    stmt = stmt.returning(
+        *returning, sort_by_parameter_order=sort_by_parameter_order
+    )
+    return stmt
+
+
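+# extensions the test suite provisions, gated on a minimum server version;
+# PostgreSQL 13 is assumed here as the release that marks these as trusted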
+_extensions = [
+    ("citext", (13,)),
+    ("hstore", (13,)),
+]
+
+
+@post_configure_engine.for_db("postgresql")
+def _create_citext_extension(url, engine, follower_ident):
+    with engine.connect() as conn:
+        for extension, min_version in _extensions:
+            if conn.dialect.server_version_info >= min_version:
+                conn.execute(
+                    text(f"CREATE EXTENSION IF NOT EXISTS {extension}")
+                )
+                conn.commit()
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg.py
new file mode 100644
index 00000000..0554048c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg.py
@@ -0,0 +1,783 @@
+# dialects/postgresql/psycopg.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+
+r"""
+.. dialect:: postgresql+psycopg
+    :name: psycopg (a.k.a. psycopg 3)
+    :dbapi: psycopg
+    :connectstring: postgresql+psycopg://user:password@host:port/dbname[?key=value&key=value...]
+    :url: https://pypi.org/project/psycopg/
+
+``psycopg`` is the package and module name for version 3 of the ``psycopg``
+database driver, formerly known as ``psycopg2``.  This driver is different
+enough from its ``psycopg2`` predecessor that SQLAlchemy supports it
+via a totally separate dialect; support for ``psycopg2`` is expected to remain
+for as long as that package continues to function for modern Python versions,
+and also remains the default dialect for the ``postgresql://`` dialect
+series.
+
+The SQLAlchemy ``psycopg`` dialect provides both a sync and an async
+implementation under the same dialect name. The proper version is
+selected depending on how the engine is created:
+
+* calling :func:`_sa.create_engine` with ``postgresql+psycopg://...`` will
+  automatically select the sync version, e.g.::
+
+    from sqlalchemy import create_engine
+
+    sync_engine = create_engine(
+        "postgresql+psycopg://scott:tiger@localhost/test"
+    )
+
+* calling :func:`_asyncio.create_async_engine` with
+  ``postgresql+psycopg://...`` will automatically select the async version,
+  e.g.::
+
+    from sqlalchemy.ext.asyncio import create_async_engine
+
+    asyncio_engine = create_async_engine(
+        "postgresql+psycopg://scott:tiger@localhost/test"
+    )
+
+The asyncio version of the dialect may also be specified explicitly using the
+``psycopg_async`` suffix, as::
+
+    from sqlalchemy.ext.asyncio import create_async_engine
+
+    asyncio_engine = create_async_engine(
+        "postgresql+psycopg_async://scott:tiger@localhost/test"
+    )
+
+.. seealso::
+
+    :ref:`postgresql_psycopg2` - The SQLAlchemy ``psycopg``
+    dialect shares most of its behavior with the ``psycopg2`` dialect.
+    Further documentation is available there.
+
+Using a different Cursor class
+------------------------------
+
+One of the differences between ``psycopg`` and the older ``psycopg2``
+is how bound parameters are handled: ``psycopg2`` would bind them
+client side, while ``psycopg`` by default will bind them server side.
+
+It's possible to configure ``psycopg`` to do client side binding by
+specifying the ``cursor_factory`` to be ``ClientCursor`` when creating
+the engine::
+
+    from psycopg import ClientCursor
+
+    client_side_engine = create_engine(
+        "postgresql+psycopg://...",
+        connect_args={"cursor_factory": ClientCursor},
+    )
+
+Similarly when using an async engine the ``AsyncClientCursor`` can be
+specified::
+
+    from psycopg import AsyncClientCursor
+
+    client_side_engine = create_async_engine(
+        "postgresql+psycopg://...",
+        connect_args={"cursor_factory": AsyncClientCursor},
+    )
+
+.. seealso::
+
+    `Client-side-binding cursors <https://www.psycopg.org/psycopg3/docs/advanced/cursors.html#client-side-binding-cursors>`_
+
+"""  # noqa
+from __future__ import annotations
+
+from collections import deque
+import logging
+import re
+from typing import cast
+from typing import TYPE_CHECKING
+
+from . import ranges
+from ._psycopg_common import _PGDialect_common_psycopg
+from ._psycopg_common import _PGExecutionContext_common_psycopg
+from .base import INTERVAL
+from .base import PGCompiler
+from .base import PGIdentifierPreparer
+from .base import REGCONFIG
+from .json import JSON
+from .json import JSONB
+from .json import JSONPathType
+from .types import CITEXT
+from ... import pool
+from ... import util
+from ...engine import AdaptedConnection
+from ...sql import sqltypes
+from ...util.concurrency import await_fallback
+from ...util.concurrency import await_only
+
+if TYPE_CHECKING:
+    from typing import Iterable
+
+    from psycopg import AsyncConnection
+
+logger = logging.getLogger("sqlalchemy.dialects.postgresql")
+
+
+class _PGString(sqltypes.String):
+    render_bind_cast = True
+
+
+class _PGREGCONFIG(REGCONFIG):
+    render_bind_cast = True
+
+
+class _PGJSON(JSON):
+    def bind_processor(self, dialect):
+        return self._make_bind_processor(None, dialect._psycopg_Json)
+
+    def result_processor(self, dialect, coltype):
+        return None
+
+
+class _PGJSONB(JSONB):
+    def bind_processor(self, dialect):
+        return self._make_bind_processor(None, dialect._psycopg_Jsonb)
+
+    def result_processor(self, dialect, coltype):
+        return None
+
+
+class _PGJSONIntIndexType(sqltypes.JSON.JSONIntIndexType):
+    __visit_name__ = "json_int_index"
+
+    render_bind_cast = True
+
+
+class _PGJSONStrIndexType(sqltypes.JSON.JSONStrIndexType):
+    __visit_name__ = "json_str_index"
+
+    render_bind_cast = True
+
+
+class _PGJSONPathType(JSONPathType):
+    pass
+
+
+class _PGInterval(INTERVAL):
+    render_bind_cast = True
+
+
+class _PGTimeStamp(sqltypes.DateTime):
+    render_bind_cast = True
+
+
+class _PGDate(sqltypes.Date):
+    render_bind_cast = True
+
+
+class _PGTime(sqltypes.Time):
+    render_bind_cast = True
+
+
+class _PGInteger(sqltypes.Integer):
+    render_bind_cast = True
+
+
+class _PGSmallInteger(sqltypes.SmallInteger):
+    render_bind_cast = True
+
+
+class _PGNullType(sqltypes.NullType):
+    render_bind_cast = True
+
+
+class _PGBigInteger(sqltypes.BigInteger):
+    render_bind_cast = True
+
+
+class _PGBoolean(sqltypes.Boolean):
+    render_bind_cast = True
+
+
+class _PsycopgRange(ranges.AbstractSingleRangeImpl):
+    def bind_processor(self, dialect):
+        psycopg_Range = cast(PGDialect_psycopg, dialect)._psycopg_Range
+
+        def to_range(value):
+            if isinstance(value, ranges.Range):
+                value = psycopg_Range(
+                    value.lower, value.upper, value.bounds, value.empty
+                )
+            return value
+
+        return to_range
+
+    def result_processor(self, dialect, coltype):
+        def to_range(value):
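+            # a psycopg Range whose _bounds is falsy represents the
+            # empty range; normalize its bounds to "[)" in that case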
+            if value is not None:
+                value = ranges.Range(
+                    value._lower,
+                    value._upper,
+                    bounds=value._bounds if value._bounds else "[)",
+                    empty=not value._bounds,
+                )
+            return value
+
+        return to_range
+
+
+class _PsycopgMultiRange(ranges.AbstractMultiRangeImpl):
+    def bind_processor(self, dialect):
+        psycopg_Range = cast(PGDialect_psycopg, dialect)._psycopg_Range
+        psycopg_Multirange = cast(
+            PGDialect_psycopg, dialect
+        )._psycopg_Multirange
+
+        NoneType = type(None)
+
+        def to_range(value):
+            if isinstance(value, (str, NoneType, psycopg_Multirange)):
+                return value
+
+            return psycopg_Multirange(
+                [
+                    psycopg_Range(
+                        element.lower,
+                        element.upper,
+                        element.bounds,
+                        element.empty,
+                    )
+                    for element in cast("Iterable[ranges.Range]", value)
+                ]
+            )
+
+        return to_range
+
+    def result_processor(self, dialect, coltype):
+        def to_range(value):
+            if value is None:
+                return None
+            else:
+                return ranges.MultiRange(
+                    ranges.Range(
+                        elem._lower,
+                        elem._upper,
+                        bounds=elem._bounds if elem._bounds else "[)",
+                        empty=not elem._bounds,
+                    )
+                    for elem in value
+                )
+
+        return to_range
+
+
+class PGExecutionContext_psycopg(_PGExecutionContext_common_psycopg):
+    pass
+
+
+class PGCompiler_psycopg(PGCompiler):
+    pass
+
+
+class PGIdentifierPreparer_psycopg(PGIdentifierPreparer):
+    pass
+
+
+def _log_notices(diagnostic):
+    logger.info("%s: %s", diagnostic.severity, diagnostic.message_primary)
+
+
+class PGDialect_psycopg(_PGDialect_common_psycopg):
+    driver = "psycopg"
+
+    supports_statement_cache = True
+    supports_server_side_cursors = True
+    default_paramstyle = "pyformat"
+    supports_sane_multi_rowcount = True
+
+    execution_ctx_cls = PGExecutionContext_psycopg
+    statement_compiler = PGCompiler_psycopg
+    preparer = PGIdentifierPreparer_psycopg
+    psycopg_version = (0, 0)
+
+    _has_native_hstore = True
+    _psycopg_adapters_map = None
+
+    colspecs = util.update_copy(
+        _PGDialect_common_psycopg.colspecs,
+        {
+            sqltypes.String: _PGString,
+            REGCONFIG: _PGREGCONFIG,
+            JSON: _PGJSON,
+            CITEXT: CITEXT,
+            sqltypes.JSON: _PGJSON,
+            JSONB: _PGJSONB,
+            sqltypes.JSON.JSONPathType: _PGJSONPathType,
+            sqltypes.JSON.JSONIntIndexType: _PGJSONIntIndexType,
+            sqltypes.JSON.JSONStrIndexType: _PGJSONStrIndexType,
+            sqltypes.Interval: _PGInterval,
+            INTERVAL: _PGInterval,
+            sqltypes.Date: _PGDate,
+            sqltypes.DateTime: _PGTimeStamp,
+            sqltypes.Time: _PGTime,
+            sqltypes.Integer: _PGInteger,
+            sqltypes.SmallInteger: _PGSmallInteger,
+            sqltypes.BigInteger: _PGBigInteger,
+            ranges.AbstractSingleRange: _PsycopgRange,
+            ranges.AbstractMultiRange: _PsycopgMultiRange,
+        },
+    )
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        if self.dbapi:
+            m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
+            if m:
+                self.psycopg_version = tuple(
+                    int(x) for x in m.group(1, 2, 3) if x is not None
+                )
+
+            if self.psycopg_version < (3, 0, 2):
+                raise ImportError(
+                    "psycopg version 3.0.2 or higher is required."
+                )
+
+            from psycopg.adapt import AdaptersMap
+
+            self._psycopg_adapters_map = adapters_map = AdaptersMap(
+                self.dbapi.adapters
+            )
+
+            if self._native_inet_types is False:
+                import psycopg.types.string
+
+                adapters_map.register_loader(
+                    "inet", psycopg.types.string.TextLoader
+                )
+                adapters_map.register_loader(
+                    "cidr", psycopg.types.string.TextLoader
+                )
+
+            if self._json_deserializer:
+                from psycopg.types.json import set_json_loads
+
+                set_json_loads(self._json_deserializer, adapters_map)
+
+            if self._json_serializer:
+                from psycopg.types.json import set_json_dumps
+
+                set_json_dumps(self._json_serializer, adapters_map)
+
+    def create_connect_args(self, url):
+        # see https://github.com/psycopg/psycopg/issues/83
+        cargs, cparams = super().create_connect_args(url)
+
+        if self._psycopg_adapters_map:
+            cparams["context"] = self._psycopg_adapters_map
+        if self.client_encoding is not None:
+            cparams["client_encoding"] = self.client_encoding
+        return cargs, cparams
+
+    def _type_info_fetch(self, connection, name):
+        from psycopg.types import TypeInfo
+
+        return TypeInfo.fetch(connection.connection.driver_connection, name)
+
+    def initialize(self, connection):
+        super().initialize(connection)
+
+        # PGDialect.initialize() checks server version for <= 8.2 and sets
+        # this flag to False if so
+        if not self.insert_returning:
+            self.insert_executemany_returning = False
+
+        # HSTORE can't be registered until we have a connection so that
+        # we can look up its OID, so we set up this adapter in
+        # initialize()
+        if self.use_native_hstore:
+            info = self._type_info_fetch(connection, "hstore")
+            self._has_native_hstore = info is not None
+            if self._has_native_hstore:
+                from psycopg.types.hstore import register_hstore
+
+                # register the adapter for connections made subsequent to
+                # this one
+                assert self._psycopg_adapters_map
+                register_hstore(info, self._psycopg_adapters_map)
+
+                # register the adapter for this connection
+                assert connection.connection
+                register_hstore(info, connection.connection.driver_connection)
+
+    @classmethod
+    def import_dbapi(cls):
+        import psycopg
+
+        return psycopg
+
+    @classmethod
+    def get_async_dialect_cls(cls, url):
+        return PGDialectAsync_psycopg
+
+    @util.memoized_property
+    def _isolation_lookup(self):
+        return {
+            "READ COMMITTED": self.dbapi.IsolationLevel.READ_COMMITTED,
+            "READ UNCOMMITTED": self.dbapi.IsolationLevel.READ_UNCOMMITTED,
+            "REPEATABLE READ": self.dbapi.IsolationLevel.REPEATABLE_READ,
+            "SERIALIZABLE": self.dbapi.IsolationLevel.SERIALIZABLE,
+        }
+
+    @util.memoized_property
+    def _psycopg_Json(self):
+        from psycopg.types import json
+
+        return json.Json
+
+    @util.memoized_property
+    def _psycopg_Jsonb(self):
+        from psycopg.types import json
+
+        return json.Jsonb
+
+    @util.memoized_property
+    def _psycopg_TransactionStatus(self):
+        from psycopg.pq import TransactionStatus
+
+        return TransactionStatus
+
+    @util.memoized_property
+    def _psycopg_Range(self):
+        from psycopg.types.range import Range
+
+        return Range
+
+    @util.memoized_property
+    def _psycopg_Multirange(self):
+        from psycopg.types.multirange import Multirange
+
+        return Multirange
+
+    def _do_isolation_level(self, connection, autocommit, isolation_level):
+        connection.autocommit = autocommit
+        connection.isolation_level = isolation_level
+
+    def get_isolation_level(self, dbapi_connection):
+        status_before = dbapi_connection.info.transaction_status
+        value = super().get_isolation_level(dbapi_connection)
+
+        # don't rely on psycopg providing enum symbols, compare with
+        # eq/ne
+        if status_before == self._psycopg_TransactionStatus.IDLE:
+            dbapi_connection.rollback()
+        return value
+
+    def set_isolation_level(self, dbapi_connection, level):
+        if level == "AUTOCOMMIT":
+            self._do_isolation_level(
+                dbapi_connection, autocommit=True, isolation_level=None
+            )
+        else:
+            self._do_isolation_level(
+                dbapi_connection,
+                autocommit=False,
+                isolation_level=self._isolation_lookup[level],
+            )
+
+    def set_readonly(self, connection, value):
+        connection.read_only = value
+
+    def get_readonly(self, connection):
+        return connection.read_only
+
+    def on_connect(self):
+        def notices(conn):
+            conn.add_notice_handler(_log_notices)
+
+        fns = [notices]
+
+        if self.isolation_level is not None:
+
+            def on_connect(conn):
+                self.set_isolation_level(conn, self.isolation_level)
+
+            fns.append(on_connect)
+
+        # fns always has the notices function
+        def on_connect(conn):
+            for fn in fns:
+                fn(conn)
+
+        return on_connect
+
+    def is_disconnect(self, e, connection, cursor):
+        if isinstance(e, self.dbapi.Error) and connection is not None:
+            if connection.closed or connection.broken:
+                return True
+        return False
+
+    def _do_prepared_twophase(self, connection, command, recover=False):
+        dbapi_conn = connection.connection.dbapi_connection
+        if (
+            recover
+            # don't rely on psycopg providing enum symbols, compare with
+            # eq/ne
+            or dbapi_conn.info.transaction_status
+            != self._psycopg_TransactionStatus.IDLE
+        ):
+            dbapi_conn.rollback()
+        before_autocommit = dbapi_conn.autocommit
+        try:
+            if not before_autocommit:
+                self._do_autocommit(dbapi_conn, True)
+            dbapi_conn.execute(command)
+        finally:
+            if not before_autocommit:
+                self._do_autocommit(dbapi_conn, before_autocommit)
+
+    def do_rollback_twophase(
+        self, connection, xid, is_prepared=True, recover=False
+    ):
+        if is_prepared:
+            self._do_prepared_twophase(
+                connection, f"ROLLBACK PREPARED '{xid}'", recover=recover
+            )
+        else:
+            self.do_rollback(connection.connection)
+
+    def do_commit_twophase(
+        self, connection, xid, is_prepared=True, recover=False
+    ):
+        if is_prepared:
+            self._do_prepared_twophase(
+                connection, f"COMMIT PREPARED '{xid}'", recover=recover
+            )
+        else:
+            self.do_commit(connection.connection)
+
+    @util.memoized_property
+    def _dialect_specific_select_one(self):
+        return ";"
+
+
+class AsyncAdapt_psycopg_cursor:
+    __slots__ = ("_cursor", "await_", "_rows")
+
+    _psycopg_ExecStatus = None
+
+    def __init__(self, cursor, await_) -> None:
+        self._cursor = cursor
+        self.await_ = await_
+        self._rows = deque()
+
+    def __getattr__(self, name):
+        return getattr(self._cursor, name)
+
+    @property
+    def arraysize(self):
+        return self._cursor.arraysize
+
+    @arraysize.setter
+    def arraysize(self, value):
+        self._cursor.arraysize = value
+
+    def close(self):
+        self._rows.clear()
+        # a normal cursor's _close() is synchronous, so it can be
+        # called without awaiting
+        self._cursor._close()
+
+    def execute(self, query, params=None, **kw):
+        result = self.await_(self._cursor.execute(query, params, **kw))
+        # sqlalchemy result is not async, so need to pull all rows here
+        res = self._cursor.pgresult
+
+        # don't rely on psycopg providing enum symbols, compare with
+        # eq/ne
+        if res and res.status == self._psycopg_ExecStatus.TUPLES_OK:
+            rows = self.await_(self._cursor.fetchall())
+            self._rows = deque(rows)
+        return result
+
+    def executemany(self, query, params_seq):
+        return self.await_(self._cursor.executemany(query, params_seq))
+
+    def __iter__(self):
+        while self._rows:
+            yield self._rows.popleft()
+
+    def fetchone(self):
+        if self._rows:
+            return self._rows.popleft()
+        else:
+            return None
+
+    def fetchmany(self, size=None):
+        if size is None:
+            size = self._cursor.arraysize
+
+        rr = self._rows
+        return [rr.popleft() for _ in range(min(size, len(rr)))]
+
+    def fetchall(self):
+        retval = list(self._rows)
+        self._rows.clear()
+        return retval
+
+
+class AsyncAdapt_psycopg_ss_cursor(AsyncAdapt_psycopg_cursor):
+    def execute(self, query, params=None, **kw):
+        self.await_(self._cursor.execute(query, params, **kw))
+        return self
+
+    def close(self):
+        self.await_(self._cursor.close())
+
+    def fetchone(self):
+        return self.await_(self._cursor.fetchone())
+
+    def fetchmany(self, size=0):
+        return self.await_(self._cursor.fetchmany(size))
+
+    def fetchall(self):
+        return self.await_(self._cursor.fetchall())
+
+    def __iter__(self):
+        iterator = self._cursor.__aiter__()
+        while True:
+            try:
+                yield self.await_(iterator.__anext__())
+            except StopAsyncIteration:
+                break
+
+
+class AsyncAdapt_psycopg_connection(AdaptedConnection):
+    _connection: AsyncConnection
+    __slots__ = ()
+    await_ = staticmethod(await_only)
+
+    def __init__(self, connection) -> None:
+        self._connection = connection
+
+    def __getattr__(self, name):
+        return getattr(self._connection, name)
+
+    def execute(self, query, params=None, **kw):
+        cursor = self.await_(self._connection.execute(query, params, **kw))
+        return AsyncAdapt_psycopg_cursor(cursor, self.await_)
+
+    def cursor(self, *args, **kw):
+        cursor = self._connection.cursor(*args, **kw)
+        if hasattr(cursor, "name"):
+            return AsyncAdapt_psycopg_ss_cursor(cursor, self.await_)
+        else:
+            return AsyncAdapt_psycopg_cursor(cursor, self.await_)
+
+    def commit(self):
+        self.await_(self._connection.commit())
+
+    def rollback(self):
+        self.await_(self._connection.rollback())
+
+    def close(self):
+        self.await_(self._connection.close())
+
+    @property
+    def autocommit(self):
+        return self._connection.autocommit
+
+    @autocommit.setter
+    def autocommit(self, value):
+        self.set_autocommit(value)
+
+    def set_autocommit(self, value):
+        self.await_(self._connection.set_autocommit(value))
+
+    def set_isolation_level(self, value):
+        self.await_(self._connection.set_isolation_level(value))
+
+    def set_read_only(self, value):
+        self.await_(self._connection.set_read_only(value))
+
+    def set_deferrable(self, value):
+        self.await_(self._connection.set_deferrable(value))
+
+
+class AsyncAdaptFallback_psycopg_connection(AsyncAdapt_psycopg_connection):
+    __slots__ = ()
+    await_ = staticmethod(await_fallback)
+
+
+class PsycopgAdaptDBAPI:
+    def __init__(self, psycopg) -> None:
+        self.psycopg = psycopg
+
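+        # mirror the psycopg module namespace onto this object, leaving
+        # out connect() so the adapting connect() below is used instead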
+        for k, v in self.psycopg.__dict__.items():
+            if k != "connect":
+                self.__dict__[k] = v
+
+    def connect(self, *arg, **kw):
+        async_fallback = kw.pop("async_fallback", False)
+        creator_fn = kw.pop(
+            "async_creator_fn", self.psycopg.AsyncConnection.connect
+        )
+        if util.asbool(async_fallback):
+            return AsyncAdaptFallback_psycopg_connection(
+                await_fallback(creator_fn(*arg, **kw))
+            )
+        else:
+            return AsyncAdapt_psycopg_connection(
+                await_only(creator_fn(*arg, **kw))
+            )
+
+
+class PGDialectAsync_psycopg(PGDialect_psycopg):
+    is_async = True
+    supports_statement_cache = True
+
+    @classmethod
+    def import_dbapi(cls):
+        import psycopg
+        from psycopg.pq import ExecStatus
+
+        AsyncAdapt_psycopg_cursor._psycopg_ExecStatus = ExecStatus
+
+        return PsycopgAdaptDBAPI(psycopg)
+
+    @classmethod
+    def get_pool_class(cls, url):
+        async_fallback = url.query.get("async_fallback", False)
+
+        if util.asbool(async_fallback):
+            return pool.FallbackAsyncAdaptedQueuePool
+        else:
+            return pool.AsyncAdaptedQueuePool
+
+    def _type_info_fetch(self, connection, name):
+        from psycopg.types import TypeInfo
+
+        adapted = connection.connection
+        return adapted.await_(TypeInfo.fetch(adapted.driver_connection, name))
+
+    def _do_isolation_level(self, connection, autocommit, isolation_level):
+        connection.set_autocommit(autocommit)
+        connection.set_isolation_level(isolation_level)
+
+    def _do_autocommit(self, connection, value):
+        connection.set_autocommit(value)
+
+    def set_readonly(self, connection, value):
+        connection.set_read_only(value)
+
+    def set_deferrable(self, connection, value):
+        connection.set_deferrable(value)
+
+    def get_driver_connection(self, connection):
+        return connection._connection
+
+
+dialect = PGDialect_psycopg
+dialect_async = PGDialectAsync_psycopg
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py
new file mode 100644
index 00000000..eeb7604f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -0,0 +1,892 @@
+# dialects/postgresql/psycopg2.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+
+r"""
+.. dialect:: postgresql+psycopg2
+    :name: psycopg2
+    :dbapi: psycopg2
+    :connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
+    :url: https://pypi.org/project/psycopg2/
+
+.. _psycopg2_toplevel:
+
+psycopg2 Connect Arguments
+--------------------------
+
+Keyword arguments that are specific to the SQLAlchemy psycopg2 dialect
+may be passed to :func:`_sa.create_engine()`, and include the following:
+
+
+* ``isolation_level``: This option, available for all PostgreSQL dialects,
+  includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
+  dialect.  This option sets the **default** isolation level for the
+  connection, applied immediately upon connection to the database and before
+  the connection is pooled.  This option is generally superseded by the more
+  modern :paramref:`_engine.Connection.execution_options.isolation_level`
+  execution option, detailed at :ref:`dbapi_autocommit`.
+
+  .. seealso::
+
+    :ref:`psycopg2_isolation_level`
+
+    :ref:`dbapi_autocommit`
+
+
+* ``client_encoding``: sets the client encoding in a libpq-agnostic way,
+  using psycopg2's ``set_client_encoding()`` method; see the example
+  following this list.
+
+  .. seealso::
+
+    :ref:`psycopg2_unicode`
+
+
+* ``executemany_mode``, ``executemany_batch_page_size``,
+  ``executemany_values_page_size``: Allows use of psycopg2
+  extensions for optimizing "executemany"-style queries.  See the referenced
+  section below for details.
+
+  .. seealso::
+
+    :ref:`psycopg2_executemany_mode`
+
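+For example, ``client_encoding`` may be passed as a dialect keyword
+argument; a minimal sketch, where the URL and credentials are
+illustrative::
+
+    engine = create_engine(
+        "postgresql+psycopg2://scott:tiger@localhost/test",
+        client_encoding="utf8",
+    )
+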
+.. tip::
+
+    The above keyword arguments are **dialect** keyword arguments, meaning
+    that they are passed as explicit keyword arguments to :func:`_sa.create_engine()`::
+
+        engine = create_engine(
+            "postgresql+psycopg2://scott:tiger@localhost/test",
+            isolation_level="SERIALIZABLE",
+        )
+
+    These should not be confused with **DBAPI** connect arguments, which
+    are passed as part of the :paramref:`_sa.create_engine.connect_args`
+    dictionary and/or are passed in the URL query string, as detailed in
+    the section :ref:`custom_dbapi_args`.
+
+.. _psycopg2_ssl:
+
+SSL Connections
+---------------
+
+The psycopg2 module has a connection argument named ``sslmode`` for
+controlling its behavior regarding secure (SSL) connections. The default is
+``sslmode=prefer``; it will attempt an SSL connection and if that fails it
+will fall back to an unencrypted connection. ``sslmode=require`` may be used
+to ensure that only secure connections are established.  Consult the
+psycopg2 / libpq documentation for further options that are available.
+
+Note that ``sslmode`` is specific to psycopg2 so it is included in the
+connection URI::
+
+    engine = sa.create_engine(
+        "postgresql+psycopg2://scott:tiger@192.168.0.199:5432/test?sslmode=require"
+    )
+
+Unix Domain Connections
+------------------------
+
+psycopg2 supports connecting via Unix domain connections.   When the ``host``
+portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
+which specifies Unix-domain communication rather than TCP/IP communication::
+
+    create_engine("postgresql+psycopg2://user:password@/dbname")
+
+By default, the connection is made to a Unix-domain socket
+in ``/tmp``, or to whatever socket directory was specified when PostgreSQL
+was built.  This value can be overridden by passing a pathname to psycopg2,
+using ``host`` as an additional keyword argument::
+
+    create_engine(
+        "postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql"
+    )
+
+.. warning::  The format accepted here allows for a hostname in the main URL
+   in addition to the "host" query string argument.  **When using this URL
+   format, the initial host is silently ignored**.  That is, this URL::
+
+        engine = create_engine(
+            "postgresql+psycopg2://user:password@myhost1/dbname?host=myhost2"
+        )
+
+   Above, the hostname ``myhost1`` is **silently ignored and discarded.**  The
+   host which is connected is the ``myhost2`` host.
+
+   This is to maintain some degree of compatibility with PostgreSQL's own URL
+   format, which has been tested to behave the same way, and for which tools
+   such as PifPaf hardcode two hostnames.
+
+.. seealso::
+
+    `PQconnectdbParams \
+    <https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
+
+.. _psycopg2_multi_host:
+
+Specifying multiple fallback hosts
+-----------------------------------
+
+psycopg2 supports multiple connection points in the connection string.
+When the ``host`` parameter is used multiple times in the query section of
+the URL, SQLAlchemy will combine the host and port information provided
+into a single connection string.  Tokens may consist of
+``host:port`` or just ``host``; in the latter case, the default port
+is selected by libpq.  In the example below, three host connections
+are specified, for ``HostA:PortA``, ``HostB`` connecting to the default port,
+and ``HostC:PortC``::
+
+    create_engine(
+        "postgresql+psycopg2://user:password@/dbname?host=HostA:PortA&host=HostB&host=HostC:PortC"
+    )
+
+As an alternative, the libpq query string format may also be used; this
+specifies ``host`` and ``port`` as single query string arguments with
+comma-separated lists - the default port can be chosen by indicating an
+empty value in the comma-separated list::
+
+    create_engine(
+        "postgresql+psycopg2://user:password@/dbname?host=HostA,HostB,HostC&port=PortA,,PortC"
+    )
+
+With either URL style, connections to each host are attempted based on a
+configurable strategy, which may be configured using the libpq
+``target_session_attrs`` parameter.  Per libpq, this defaults to ``any``,
+which indicates that each host is tried in turn until a connection is successful.
+Other strategies include ``primary``, ``prefer-standby``, etc.  The complete
+list is documented by PostgreSQL at
+`libpq connection strings <https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING>`_.
+
+For example, to indicate three hosts using the ``primary`` strategy::
+
+    create_engine(
+        "postgresql+psycopg2://user:password@/dbname?host=HostA:PortA&host=HostB&host=HostC:PortC&target_session_attrs=primary"
+    )
+
+.. versionchanged:: 1.4.40 Port specification in psycopg2 multiple host format
+   is repaired, previously ports were not correctly interpreted in this context.
+   libpq comma-separated format is also now supported.
+
+.. versionadded:: 1.3.20 Support for multiple hosts in PostgreSQL connection
+   string.
+
+.. seealso::
+
+    `libpq connection strings <https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING>`_ - please refer
+    to this section in the libpq documentation for complete background on multiple host support.
+
+
+Empty DSN Connections / Environment Variable Connections
+---------------------------------------------------------
+
+The psycopg2 DBAPI can connect to PostgreSQL by passing an empty DSN to the
+libpq client library, which by default indicates a connection to a localhost
+PostgreSQL database that is open for "trust" connections.  This behavior can
+be further tailored using a particular set of environment variables prefixed
+with ``PG...``, such as ``PGHOST`` and ``PGPASSWORD``, which are consumed by
+``libpq`` to take the place of any or all elements of the connection string.
+
+For this form, the URL can be passed without any elements other than the
+initial scheme::
+
+    engine = create_engine("postgresql+psycopg2://")
+
+In the above form, a blank "dsn" string is passed to the ``psycopg2.connect()``
+function, which in turn represents an empty DSN passed to libpq.
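+
+For example, a minimal sketch relying on the standard libpq environment
+variables (any subset of the documented ``PG...`` names may be used)::
+
+    import os
+
+    os.environ["PGHOST"] = "localhost"
+    os.environ["PGUSER"] = "scott"
+    os.environ["PGPASSWORD"] = "tiger"
+    os.environ["PGDATABASE"] = "test"
+
+    engine = create_engine("postgresql+psycopg2://")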
+
+.. versionadded:: 1.3.2 support for parameter-less connections with psycopg2.
+
+.. seealso::
+
+    `Environment Variables\
+    <https://www.postgresql.org/docs/current/libpq-envars.html>`_ -
+    PostgreSQL documentation on how to use ``PG...``
+    environment variables for connections.
+
+.. _psycopg2_execution_options:
+
+Per-Statement/Connection Execution Options
+-------------------------------------------
+
+The following DBAPI-specific options are respected when used with
+:meth:`_engine.Connection.execution_options`,
+:meth:`.Executable.execution_options`,
+:meth:`_query.Query.execution_options`,
+in addition to those not specific to DBAPIs:
+
+* ``isolation_level`` - Set the transaction isolation level for the lifespan
+  of a :class:`_engine.Connection` (can only be set on a connection, not on a
+  statement or query).  See :ref:`psycopg2_isolation_level`.
+
+* ``stream_results`` - Enable or disable usage of psycopg2 server side
+  cursors - this feature makes use of "named" cursors in combination with
+  special result handling methods so that result rows are not fully buffered.
+  Defaults to False, meaning cursors are buffered by default.
+
+* ``max_row_buffer`` - when using ``stream_results``, an integer value that
+  specifies the maximum number of rows to buffer at a time.  This is
+  interpreted by the :class:`.BufferedRowCursorResult`, and if omitted the
+  buffer will grow to ultimately store 1000 rows at a time.
+
+  .. versionchanged:: 1.4  The ``max_row_buffer`` size can now be greater than
+     1000, and the buffer will grow to that size.
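+
+Combining the two options above, a brief sketch of streaming a large result
+with a bounded buffer follows; ``big_table`` and ``process()`` are
+illustrative placeholders::
+
+    from sqlalchemy import text
+
+    with engine.connect() as conn:
+        result = conn.execution_options(
+            stream_results=True, max_row_buffer=100
+        ).execute(text("SELECT * FROM big_table"))
+        for row in result:
+            process(row)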
+
+.. _psycopg2_batch_mode:
+
+.. _psycopg2_executemany_mode:
+
+Psycopg2 Fast Execution Helpers
+-------------------------------
+
+Modern versions of psycopg2 include a feature known as
+`Fast Execution Helpers \
+<https://www.psycopg.org/docs/extras.html#fast-execution-helpers>`_, which
+has been shown in benchmarking to improve psycopg2's executemany()
+performance, primarily with INSERT statements, by at least
+an order of magnitude.
+
+SQLAlchemy implements a native form of the "insert many values"
+handler that will rewrite a single-row INSERT statement to accommodate
+many values at once within an extended VALUES clause; this handler is
+equivalent to psycopg2's ``execute_values()`` handler; an overview of this
+feature and its configuration is at :ref:`engine_insertmanyvalues`.
+
+.. versionadded:: 2.0 Replaced psycopg2's ``execute_values()`` fast execution
+   helper with a native SQLAlchemy mechanism known as
+   :ref:`insertmanyvalues <engine_insertmanyvalues>`.
+
+The psycopg2 dialect retains the ability to use the psycopg2-specific
+``execute_batch()`` feature, although it is not expected that this is a widely
+used feature.  The use of this extension may be enabled using the
+``executemany_mode`` flag which may be passed to :func:`_sa.create_engine`::
+
+    engine = create_engine(
+        "postgresql+psycopg2://scott:tiger@host/dbname",
+        executemany_mode="values_plus_batch",
+    )
+
+Possible options for ``executemany_mode`` include:
+
+* ``values_only`` - this is the default value.  SQLAlchemy's native
+  :ref:`insertmanyvalues <engine_insertmanyvalues>` handler is used for qualifying
+  INSERT statements, assuming
+  :paramref:`_sa.create_engine.use_insertmanyvalues` is left at
+  its default value of ``True``.  This handler rewrites simple
+  INSERT statements to include multiple VALUES clauses so that many
+  parameter sets can be inserted with one statement.
+
+* ``values_plus_batch`` - SQLAlchemy's native
+  :ref:`insertmanyvalues <engine_insertmanyvalues>` handler is used for qualifying
+  INSERT statements, assuming
+  :paramref:`_sa.create_engine.use_insertmanyvalues` is left at its default
+  value of ``True``. Then, psycopg2's ``execute_batch()`` handler is used for
+  qualifying UPDATE and DELETE statements when executed with multiple parameter
+  sets. When using this mode, the :attr:`_engine.CursorResult.rowcount`
+  attribute will not contain a value for executemany-style executions against
+  UPDATE and DELETE statements.
+
+.. versionchanged:: 2.0 Removed the ``'batch'`` and ``'None'`` options
+   from psycopg2 ``executemany_mode``.  Control over batching for INSERT
+   statements is now configured via the
+   :paramref:`_sa.create_engine.use_insertmanyvalues` engine-level parameter.
+
+The term "qualifying statements" refers to the statement being executed
+being a Core :func:`_expression.insert`, :func:`_expression.update`
+or :func:`_expression.delete` construct, and **not** a plain textual SQL
+string or one constructed using :func:`_expression.text`.  It also may **not** be
+a special "extension" statement such as an "ON CONFLICT" "upsert" statement.
+When using the ORM, all insert/update/delete statements used by the ORM flush process
+are qualifying.
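+
+For example, under the above definition the first execution below is
+qualifying while the second is not (a sketch; ``conn`` is a
+:class:`_engine.Connection` and ``table`` an existing :class:`_schema.Table`)::
+
+    from sqlalchemy import text
+
+    # qualifying: a Core insert() construct with multiple parameter sets
+    conn.execute(table.insert(), [{"x": 1}, {"x": 2}])
+
+    # not qualifying: a textual SQL string, even with multiple
+    # parameter sets
+    conn.execute(text("INSERT INTO t (x) VALUES (:x)"), [{"x": 1}, {"x": 2}])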
+
+The "page size" for the psycopg2 "batch" strategy can be affected
+by using the ``executemany_batch_page_size`` parameter, which defaults to
+100.
+
+For the "insertmanyvalues" feature, the page size can be controlled using the
+:paramref:`_sa.create_engine.insertmanyvalues_page_size` parameter,
+which defaults to 1000.  An example of modifying both parameters
+is below::
+
+    engine = create_engine(
+        "postgresql+psycopg2://scott:tiger@host/dbname",
+        executemany_mode="values_plus_batch",
+        insertmanyvalues_page_size=5000,
+        executemany_batch_page_size=500,
+    )
+
+.. seealso::
+
+    :ref:`engine_insertmanyvalues` - background on "insertmanyvalues"
+
+    :ref:`tutorial_multiple_parameters` - General information on using the
+    :class:`_engine.Connection`
+    object to execute statements in such a way as to make
+    use of the DBAPI ``.executemany()`` method.
+
+
+.. _psycopg2_unicode:
+
+Unicode with Psycopg2
+----------------------
+
+The psycopg2 DBAPI driver supports Unicode data transparently.
+
+The client character encoding can be controlled for the psycopg2 dialect
+in the following ways:
+
+* For PostgreSQL 9.1 and above, the ``client_encoding`` parameter may be
+  passed in the database URL; this parameter is consumed by the underlying
+  ``libpq`` PostgreSQL client library::
+
+    engine = create_engine(
+        "postgresql+psycopg2://user:pass@host/dbname?client_encoding=utf8"
+    )
+
+  Alternatively, the above ``client_encoding`` value may be passed using
+  :paramref:`_sa.create_engine.connect_args` for programmatic establishment with
+  ``libpq``::
+
+    engine = create_engine(
+        "postgresql+psycopg2://user:pass@host/dbname",
+        connect_args={"client_encoding": "utf8"},
+    )
+
+* For all PostgreSQL versions, psycopg2 supports a client-side encoding
+  value that will be passed to database connections when they are first
+  established.  The SQLAlchemy psycopg2 dialect supports this using the
+  ``client_encoding`` parameter passed to :func:`_sa.create_engine`::
+
+      engine = create_engine(
+          "postgresql+psycopg2://user:pass@host/dbname", client_encoding="utf8"
+      )
+
+  .. tip:: The above ``client_encoding`` parameter is admittedly very similar
+      in appearance to usage of the parameter within the
+      :paramref:`_sa.create_engine.connect_args` dictionary; the difference
+      above is that the parameter is consumed by psycopg2 and is
+      passed to the database connection using ``SET client_encoding TO
+      'utf8'``; in the previously mentioned style, the parameter is instead
+      passed through psycopg2 and consumed by the ``libpq`` library.
+
+* A common way to set up client encoding with PostgreSQL databases is to
+  ensure it is configured within the server-side ``postgresql.conf`` file;
+  this is the recommended way to set encoding for a server that
+  consistently uses one encoding in all databases::
+
+    # postgresql.conf file
+
+    # client_encoding = sql_ascii # actually, defaults to database
+    # encoding
+    client_encoding = utf8
+
+Transactions
+------------
+
+The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
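+
+A brief sketch of SAVEPOINT use via the standard
+:meth:`_engine.Connection.begin_nested` API, where ``table`` is an
+illustrative placeholder::
+
+    with engine.connect() as conn:
+        with conn.begin():
+            conn.execute(table.insert(), {"x": 1})
+            savepoint = conn.begin_nested()  # emits SAVEPOINT
+            try:
+                conn.execute(table.insert(), {"x": 2})
+                savepoint.commit()  # emits RELEASE SAVEPOINT
+            except Exception:
+                savepoint.rollback()  # emits ROLLBACK TO SAVEPOINT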
+
+.. _psycopg2_isolation_level:
+
+Psycopg2 Transaction Isolation Level
+-------------------------------------
+
+As discussed in :ref:`postgresql_isolation_level`,
+all PostgreSQL dialects support setting of transaction isolation level
+both via the ``isolation_level`` parameter passed to
+:func:`_sa.create_engine`, as well as the ``isolation_level`` argument used
+by :meth:`_engine.Connection.execution_options`.  When using the psycopg2
+dialect, these options make use of psycopg2's ``set_isolation_level()``
+connection method, rather than emitting a PostgreSQL directive; this is
+because psycopg2's API-level setting is always emitted at the start of each
+transaction in any case.
+
+The psycopg2 dialect supports these constants for isolation level:
+
+* ``READ COMMITTED``
+* ``READ UNCOMMITTED``
+* ``REPEATABLE READ``
+* ``SERIALIZABLE``
+* ``AUTOCOMMIT``
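+
+For example, to run a statement that cannot execute inside a transaction
+block, the connection may be switched to ``AUTOCOMMIT`` using the standard
+execution option pattern::
+
+    from sqlalchemy import text
+
+    with engine.connect() as conn:
+        conn = conn.execution_options(isolation_level="AUTOCOMMIT")
+        conn.execute(text("VACUUM ANALYZE"))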
+
+.. seealso::
+
+    :ref:`postgresql_isolation_level`
+
+    :ref:`pg8000_isolation_level`
+
+
+NOTICE logging
+---------------
+
+The psycopg2 dialect will log PostgreSQL NOTICE messages
+via the ``sqlalchemy.dialects.postgresql`` logger.  When this logger
+is set to the ``logging.INFO`` level, notice messages will be logged::
+
+    import logging
+
+    logging.getLogger("sqlalchemy.dialects.postgresql").setLevel(logging.INFO)
+
+Above, it is assumed that logging is configured externally.  If this is not
+the case, configuration such as ``logging.basicConfig()`` must be utilized::
+
+    import logging
+
+    logging.basicConfig()  # log messages to stderr by default
+    logging.getLogger("sqlalchemy.dialects.postgresql").setLevel(logging.INFO)
+
+.. seealso::
+
+    `Logging HOWTO <https://docs.python.org/3/howto/logging.html>`_ - on the python.org website
+
+.. _psycopg2_hstore:
+
+HSTORE type
+------------
+
+The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of
+the HSTORE type.   The SQLAlchemy psycopg2 dialect will enable this extension
+by default when psycopg2 version 2.4 or greater is used, and
+it is detected that the target database has the HSTORE type set up for use.
+In other words, when the dialect makes the first
+connection, a sequence like the following is performed:
+
+1. Request the available HSTORE oids using
+   ``psycopg2.extras.HstoreAdapter.get_oids()``.
+   If this function returns a list of HSTORE identifiers, we then determine
+   that the ``HSTORE`` extension is present.
+   This function is **skipped** if the version of psycopg2 installed is
+   less than version 2.4.
+
+2. If the ``use_native_hstore`` flag is at its default of ``True``, and
+   we've detected that ``HSTORE`` oids are available, the
+   ``psycopg2.extensions.register_hstore()`` extension is invoked for all
+   connections.
+
+The ``register_hstore()`` extension has the effect of **all Python
+dictionaries being accepted as parameters regardless of the type of target
+column in SQL**. The dictionaries are converted by this extension into a
+textual HSTORE expression.  If this behavior is not desired, disable the
+use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
+follows::
+
+    engine = create_engine(
+        "postgresql+psycopg2://scott:tiger@localhost/test",
+        use_native_hstore=False,
+    )
+
+The ``HSTORE`` type is **still supported** when the
+``psycopg2.extensions.register_hstore()`` extension is not used.  It merely
+means that the coercion between Python dictionaries and the HSTORE
+string format, on both the parameter side and the result side, will take
+place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2``,
+which may be more performant.
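+
+For reference, a minimal round trip with the HSTORE type looks the same
+under either strategy (a sketch; the table and column names are
+illustrative)::
+
+    from sqlalchemy import Column, Integer, MetaData, Table
+    from sqlalchemy.dialects.postgresql import HSTORE
+
+    data_table = Table(
+        "data_table",
+        MetaData(),
+        Column("id", Integer, primary_key=True),
+        Column("data", HSTORE),
+    )
+
+    with engine.begin() as conn:
+        conn.execute(data_table.insert(), {"data": {"key1": "value1"}})
+        row = conn.execute(data_table.select()).one()
+        # row.data round-trips as a Python dictionary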
+
+"""  # noqa
+from __future__ import annotations
+
+import collections.abc as collections_abc
+import logging
+import re
+from typing import cast
+
+from . import ranges
+from ._psycopg_common import _PGDialect_common_psycopg
+from ._psycopg_common import _PGExecutionContext_common_psycopg
+from .base import PGIdentifierPreparer
+from .json import JSON
+from .json import JSONB
+from ... import types as sqltypes
+from ... import util
+from ...util import FastIntFlag
+from ...util import parse_user_argument_for_enum
+
+logger = logging.getLogger("sqlalchemy.dialects.postgresql")
+
+
+class _PGJSON(JSON):
+    def result_processor(self, dialect, coltype):
+        return None
+
+
+class _PGJSONB(JSONB):
+    def result_processor(self, dialect, coltype):
+        return None
+
+
+class _Psycopg2Range(ranges.AbstractSingleRangeImpl):
+    _psycopg2_range_cls = "none"
+
+    def bind_processor(self, dialect):
+        psycopg2_Range = getattr(
+            cast(PGDialect_psycopg2, dialect)._psycopg2_extras,
+            self._psycopg2_range_cls,
+        )
+
+        def to_range(value):
+            if isinstance(value, ranges.Range):
+                value = psycopg2_Range(
+                    value.lower, value.upper, value.bounds, value.empty
+                )
+            return value
+
+        return to_range
+
+    def result_processor(self, dialect, coltype):
+        def to_range(value):
+            if value is not None:
+                value = ranges.Range(
+                    value._lower,
+                    value._upper,
+                    bounds=value._bounds if value._bounds else "[)",
+                    empty=not value._bounds,
+                )
+            return value
+
+        return to_range
+
+
+class _Psycopg2NumericRange(_Psycopg2Range):
+    _psycopg2_range_cls = "NumericRange"
+
+
+class _Psycopg2DateRange(_Psycopg2Range):
+    _psycopg2_range_cls = "DateRange"
+
+
+class _Psycopg2DateTimeRange(_Psycopg2Range):
+    _psycopg2_range_cls = "DateTimeRange"
+
+
+class _Psycopg2DateTimeTZRange(_Psycopg2Range):
+    _psycopg2_range_cls = "DateTimeTZRange"
+
+
+class PGExecutionContext_psycopg2(_PGExecutionContext_common_psycopg):
+    _psycopg2_fetched_rows = None
+
+    def post_exec(self):
+        self._log_notices(self.cursor)
+
+    def _log_notices(self, cursor):
+        # check also that notices is an iterable, after it's already
+        # established that we will be iterating through it.  This is to get
+        # around test suites such as SQLAlchemy's using a Mock object for
+        # cursor
+        if not cursor.connection.notices or not isinstance(
+            cursor.connection.notices, collections_abc.Iterable
+        ):
+            return
+
+        for notice in cursor.connection.notices:
+            # NOTICE messages have a
+            # newline character at the end
+            logger.info(notice.rstrip())
+
+        cursor.connection.notices[:] = []
+
+
+class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
+    pass
+
+
+class ExecutemanyMode(FastIntFlag):
+    EXECUTEMANY_VALUES = 0
+    EXECUTEMANY_VALUES_PLUS_BATCH = 1
+
+
+(
+    EXECUTEMANY_VALUES,
+    EXECUTEMANY_VALUES_PLUS_BATCH,
+) = ExecutemanyMode.__members__.values()
+
+
+class PGDialect_psycopg2(_PGDialect_common_psycopg):
+    driver = "psycopg2"
+
+    supports_statement_cache = True
+    supports_server_side_cursors = True
+
+    default_paramstyle = "pyformat"
+    # set to true based on psycopg2 version
+    supports_sane_multi_rowcount = False
+    execution_ctx_cls = PGExecutionContext_psycopg2
+    preparer = PGIdentifierPreparer_psycopg2
+    psycopg2_version = (0, 0)
+    use_insertmanyvalues_wo_returning = True
+
+    returns_native_bytes = False
+
+    _has_native_hstore = True
+
+    colspecs = util.update_copy(
+        _PGDialect_common_psycopg.colspecs,
+        {
+            JSON: _PGJSON,
+            sqltypes.JSON: _PGJSON,
+            JSONB: _PGJSONB,
+            ranges.INT4RANGE: _Psycopg2NumericRange,
+            ranges.INT8RANGE: _Psycopg2NumericRange,
+            ranges.NUMRANGE: _Psycopg2NumericRange,
+            ranges.DATERANGE: _Psycopg2DateRange,
+            ranges.TSRANGE: _Psycopg2DateTimeRange,
+            ranges.TSTZRANGE: _Psycopg2DateTimeTZRange,
+        },
+    )
+
+    def __init__(
+        self,
+        executemany_mode="values_only",
+        executemany_batch_page_size=100,
+        **kwargs,
+    ):
+        _PGDialect_common_psycopg.__init__(self, **kwargs)
+
+        if self._native_inet_types:
+            raise NotImplementedError(
+                "The psycopg2 dialect does not implement "
+                "ipaddress type handling; native_inet_types cannot be set "
+                "to ``True`` when using this dialect."
+            )
+
+        # Parse executemany_mode argument, allowing it to be only one of the
+        # symbol names
+        self.executemany_mode = parse_user_argument_for_enum(
+            executemany_mode,
+            {
+                EXECUTEMANY_VALUES: ["values_only"],
+                EXECUTEMANY_VALUES_PLUS_BATCH: ["values_plus_batch"],
+            },
+            "executemany_mode",
+        )
+
+        self.executemany_batch_page_size = executemany_batch_page_size
+
+        if self.dbapi and hasattr(self.dbapi, "__version__"):
+            m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
+            if m:
+                self.psycopg2_version = tuple(
+                    int(x) for x in m.group(1, 2, 3) if x is not None
+                )
+
+            if self.psycopg2_version < (2, 7):
+                raise ImportError(
+                    "psycopg2 version 2.7 or higher is required."
+                )
+
+    def initialize(self, connection):
+        super().initialize(connection)
+        self._has_native_hstore = (
+            self.use_native_hstore
+            and self._hstore_oids(connection.connection.dbapi_connection)
+            is not None
+        )
+
+        self.supports_sane_multi_rowcount = (
+            self.executemany_mode is not EXECUTEMANY_VALUES_PLUS_BATCH
+        )
+
+    @classmethod
+    def import_dbapi(cls):
+        import psycopg2
+
+        return psycopg2
+
+    @util.memoized_property
+    def _psycopg2_extensions(cls):
+        from psycopg2 import extensions
+
+        return extensions
+
+    @util.memoized_property
+    def _psycopg2_extras(cls):
+        from psycopg2 import extras
+
+        return extras
+
+    @util.memoized_property
+    def _isolation_lookup(self):
+        extensions = self._psycopg2_extensions
+        return {
+            "AUTOCOMMIT": extensions.ISOLATION_LEVEL_AUTOCOMMIT,
+            "READ COMMITTED": extensions.ISOLATION_LEVEL_READ_COMMITTED,
+            "READ UNCOMMITTED": extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
+            "REPEATABLE READ": extensions.ISOLATION_LEVEL_REPEATABLE_READ,
+            "SERIALIZABLE": extensions.ISOLATION_LEVEL_SERIALIZABLE,
+        }
+
+    def set_isolation_level(self, dbapi_connection, level):
+        dbapi_connection.set_isolation_level(self._isolation_lookup[level])
+
+    def set_readonly(self, connection, value):
+        connection.readonly = value
+
+    def get_readonly(self, connection):
+        return connection.readonly
+
+    def set_deferrable(self, connection, value):
+        connection.deferrable = value
+
+    def get_deferrable(self, connection):
+        return connection.deferrable
+
+    def on_connect(self):
+        extras = self._psycopg2_extras
+
+        fns = []
+        if self.client_encoding is not None:
+
+            def on_connect(dbapi_conn):
+                dbapi_conn.set_client_encoding(self.client_encoding)
+
+            fns.append(on_connect)
+
+        if self.dbapi:
+
+            def on_connect(dbapi_conn):
+                extras.register_uuid(None, dbapi_conn)
+
+            fns.append(on_connect)
+
+        if self.dbapi and self.use_native_hstore:
+
+            def on_connect(dbapi_conn):
+                hstore_oids = self._hstore_oids(dbapi_conn)
+                if hstore_oids is not None:
+                    oid, array_oid = hstore_oids
+                    kw = {"oid": oid}
+                    kw["array_oid"] = array_oid
+                    extras.register_hstore(dbapi_conn, **kw)
+
+            fns.append(on_connect)
+
+        if self.dbapi and self._json_deserializer:
+
+            def on_connect(dbapi_conn):
+                extras.register_default_json(
+                    dbapi_conn, loads=self._json_deserializer
+                )
+                extras.register_default_jsonb(
+                    dbapi_conn, loads=self._json_deserializer
+                )
+
+            fns.append(on_connect)
+
+        if fns:
+
+            def on_connect(dbapi_conn):
+                for fn in fns:
+                    fn(dbapi_conn)
+
+            return on_connect
+        else:
+            return None
+
+    def do_executemany(self, cursor, statement, parameters, context=None):
+        if self.executemany_mode is EXECUTEMANY_VALUES_PLUS_BATCH:
+            if self.executemany_batch_page_size:
+                kwargs = {"page_size": self.executemany_batch_page_size}
+            else:
+                kwargs = {}
+            self._psycopg2_extras.execute_batch(
+                cursor, statement, parameters, **kwargs
+            )
+        else:
+            cursor.executemany(statement, parameters)
+
+    def do_begin_twophase(self, connection, xid):
+        connection.connection.tpc_begin(xid)
+
+    def do_prepare_twophase(self, connection, xid):
+        connection.connection.tpc_prepare()
+
+    def _do_twophase(self, dbapi_conn, operation, xid, recover=False):
+        if recover:
+            if dbapi_conn.status != self._psycopg2_extensions.STATUS_READY:
+                dbapi_conn.rollback()
+            operation(xid)
+        else:
+            operation()
+
+    def do_rollback_twophase(
+        self, connection, xid, is_prepared=True, recover=False
+    ):
+        dbapi_conn = connection.connection.dbapi_connection
+        self._do_twophase(
+            dbapi_conn, dbapi_conn.tpc_rollback, xid, recover=recover
+        )
+
+    def do_commit_twophase(
+        self, connection, xid, is_prepared=True, recover=False
+    ):
+        dbapi_conn = connection.connection.dbapi_connection
+        self._do_twophase(
+            dbapi_conn, dbapi_conn.tpc_commit, xid, recover=recover
+        )
+
+    @util.memoized_instancemethod
+    def _hstore_oids(self, dbapi_connection):
+        extras = self._psycopg2_extras
+        oids = extras.HstoreAdapter.get_oids(dbapi_connection)
+        if oids is not None and oids[0]:
+            return oids[0:2]
+        else:
+            return None
+
+    def is_disconnect(self, e, connection, cursor):
+        if isinstance(e, self.dbapi.Error):
+            # check the "closed" flag.  this might not be
+            # present on old psycopg2 versions.   Also,
+            # this flag doesn't actually help in a lot of disconnect
+            # situations, so don't rely on it.
+            if getattr(connection, "closed", False):
+                return True
+
+            # checks based on strings.  in the case that .closed
+            # didn't cut it, fall back onto these.
+            str_e = str(e).partition("\n")[0]
+            for msg in self._is_disconnect_messages:
+                idx = str_e.find(msg)
+                if idx >= 0 and '"' not in str_e[:idx]:
+                    return True
+        return False
+
+    @util.memoized_property
+    def _is_disconnect_messages(self):
+        return (
+            # these error messages from libpq: interfaces/libpq/fe-misc.c
+            # and interfaces/libpq/fe-secure.c.
+            "terminating connection",
+            "closed the connection",
+            "connection not open",
+            "could not receive data from server",
+            "could not send data to server",
+            # psycopg2 client errors, psycopg2/connection.h,
+            # psycopg2/cursor.h
+            "connection already closed",
+            "cursor already closed",
+            # not sure where this path is originally from, it may
+            # be obsolete.   It really says "losed", not "closed".
+            "losed the connection unexpectedly",
+            # these can occur in newer SSL
+            "connection has been closed unexpectedly",
+            "SSL error: decryption failed or bad record mac",
+            "SSL SYSCALL error: Bad file descriptor",
+            "SSL SYSCALL error: EOF detected",
+            "SSL SYSCALL error: Operation timed out",
+            "SSL SYSCALL error: Bad address",
+            # This can occur in OpenSSL 1 when an unexpected EOF occurs.
+            # https://www.openssl.org/docs/man1.1.1/man3/SSL_get_error.html#BUGS
+            # It may also occur in newer OpenSSL for a non-recoverable I/O
+            # error as a result of a system call that does not set 'errno'
+            # in libc.
+            "SSL SYSCALL error: Success",
+        )
+
+
+dialect = PGDialect_psycopg2
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg2cffi.py
new file mode 100644
index 00000000..55e17607
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/psycopg2cffi.py
@@ -0,0 +1,61 @@
+# dialects/postgresql/psycopg2cffi.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: ignore-errors
+
+r"""
+.. dialect:: postgresql+psycopg2cffi
+    :name: psycopg2cffi
+    :dbapi: psycopg2cffi
+    :connectstring: postgresql+psycopg2cffi://user:password@host:port/dbname[?key=value&key=value...]
+    :url: https://pypi.org/project/psycopg2cffi/
+
+``psycopg2cffi`` is an adaptation of ``psycopg2``, using CFFI for the C
+layer. This makes it suitable for use in e.g. PyPy. Documentation
+is as per ``psycopg2``.
+
+.. seealso::
+
+    :mod:`sqlalchemy.dialects.postgresql.psycopg2`
+
+"""  # noqa
+from .psycopg2 import PGDialect_psycopg2
+from ... import util
+
+
+class PGDialect_psycopg2cffi(PGDialect_psycopg2):
+    driver = "psycopg2cffi"
+    supports_unicode_statements = True
+    supports_statement_cache = True
+
+    # psycopg2cffi's first release is 2.5.0, but reports
+    # __version__ as 2.4.4.  Subsequent releases seem to have
+    # fixed this.
+
+    FEATURE_VERSION_MAP = dict(
+        native_json=(2, 4, 4),
+        native_jsonb=(2, 7, 1),
+        sane_multi_rowcount=(2, 4, 4),
+        array_oid=(2, 4, 4),
+        hstore_adapter=(2, 4, 4),
+    )
+
+    @classmethod
+    def import_dbapi(cls):
+        return __import__("psycopg2cffi")
+
+    @util.memoized_property
+    def _psycopg2_extensions(cls):
+        root = __import__("psycopg2cffi", fromlist=["extensions"])
+        return root.extensions
+
+    @util.memoized_property
+    def _psycopg2_extras(cls):
+        root = __import__("psycopg2cffi", fromlist=["extras"])
+        return root.extras
+
+
+dialect = PGDialect_psycopg2cffi
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/ranges.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/ranges.py
new file mode 100644
index 00000000..93253570
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/ranges.py
@@ -0,0 +1,1031 @@
+# dialects/postgresql/ranges.py
+# Copyright (C) 2013-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+from __future__ import annotations
+
+import dataclasses
+from datetime import date
+from datetime import datetime
+from datetime import timedelta
+from decimal import Decimal
+from typing import Any
+from typing import cast
+from typing import Generic
+from typing import List
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from .operators import ADJACENT_TO
+from .operators import CONTAINED_BY
+from .operators import CONTAINS
+from .operators import NOT_EXTEND_LEFT_OF
+from .operators import NOT_EXTEND_RIGHT_OF
+from .operators import OVERLAP
+from .operators import STRICTLY_LEFT_OF
+from .operators import STRICTLY_RIGHT_OF
+from ... import types as sqltypes
+from ...sql import operators
+from ...sql.type_api import TypeEngine
+from ...util import py310
+from ...util.typing import Literal
+
+if TYPE_CHECKING:
+    from ...sql.elements import ColumnElement
+    from ...sql.type_api import _TE
+    from ...sql.type_api import TypeEngineMixin
+
+_T = TypeVar("_T", bound=Any)
+
+_BoundsType = Literal["()", "[)", "(]", "[]"]
+
+if py310:
+    dc_slots = {"slots": True}
+    dc_kwonly = {"kw_only": True}
+else:
+    dc_slots = {}
+    dc_kwonly = {}
+
+
+@dataclasses.dataclass(frozen=True, **dc_slots)
+class Range(Generic[_T]):
+    """Represent a PostgreSQL range.
+
+    E.g.::
+
+        r = Range(10, 50, bounds="()")
+
+    The calling style is similar to that of psycopg and psycopg2, in part
+    to allow easier migration from previous SQLAlchemy versions that used
+    these objects directly.
+
+    :param lower: Lower bound value, or None
+    :param upper: Upper bound value, or None
+    :param bounds: keyword-only, optional string value that is one of
+     ``"()"``, ``"[)"``, ``"(]"``, ``"[]"``.  Defaults to ``"[)"``.
+    :param empty: keyword-only, optional bool indicating this is an "empty"
+     range
+
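+    For example, with the default ``"[)"`` bounds, the lower bound is
+    inclusive and the upper bound is exclusive::
+
+        r = Range(5, 10)
+        assert 5 in r
+        assert 10 not in r
+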
+    .. versionadded:: 2.0
+
+    """
+
+    lower: Optional[_T] = None
+    """the lower bound"""
+
+    upper: Optional[_T] = None
+    """the upper bound"""
+
+    if TYPE_CHECKING:
+        bounds: _BoundsType = dataclasses.field(default="[)")
+        empty: bool = dataclasses.field(default=False)
+    else:
+        bounds: _BoundsType = dataclasses.field(default="[)", **dc_kwonly)
+        empty: bool = dataclasses.field(default=False, **dc_kwonly)
+
+    if not py310:
+
+        def __init__(
+            self,
+            lower: Optional[_T] = None,
+            upper: Optional[_T] = None,
+            *,
+            bounds: _BoundsType = "[)",
+            empty: bool = False,
+        ):
+            # no __slots__ either so we can update dict
+            self.__dict__.update(
+                {
+                    "lower": lower,
+                    "upper": upper,
+                    "bounds": bounds,
+                    "empty": empty,
+                }
+            )
+
+    def __bool__(self) -> bool:
+        return not self.empty
+
+    @property
+    def isempty(self) -> bool:
+        "A synonym for the 'empty' attribute."
+
+        return self.empty
+
+    @property
+    def is_empty(self) -> bool:
+        "A synonym for the 'empty' attribute."
+
+        return self.empty
+
+    @property
+    def lower_inc(self) -> bool:
+        """Return True if the lower bound is inclusive."""
+
+        return self.bounds[0] == "["
+
+    @property
+    def lower_inf(self) -> bool:
+        """Return True if this range is non-empty and lower bound is
+        infinite."""
+
+        return not self.empty and self.lower is None
+
+    @property
+    def upper_inc(self) -> bool:
+        """Return True if the upper bound is inclusive."""
+
+        return self.bounds[1] == "]"
+
+    @property
+    def upper_inf(self) -> bool:
+        """Return True if this range is non-empty and the upper bound is
+        infinite."""
+
+        return not self.empty and self.upper is None
+
+    @property
+    def __sa_type_engine__(self) -> AbstractSingleRange[_T]:
+        return AbstractSingleRange()
+
+    def _contains_value(self, value: _T) -> bool:
+        """Return True if this range contains the given value."""
+
+        if self.empty:
+            return False
+
+        if self.lower is None:
+            return self.upper is None or (
+                value < self.upper
+                if self.bounds[1] == ")"
+                else value <= self.upper
+            )
+
+        if self.upper is None:
+            return (  # type: ignore
+                value > self.lower
+                if self.bounds[0] == "("
+                else value >= self.lower
+            )
+
+        return (  # type: ignore
+            value > self.lower
+            if self.bounds[0] == "("
+            else value >= self.lower
+        ) and (
+            value < self.upper
+            if self.bounds[1] == ")"
+            else value <= self.upper
+        )
+
+    def _get_discrete_step(self) -> Any:
+        "Determine the “step” for this range, if it is a discrete one."
+
+        # See
+        # https://www.postgresql.org/docs/current/rangetypes.html#RANGETYPES-DISCRETE
+        # for the rationale
+
+        if isinstance(self.lower, int) or isinstance(self.upper, int):
+            return 1
+        elif isinstance(self.lower, datetime) or isinstance(
+            self.upper, datetime
+        ):
+            # This is required, because a `isinstance(datetime.now(), date)`
+            # is True
+            return None
+        elif isinstance(self.lower, date) or isinstance(self.upper, date):
+            return timedelta(days=1)
+        else:
+            return None
+
+    def _compare_edges(
+        self,
+        value1: Optional[_T],
+        bound1: str,
+        value2: Optional[_T],
+        bound2: str,
+        only_values: bool = False,
+    ) -> int:
+        """Compare two range bounds.
+
+        Return -1, 0 or 1 respectively when `value1` is less than,
+        equal to or greater than `value2`.
+
+        When `only_values` is ``True``, do not consider the *inclusivity*
+        of the edges, just their values.
+        """
+
+        value1_is_lower_bound = bound1 in {"[", "("}
+        value2_is_lower_bound = bound2 in {"[", "("}
+
+        # Infinite edges are equal when they are on the same side,
+        # otherwise a lower edge is considered less than the upper end
+        if value1 is value2 is None:
+            if value1_is_lower_bound == value2_is_lower_bound:
+                return 0
+            else:
+                return -1 if value1_is_lower_bound else 1
+        elif value1 is None:
+            return -1 if value1_is_lower_bound else 1
+        elif value2 is None:
+            return 1 if value2_is_lower_bound else -1
+
+        # Short path for trivial case
+        if bound1 == bound2 and value1 == value2:
+            return 0
+
+        value1_inc = bound1 in {"[", "]"}
+        value2_inc = bound2 in {"[", "]"}
+        step = self._get_discrete_step()
+
+        if step is not None:
+            # "Normalize" the two edges as '[)', to simplify successive
+            # logic when the range is discrete: otherwise we would need
+            # to handle the comparison between ``(0`` and ``[1`` that
+            # are equal when dealing with integers while for floats the
+            # former is lesser than the latter
+
+            if value1_is_lower_bound:
+                if not value1_inc:
+                    value1 += step
+                    value1_inc = True
+            else:
+                if value1_inc:
+                    value1 += step
+                    value1_inc = False
+            if value2_is_lower_bound:
+                if not value2_inc:
+                    value2 += step
+                    value2_inc = True
+            else:
+                if value2_inc:
+                    value2 += step
+                    value2_inc = False
+
+        if value1 < value2:  # type: ignore
+            return -1
+        elif value1 > value2:  # type: ignore
+            return 1
+        elif only_values:
+            return 0
+        else:
+            # Neither one is infinite but are equal, so we
+            # need to consider the respective inclusive/exclusive
+            # flag
+
+            if value1_inc and value2_inc:
+                return 0
+            elif not value1_inc and not value2_inc:
+                if value1_is_lower_bound == value2_is_lower_bound:
+                    return 0
+                else:
+                    return 1 if value1_is_lower_bound else -1
+            elif not value1_inc:
+                return 1 if value1_is_lower_bound else -1
+            elif not value2_inc:
+                return -1 if value2_is_lower_bound else 1
+            else:
+                return 0
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare this range to the `other` taking into account
+        bounds inclusivity, returning ``True`` if they are equal.
+        """
+
+        if not isinstance(other, Range):
+            return NotImplemented
+
+        if self.empty and other.empty:
+            return True
+        elif self.empty != other.empty:
+            return False
+
+        slower = self.lower
+        slower_b = self.bounds[0]
+        olower = other.lower
+        olower_b = other.bounds[0]
+        supper = self.upper
+        supper_b = self.bounds[1]
+        oupper = other.upper
+        oupper_b = other.bounds[1]
+
+        return (
+            self._compare_edges(slower, slower_b, olower, olower_b) == 0
+            and self._compare_edges(supper, supper_b, oupper, oupper_b) == 0
+        )
+
+    def contained_by(self, other: Range[_T]) -> bool:
+        "Determine whether this range is a contained by `other`."
+
+        # Any range contains the empty one
+        if self.empty:
+            return True
+
+        # An empty range does not contain any range except the empty one
+        if other.empty:
+            return False
+
+        slower = self.lower
+        slower_b = self.bounds[0]
+        olower = other.lower
+        olower_b = other.bounds[0]
+
+        if self._compare_edges(slower, slower_b, olower, olower_b) < 0:
+            return False
+
+        supper = self.upper
+        supper_b = self.bounds[1]
+        oupper = other.upper
+        oupper_b = other.bounds[1]
+
+        if self._compare_edges(supper, supper_b, oupper, oupper_b) > 0:
+            return False
+
+        return True
+
+    def contains(self, value: Union[_T, Range[_T]]) -> bool:
+        "Determine whether this range contains `value`."
+
+        if isinstance(value, Range):
+            return value.contained_by(self)
+        else:
+            return self._contains_value(value)
+
+    __contains__ = contains
+
+    def overlaps(self, other: Range[_T]) -> bool:
+        "Determine whether this range overlaps with `other`."
+
+        # Empty ranges never overlap with any other range
+        if self.empty or other.empty:
+            return False
+
+        slower = self.lower
+        slower_b = self.bounds[0]
+        supper = self.upper
+        supper_b = self.bounds[1]
+        olower = other.lower
+        olower_b = other.bounds[0]
+        oupper = other.upper
+        oupper_b = other.bounds[1]
+
+        # Check whether this lower bound is contained in the other range
+        if (
+            self._compare_edges(slower, slower_b, olower, olower_b) >= 0
+            and self._compare_edges(slower, slower_b, oupper, oupper_b) <= 0
+        ):
+            return True
+
+        # Check whether other lower bound is contained in this range
+        if (
+            self._compare_edges(olower, olower_b, slower, slower_b) >= 0
+            and self._compare_edges(olower, olower_b, supper, supper_b) <= 0
+        ):
+            return True
+
+        return False
+
+    def strictly_left_of(self, other: Range[_T]) -> bool:
+        "Determine whether this range is completely to the left of `other`."
+
+        # Empty ranges are neither to left nor to the right of any other range
+        if self.empty or other.empty:
+            return False
+
+        supper = self.upper
+        supper_b = self.bounds[1]
+        olower = other.lower
+        olower_b = other.bounds[0]
+
+        # Check whether this upper edge is less than other's lower end
+        return self._compare_edges(supper, supper_b, olower, olower_b) < 0
+
+    __lshift__ = strictly_left_of
+
+    def strictly_right_of(self, other: Range[_T]) -> bool:
+        "Determine whether this range is completely to the right of `other`."
+
+        # Empty ranges are neither to left nor to the right of any other range
+        if self.empty or other.empty:
+            return False
+
+        slower = self.lower
+        slower_b = self.bounds[0]
+        oupper = other.upper
+        oupper_b = other.bounds[1]
+
+        # Check whether this lower edge is greater than other's upper end
+        return self._compare_edges(slower, slower_b, oupper, oupper_b) > 0
+
+    __rshift__ = strictly_right_of
+
+    def not_extend_left_of(self, other: Range[_T]) -> bool:
+        "Determine whether this does not extend to the left of `other`."
+
+        # Empty ranges are neither to left nor to the right of any other range
+        if self.empty or other.empty:
+            return False
+
+        slower = self.lower
+        slower_b = self.bounds[0]
+        olower = other.lower
+        olower_b = other.bounds[0]
+
+        # Check whether this lower edge is not less than other's lower end
+        return self._compare_edges(slower, slower_b, olower, olower_b) >= 0
+
+    def not_extend_right_of(self, other: Range[_T]) -> bool:
+        "Determine whether this does not extend to the right of `other`."
+
+        # Empty ranges are neither to left nor to the right of any other range
+        if self.empty or other.empty:
+            return False
+
+        supper = self.upper
+        supper_b = self.bounds[1]
+        oupper = other.upper
+        oupper_b = other.bounds[1]
+
+        # Check whether this upper edge is not greater than other's upper end
+        return self._compare_edges(supper, supper_b, oupper, oupper_b) <= 0
+
+    def _upper_edge_adjacent_to_lower(
+        self,
+        value1: Optional[_T],
+        bound1: str,
+        value2: Optional[_T],
+        bound2: str,
+    ) -> bool:
+        """Determine whether an upper bound is immediately successive to a
+        lower bound."""
+
+        # Since we need a peculiar way to handle the bounds inclusivity,
+        # just do a comparison by value here
+        res = self._compare_edges(value1, bound1, value2, bound2, True)
+        if res == -1:
+            step = self._get_discrete_step()
+            if step is None:
+                return False
+            if bound1 == "]":
+                if bound2 == "[":
+                    return value1 == value2 - step  # type: ignore
+                else:
+                    return value1 == value2
+            else:
+                if bound2 == "[":
+                    return value1 == value2
+                else:
+                    return value1 == value2 - step  # type: ignore
+        elif res == 0:
+            # Cover cases like [0,0] -|- [1,] and [0,2) -|- (1,3]
+            if (
+                bound1 == "]"
+                and bound2 == "["
+                or bound1 == ")"
+                and bound2 == "("
+            ):
+                step = self._get_discrete_step()
+                if step is not None:
+                    return True
+            return (
+                bound1 == ")"
+                and bound2 == "["
+                or bound1 == "]"
+                and bound2 == "("
+            )
+        else:
+            return False
+
+    def adjacent_to(self, other: Range[_T]) -> bool:
+        "Determine whether this range is adjacent to the `other`."
+
+        # Empty ranges are not adjacent to any other range
+        if self.empty or other.empty:
+            return False
+
+        slower = self.lower
+        slower_b = self.bounds[0]
+        supper = self.upper
+        supper_b = self.bounds[1]
+        olower = other.lower
+        olower_b = other.bounds[0]
+        oupper = other.upper
+        oupper_b = other.bounds[1]
+
+        return self._upper_edge_adjacent_to_lower(
+            supper, supper_b, olower, olower_b
+        ) or self._upper_edge_adjacent_to_lower(
+            oupper, oupper_b, slower, slower_b
+        )
+
+    def union(self, other: Range[_T]) -> Range[_T]:
+        """Compute the union of this range with the `other`.
+
+        This raises a ``ValueError`` exception if the two ranges are
+        "disjunct", that is neither adjacent nor overlapping.
+        """
+
+        # Empty ranges are "additive identities"
+        if self.empty:
+            return other
+        if other.empty:
+            return self
+
+        if not self.overlaps(other) and not self.adjacent_to(other):
+            raise ValueError(
+                "Adding non-overlapping and non-adjacent"
+                " ranges is not implemented"
+            )
+
+        slower = self.lower
+        slower_b = self.bounds[0]
+        supper = self.upper
+        supper_b = self.bounds[1]
+        olower = other.lower
+        olower_b = other.bounds[0]
+        oupper = other.upper
+        oupper_b = other.bounds[1]
+
+        if self._compare_edges(slower, slower_b, olower, olower_b) < 0:
+            rlower = slower
+            rlower_b = slower_b
+        else:
+            rlower = olower
+            rlower_b = olower_b
+
+        if self._compare_edges(supper, supper_b, oupper, oupper_b) > 0:
+            rupper = supper
+            rupper_b = supper_b
+        else:
+            rupper = oupper
+            rupper_b = oupper_b
+
+        return Range(
+            rlower, rupper, bounds=cast(_BoundsType, rlower_b + rupper_b)
+        )
+
+    def __add__(self, other: Range[_T]) -> Range[_T]:
+        return self.union(other)
+
+    def difference(self, other: Range[_T]) -> Range[_T]:
+        """Compute the difference between this range and the `other`.
+
+        This raises a ``ValueError`` exception if the two ranges are
+        "disjunct", that is neither adjacent nor overlapping.
+        """
+
+        # Subtracting an empty range is a no-op
+        if self.empty or other.empty:
+            return self
+
+        slower = self.lower
+        slower_b = self.bounds[0]
+        supper = self.upper
+        supper_b = self.bounds[1]
+        olower = other.lower
+        olower_b = other.bounds[0]
+        oupper = other.upper
+        oupper_b = other.bounds[1]
+
+        sl_vs_ol = self._compare_edges(slower, slower_b, olower, olower_b)
+        su_vs_ou = self._compare_edges(supper, supper_b, oupper, oupper_b)
+        if sl_vs_ol < 0 and su_vs_ou > 0:
+            raise ValueError(
+                "Subtracting a strictly inner range is not implemented"
+            )
+
+        sl_vs_ou = self._compare_edges(slower, slower_b, oupper, oupper_b)
+        su_vs_ol = self._compare_edges(supper, supper_b, olower, olower_b)
+
+        # If the ranges do not overlap, result is simply the first
+        if sl_vs_ou > 0 or su_vs_ol < 0:
+            return self
+
+        # If this range is completely contained by the other, result is empty
+        if sl_vs_ol >= 0 and su_vs_ou <= 0:
+            return Range(None, None, empty=True)
+
+        # If this range extends to the left of the other and ends in its
+        # middle
+        if sl_vs_ol <= 0 and su_vs_ol >= 0 and su_vs_ou <= 0:
+            rupper_b = ")" if olower_b == "[" else "]"
+            if (
+                slower_b != "["
+                and rupper_b != "]"
+                and self._compare_edges(slower, slower_b, olower, rupper_b)
+                == 0
+            ):
+                return Range(None, None, empty=True)
+            else:
+                return Range(
+                    slower,
+                    olower,
+                    bounds=cast(_BoundsType, slower_b + rupper_b),
+                )
+
+        # If this range starts in the middle of the other and extends to its
+        # right
+        if sl_vs_ol >= 0 and su_vs_ou >= 0 and sl_vs_ou <= 0:
+            rlower_b = "(" if oupper_b == "]" else "["
+            if (
+                rlower_b != "["
+                and supper_b != "]"
+                and self._compare_edges(oupper, rlower_b, supper, supper_b)
+                == 0
+            ):
+                return Range(None, None, empty=True)
+            else:
+                return Range(
+                    oupper,
+                    supper,
+                    bounds=cast(_BoundsType, rlower_b + supper_b),
+                )
+
+        assert False, f"Unhandled case computing {self} - {other}"
+
+    def __sub__(self, other: Range[_T]) -> Range[_T]:
+        return self.difference(other)
+
+    def intersection(self, other: Range[_T]) -> Range[_T]:
+        """Compute the intersection of this range with the `other`.
+
+        .. versionadded:: 2.0.10
+
+        """
+        if self.empty or other.empty or not self.overlaps(other):
+            return Range(None, None, empty=True)
+
+        slower = self.lower
+        slower_b = self.bounds[0]
+        supper = self.upper
+        supper_b = self.bounds[1]
+        olower = other.lower
+        olower_b = other.bounds[0]
+        oupper = other.upper
+        oupper_b = other.bounds[1]
+
+        if self._compare_edges(slower, slower_b, olower, olower_b) < 0:
+            rlower = olower
+            rlower_b = olower_b
+        else:
+            rlower = slower
+            rlower_b = slower_b
+
+        if self._compare_edges(supper, supper_b, oupper, oupper_b) > 0:
+            rupper = oupper
+            rupper_b = oupper_b
+        else:
+            rupper = supper
+            rupper_b = supper_b
+
+        return Range(
+            rlower,
+            rupper,
+            bounds=cast(_BoundsType, rlower_b + rupper_b),
+        )
+
+    def __mul__(self, other: Range[_T]) -> Range[_T]:
+        return self.intersection(other)
+
+    def __str__(self) -> str:
+        return self._stringify()
+
+    def _stringify(self) -> str:
+        if self.empty:
+            return "empty"
+
+        l, r = self.lower, self.upper
+        l = "" if l is None else l  # type: ignore
+        r = "" if r is None else r  # type: ignore
+
+        b0, b1 = cast("Tuple[str, str]", self.bounds)
+
+        return f"{b0}{l},{r}{b1}"
+
+
+class MultiRange(List[Range[_T]]):
+    """Represents a multirange sequence.
+
+    This list subclass is a utility to allow automatic type inference of
+    the proper multi-range SQL type depending on the single range values.
+    This is useful when operating on literal multi-ranges::
+
+        import sqlalchemy as sa
+        from sqlalchemy.dialects.postgresql import MultiRange, Range
+
+        value = sa.literal(MultiRange([Range(2, 4)]))
+
+        sa.select(tbl).where(tbl.c.value.op("@>")(MultiRange([Range(-3, 7)])))
+
+    .. versionadded:: 2.0.26
+
+    .. seealso::
+
+        - :ref:`postgresql_multirange_list_use`.
+    """
+
+    @property
+    def __sa_type_engine__(self) -> AbstractMultiRange[_T]:
+        return AbstractMultiRange()
+
+
+class AbstractRange(sqltypes.TypeEngine[_T]):
+    """Base class for single and multi Range SQL types."""
+
+    render_bind_cast = True
+
+    __abstract__ = True
+
+    @overload
+    def adapt(self, cls: Type[_TE], **kw: Any) -> _TE: ...
+
+    @overload
+    def adapt(
+        self, cls: Type[TypeEngineMixin], **kw: Any
+    ) -> TypeEngine[Any]: ...
+
+    def adapt(
+        self,
+        cls: Type[Union[TypeEngine[Any], TypeEngineMixin]],
+        **kw: Any,
+    ) -> TypeEngine[Any]:
+        """Dynamically adapt a range type to an abstract impl.
+
+        For example ``INT4RANGE().adapt(_Psycopg2NumericRange)`` should
+        produce a type that will have ``_Psycopg2NumericRange`` behaviors
+        and also render as ``INT4RANGE`` in SQL and DDL.
+
+        """
+        if (
+            issubclass(cls, (AbstractSingleRangeImpl, AbstractMultiRangeImpl))
+            and cls is not self.__class__
+        ):
+            # two ways to do this are:  1. create a new type on the fly
+            # or 2. have AbstractRangeImpl(visit_name) constructor and a
+            # visit_abstract_range_impl() method in the PG compiler.
+            # I'm choosing #1 as the resulting type object
+            # will then make use of the same mechanics
+            # as if we had made all these sub-types explicitly, and will
+            # also look more obvious under pdb etc.
+            # The adapt() operation here is cached per type-class-per-dialect,
+            # so is not much of a performance concern
+            visit_name = self.__visit_name__
+            return type(  # type: ignore
+                f"{visit_name}RangeImpl",
+                (cls, self.__class__),
+                {"__visit_name__": visit_name},
+            )()
+        else:
+            return super().adapt(cls)
+
+    class comparator_factory(TypeEngine.Comparator[Range[Any]]):
+        """Define comparison operations for range types."""
+
+        def contains(self, other: Any, **kw: Any) -> ColumnElement[bool]:
+            """Boolean expression. Returns true if the right hand operand,
+            which can be an element or a range, is contained within the
+            column.
+
+            kwargs may be ignored by this operator but are required for API
+            conformance.
+            """
+            return self.expr.operate(CONTAINS, other)
+
+        def contained_by(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression. Returns true if the column is contained
+            within the right hand operand.
+            """
+            return self.expr.operate(CONTAINED_BY, other)
+
+        def overlaps(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression. Returns true if the column overlaps
+            (has points in common with) the right hand operand.
+            """
+            return self.expr.operate(OVERLAP, other)
+
+        def strictly_left_of(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression. Returns true if the column is strictly
+            left of the right hand operand.
+            """
+            return self.expr.operate(STRICTLY_LEFT_OF, other)
+
+        __lshift__ = strictly_left_of
+
+        def strictly_right_of(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression. Returns true if the column is strictly
+            right of the right hand operand.
+            """
+            return self.expr.operate(STRICTLY_RIGHT_OF, other)
+
+        __rshift__ = strictly_right_of
+
+        def not_extend_right_of(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression. Returns true if the range in the column
+            does not extend right of the range in the operand.
+            """
+            return self.expr.operate(NOT_EXTEND_RIGHT_OF, other)
+
+        def not_extend_left_of(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression. Returns true if the range in the column
+            does not extend left of the range in the operand.
+            """
+            return self.expr.operate(NOT_EXTEND_LEFT_OF, other)
+
+        def adjacent_to(self, other: Any) -> ColumnElement[bool]:
+            """Boolean expression. Returns true if the range in the column
+            is adjacent to the range in the operand.
+            """
+            return self.expr.operate(ADJACENT_TO, other)
+
+        def union(self, other: Any) -> ColumnElement[Range[_T]]:
+            """Range expression. Returns the union of the two ranges.
+            Will raise an exception if the resulting range is not
+            contiguous.
+            """
+            return self.expr.operate(operators.add, other)
+
+        def difference(self, other: Any) -> ColumnElement[Range[_T]]:
+            """Range expression. Returns the difference of the two ranges.
+            Will raise an exception if the resulting range is not
+            contiguous.
+            """
+            return self.expr.operate(operators.sub, other)
+
+        def intersection(self, other: Any) -> ColumnElement[Range[_T]]:
+            """Range expression. Returns the intersection of the two
+            ranges.
+            """
+            return self.expr.operate(operators.mul, other)
+
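+# An illustrative sketch (not part of the dialect) exercising the
+# comparator methods above against a hypothetical "booking" table with
+# an INT4RANGE column; "<<" resolves to strictly_left_of() via the
+# __lshift__ alias.
+def _demo_range_operators() -> None:
+    from sqlalchemy import Column, MetaData, Table, select
+
+    booking = Table("booking", MetaData(), Column("seats", INT4RANGE))
+    select(booking).where(booking.c.seats.contains(Range(1, 5)))
+    select(booking).where(booking.c.seats.overlaps(Range(3, 8)))
+    select(booking).where(booking.c.seats << Range(100, 200))
+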
+
+class AbstractSingleRange(AbstractRange[Range[_T]]):
+    """Base for PostgreSQL RANGE types.
+
+    These are types that return a single :class:`_postgresql.Range` object.
+
+    .. seealso::
+
+        `PostgreSQL range functions <https://www.postgresql.org/docs/current/static/functions-range.html>`_
+
+    """  # noqa: E501
+
+    __abstract__ = True
+
+    def _resolve_for_literal(self, value: Range[Any]) -> Any:
+        spec = value.lower if value.lower is not None else value.upper
+
+        if isinstance(spec, int):
+            # pg is unreasonably picky here: the query
+            # "select 1::INTEGER <@ '[1, 4)'::INT8RANGE" raises
+            # "operator does not exist: integer <@ int8range" as of pg 16
+            if _is_int32(value):
+                return INT4RANGE()
+            else:
+                return INT8RANGE()
+        elif isinstance(spec, (Decimal, float)):
+            return NUMRANGE()
+        elif isinstance(spec, datetime):
+            return TSRANGE() if not spec.tzinfo else TSTZRANGE()
+        elif isinstance(spec, date):
+            return DATERANGE()
+        else:
+            # empty Range, SQL datatype can't be determined here
+            return sqltypes.NULLTYPE
+
+
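+# An illustrative sketch (not part of the dialect) of the literal type
+# inference above: the SQL type is chosen from the Python type and the
+# magnitude of the bound values.
+def _demo_single_range_inference() -> None:
+    from sqlalchemy import literal
+
+    literal(Range(1, 4))  # resolves to INT4RANGE
+    literal(Range(1, 2**40))  # upper bound exceeds int32 -> INT8RANGE
+    literal(Range(Decimal("1.5"), Decimal("2.5")))  # NUMRANGE
+
+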
+class AbstractSingleRangeImpl(AbstractSingleRange[_T]):
+    """Marker for AbstractSingleRange that will apply a subclass-specific
+    adaptation"""
+
+
+class AbstractMultiRange(AbstractRange[Sequence[Range[_T]]]):
+    """Base for PostgreSQL MULTIRANGE types.
+
+    These are types that return a sequence of :class:`_postgresql.Range`
+    objects.
+
+    """
+
+    __abstract__ = True
+
+    def _resolve_for_literal(self, value: Sequence[Range[Any]]) -> Any:
+        if not value:
+            # empty MultiRange, SQL datatype can't be determined here
+            return sqltypes.NULLTYPE
+        first = value[0]
+        spec = first.lower if first.lower is not None else first.upper
+
+        if isinstance(spec, int):
+            # pg is unreasonably picky here: the query
+            # "select 1::INTEGER <@ '{[1, 4),[6,19)}'::INT8MULTIRANGE" raises
+            # "operator does not exist: integer <@ int8multirange" as of pg 16
+            if all(_is_int32(r) for r in value):
+                return INT4MULTIRANGE()
+            else:
+                return INT8MULTIRANGE()
+        elif isinstance(spec, (Decimal, float)):
+            return NUMMULTIRANGE()
+        elif isinstance(spec, datetime):
+            return TSMULTIRANGE() if not spec.tzinfo else TSTZMULTIRANGE()
+        elif isinstance(spec, date):
+            return DATEMULTIRANGE()
+        else:
+            # empty Range, SQL datatype can't be determined here
+            return sqltypes.NULLTYPE
+
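+# The multirange counterpart of the inference sketch above: a single
+# bound outside the 32-bit window promotes the whole literal to
+# INT8MULTIRANGE (illustrative only, not part of the dialect).
+def _demo_multi_range_inference() -> None:
+    from sqlalchemy import literal
+
+    literal(MultiRange([Range(1, 4), Range(6, 19)]))  # INT4MULTIRANGE
+    literal(MultiRange([Range(1, 4), Range(6, 2**40)]))  # INT8MULTIRANGE
+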
+
+class AbstractMultiRangeImpl(AbstractMultiRange[_T]):
+    """Marker for AbstractMultiRange that will apply a subclass-specific
+    adaptation"""
+
+
+class INT4RANGE(AbstractSingleRange[int]):
+    """Represent the PostgreSQL INT4RANGE type."""
+
+    __visit_name__ = "INT4RANGE"
+
+
+class INT8RANGE(AbstractSingleRange[int]):
+    """Represent the PostgreSQL INT8RANGE type."""
+
+    __visit_name__ = "INT8RANGE"
+
+
+class NUMRANGE(AbstractSingleRange[Decimal]):
+    """Represent the PostgreSQL NUMRANGE type."""
+
+    __visit_name__ = "NUMRANGE"
+
+
+class DATERANGE(AbstractSingleRange[date]):
+    """Represent the PostgreSQL DATERANGE type."""
+
+    __visit_name__ = "DATERANGE"
+
+
+class TSRANGE(AbstractSingleRange[datetime]):
+    """Represent the PostgreSQL TSRANGE type."""
+
+    __visit_name__ = "TSRANGE"
+
+
+class TSTZRANGE(AbstractSingleRange[datetime]):
+    """Represent the PostgreSQL TSTZRANGE type."""
+
+    __visit_name__ = "TSTZRANGE"
+
+
+class INT4MULTIRANGE(AbstractMultiRange[int]):
+    """Represent the PostgreSQL INT4MULTIRANGE type."""
+
+    __visit_name__ = "INT4MULTIRANGE"
+
+
+class INT8MULTIRANGE(AbstractMultiRange[int]):
+    """Represent the PostgreSQL INT8MULTIRANGE type."""
+
+    __visit_name__ = "INT8MULTIRANGE"
+
+
+class NUMMULTIRANGE(AbstractMultiRange[Decimal]):
+    """Represent the PostgreSQL NUMMULTIRANGE type."""
+
+    __visit_name__ = "NUMMULTIRANGE"
+
+
+class DATEMULTIRANGE(AbstractMultiRange[date]):
+    """Represent the PostgreSQL DATEMULTIRANGE type."""
+
+    __visit_name__ = "DATEMULTIRANGE"
+
+
+class TSMULTIRANGE(AbstractMultiRange[datetime]):
+    """Represent the PostgreSQL TSRANGE type."""
+
+    __visit_name__ = "TSMULTIRANGE"
+
+
+class TSTZMULTIRANGE(AbstractMultiRange[datetime]):
+    """Represent the PostgreSQL TSTZRANGE type."""
+
+    __visit_name__ = "TSTZMULTIRANGE"
+
+
+_max_int_32 = 2**31 - 1
+_min_int_32 = -(2**31)
+
+
+def _is_int32(r: Range[int]) -> bool:
+    return (r.lower is None or _min_int_32 <= r.lower <= _max_int_32) and (
+        r.upper is None or _min_int_32 <= r.upper <= _max_int_32
+    )
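+
+
+# A quick sanity sketch (not part of the dialect) for _is_int32(): both
+# bounds must sit inside the signed 32-bit window, and an open (None)
+# bound always passes.
+def _demo_is_int32() -> None:
+    assert _is_int32(Range(1, 4))
+    assert _is_int32(Range(None, None))
+    assert not _is_int32(Range(None, 2**31))  # 2**31 == _max_int_32 + 1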
diff --git a/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/types.py b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/types.py
new file mode 100644
index 00000000..1aed2bf4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sqlalchemy/dialects/postgresql/types.py
@@ -0,0 +1,313 @@
+# dialects/postgresql/types.py
+# Copyright (C) 2013-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+from __future__ import annotations
+
+import datetime as dt
+from typing import Any
+from typing import Optional
+from typing import overload
+from typing import Type
+from typing import TYPE_CHECKING
+from uuid import UUID as _python_UUID
+
+from ...sql import sqltypes
+from ...sql import type_api
+from ...util.typing import Literal
+
+if TYPE_CHECKING:
+    from ...engine.interfaces import Dialect
+    from ...sql.operators import OperatorType
+    from ...sql.type_api import _LiteralProcessorType
+    from ...sql.type_api import TypeEngine
+
+_DECIMAL_TYPES = (1231, 1700)
+_FLOAT_TYPES = (700, 701, 1021, 1022)
+_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)
+
+
+class PGUuid(sqltypes.UUID[sqltypes._UUID_RETURN]):
+    render_bind_cast = True
+    render_literal_cast = True
+
+    if TYPE_CHECKING:
+
+        @overload
+        def __init__(
+            self: PGUuid[_python_UUID], as_uuid: Literal[True] = ...
+        ) -> None: ...
+
+        @overload
+        def __init__(
+            self: PGUuid[str], as_uuid: Literal[False] = ...
+        ) -> None: ...
+
+        def __init__(self, as_uuid: bool = True) -> None: ...
+
+
+class BYTEA(sqltypes.LargeBinary):
+    __visit_name__ = "BYTEA"
+
+
+class _NetworkAddressTypeMixin:
+    """Mixin for the network address types that returns the column's own
+    type for compared values, so that e.g. a string compared to an INET
+    column is bound as ``INET`` rather than as ``VARCHAR``."""
+
+    def coerce_compared_value(
+        self, op: Optional[OperatorType], value: Any
+    ) -> TypeEngine[Any]:
+        if TYPE_CHECKING:
+            assert isinstance(self, TypeEngine)
+        return self
+
+
+class INET(_NetworkAddressTypeMixin, sqltypes.TypeEngine[str]):
+    __visit_name__ = "INET"
+
+
+PGInet = INET
+
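+# An illustrative sketch (not part of the dialect) of what the mixin
+# above buys us, assuming a hypothetical "hosts" table: the string on
+# the right-hand side stays typed as INET, courtesy of
+# coerce_compared_value(), rather than being coerced to VARCHAR.
+def _demo_inet_comparison() -> None:
+    from sqlalchemy import Column, MetaData, Table, select
+
+    hosts = Table("hosts", MetaData(), Column("addr", INET))
+    stmt = select(hosts).where(hosts.c.addr == "10.0.0.0/24")
+    print(stmt)
+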
+
+class CIDR(_NetworkAddressTypeMixin, sqltypes.TypeEngine[str]):
+    __visit_name__ = "CIDR"
+
+
+PGCidr = CIDR
+
+
+class MACADDR(_NetworkAddressTypeMixin, sqltypes.TypeEngine[str]):
+    __visit_name__ = "MACADDR"
+
+
+PGMacAddr = MACADDR
+
+
+class MACADDR8(_NetworkAddressTypeMixin, sqltypes.TypeEngine[str]):
+    __visit_name__ = "MACADDR8"
+
+
+PGMacAddr8 = MACADDR8
+
+
+class MONEY(sqltypes.TypeEngine[str]):
+    r"""Provide the PostgreSQL MONEY type.
+
+    Depending on driver, result rows using this type may return a
+    string value which includes currency symbols.
+
+    For this reason, it may be preferable to provide conversion to a
+    numerically-based currency datatype using :class:`_types.TypeDecorator`::
+
+        import re
+        import decimal
+        from typing import Any
+
+        from sqlalchemy import Dialect
+        from sqlalchemy import TypeDecorator
+
+
+        class NumericMoney(TypeDecorator):
+            impl = MONEY
+
+            def process_result_value(self, value: Any, dialect: Dialect) -> Any:
+                if value is not None:
+                    # adjust this for the currency and numeric
+                    m = re.match(r"\$([\d.]+)", value)
+                    if m:
+                        value = decimal.Decimal(m.group(1))
+                return value
+
+    Alternatively, the conversion may be applied as a CAST using
+    the :meth:`_types.TypeDecorator.column_expression` method as follows::
+
+        from typing import Any
+
+        from sqlalchemy import cast
+        from sqlalchemy import Numeric
+        from sqlalchemy import TypeDecorator
+
+
+        class NumericMoney(TypeDecorator):
+            impl = MONEY
+
+            def column_expression(self, column: Any) -> Any:
+                return cast(column, Numeric())
+
+    .. versionadded:: 1.2
+
+    """  # noqa: E501
+
+    __visit_name__ = "MONEY"
+
+
+class OID(sqltypes.TypeEngine[int]):
+    """Provide the PostgreSQL OID type."""
+
+    __visit_name__ = "OID"
+
+
+class REGCONFIG(sqltypes.TypeEngine[str]):
+    """Provide the PostgreSQL REGCONFIG type.
+
+    .. versionadded:: 2.0.0rc1
+
+    """
+
+    __visit_name__ = "REGCONFIG"
+
+
+class TSQUERY(sqltypes.TypeEngine[str]):
+    """Provide the PostgreSQL TSQUERY type.
+
+    .. versionadded:: 2.0.0rc1
+
+    """
+
+    __visit_name__ = "TSQUERY"
+
+
+class REGCLASS(sqltypes.TypeEngine[str]):
+    """Provide the PostgreSQL REGCLASS type.
+
+    .. versionadded:: 1.2.7
+
+    """
+
+    __visit_name__ = "REGCLASS"
+
+
+class TIMESTAMP(sqltypes.TIMESTAMP):
+    """Provide the PostgreSQL TIMESTAMP type."""
+
+    __visit_name__ = "TIMESTAMP"
+
+    def __init__(
+        self, timezone: bool = False, precision: Optional[int] = None
+    ) -> None:
+        """Construct a TIMESTAMP.
+
+        :param timezone: boolean value indicating whether the type should
+         use ``WITH TIME ZONE``; defaults to ``False``
+        :param precision: optional integer precision value
+
+         .. versionadded:: 1.4
+
+        """
+        super().__init__(timezone=timezone)
+        self.precision = precision
+
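+# An illustrative sketch (not part of the dialect): the precision value
+# renders inside the type in DDL, producing roughly
+# "ts TIMESTAMP(6) WITH TIME ZONE" for the column below.
+def _demo_timestamp_ddl() -> None:
+    from sqlalchemy import Column, MetaData, Table
+    from sqlalchemy.dialects import postgresql
+    from sqlalchemy.schema import CreateTable
+
+    t = Table(
+        "t", MetaData(), Column("ts", TIMESTAMP(timezone=True, precision=6))
+    )
+    print(CreateTable(t).compile(dialect=postgresql.dialect()))
+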
+
+class TIME(sqltypes.TIME):
+    """PostgreSQL TIME type."""
+
+    __visit_name__ = "TIME"
+
+    def __init__(
+        self, timezone: bool = False, precision: Optional[int] = None
+    ) -> None:
+        """Construct a TIME.
+
+        :param timezone: boolean value indicating whether the type should
+         use ``WITH TIME ZONE``; defaults to ``False``
+        :param precision: optional integer precision value
+
+         .. versionadded:: 1.4
+
+        """
+        super().__init__(timezone=timezone)
+        self.precision = precision
+
+
+class INTERVAL(type_api.NativeForEmulated, sqltypes._AbstractInterval):
+    """PostgreSQL INTERVAL type."""
+
+    __visit_name__ = "INTERVAL"
+    native = True
+
+    def __init__(
+        self, precision: Optional[int] = None, fields: Optional[str] = None
+    ) -> None:
+        """Construct an INTERVAL.
+
+        :param precision: optional integer precision value
+        :param fields: string fields specifier.  Allows storage of fields
+         to be limited, such as ``"YEAR"``, ``"MONTH"``, ``"DAY TO HOUR"``,
+         etc.
+
+         .. versionadded:: 1.2
+
+        """
+        self.precision = precision
+        self.fields = fields
+
+    @classmethod
+    def adapt_emulated_to_native(
+        cls, interval: sqltypes.Interval, **kw: Any  # type: ignore[override]
+    ) -> INTERVAL:
+        return INTERVAL(precision=interval.second_precision)
+
+    @property
+    def _type_affinity(self) -> Type[sqltypes.Interval]:
+        return sqltypes.Interval
+
+    def as_generic(self, allow_nulltype: bool = False) -> sqltypes.Interval:
+        return sqltypes.Interval(native=True, second_precision=self.precision)
+
+    @property
+    def python_type(self) -> Type[dt.timedelta]:
+        return dt.timedelta
+
+    def literal_processor(
+        self, dialect: Dialect
+    ) -> Optional[_LiteralProcessorType[dt.timedelta]]:
+        def process(value: dt.timedelta) -> str:
+            return f"make_interval(secs=>{value.total_seconds()})"
+
+        return process
+
+
+PGInterval = INTERVAL
+
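+# An illustrative sketch (not part of the dialect) of the
+# literal_processor above: rendering binds inline emits a timedelta as
+# a make_interval() call, e.g. roughly "make_interval(secs=>3600.0)".
+def _demo_interval_literal() -> None:
+    from sqlalchemy import literal
+    from sqlalchemy.dialects import postgresql
+
+    expr = literal(dt.timedelta(hours=1), INTERVAL())
+    print(
+        expr.compile(
+            dialect=postgresql.dialect(),
+            compile_kwargs={"literal_binds": True},
+        )
+    )
+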
+
+class BIT(sqltypes.TypeEngine[int]):
+    """Represent the PostgreSQL BIT and BIT VARYING types."""
+
+    __visit_name__ = "BIT"
+
+    def __init__(
+        self, length: Optional[int] = None, varying: bool = False
+    ) -> None:
+        if varying:
+            # BIT VARYING can be unlimited-length, so no default
+            self.length = length
+        else:
+            # BIT without VARYING defaults to length 1
+            self.length = length or 1
+        self.varying = varying
+
+
+PGBit = BIT
+
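+# A quick sanity sketch (not part of the dialect) of the length
+# defaulting above: plain BIT defaults to a single bit, while BIT
+# VARYING stays unbounded unless a length is given.
+def _demo_bit_lengths() -> None:
+    assert BIT().length == 1
+    assert BIT(varying=True).length is None
+    assert BIT(10, varying=True).length == 10
+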
+
+class TSVECTOR(sqltypes.TypeEngine[str]):
+    """The :class:`_postgresql.TSVECTOR` type implements the PostgreSQL
+    text search type TSVECTOR.
+
+    It can be used to do full text queries on natural language
+    documents.
+
+    .. seealso::
+
+        :ref:`postgresql_match`
+
+    """
+
+    __visit_name__ = "TSVECTOR"
+
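+# An illustrative sketch (not part of the dialect) of a full text query
+# against a hypothetical "docs" table with a precomputed TSVECTOR
+# column; bool_op("@@") renders the PostgreSQL text-search match
+# operator.
+def _demo_tsvector_query() -> None:
+    from sqlalchemy import Column, MetaData, Table, func, select
+
+    docs = Table("docs", MetaData(), Column("search", TSVECTOR))
+    print(
+        select(docs).where(
+            docs.c.search.bool_op("@@")(
+                func.to_tsquery("english", "cat & dog")
+            )
+        )
+    )
+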
+
+class CITEXT(sqltypes.TEXT):
+    """Provide the PostgreSQL CITEXT type.
+
+    .. versionadded:: 2.0.7
+
+    """
+
+    __visit_name__ = "CITEXT"
+
+    def coerce_compared_value(
+        self, op: Optional[OperatorType], value: Any
+    ) -> TypeEngine[Any]:
+        # compare plain string values as CITEXT, preserving the
+        # case-insensitive comparison semantics of the column
+        return self