| field | value | date |
|---|---|---|
| author | S. Solomon Darnell | 2025-03-28 21:52:21 -0500 |
| committer | S. Solomon Darnell | 2025-03-28 21:52:21 -0500 |
| commit | 4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch) | |
| tree | ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/google | |
| parent | cc961e04ba734dd72309fb548a2f97d67d578813 (diff) | |
| download | gn-ai-master.tar.gz | |
Diffstat (limited to '.venv/lib/python3.12/site-packages/google')
149 files changed, 62135 insertions, 0 deletions
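This commit vendors the `google-auth` package (plus the compiled protobuf `_upb` extension) into the project's virtualenv. For orientation, the sketch below shows the library's primary entry point, Application Default Credentials, as documented in the vendored `google/auth/_default.py`; the `cloud-platform` scope and the `requests`-based transport refresh are illustrative assumptions, not part of this diff.

```python
import google.auth
import google.auth.transport.requests

# Resolve credentials using the ADC search order implemented in
# google/auth/_default.py: GOOGLE_APPLICATION_CREDENTIALS, gcloud's
# application-default credentials, App Engine, then the GCE metadata server.
credentials, project_id = google.auth.default(
    scopes=["https://www.googleapis.com/auth/cloud-platform"]
)

# Credentials start out invalid; refresh once to populate the token.
request = google.auth.transport.requests.Request()
credentials.refresh(request)
print(project_id, credentials.valid)
```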
diff --git a/.venv/lib/python3.12/site-packages/google/_upb/_message.abi3.so b/.venv/lib/python3.12/site-packages/google/_upb/_message.abi3.so Binary files differnew file mode 100644 index 00000000..8f9ed854 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/_upb/_message.abi3.so diff --git a/.venv/lib/python3.12/site-packages/google/auth/__init__.py b/.venv/lib/python3.12/site-packages/google/auth/__init__.py new file mode 100644 index 00000000..765bbd70 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/__init__.py @@ -0,0 +1,53 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Auth Library for Python.""" + +import logging +import sys +import warnings + +from google.auth import version as google_auth_version +from google.auth._default import ( + default, + load_credentials_from_dict, + load_credentials_from_file, +) + + +__version__ = google_auth_version.__version__ + + +__all__ = ["default", "load_credentials_from_file", "load_credentials_from_dict"] + + +class Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER + """ + Deprecation warning raised when Python 3.7 runtime is detected. + Python 3.7 support will be dropped after January 1, 2024. + """ + + pass + + +# Checks if the current runtime is Python 3.7. +if sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER + message = ( + "After January 1, 2024, new releases of this library will drop support " + "for Python 3.7." + ) + warnings.warn(message, Python37DeprecationWarning) + +# Set default logging handler to avoid "No handler found" warnings. +logging.getLogger(__name__).addHandler(logging.NullHandler()) diff --git a/.venv/lib/python3.12/site-packages/google/auth/_cloud_sdk.py b/.venv/lib/python3.12/site-packages/google/auth/_cloud_sdk.py new file mode 100644 index 00000000..a9441194 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/_cloud_sdk.py @@ -0,0 +1,153 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Helpers for reading the Google Cloud SDK's configuration.""" + +import os +import subprocess + +from google.auth import _helpers +from google.auth import environment_vars +from google.auth import exceptions + + +# The ~/.config subdirectory containing gcloud credentials. 
+_CONFIG_DIRECTORY = "gcloud" +# Windows systems store config at %APPDATA%\gcloud +_WINDOWS_CONFIG_ROOT_ENV_VAR = "APPDATA" +# The name of the file in the Cloud SDK config that contains default +# credentials. +_CREDENTIALS_FILENAME = "application_default_credentials.json" +# The name of the Cloud SDK shell script +_CLOUD_SDK_POSIX_COMMAND = "gcloud" +_CLOUD_SDK_WINDOWS_COMMAND = "gcloud.cmd" +# The command to get the Cloud SDK configuration +_CLOUD_SDK_CONFIG_GET_PROJECT_COMMAND = ("config", "get", "project") +# The command to get google user access token +_CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND = ("auth", "print-access-token") +# Cloud SDK's application-default client ID +CLOUD_SDK_CLIENT_ID = ( + "764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com" +) + + +def get_config_path(): + """Returns the absolute path the the Cloud SDK's configuration directory. + + Returns: + str: The Cloud SDK config path. + """ + # If the path is explicitly set, return that. + try: + return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR] + except KeyError: + pass + + # Non-windows systems store this at ~/.config/gcloud + if os.name != "nt": + return os.path.join(os.path.expanduser("~"), ".config", _CONFIG_DIRECTORY) + # Windows systems store config at %APPDATA%\gcloud + else: + try: + return os.path.join( + os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR], _CONFIG_DIRECTORY + ) + except KeyError: + # This should never happen unless someone is really + # messing with things, but we'll cover the case anyway. + drive = os.environ.get("SystemDrive", "C:") + return os.path.join(drive, "\\", _CONFIG_DIRECTORY) + + +def get_application_default_credentials_path(): + """Gets the path to the application default credentials file. + + The path may or may not exist. + + Returns: + str: The full path to application default credentials. + """ + config_path = get_config_path() + return os.path.join(config_path, _CREDENTIALS_FILENAME) + + +def _run_subprocess_ignore_stderr(command): + """ Return subprocess.check_output with the given command and ignores stderr.""" + with open(os.devnull, "w") as devnull: + output = subprocess.check_output(command, stderr=devnull) + return output + + +def get_project_id(): + """Gets the project ID from the Cloud SDK. + + Returns: + Optional[str]: The project ID. + """ + if os.name == "nt": + command = _CLOUD_SDK_WINDOWS_COMMAND + else: + command = _CLOUD_SDK_POSIX_COMMAND + + try: + # Ignore the stderr coming from gcloud, so it won't be mixed into the output. + # https://github.com/googleapis/google-auth-library-python/issues/673 + project = _run_subprocess_ignore_stderr( + (command,) + _CLOUD_SDK_CONFIG_GET_PROJECT_COMMAND + ) + + # Turn bytes into a string and remove "\n" + project = _helpers.from_bytes(project).strip() + return project if project else None + except (subprocess.CalledProcessError, OSError, IOError): + return None + + +def get_auth_access_token(account=None): + """Load user access token with the ``gcloud auth print-access-token`` command. + + Args: + account (Optional[str]): Account to get the access token for. If not + specified, the current active account will be used. + + Returns: + str: The user access token. + + Raises: + google.auth.exceptions.UserAccessTokenError: if failed to get access + token from gcloud. 
+ """ + if os.name == "nt": + command = _CLOUD_SDK_WINDOWS_COMMAND + else: + command = _CLOUD_SDK_POSIX_COMMAND + + try: + if account: + command = ( + (command,) + + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND + + ("--account=" + account,) + ) + else: + command = (command,) + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND + + access_token = subprocess.check_output(command, stderr=subprocess.STDOUT) + # remove the trailing "\n" + return access_token.decode("utf-8").strip() + except (subprocess.CalledProcessError, OSError, IOError) as caught_exc: + new_exc = exceptions.UserAccessTokenError( + "Failed to obtain access token", caught_exc + ) + raise new_exc from caught_exc diff --git a/.venv/lib/python3.12/site-packages/google/auth/_credentials_async.py b/.venv/lib/python3.12/site-packages/google/auth/_credentials_async.py new file mode 100644 index 00000000..760758d8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/_credentials_async.py @@ -0,0 +1,171 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Interfaces for credentials.""" + +import abc +import inspect + +from google.auth import credentials + + +class Credentials(credentials.Credentials, metaclass=abc.ABCMeta): + """Async inherited credentials class from google.auth.credentials. + The added functionality is the before_request call which requires + async/await syntax. + All credentials have a :attr:`token` that is used for authentication and + may also optionally set an :attr:`expiry` to indicate when the token will + no longer be valid. + + Most credentials will be :attr:`invalid` until :meth:`refresh` is called. + Credentials can do this automatically before the first HTTP request in + :meth:`before_request`. + + Although the token and expiration will change as the credentials are + :meth:`refreshed <refresh>` and used, credentials should be considered + immutable. Various credentials will accept configuration such as private + keys, scopes, and other options. These options are not changeable after + construction. Some classes will provide mechanisms to copy the credentials + with modifications such as :meth:`ScopedCredentials.with_scopes`. + """ + + async def before_request(self, request, method, url, headers): + """Performs credential-specific before request logic. + + Refreshes the credentials if necessary, then calls :meth:`apply` to + apply the token to the authentication header. + + Args: + request (google.auth.transport.Request): The object used to make + HTTP requests. + method (str): The request's HTTP method or the RPC method being + invoked. + url (str): The request's URI or the RPC service's URI. + headers (Mapping): The request's headers. + """ + # pylint: disable=unused-argument + # (Subclasses may use these arguments to ascertain information about + # the http request.) 
+ + if not self.valid: + if inspect.iscoroutinefunction(self.refresh): + await self.refresh(request) + else: + self.refresh(request) + self.apply(headers) + + +class CredentialsWithQuotaProject(credentials.CredentialsWithQuotaProject): + """Abstract base for credentials supporting ``with_quota_project`` factory""" + + +class AnonymousCredentials(credentials.AnonymousCredentials, Credentials): + """Credentials that do not provide any authentication information. + + These are useful in the case of services that support anonymous access or + local service emulators that do not use credentials. This class inherits + from the sync anonymous credentials file, but is kept if async credentials + is initialized and we would like anonymous credentials. + """ + + +class ReadOnlyScoped(credentials.ReadOnlyScoped, metaclass=abc.ABCMeta): + """Interface for credentials whose scopes can be queried. + + OAuth 2.0-based credentials allow limiting access using scopes as described + in `RFC6749 Section 3.3`_. + If a credential class implements this interface then the credentials either + use scopes in their implementation. + + Some credentials require scopes in order to obtain a token. You can check + if scoping is necessary with :attr:`requires_scopes`:: + + if credentials.requires_scopes: + # Scoping is required. + credentials = _credentials_async.with_scopes(scopes=['one', 'two']) + + Credentials that require scopes must either be constructed with scopes:: + + credentials = SomeScopedCredentials(scopes=['one', 'two']) + + Or must copy an existing instance using :meth:`with_scopes`:: + + scoped_credentials = _credentials_async.with_scopes(scopes=['one', 'two']) + + Some credentials have scopes but do not allow or require scopes to be set, + these credentials can be used as-is. + + .. _RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3 + """ + + +class Scoped(credentials.Scoped): + """Interface for credentials whose scopes can be replaced while copying. + + OAuth 2.0-based credentials allow limiting access using scopes as described + in `RFC6749 Section 3.3`_. + If a credential class implements this interface then the credentials either + use scopes in their implementation. + + Some credentials require scopes in order to obtain a token. You can check + if scoping is necessary with :attr:`requires_scopes`:: + + if credentials.requires_scopes: + # Scoping is required. + credentials = _credentials_async.create_scoped(['one', 'two']) + + Credentials that require scopes must either be constructed with scopes:: + + credentials = SomeScopedCredentials(scopes=['one', 'two']) + + Or must copy an existing instance using :meth:`with_scopes`:: + + scoped_credentials = credentials.with_scopes(scopes=['one', 'two']) + + Some credentials have scopes but do not allow or require scopes to be set, + these credentials can be used as-is. + + .. _RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3 + """ + + +def with_scopes_if_required(credentials, scopes): + """Creates a copy of the credentials with scopes if scoping is required. + + This helper function is useful when you do not know (or care to know) the + specific type of credentials you are using (such as when you use + :func:`google.auth.default`). This function will call + :meth:`Scoped.with_scopes` if the credentials are scoped credentials and if + the credentials require scoping. Otherwise, it will return the credentials + as-is. + + Args: + credentials (google.auth.credentials.Credentials): The credentials to + scope if necessary. 
+ scopes (Sequence[str]): The list of scopes to use. + + Returns: + google.auth._credentials_async.Credentials: Either a new set of scoped + credentials, or the passed in credentials instance if no scoping + was required. + """ + if isinstance(credentials, Scoped) and credentials.requires_scopes: + return credentials.with_scopes(scopes) + else: + return credentials + + +class Signing(credentials.Signing, metaclass=abc.ABCMeta): + """Interface for credentials that can cryptographically sign messages.""" diff --git a/.venv/lib/python3.12/site-packages/google/auth/_credentials_base.py b/.venv/lib/python3.12/site-packages/google/auth/_credentials_base.py new file mode 100644 index 00000000..64d5ce34 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/_credentials_base.py @@ -0,0 +1,75 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Interface for base credentials.""" + +import abc + +from google.auth import _helpers + + +class _BaseCredentials(metaclass=abc.ABCMeta): + """Base class for all credentials. + + All credentials have a :attr:`token` that is used for authentication and + may also optionally set an :attr:`expiry` to indicate when the token will + no longer be valid. + + Most credentials will be :attr:`invalid` until :meth:`refresh` is called. + Credentials can do this automatically before the first HTTP request in + :meth:`before_request`. + + Although the token and expiration will change as the credentials are + :meth:`refreshed <refresh>` and used, credentials should be considered + immutable. Various credentials will accept configuration such as private + keys, scopes, and other options. These options are not changeable after + construction. Some classes will provide mechanisms to copy the credentials + with modifications such as :meth:`ScopedCredentials.with_scopes`. + + Attributes: + token (Optional[str]): The bearer token that can be used in HTTP headers to make + authenticated requests. + """ + + def __init__(self): + self.token = None + + @abc.abstractmethod + def refresh(self, request): + """Refreshes the access token. + + Args: + request (google.auth.transport.Request): The object used to make + HTTP requests. + + Raises: + google.auth.exceptions.RefreshError: If the credentials could + not be refreshed. + """ + # pylint: disable=missing-raises-doc + # (pylint doesn't recognize that this is abstract) + raise NotImplementedError("Refresh must be implemented") + + def _apply(self, headers, token=None): + """Apply the token to the authentication header. + + Args: + headers (Mapping): The HTTP request headers. + token (Optional[str]): If specified, overrides the current access + token. 
+ """ + headers["authorization"] = "Bearer {}".format( + _helpers.from_bytes(token or self.token) + ) diff --git a/.venv/lib/python3.12/site-packages/google/auth/_default.py b/.venv/lib/python3.12/site-packages/google/auth/_default.py new file mode 100644 index 00000000..1234fb25 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/_default.py @@ -0,0 +1,719 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Application default credentials. + +Implements application default credentials and project ID detection. +""" + +import io +import json +import logging +import os +import warnings + +from google.auth import environment_vars +from google.auth import exceptions +import google.auth.transport._http_client + +_LOGGER = logging.getLogger(__name__) + +# Valid types accepted for file-based credentials. +_AUTHORIZED_USER_TYPE = "authorized_user" +_SERVICE_ACCOUNT_TYPE = "service_account" +_EXTERNAL_ACCOUNT_TYPE = "external_account" +_EXTERNAL_ACCOUNT_AUTHORIZED_USER_TYPE = "external_account_authorized_user" +_IMPERSONATED_SERVICE_ACCOUNT_TYPE = "impersonated_service_account" +_GDCH_SERVICE_ACCOUNT_TYPE = "gdch_service_account" +_VALID_TYPES = ( + _AUTHORIZED_USER_TYPE, + _SERVICE_ACCOUNT_TYPE, + _EXTERNAL_ACCOUNT_TYPE, + _EXTERNAL_ACCOUNT_AUTHORIZED_USER_TYPE, + _IMPERSONATED_SERVICE_ACCOUNT_TYPE, + _GDCH_SERVICE_ACCOUNT_TYPE, +) + +# Help message when no credentials can be found. +_CLOUD_SDK_MISSING_CREDENTIALS = """\ +Your default credentials were not found. To set up Application Default Credentials, \ +see https://cloud.google.com/docs/authentication/external/set-up-adc for more information.\ +""" + +# Warning when using Cloud SDK user credentials +_CLOUD_SDK_CREDENTIALS_WARNING = """\ +Your application has authenticated using end user credentials from Google \ +Cloud SDK without a quota project. You might receive a "quota exceeded" \ +or "API not enabled" error. See the following page for troubleshooting: \ +https://cloud.google.com/docs/authentication/adc-troubleshooting/user-creds. \ +""" + +# The subject token type used for AWS external_account credentials. +_AWS_SUBJECT_TOKEN_TYPE = "urn:ietf:params:aws:token-type:aws4_request" + + +def _warn_about_problematic_credentials(credentials): + """Determines if the credentials are problematic. + + Credentials from the Cloud SDK that are associated with Cloud SDK's project + are problematic because they may not have APIs enabled and have limited + quota. If this is the case, warn about it. + """ + from google.auth import _cloud_sdk + + if credentials.client_id == _cloud_sdk.CLOUD_SDK_CLIENT_ID: + warnings.warn(_CLOUD_SDK_CREDENTIALS_WARNING) + + +def load_credentials_from_file( + filename, scopes=None, default_scopes=None, quota_project_id=None, request=None +): + """Loads Google credentials from a file. + + The credentials file must be a service account key, stored authorized + user credentials, external account credentials, or impersonated service + account credentials. + + .. 
warning:: + Important: If you accept a credential configuration (credential JSON/File/Stream) + from an external source for authentication to Google Cloud Platform, you must + validate it before providing it to any Google API or client library. Providing an + unvalidated credential configuration to Google APIs or libraries can compromise + the security of your systems and data. For more information, refer to + `Validate credential configurations from external sources`_. + + .. _Validate credential configurations from external sources: + https://cloud.google.com/docs/authentication/external/externally-sourced-credentials + + Args: + filename (str): The full path to the credentials file. + scopes (Optional[Sequence[str]]): The list of scopes for the credentials. If + specified, the credentials will automatically be scoped if + necessary + default_scopes (Optional[Sequence[str]]): Default scopes passed by a + Google client library. Use 'scopes' for user-defined scopes. + quota_project_id (Optional[str]): The project ID used for + quota and billing. + request (Optional[google.auth.transport.Request]): An object used to make + HTTP requests. This is used to determine the associated project ID + for a workload identity pool resource (external account credentials). + If not specified, then it will use a + google.auth.transport.requests.Request client to make requests. + + Returns: + Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded + credentials and the project ID. Authorized user credentials do not + have the project ID information. External account credentials project + IDs may not always be determined. + + Raises: + google.auth.exceptions.DefaultCredentialsError: if the file is in the + wrong format or is missing. + """ + if not os.path.exists(filename): + raise exceptions.DefaultCredentialsError( + "File {} was not found.".format(filename) + ) + + with io.open(filename, "r") as file_obj: + try: + info = json.load(file_obj) + except ValueError as caught_exc: + new_exc = exceptions.DefaultCredentialsError( + "File {} is not a valid json file.".format(filename), caught_exc + ) + raise new_exc from caught_exc + return _load_credentials_from_info( + filename, info, scopes, default_scopes, quota_project_id, request + ) + + +def load_credentials_from_dict( + info, scopes=None, default_scopes=None, quota_project_id=None, request=None +): + """Loads Google credentials from a dict. + + The credentials file must be a service account key, stored authorized + user credentials, external account credentials, or impersonated service + account credentials. + + .. warning:: + Important: If you accept a credential configuration (credential JSON/File/Stream) + from an external source for authentication to Google Cloud Platform, you must + validate it before providing it to any Google API or client library. Providing an + unvalidated credential configuration to Google APIs or libraries can compromise + the security of your systems and data. For more information, refer to + `Validate credential configurations from external sources`_. + + .. _Validate credential configurations from external sources: + https://cloud.google.com/docs/authentication/external/externally-sourced-credentials + + Args: + info (Dict[str, Any]): A dict object containing the credentials + scopes (Optional[Sequence[str]]): The list of scopes for the credentials. 
If + specified, the credentials will automatically be scoped if + necessary + default_scopes (Optional[Sequence[str]]): Default scopes passed by a + Google client library. Use 'scopes' for user-defined scopes. + quota_project_id (Optional[str]): The project ID used for + quota and billing. + request (Optional[google.auth.transport.Request]): An object used to make + HTTP requests. This is used to determine the associated project ID + for a workload identity pool resource (external account credentials). + If not specified, then it will use a + google.auth.transport.requests.Request client to make requests. + + Returns: + Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded + credentials and the project ID. Authorized user credentials do not + have the project ID information. External account credentials project + IDs may not always be determined. + + Raises: + google.auth.exceptions.DefaultCredentialsError: if the file is in the + wrong format or is missing. + """ + if not isinstance(info, dict): + raise exceptions.DefaultCredentialsError( + "info object was of type {} but dict type was expected.".format(type(info)) + ) + + return _load_credentials_from_info( + "dict object", info, scopes, default_scopes, quota_project_id, request + ) + + +def _load_credentials_from_info( + filename, info, scopes, default_scopes, quota_project_id, request +): + from google.auth.credentials import CredentialsWithQuotaProject + + credential_type = info.get("type") + + if credential_type == _AUTHORIZED_USER_TYPE: + credentials, project_id = _get_authorized_user_credentials( + filename, info, scopes + ) + + elif credential_type == _SERVICE_ACCOUNT_TYPE: + credentials, project_id = _get_service_account_credentials( + filename, info, scopes, default_scopes + ) + + elif credential_type == _EXTERNAL_ACCOUNT_TYPE: + credentials, project_id = _get_external_account_credentials( + info, + filename, + scopes=scopes, + default_scopes=default_scopes, + request=request, + ) + + elif credential_type == _EXTERNAL_ACCOUNT_AUTHORIZED_USER_TYPE: + credentials, project_id = _get_external_account_authorized_user_credentials( + filename, info, request + ) + + elif credential_type == _IMPERSONATED_SERVICE_ACCOUNT_TYPE: + credentials, project_id = _get_impersonated_service_account_credentials( + filename, info, scopes + ) + elif credential_type == _GDCH_SERVICE_ACCOUNT_TYPE: + credentials, project_id = _get_gdch_service_account_credentials(filename, info) + else: + raise exceptions.DefaultCredentialsError( + "The file {file} does not have a valid type. " + "Type is {type}, expected one of {valid_types}.".format( + file=filename, type=credential_type, valid_types=_VALID_TYPES + ) + ) + if isinstance(credentials, CredentialsWithQuotaProject): + credentials = _apply_quota_project_id(credentials, quota_project_id) + return credentials, project_id + + +def _get_gcloud_sdk_credentials(quota_project_id=None): + """Gets the credentials and project ID from the Cloud SDK.""" + from google.auth import _cloud_sdk + + _LOGGER.debug("Checking Cloud SDK credentials as part of auth process...") + + # Check if application default credentials exist. 
+ credentials_filename = _cloud_sdk.get_application_default_credentials_path() + + if not os.path.isfile(credentials_filename): + _LOGGER.debug("Cloud SDK credentials not found on disk; not using them") + return None, None + + credentials, project_id = load_credentials_from_file( + credentials_filename, quota_project_id=quota_project_id + ) + credentials._cred_file_path = credentials_filename + + if not project_id: + project_id = _cloud_sdk.get_project_id() + + return credentials, project_id + + +def _get_explicit_environ_credentials(quota_project_id=None): + """Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment + variable.""" + from google.auth import _cloud_sdk + + cloud_sdk_adc_path = _cloud_sdk.get_application_default_credentials_path() + explicit_file = os.environ.get(environment_vars.CREDENTIALS) + + _LOGGER.debug( + "Checking %s for explicit credentials as part of auth process...", explicit_file + ) + + if explicit_file is not None and explicit_file == cloud_sdk_adc_path: + # The Cloud SDK flow calls gcloud to fetch the project ID, so if the + # explicit file path is the Cloud SDK credentials path we should fall + # back to the Cloud SDK flow, otherwise the project ID cannot be obtained. + _LOGGER.debug( + "Explicit credentials path %s is the same as Cloud SDK credentials path, fall back to Cloud SDK credentials flow...", + explicit_file, + ) + return _get_gcloud_sdk_credentials(quota_project_id=quota_project_id) + + if explicit_file is not None: + credentials, project_id = load_credentials_from_file( + os.environ[environment_vars.CREDENTIALS], quota_project_id=quota_project_id + ) + credentials._cred_file_path = f"{explicit_file} file via the GOOGLE_APPLICATION_CREDENTIALS environment variable" + + return credentials, project_id + + else: + return None, None + + +def _get_gae_credentials(): + """Gets Google App Engine App Identity credentials and project ID.""" + # If not GAE gen1, prefer the metadata service even if the GAE APIs are + # available as per https://google.aip.dev/auth/4115. + if os.environ.get(environment_vars.LEGACY_APPENGINE_RUNTIME) != "python27": + return None, None + + # While this library is normally bundled with app_engine, there are + # some cases where it's not available, so we tolerate ImportError. + try: + _LOGGER.debug("Checking for App Engine runtime as part of auth process...") + import google.auth.app_engine as app_engine + except ImportError: + _LOGGER.warning("Import of App Engine auth library failed.") + return None, None + + try: + credentials = app_engine.Credentials() + project_id = app_engine.get_project_id() + return credentials, project_id + except EnvironmentError: + _LOGGER.debug( + "No App Engine library was found, so we cannot authenticate via App Engine Identity Credentials." + ) + return None, None + + +def _get_gce_credentials(request=None, quota_project_id=None): + """Gets credentials and project ID from the GCE Metadata Service.""" + # Ping requires a transport, but we want application default credentials + # to require no arguments. So, we'll use the _http_client transport which + # uses http.client. This is only acceptable because the metadata server + # doesn't do SSL and never requires proxies. + + # While this library is normally bundled with compute_engine, there are + # some cases where it's not available, so we tolerate ImportError. 
+ try: + from google.auth import compute_engine + from google.auth.compute_engine import _metadata + except ImportError: + _LOGGER.warning("Import of Compute Engine auth library failed.") + return None, None + + if request is None: + request = google.auth.transport._http_client.Request() + + if _metadata.is_on_gce(request=request): + # Get the project ID. + try: + project_id = _metadata.get_project_id(request=request) + except exceptions.TransportError: + project_id = None + + cred = compute_engine.Credentials() + cred = _apply_quota_project_id(cred, quota_project_id) + + return cred, project_id + else: + _LOGGER.warning( + "Authentication failed using Compute Engine authentication due to unavailable metadata server." + ) + return None, None + + +def _get_external_account_credentials( + info, filename, scopes=None, default_scopes=None, request=None +): + """Loads external account Credentials from the parsed external account info. + + The credentials information must correspond to a supported external account + credentials. + + Args: + info (Mapping[str, str]): The external account info in Google format. + filename (str): The full path to the credentials file. + scopes (Optional[Sequence[str]]): The list of scopes for the credentials. If + specified, the credentials will automatically be scoped if + necessary. + default_scopes (Optional[Sequence[str]]): Default scopes passed by a + Google client library. Use 'scopes' for user-defined scopes. + request (Optional[google.auth.transport.Request]): An object used to make + HTTP requests. This is used to determine the associated project ID + for a workload identity pool resource (external account credentials). + If not specified, then it will use a + google.auth.transport.requests.Request client to make requests. + + Returns: + Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded + credentials and the project ID. External account credentials project + IDs may not always be determined. + + Raises: + google.auth.exceptions.DefaultCredentialsError: if the info dictionary + is in the wrong format or is missing required information. + """ + # There are currently 3 types of external_account credentials. + if info.get("subject_token_type") == _AWS_SUBJECT_TOKEN_TYPE: + # Check if configuration corresponds to an AWS credentials. + from google.auth import aws + + credentials = aws.Credentials.from_info( + info, scopes=scopes, default_scopes=default_scopes + ) + elif ( + info.get("credential_source") is not None + and info.get("credential_source").get("executable") is not None + ): + from google.auth import pluggable + + credentials = pluggable.Credentials.from_info( + info, scopes=scopes, default_scopes=default_scopes + ) + else: + try: + # Check if configuration corresponds to an Identity Pool credentials. + from google.auth import identity_pool + + credentials = identity_pool.Credentials.from_info( + info, scopes=scopes, default_scopes=default_scopes + ) + except ValueError: + # If the configuration is invalid or does not correspond to any + # supported external_account credentials, raise an error. 
+ raise exceptions.DefaultCredentialsError( + "Failed to load external account credentials from {}".format(filename) + ) + if request is None: + import google.auth.transport.requests + + request = google.auth.transport.requests.Request() + + return credentials, credentials.get_project_id(request=request) + + +def _get_external_account_authorized_user_credentials( + filename, info, scopes=None, default_scopes=None, request=None +): + try: + from google.auth import external_account_authorized_user + + credentials = external_account_authorized_user.Credentials.from_info(info) + except ValueError: + raise exceptions.DefaultCredentialsError( + "Failed to load external account authorized user credentials from {}".format( + filename + ) + ) + + return credentials, None + + +def _get_authorized_user_credentials(filename, info, scopes=None): + from google.oauth2 import credentials + + try: + credentials = credentials.Credentials.from_authorized_user_info( + info, scopes=scopes + ) + except ValueError as caught_exc: + msg = "Failed to load authorized user credentials from {}".format(filename) + new_exc = exceptions.DefaultCredentialsError(msg, caught_exc) + raise new_exc from caught_exc + return credentials, None + + +def _get_service_account_credentials(filename, info, scopes=None, default_scopes=None): + from google.oauth2 import service_account + + try: + credentials = service_account.Credentials.from_service_account_info( + info, scopes=scopes, default_scopes=default_scopes + ) + except ValueError as caught_exc: + msg = "Failed to load service account credentials from {}".format(filename) + new_exc = exceptions.DefaultCredentialsError(msg, caught_exc) + raise new_exc from caught_exc + return credentials, info.get("project_id") + + +def _get_impersonated_service_account_credentials(filename, info, scopes): + from google.auth import impersonated_credentials + + try: + source_credentials_info = info.get("source_credentials") + source_credentials_type = source_credentials_info.get("type") + if source_credentials_type == _AUTHORIZED_USER_TYPE: + source_credentials, _ = _get_authorized_user_credentials( + filename, source_credentials_info + ) + elif source_credentials_type == _SERVICE_ACCOUNT_TYPE: + source_credentials, _ = _get_service_account_credentials( + filename, source_credentials_info + ) + elif source_credentials_type == _EXTERNAL_ACCOUNT_AUTHORIZED_USER_TYPE: + source_credentials, _ = _get_external_account_authorized_user_credentials( + filename, source_credentials_info + ) + else: + raise exceptions.InvalidType( + "source credential of type {} is not supported.".format( + source_credentials_type + ) + ) + impersonation_url = info.get("service_account_impersonation_url") + start_index = impersonation_url.rfind("/") + end_index = impersonation_url.find(":generateAccessToken") + if start_index == -1 or end_index == -1 or start_index > end_index: + raise exceptions.InvalidValue( + "Cannot extract target principal from {}".format(impersonation_url) + ) + target_principal = impersonation_url[start_index + 1 : end_index] + delegates = info.get("delegates") + quota_project_id = info.get("quota_project_id") + credentials = impersonated_credentials.Credentials( + source_credentials, + target_principal, + scopes, + delegates, + quota_project_id=quota_project_id, + ) + except ValueError as caught_exc: + msg = "Failed to load impersonated service account credentials from {}".format( + filename + ) + new_exc = exceptions.DefaultCredentialsError(msg, caught_exc) + raise new_exc from caught_exc + return 
credentials, None + + +def _get_gdch_service_account_credentials(filename, info): + from google.oauth2 import gdch_credentials + + try: + credentials = gdch_credentials.ServiceAccountCredentials.from_service_account_info( + info + ) + except ValueError as caught_exc: + msg = "Failed to load GDCH service account credentials from {}".format(filename) + new_exc = exceptions.DefaultCredentialsError(msg, caught_exc) + raise new_exc from caught_exc + return credentials, info.get("project") + + +def get_api_key_credentials(key): + """Return credentials with the given API key.""" + from google.auth import api_key + + return api_key.Credentials(key) + + +def _apply_quota_project_id(credentials, quota_project_id): + if quota_project_id: + credentials = credentials.with_quota_project(quota_project_id) + else: + credentials = credentials.with_quota_project_from_environment() + + from google.oauth2 import credentials as authorized_user_credentials + + if isinstance(credentials, authorized_user_credentials.Credentials) and ( + not credentials.quota_project_id + ): + _warn_about_problematic_credentials(credentials) + return credentials + + +def default(scopes=None, request=None, quota_project_id=None, default_scopes=None): + """Gets the default credentials for the current environment. + + `Application Default Credentials`_ provides an easy way to obtain + credentials to call Google APIs for server-to-server or local applications. + This function acquires credentials from the environment in the following + order: + + 1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set + to the path of a valid service account JSON private key file, then it is + loaded and returned. The project ID returned is the project ID defined + in the service account file if available (some older files do not + contain project ID information). + + If the environment variable is set to the path of a valid external + account JSON configuration file (workload identity federation), then the + configuration file is used to determine and retrieve the external + credentials from the current environment (AWS, Azure, etc). + These will then be exchanged for Google access tokens via the Google STS + endpoint. + The project ID returned in this case is the one corresponding to the + underlying workload identity pool resource if determinable. + + If the environment variable is set to the path of a valid GDCH service + account JSON file (`Google Distributed Cloud Hosted`_), then a GDCH + credential will be returned. The project ID returned is the project + specified in the JSON file. + 2. If the `Google Cloud SDK`_ is installed and has application default + credentials set they are loaded and returned. + + To enable application default credentials with the Cloud SDK run:: + + gcloud auth application-default login + + If the Cloud SDK has an active project, the project ID is returned. The + active project can be set using:: + + gcloud config set project + + 3. If the application is running in the `App Engine standard environment`_ + (first generation) then the credentials and project ID from the + `App Identity Service`_ are used. + 4. If the application is running in `Compute Engine`_ or `Cloud Run`_ or + the `App Engine flexible environment`_ or the `App Engine standard + environment`_ (second generation) then the credentials and project ID + are obtained from the `Metadata Service`_. + 5. If no credentials are found, + :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised. + + .. 
_Application Default Credentials: https://developers.google.com\ + /identity/protocols/application-default-credentials + .. _Google Cloud SDK: https://cloud.google.com/sdk + .. _App Engine standard environment: https://cloud.google.com/appengine + .. _App Identity Service: https://cloud.google.com/appengine/docs/python\ + /appidentity/ + .. _Compute Engine: https://cloud.google.com/compute + .. _App Engine flexible environment: https://cloud.google.com\ + /appengine/flexible + .. _Metadata Service: https://cloud.google.com/compute/docs\ + /storing-retrieving-metadata + .. _Cloud Run: https://cloud.google.com/run + .. _Google Distributed Cloud Hosted: https://cloud.google.com/blog/topics\ + /hybrid-cloud/announcing-google-distributed-cloud-edge-and-hosted + + Example:: + + import google.auth + + credentials, project_id = google.auth.default() + + Args: + scopes (Sequence[str]): The list of scopes for the credentials. If + specified, the credentials will automatically be scoped if + necessary. + request (Optional[google.auth.transport.Request]): An object used to make + HTTP requests. This is used to either detect whether the application + is running on Compute Engine or to determine the associated project + ID for a workload identity pool resource (external account + credentials). If not specified, then it will either use the standard + library http client to make requests for Compute Engine credentials + or a google.auth.transport.requests.Request client for external + account credentials. + quota_project_id (Optional[str]): The project ID used for + quota and billing. + default_scopes (Optional[Sequence[str]]): Default scopes passed by a + Google client library. Use 'scopes' for user-defined scopes. + Returns: + Tuple[~google.auth.credentials.Credentials, Optional[str]]: + the current environment's credentials and project ID. Project ID + may be None, which indicates that the Project ID could not be + ascertained from the environment. + + Raises: + ~google.auth.exceptions.DefaultCredentialsError: + If no credentials were found, or if the credentials found were + invalid. + """ + from google.auth.credentials import with_scopes_if_required + from google.auth.credentials import CredentialsWithQuotaProject + + explicit_project_id = os.environ.get( + environment_vars.PROJECT, os.environ.get(environment_vars.LEGACY_PROJECT) + ) + + checkers = ( + # Avoid passing scopes here to prevent passing scopes to user credentials. + # with_scopes_if_required() below will ensure scopes/default scopes are + # safely set on the returned credentials since requires_scopes will + # guard against setting scopes on user credentials. + lambda: _get_explicit_environ_credentials(quota_project_id=quota_project_id), + lambda: _get_gcloud_sdk_credentials(quota_project_id=quota_project_id), + _get_gae_credentials, + lambda: _get_gce_credentials(request, quota_project_id=quota_project_id), + ) + + for checker in checkers: + credentials, project_id = checker() + if credentials is not None: + credentials = with_scopes_if_required( + credentials, scopes, default_scopes=default_scopes + ) + + effective_project_id = explicit_project_id or project_id + + # For external account credentials, scopes are required to determine + # the project ID. Try to get the project ID again if not yet + # determined. 
+ if not effective_project_id and callable( + getattr(credentials, "get_project_id", None) + ): + if request is None: + import google.auth.transport.requests + + request = google.auth.transport.requests.Request() + effective_project_id = credentials.get_project_id(request=request) + + if quota_project_id and isinstance( + credentials, CredentialsWithQuotaProject + ): + credentials = credentials.with_quota_project(quota_project_id) + + if not effective_project_id: + _LOGGER.warning( + "No project ID could be determined. Consider running " + "`gcloud config set project` or setting the %s " + "environment variable", + environment_vars.PROJECT, + ) + return credentials, effective_project_id + + raise exceptions.DefaultCredentialsError(_CLOUD_SDK_MISSING_CREDENTIALS) diff --git a/.venv/lib/python3.12/site-packages/google/auth/_default_async.py b/.venv/lib/python3.12/site-packages/google/auth/_default_async.py new file mode 100644 index 00000000..2e53e208 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/_default_async.py @@ -0,0 +1,282 @@ +# Copyright 2020 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Application default credentials. + +Implements application default credentials and project ID detection. +""" + +import io +import json +import os + +from google.auth import _default +from google.auth import environment_vars +from google.auth import exceptions + + +def load_credentials_from_file(filename, scopes=None, quota_project_id=None): + """Loads Google credentials from a file. + + The credentials file must be a service account key or stored authorized + user credentials. + + Args: + filename (str): The full path to the credentials file. + scopes (Optional[Sequence[str]]): The list of scopes for the credentials. If + specified, the credentials will automatically be scoped if + necessary + quota_project_id (Optional[str]): The project ID used for + quota and billing. + + Returns: + Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded + credentials and the project ID. Authorized user credentials do not + have the project ID information. + + Raises: + google.auth.exceptions.DefaultCredentialsError: if the file is in the + wrong format or is missing. + """ + if not os.path.exists(filename): + raise exceptions.DefaultCredentialsError( + "File {} was not found.".format(filename) + ) + + with io.open(filename, "r") as file_obj: + try: + info = json.load(file_obj) + except ValueError as caught_exc: + new_exc = exceptions.DefaultCredentialsError( + "File {} is not a valid json file.".format(filename), caught_exc + ) + raise new_exc from caught_exc + + # The type key should indicate that the file is either a service account + # credentials file or an authorized user credentials file. 
+ credential_type = info.get("type") + + if credential_type == _default._AUTHORIZED_USER_TYPE: + from google.oauth2 import _credentials_async as credentials + + try: + credentials = credentials.Credentials.from_authorized_user_info( + info, scopes=scopes + ) + except ValueError as caught_exc: + msg = "Failed to load authorized user credentials from {}".format(filename) + new_exc = exceptions.DefaultCredentialsError(msg, caught_exc) + raise new_exc from caught_exc + if quota_project_id: + credentials = credentials.with_quota_project(quota_project_id) + if not credentials.quota_project_id: + _default._warn_about_problematic_credentials(credentials) + return credentials, None + + elif credential_type == _default._SERVICE_ACCOUNT_TYPE: + from google.oauth2 import _service_account_async as service_account + + try: + credentials = service_account.Credentials.from_service_account_info( + info, scopes=scopes + ).with_quota_project(quota_project_id) + except ValueError as caught_exc: + msg = "Failed to load service account credentials from {}".format(filename) + new_exc = exceptions.DefaultCredentialsError(msg, caught_exc) + raise new_exc from caught_exc + return credentials, info.get("project_id") + + else: + raise exceptions.DefaultCredentialsError( + "The file {file} does not have a valid type. " + "Type is {type}, expected one of {valid_types}.".format( + file=filename, type=credential_type, valid_types=_default._VALID_TYPES + ) + ) + + +def _get_gcloud_sdk_credentials(quota_project_id=None): + """Gets the credentials and project ID from the Cloud SDK.""" + from google.auth import _cloud_sdk + + # Check if application default credentials exist. + credentials_filename = _cloud_sdk.get_application_default_credentials_path() + + if not os.path.isfile(credentials_filename): + return None, None + + credentials, project_id = load_credentials_from_file( + credentials_filename, quota_project_id=quota_project_id + ) + + if not project_id: + project_id = _cloud_sdk.get_project_id() + + return credentials, project_id + + +def _get_explicit_environ_credentials(quota_project_id=None): + """Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment + variable.""" + from google.auth import _cloud_sdk + + cloud_sdk_adc_path = _cloud_sdk.get_application_default_credentials_path() + explicit_file = os.environ.get(environment_vars.CREDENTIALS) + + if explicit_file is not None and explicit_file == cloud_sdk_adc_path: + # Cloud sdk flow calls gcloud to fetch project id, so if the explicit + # file path is cloud sdk credentials path, then we should fall back + # to cloud sdk flow, otherwise project id cannot be obtained. + return _get_gcloud_sdk_credentials(quota_project_id=quota_project_id) + + if explicit_file is not None: + credentials, project_id = load_credentials_from_file( + os.environ[environment_vars.CREDENTIALS], quota_project_id=quota_project_id + ) + + return credentials, project_id + + else: + return None, None + + +def _get_gae_credentials(): + """Gets Google App Engine App Identity credentials and project ID.""" + # While this library is normally bundled with app_engine, there are + # some cases where it's not available, so we tolerate ImportError. + + return _default._get_gae_credentials() + + +def _get_gce_credentials(request=None): + """Gets credentials and project ID from the GCE Metadata Service.""" + # Ping requires a transport, but we want application default credentials + # to require no arguments. So, we'll use the _http_client transport which + # uses http.client. 
This is only acceptable because the metadata server + # doesn't do SSL and never requires proxies. + + # While this library is normally bundled with compute_engine, there are + # some cases where it's not available, so we tolerate ImportError. + + return _default._get_gce_credentials(request) + + +def default_async(scopes=None, request=None, quota_project_id=None): + """Gets the default credentials for the current environment. + + `Application Default Credentials`_ provides an easy way to obtain + credentials to call Google APIs for server-to-server or local applications. + This function acquires credentials from the environment in the following + order: + + 1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set + to the path of a valid service account JSON private key file, then it is + loaded and returned. The project ID returned is the project ID defined + in the service account file if available (some older files do not + contain project ID information). + 2. If the `Google Cloud SDK`_ is installed and has application default + credentials set they are loaded and returned. + + To enable application default credentials with the Cloud SDK run:: + + gcloud auth application-default login + + If the Cloud SDK has an active project, the project ID is returned. The + active project can be set using:: + + gcloud config set project + + 3. If the application is running in the `App Engine standard environment`_ + (first generation) then the credentials and project ID from the + `App Identity Service`_ are used. + 4. If the application is running in `Compute Engine`_ or `Cloud Run`_ or + the `App Engine flexible environment`_ or the `App Engine standard + environment`_ (second generation) then the credentials and project ID + are obtained from the `Metadata Service`_. + 5. If no credentials are found, + :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised. + + .. _Application Default Credentials: https://developers.google.com\ + /identity/protocols/application-default-credentials + .. _Google Cloud SDK: https://cloud.google.com/sdk + .. _App Engine standard environment: https://cloud.google.com/appengine + .. _App Identity Service: https://cloud.google.com/appengine/docs/python\ + /appidentity/ + .. _Compute Engine: https://cloud.google.com/compute + .. _App Engine flexible environment: https://cloud.google.com\ + /appengine/flexible + .. _Metadata Service: https://cloud.google.com/compute/docs\ + /storing-retrieving-metadata + .. _Cloud Run: https://cloud.google.com/run + + Example:: + + import google.auth + + credentials, project_id = google.auth.default() + + Args: + scopes (Sequence[str]): The list of scopes for the credentials. If + specified, the credentials will automatically be scoped if + necessary. + request (google.auth.transport.Request): An object used to make + HTTP requests. This is used to detect whether the application + is running on Compute Engine. If not specified, then it will + use the standard library http client to make requests. + quota_project_id (Optional[str]): The project ID used for + quota and billing. + Returns: + Tuple[~google.auth.credentials.Credentials, Optional[str]]: + the current environment's credentials and project ID. Project ID + may be None, which indicates that the Project ID could not be + ascertained from the environment. + + Raises: + ~google.auth.exceptions.DefaultCredentialsError: + If no credentials were found, or if the credentials found were + invalid. 
+ """ + from google.auth._credentials_async import with_scopes_if_required + from google.auth.credentials import CredentialsWithQuotaProject + + explicit_project_id = os.environ.get( + environment_vars.PROJECT, os.environ.get(environment_vars.LEGACY_PROJECT) + ) + + checkers = ( + lambda: _get_explicit_environ_credentials(quota_project_id=quota_project_id), + lambda: _get_gcloud_sdk_credentials(quota_project_id=quota_project_id), + _get_gae_credentials, + lambda: _get_gce_credentials(request), + ) + + for checker in checkers: + credentials, project_id = checker() + if credentials is not None: + credentials = with_scopes_if_required(credentials, scopes) + if quota_project_id and isinstance( + credentials, CredentialsWithQuotaProject + ): + credentials = credentials.with_quota_project(quota_project_id) + effective_project_id = explicit_project_id or project_id + if not effective_project_id: + _default._LOGGER.warning( + "No project ID could be determined. Consider running " + "`gcloud config set project` or setting the %s " + "environment variable", + environment_vars.PROJECT, + ) + return credentials, effective_project_id + + raise exceptions.DefaultCredentialsError(_default._CLOUD_SDK_MISSING_CREDENTIALS) diff --git a/.venv/lib/python3.12/site-packages/google/auth/_exponential_backoff.py b/.venv/lib/python3.12/site-packages/google/auth/_exponential_backoff.py new file mode 100644 index 00000000..89853448 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/_exponential_backoff.py @@ -0,0 +1,164 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import random +import time + +from google.auth import exceptions + +# The default amount of retry attempts +_DEFAULT_RETRY_TOTAL_ATTEMPTS = 3 + +# The default initial backoff period (1.0 second). +_DEFAULT_INITIAL_INTERVAL_SECONDS = 1.0 + +# The default randomization factor (0.1 which results in a random period ranging +# between 10% below and 10% above the retry interval). +_DEFAULT_RANDOMIZATION_FACTOR = 0.1 + +# The default multiplier value (2 which is 100% increase per back off). +_DEFAULT_MULTIPLIER = 2.0 + +"""Exponential Backoff Utility + +This is a private module that implements the exponential back off algorithm. +It can be used as a utility for code that needs to retry on failure, for example +an HTTP request. +""" + + +class _BaseExponentialBackoff: + """An exponential backoff iterator base class. + + Args: + total_attempts Optional[int]: + The maximum amount of retries that should happen. + The default value is 3 attempts. + initial_wait_seconds Optional[int]: + The amount of time to sleep in the first backoff. This parameter + should be in seconds. + The default value is 1 second. + randomization_factor Optional[float]: + The amount of jitter that should be in each backoff. For example, + a value of 0.1 will introduce a jitter range of 10% to the + current backoff period. + The default value is 0.1. + multiplier Optional[float]: + The backoff multipler. 
This adjusts how much each backoff will + increase. For example a value of 2.0 leads to a 200% backoff + on each attempt. If the initial_wait is 1.0 it would look like + this sequence [1.0, 2.0, 4.0, 8.0]. + The default value is 2.0. + """ + + def __init__( + self, + total_attempts=_DEFAULT_RETRY_TOTAL_ATTEMPTS, + initial_wait_seconds=_DEFAULT_INITIAL_INTERVAL_SECONDS, + randomization_factor=_DEFAULT_RANDOMIZATION_FACTOR, + multiplier=_DEFAULT_MULTIPLIER, + ): + if total_attempts < 1: + raise exceptions.InvalidValue( + f"total_attempts must be greater than or equal to 1 but was {total_attempts}" + ) + + self._total_attempts = total_attempts + self._initial_wait_seconds = initial_wait_seconds + + self._current_wait_in_seconds = self._initial_wait_seconds + + self._randomization_factor = randomization_factor + self._multiplier = multiplier + self._backoff_count = 0 + + @property + def total_attempts(self): + """The total amount of backoff attempts that will be made.""" + return self._total_attempts + + @property + def backoff_count(self): + """The current amount of backoff attempts that have been made.""" + return self._backoff_count + + def _reset(self): + self._backoff_count = 0 + self._current_wait_in_seconds = self._initial_wait_seconds + + def _calculate_jitter(self): + jitter_variance = self._current_wait_in_seconds * self._randomization_factor + jitter = random.uniform( + self._current_wait_in_seconds - jitter_variance, + self._current_wait_in_seconds + jitter_variance, + ) + + return jitter + + +class ExponentialBackoff(_BaseExponentialBackoff): + """An exponential backoff iterator. This can be used in a for loop to + perform requests with exponential backoff. + """ + + def __init__(self, *args, **kwargs): + super(ExponentialBackoff, self).__init__(*args, **kwargs) + + def __iter__(self): + self._reset() + return self + + def __next__(self): + if self._backoff_count >= self._total_attempts: + raise StopIteration + self._backoff_count += 1 + + if self._backoff_count <= 1: + return self._backoff_count + + jitter = self._calculate_jitter() + + time.sleep(jitter) + + self._current_wait_in_seconds *= self._multiplier + return self._backoff_count + + +class AsyncExponentialBackoff(_BaseExponentialBackoff): + """An async exponential backoff iterator. This can be used in a for loop to + perform async requests with exponential backoff. + """ + + def __init__(self, *args, **kwargs): + super(AsyncExponentialBackoff, self).__init__(*args, **kwargs) + + def __aiter__(self): + self._reset() + return self + + async def __anext__(self): + if self._backoff_count >= self._total_attempts: + raise StopAsyncIteration + self._backoff_count += 1 + + if self._backoff_count <= 1: + return self._backoff_count + + jitter = self._calculate_jitter() + + await asyncio.sleep(jitter) + + self._current_wait_in_seconds *= self._multiplier + return self._backoff_count diff --git a/.venv/lib/python3.12/site-packages/google/auth/_helpers.py b/.venv/lib/python3.12/site-packages/google/auth/_helpers.py new file mode 100644 index 00000000..a6c07f7d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/_helpers.py @@ -0,0 +1,273 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Helper functions for commonly used utilities.""" + +import base64 +import calendar +import datetime +from email.message import Message +import sys +import urllib + +from google.auth import exceptions + +# The smallest MDS cache used by this library stores tokens until 4 minutes from +# expiry. +REFRESH_THRESHOLD = datetime.timedelta(minutes=3, seconds=45) + + +def copy_docstring(source_class): + """Decorator that copies a method's docstring from another class. + + Args: + source_class (type): The class that has the documented method. + + Returns: + Callable: A decorator that will copy the docstring of the same + named method in the source class to the decorated method. + """ + + def decorator(method): + """Decorator implementation. + + Args: + method (Callable): The method to copy the docstring to. + + Returns: + Callable: the same method passed in with an updated docstring. + + Raises: + google.auth.exceptions.InvalidOperation: if the method already has a docstring. + """ + if method.__doc__: + raise exceptions.InvalidOperation("Method already has a docstring.") + + source_method = getattr(source_class, method.__name__) + method.__doc__ = source_method.__doc__ + + return method + + return decorator + + +def parse_content_type(header_value): + """Parse a 'content-type' header value to get just the plain media-type (without parameters). + + This is done using the class Message from email.message as suggested in PEP 594 + (because the cgi is now deprecated and will be removed in python 3.13, + see https://peps.python.org/pep-0594/#cgi). + + Args: + header_value (str): The value of a 'content-type' header as a string. + + Returns: + str: A string with just the lowercase media-type from the parsed 'content-type' header. + If the provided content-type is not parsable, returns 'text/plain', + the default value for textual files. + """ + m = Message() + m["content-type"] = header_value + return ( + m.get_content_type() + ) # Despite the name, actually returns just the media-type + + +def utcnow(): + """Returns the current UTC datetime. + + Returns: + datetime: The current time in UTC. + """ + # We used datetime.utcnow() before, since it's deprecated from python 3.12, + # we are using datetime.now(timezone.utc) now. "utcnow()" is offset-native + # (no timezone info), but "now()" is offset-aware (with timezone info). + # This will cause datetime comparison problem. For backward compatibility, + # we need to remove the timezone info. + now = datetime.datetime.now(datetime.timezone.utc) + now = now.replace(tzinfo=None) + return now + + +def datetime_to_secs(value): + """Convert a datetime object to the number of seconds since the UNIX epoch. + + Args: + value (datetime): The datetime to convert. + + Returns: + int: The number of seconds since the UNIX epoch. + """ + return calendar.timegm(value.utctimetuple()) + + +def to_bytes(value, encoding="utf-8"): + """Converts a string value to bytes, if necessary. + + Args: + value (Union[str, bytes]): The value to be converted. + encoding (str): The encoding to use to convert unicode to bytes. + Defaults to "utf-8". 
+
+    Returns:
+        bytes: The original value converted to bytes (if unicode) or as
+        passed in if it started out as bytes.
+
+    Raises:
+        google.auth.exceptions.InvalidValue: If the value could not be converted to bytes.
+    """
+    result = value.encode(encoding) if isinstance(value, str) else value
+    if isinstance(result, bytes):
+        return result
+    else:
+        raise exceptions.InvalidValue(
+            "{0!r} could not be converted to bytes".format(value)
+        )
+
+
+def from_bytes(value):
+    """Converts bytes to a string value, if necessary.
+
+    Args:
+        value (Union[str, bytes]): The value to be converted.
+
+    Returns:
+        str: The original value converted to unicode (if bytes) or as passed in
+        if it started out as unicode.
+
+    Raises:
+        google.auth.exceptions.InvalidValue: If the value could not be converted to unicode.
+    """
+    result = value.decode("utf-8") if isinstance(value, bytes) else value
+    if isinstance(result, str):
+        return result
+    else:
+        raise exceptions.InvalidValue(
+            "{0!r} could not be converted to unicode".format(value)
+        )
+
+
+def update_query(url, params, remove=None):
+    """Updates a URL's query parameters.
+
+    Replaces any current values if they are already present in the URL.
+
+    Args:
+        url (str): The URL to update.
+        params (Mapping[str, str]): A mapping of query parameter
+            keys to values.
+        remove (Sequence[str]): Parameters to remove from the query string.
+
+    Returns:
+        str: The URL with updated query parameters.
+
+    Examples:
+
+        >>> url = 'http://example.com?a=1'
+        >>> update_query(url, {'a': '2'})
+        http://example.com?a=2
+        >>> update_query(url, {'b': '3'})
+        http://example.com?a=1&b=3
+        >>> update_query(url, {'b': '3'}, remove=['a'])
+        http://example.com?b=3
+
+    """
+    if remove is None:
+        remove = []
+
+    # Split the URL into parts.
+    parts = urllib.parse.urlparse(url)
+    # Parse the query string.
+    query_params = urllib.parse.parse_qs(parts.query)
+    # Update the query parameters with the new parameters.
+    query_params.update(params)
+    # Remove any values specified in remove.
+    query_params = {
+        key: value for key, value in query_params.items() if key not in remove
+    }
+    # Re-encode the query string.
+    new_query = urllib.parse.urlencode(query_params, doseq=True)
+    # Unsplit the URL.
+    new_parts = parts._replace(query=new_query)
+    return urllib.parse.urlunparse(new_parts)
+
+
+def scopes_to_string(scopes):
+    """Converts scope value to a string suitable for sending to OAuth 2.0
+    authorization servers.
+
+    Args:
+        scopes (Sequence[str]): The sequence of scopes to convert.
+
+    Returns:
+        str: The scopes formatted as a single string.
+    """
+    return " ".join(scopes)
+
+
+def string_to_scopes(scopes):
+    """Converts a stringified scopes value to a list.
+
+    Args:
+        scopes (Union[Sequence, str]): The string of space-separated scopes
+            to convert.
+    Returns:
+        Sequence(str): The separated scopes.
+    """
+    if not scopes:
+        return []
+
+    return scopes.split(" ")
+
+
+def padded_urlsafe_b64decode(value):
+    """Decodes base64 strings lacking padding characters.
+
+    Google infrastructure tends to omit the base64 padding characters.
+
+    Args:
+        value (Union[str, bytes]): The encoded value.
+
+    Returns:
+        bytes: The decoded value.
+    """
+    b64string = to_bytes(value)
+    padded = b64string + b"=" * (-len(b64string) % 4)
+    return base64.urlsafe_b64decode(padded)
+
+
+def unpadded_urlsafe_b64encode(value):
+    """Encodes base64 strings removing any padding characters.
+
+    `rfc 7515`_ defines Base64url to NOT include any padding
+    characters, but the stdlib doesn't do that by default.
+
+    .. _rfc 7515: https://tools.ietf.org/html/rfc7515#page-6
+
+    Args:
+        value (Union[str, bytes]): The bytes-like value to encode.
+
+    Returns:
+        Union[str, bytes]: The encoded value.
+    """
+    return base64.urlsafe_b64encode(value).rstrip(b"=")
+
+
+def is_python_3():
+    """Check whether the Python interpreter is Python 3.
+
+    Returns:
+        bool: True if the Python interpreter is Python 3 and False otherwise.
+    """
+    return sys.version_info > (3, 0)
diff --git a/.venv/lib/python3.12/site-packages/google/auth/_jwt_async.py b/.venv/lib/python3.12/site-packages/google/auth/_jwt_async.py
new file mode 100644
index 00000000..3a1abc5b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/auth/_jwt_async.py
@@ -0,0 +1,164 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""JSON Web Tokens
+
+Provides support for creating (encoding) and verifying (decoding) JWTs,
+especially JWTs generated and consumed by Google infrastructure.
+
+See `rfc7519`_ for more details on JWTs.
+
+To encode a JWT use :func:`encode`::
+
+    from google.auth import crypt
+    from google.auth import jwt_async
+
+    signer = crypt.Signer(private_key)
+    payload = {'some': 'payload'}
+    encoded = jwt_async.encode(signer, payload)
+
+To decode a JWT and verify claims use :func:`decode`::
+
+    claims = jwt_async.decode(encoded, certs=public_certs)
+
+You can also skip verification::
+
+    claims = jwt_async.decode(encoded, verify=False)
+
+.. _rfc7519: https://tools.ietf.org/html/rfc7519
+
+
+NOTE: This async support is experimental and marked internal. This surface may
+change in minor releases.
+"""
+
+from google.auth import _credentials_async
+from google.auth import jwt
+
+
+def encode(signer, payload, header=None, key_id=None):
+    """Make a signed JWT.
+
+    Args:
+        signer (google.auth.crypt.Signer): The signer used to sign the JWT.
+        payload (Mapping[str, str]): The JWT payload.
+        header (Mapping[str, str]): Additional JWT header payload.
+        key_id (str): The key id to add to the JWT header. If the
+            signer has a key id it will be used as the default. If this is
+            specified it will override the signer's key id.
+
+    Returns:
+        bytes: The encoded JWT.
+    """
+    return jwt.encode(signer, payload, header, key_id)
+
+
+def decode(token, certs=None, verify=True, audience=None):
+    """Decode and verify a JWT.
+
+    Args:
+        token (str): The encoded JWT.
+        certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The
+            certificate used to validate the JWT signature. If bytes or string,
+            it must be the public key certificate in PEM format. If a mapping,
+            it must be a mapping of key IDs to public key certificates in PEM
+            format. The mapping must contain the same key ID that's specified
+            in the token's header.
+        verify (bool): Whether to perform signature and claim validation.
+            Verification is done by default.
+        audience (str): The audience claim, 'aud', that this JWT should
+            contain. If None then the JWT's 'aud' parameter is not verified.
+
+    Returns:
+        Mapping[str, str]: The deserialized JSON payload in the JWT.
+ + Raises: + ValueError: if any verification checks failed. + """ + + return jwt.decode(token, certs, verify, audience) + + +class Credentials( + jwt.Credentials, _credentials_async.Signing, _credentials_async.Credentials +): + """Credentials that use a JWT as the bearer token. + + These credentials require an "audience" claim. This claim identifies the + intended recipient of the bearer token. + + The constructor arguments determine the claims for the JWT that is + sent with requests. Usually, you'll construct these credentials with + one of the helper constructors as shown in the next section. + + To create JWT credentials using a Google service account private key + JSON file:: + + audience = 'https://pubsub.googleapis.com/google.pubsub.v1.Publisher' + credentials = jwt_async.Credentials.from_service_account_file( + 'service-account.json', + audience=audience) + + If you already have the service account file loaded and parsed:: + + service_account_info = json.load(open('service_account.json')) + credentials = jwt_async.Credentials.from_service_account_info( + service_account_info, + audience=audience) + + Both helper methods pass on arguments to the constructor, so you can + specify the JWT claims:: + + credentials = jwt_async.Credentials.from_service_account_file( + 'service-account.json', + audience=audience, + additional_claims={'meta': 'data'}) + + You can also construct the credentials directly if you have a + :class:`~google.auth.crypt.Signer` instance:: + + credentials = jwt_async.Credentials( + signer, + issuer='your-issuer', + subject='your-subject', + audience=audience) + + The claims are considered immutable. If you want to modify the claims, + you can easily create another instance using :meth:`with_claims`:: + + new_audience = ( + 'https://pubsub.googleapis.com/google.pubsub.v1.Subscriber') + new_credentials = credentials.with_claims(audience=new_audience) + """ + + +class OnDemandCredentials( + jwt.OnDemandCredentials, _credentials_async.Signing, _credentials_async.Credentials +): + """On-demand JWT credentials. + + Like :class:`Credentials`, this class uses a JWT as the bearer token for + authentication. However, this class does not require the audience at + construction time. Instead, it will generate a new token on-demand for + each request using the request URI as the audience. It caches tokens + so that multiple requests to the same URI do not incur the overhead + of generating a new token every time. + + This behavior is especially useful for `gRPC`_ clients. A gRPC service may + have multiple audience and gRPC clients may not know all of the audiences + required for accessing a particular service. With these credentials, + no knowledge of the audiences is required ahead of time. + + .. _grpc: http://www.grpc.io/ + """ diff --git a/.venv/lib/python3.12/site-packages/google/auth/_oauth2client.py b/.venv/lib/python3.12/site-packages/google/auth/_oauth2client.py new file mode 100644 index 00000000..8b83ff23 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/_oauth2client.py @@ -0,0 +1,167 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helpers for transitioning from oauth2client to google-auth.
+
+.. warning::
+    This module is private as it is intended to assist first-party downstream
+    clients with the transition from oauth2client to google-auth.
+"""
+
+from __future__ import absolute_import
+
+from google.auth import _helpers
+import google.auth.app_engine
+import google.auth.compute_engine
+import google.oauth2.credentials
+import google.oauth2.service_account
+
+try:
+    import oauth2client.client  # type: ignore
+    import oauth2client.contrib.gce  # type: ignore
+    import oauth2client.service_account  # type: ignore
+except ImportError as caught_exc:
+    raise ImportError("oauth2client is not installed.") from caught_exc
+
+try:
+    import oauth2client.contrib.appengine  # type: ignore
+
+    _HAS_APPENGINE = True
+except ImportError:
+    _HAS_APPENGINE = False
+
+
+_CONVERT_ERROR_TMPL = "Unable to convert {} to a google-auth credentials class."
+
+
+def _convert_oauth2_credentials(credentials):
+    """Converts to :class:`google.oauth2.credentials.Credentials`.
+
+    Args:
+        credentials (Union[oauth2client.client.OAuth2Credentials,
+            oauth2client.client.GoogleCredentials]): The credentials to
+            convert.
+
+    Returns:
+        google.oauth2.credentials.Credentials: The converted credentials.
+    """
+    new_credentials = google.oauth2.credentials.Credentials(
+        token=credentials.access_token,
+        refresh_token=credentials.refresh_token,
+        token_uri=credentials.token_uri,
+        client_id=credentials.client_id,
+        client_secret=credentials.client_secret,
+        scopes=credentials.scopes,
+    )
+
+    new_credentials._expires = credentials.token_expiry
+
+    return new_credentials
+
+
+def _convert_service_account_credentials(credentials):
+    """Converts to :class:`google.oauth2.service_account.Credentials`.
+
+    Args:
+        credentials (Union[
+            oauth2client.service_account.ServiceAccountCredentials,
+            oauth2client.service_account._JWTAccessCredentials]): The
+            credentials to convert.
+
+    Returns:
+        google.oauth2.service_account.Credentials: The converted credentials.
+    """
+    info = credentials.serialization_data.copy()
+    info["token_uri"] = credentials.token_uri
+    return google.oauth2.service_account.Credentials.from_service_account_info(info)
+
+
+def _convert_gce_app_assertion_credentials(credentials):
+    """Converts to :class:`google.auth.compute_engine.Credentials`.
+
+    Args:
+        credentials (oauth2client.contrib.gce.AppAssertionCredentials): The
+            credentials to convert.
+
+    Returns:
+        google.auth.compute_engine.Credentials: The converted credentials.
+    """
+    return google.auth.compute_engine.Credentials(
+        service_account_email=credentials.service_account_email
+    )
+
+
+def _convert_appengine_app_assertion_credentials(credentials):
+    """Converts to :class:`google.auth.app_engine.Credentials`.
+
+    Args:
+        credentials (oauth2client.contrib.appengine.AppAssertionCredentials):
+            The credentials to convert.
+
+    Returns:
+        google.auth.app_engine.Credentials: The converted credentials.
+ """ + # pylint: disable=invalid-name + return google.auth.app_engine.Credentials( + scopes=_helpers.string_to_scopes(credentials.scope), + service_account_id=credentials.service_account_id, + ) + + +_CLASS_CONVERSION_MAP = { + oauth2client.client.OAuth2Credentials: _convert_oauth2_credentials, + oauth2client.client.GoogleCredentials: _convert_oauth2_credentials, + oauth2client.service_account.ServiceAccountCredentials: _convert_service_account_credentials, + oauth2client.service_account._JWTAccessCredentials: _convert_service_account_credentials, + oauth2client.contrib.gce.AppAssertionCredentials: _convert_gce_app_assertion_credentials, +} + +if _HAS_APPENGINE: + _CLASS_CONVERSION_MAP[ + oauth2client.contrib.appengine.AppAssertionCredentials + ] = _convert_appengine_app_assertion_credentials + + +def convert(credentials): + """Convert oauth2client credentials to google-auth credentials. + + This class converts: + + - :class:`oauth2client.client.OAuth2Credentials` to + :class:`google.oauth2.credentials.Credentials`. + - :class:`oauth2client.client.GoogleCredentials` to + :class:`google.oauth2.credentials.Credentials`. + - :class:`oauth2client.service_account.ServiceAccountCredentials` to + :class:`google.oauth2.service_account.Credentials`. + - :class:`oauth2client.service_account._JWTAccessCredentials` to + :class:`google.oauth2.service_account.Credentials`. + - :class:`oauth2client.contrib.gce.AppAssertionCredentials` to + :class:`google.auth.compute_engine.Credentials`. + - :class:`oauth2client.contrib.appengine.AppAssertionCredentials` to + :class:`google.auth.app_engine.Credentials`. + + Returns: + google.auth.credentials.Credentials: The converted credentials. + + Raises: + ValueError: If the credentials could not be converted. + """ + + credentials_class = type(credentials) + + try: + return _CLASS_CONVERSION_MAP[credentials_class](credentials) + except KeyError as caught_exc: + new_exc = ValueError(_CONVERT_ERROR_TMPL.format(credentials_class)) + raise new_exc from caught_exc diff --git a/.venv/lib/python3.12/site-packages/google/auth/_refresh_worker.py b/.venv/lib/python3.12/site-packages/google/auth/_refresh_worker.py new file mode 100644 index 00000000..674032d8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/_refresh_worker.py @@ -0,0 +1,109 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import logging +import threading + +import google.auth.exceptions as e + +_LOGGER = logging.getLogger(__name__) + + +class RefreshThreadManager: + """ + Organizes exactly one background job that refresh a token. + """ + + def __init__(self): + """Initializes the manager.""" + + self._worker = None + self._lock = threading.Lock() # protects access to worker threads. + + def start_refresh(self, cred, request): + """Starts a refresh thread for the given credentials. + The credentials are refreshed using the request parameter. + request and cred MUST not be None + + Returns True if a background refresh was kicked off. False otherwise. 
+ + Args: + cred: A credentials object. + request: A request object. + Returns: + bool + """ + if cred is None or request is None: + raise e.InvalidValue( + "Unable to start refresh. cred and request must be valid and instantiated objects." + ) + + with self._lock: + if self._worker is not None and self._worker._error_info is not None: + return False + + if self._worker is None or not self._worker.is_alive(): # pragma: NO COVER + self._worker = RefreshThread(cred=cred, request=copy.deepcopy(request)) + self._worker.start() + return True + + def clear_error(self): + """ + Removes any errors that were stored from previous background refreshes. + """ + with self._lock: + if self._worker: + self._worker._error_info = None + + def __getstate__(self): + """Pickle helper that serializes the _lock attribute.""" + state = self.__dict__.copy() + state["_lock"] = None + return state + + def __setstate__(self, state): + """Pickle helper that deserializes the _lock attribute.""" + state["_lock"] = threading.Lock() + self.__dict__.update(state) + + +class RefreshThread(threading.Thread): + """ + Thread that refreshes credentials. + """ + + def __init__(self, cred, request, **kwargs): + """Initializes the thread. + + Args: + cred: A Credential object to refresh. + request: A Request object used to perform a credential refresh. + **kwargs: Additional keyword arguments. + """ + + super().__init__(**kwargs) + self._cred = cred + self._request = request + self._error_info = None + + def run(self): + """ + Perform the credential refresh. + """ + try: + self._cred.refresh(self._request) + except Exception as err: # pragma: NO COVER + _LOGGER.error(f"Background refresh failed due to: {err}") + self._error_info = err diff --git a/.venv/lib/python3.12/site-packages/google/auth/_service_account_info.py b/.venv/lib/python3.12/site-packages/google/auth/_service_account_info.py new file mode 100644 index 00000000..6b64adca --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/_service_account_info.py @@ -0,0 +1,80 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Helper functions for loading data from a Google service account file.""" + +import io +import json + +from google.auth import crypt +from google.auth import exceptions + + +def from_dict(data, require=None, use_rsa_signer=True): + """Validates a dictionary containing Google service account data. + + Creates and returns a :class:`google.auth.crypt.Signer` instance from the + private key specified in the data. + + Args: + data (Mapping[str, str]): The service account data + require (Sequence[str]): List of keys required to be present in the + info. + use_rsa_signer (Optional[bool]): Whether to use RSA signer or EC signer. + We use RSA signer by default. + + Returns: + google.auth.crypt.Signer: A signer created from the private key in the + service account file. + + Raises: + MalformedError: if the data was in the wrong format, or if one of the + required keys is missing. 
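
+
+    Example (a sketch; ``info`` must be a mapping parsed from a service
+    account JSON file with a valid ``private_key`` entry, and the ``require``
+    keys shown are illustrative)::
+
+        import json
+
+        with open("service-account.json") as f:
+            info = json.load(f)
+        signer = from_dict(info, require=["client_email", "token_uri"])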
+ """ + keys_needed = set(require if require is not None else []) + + missing = keys_needed.difference(data.keys()) + + if missing: + raise exceptions.MalformedError( + "Service account info was not in the expected format, missing " + "fields {}.".format(", ".join(missing)) + ) + + # Create a signer. + if use_rsa_signer: + signer = crypt.RSASigner.from_service_account_info(data) + else: + signer = crypt.ES256Signer.from_service_account_info(data) + + return signer + + +def from_filename(filename, require=None, use_rsa_signer=True): + """Reads a Google service account JSON file and returns its parsed info. + + Args: + filename (str): The path to the service account .json file. + require (Sequence[str]): List of keys required to be present in the + info. + use_rsa_signer (Optional[bool]): Whether to use RSA signer or EC signer. + We use RSA signer by default. + + Returns: + Tuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified + info and a signer instance. + """ + with io.open(filename, "r", encoding="utf-8") as json_file: + data = json.load(json_file) + return data, from_dict(data, require=require, use_rsa_signer=use_rsa_signer) diff --git a/.venv/lib/python3.12/site-packages/google/auth/aio/__init__.py b/.venv/lib/python3.12/site-packages/google/auth/aio/__init__.py new file mode 100644 index 00000000..331708cb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/aio/__init__.py @@ -0,0 +1,25 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Auth AIO Library for Python.""" + +import logging + +from google.auth import version as google_auth_version + + +__version__ = google_auth_version.__version__ + +# Set default logging handler to avoid "No handler found" warnings. +logging.getLogger(__name__).addHandler(logging.NullHandler()) diff --git a/.venv/lib/python3.12/site-packages/google/auth/aio/credentials.py b/.venv/lib/python3.12/site-packages/google/auth/aio/credentials.py new file mode 100644 index 00000000..3bc6a5a6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/aio/credentials.py @@ -0,0 +1,143 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Interfaces for asynchronous credentials.""" + + +from google.auth import _helpers +from google.auth import exceptions +from google.auth._credentials_base import _BaseCredentials + + +class Credentials(_BaseCredentials): + """Base class for all asynchronous credentials. 
+
+    All credentials have a :attr:`token` that is used for authentication and
+    may also optionally set an :attr:`expiry` to indicate when the token will
+    no longer be valid.
+
+    Most credentials will be :attr:`invalid` until :meth:`refresh` is called.
+    Credentials can do this automatically before the first HTTP request in
+    :meth:`before_request`.
+
+    Although the token and expiration will change as the credentials are
+    :meth:`refreshed <refresh>` and used, credentials should be considered
+    immutable. Various credentials will accept configuration such as private
+    keys, scopes, and other options. These options are not changeable after
+    construction. Some classes will provide mechanisms to copy the credentials
+    with modifications such as :meth:`ScopedCredentials.with_scopes`.
+    """
+
+    def __init__(self):
+        super(Credentials, self).__init__()
+
+    async def apply(self, headers, token=None):
+        """Apply the token to the authentication header.
+
+        Args:
+            headers (Mapping): The HTTP request headers.
+            token (Optional[str]): If specified, overrides the current access
+                token.
+        """
+        self._apply(headers, token=token)
+
+    async def refresh(self, request):
+        """Refreshes the access token.
+
+        Args:
+            request (google.auth.aio.transport.Request): The object used to make
+                HTTP requests.
+
+        Raises:
+            google.auth.exceptions.RefreshError: If the credentials could
+                not be refreshed.
+        """
+        raise NotImplementedError("Refresh must be implemented")
+
+    async def before_request(self, request, method, url, headers):
+        """Performs credential-specific before request logic.
+
+        Refreshes the credentials if necessary, then calls :meth:`apply` to
+        apply the token to the authentication header.
+
+        Args:
+            request (google.auth.aio.transport.Request): The object used to make
+                HTTP requests.
+            method (str): The request's HTTP method or the RPC method being
+                invoked.
+            url (str): The request's URI or the RPC service's URI.
+            headers (Mapping): The request's headers.
+        """
+        await self.apply(headers)
+
+
+class StaticCredentials(Credentials):
+    """Asynchronous Credentials representing an immutable access token.
+
+    The credentials are considered immutable, except for the token, which can
+    be configured in the constructor::
+
+        credentials = StaticCredentials(token="token123")
+
+    StaticCredentials does not support :meth:`refresh` and assumes that the
+    configured token is valid and not expired. StaticCredentials will never
+    attempt to refresh the token.
+    """
+
+    def __init__(self, token):
+        """
+        Args:
+            token (str): The access token.
+        """
+        super(StaticCredentials, self).__init__()
+        self.token = token
+
+    @_helpers.copy_docstring(Credentials)
+    async def refresh(self, request):
+        raise exceptions.InvalidOperation("Static credentials cannot be refreshed.")
+
+    # Note: before_request should never try to refresh access tokens.
+    # StaticCredentials intentionally does not support it.
+    @_helpers.copy_docstring(Credentials)
+    async def before_request(self, request, method, url, headers):
+        await self.apply(headers)
+
+
+class AnonymousCredentials(Credentials):
+    """Asynchronous Credentials that do not provide any authentication information.
+
+    These are useful in the case of services that support anonymous access or
+    local service emulators that do not use credentials.
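
+
+    Example (a minimal usage sketch; must run inside a coroutine)::
+
+        credentials = AnonymousCredentials()
+        headers = {}
+        await credentials.apply(headers)  # leaves headers unchanged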
+ """ + + async def refresh(self, request): + """Raises :class:``InvalidOperation``, anonymous credentials cannot be + refreshed.""" + raise exceptions.InvalidOperation("Anonymous credentials cannot be refreshed.") + + async def apply(self, headers, token=None): + """Anonymous credentials do nothing to the request. + + The optional ``token`` argument is not supported. + + Raises: + google.auth.exceptions.InvalidValue: If a token was specified. + """ + if token is not None: + raise exceptions.InvalidValue("Anonymous credentials don't support tokens.") + + async def before_request(self, request, method, url, headers): + """Anonymous credentials do nothing to the request.""" + pass diff --git a/.venv/lib/python3.12/site-packages/google/auth/aio/transport/__init__.py b/.venv/lib/python3.12/site-packages/google/auth/aio/transport/__init__.py new file mode 100644 index 00000000..166a3be5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/aio/transport/__init__.py @@ -0,0 +1,144 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Transport - Asynchronous HTTP client library support. + +:mod:`google.auth.aio` is designed to work with various asynchronous client libraries such +as aiohttp. In order to work across these libraries with different +interfaces some abstraction is needed. + +This module provides two interfaces that are implemented by transport adapters +to support HTTP libraries. :class:`Request` defines the interface expected by +:mod:`google.auth` to make asynchronous requests. :class:`Response` defines the interface +for the return value of :class:`Request`. +""" + +import abc +from typing import AsyncGenerator, Mapping, Optional + +import google.auth.transport + + +_DEFAULT_TIMEOUT_SECONDS = 180 + +DEFAULT_RETRYABLE_STATUS_CODES = google.auth.transport.DEFAULT_RETRYABLE_STATUS_CODES +"""Sequence[int]: HTTP status codes indicating a request can be retried. +""" + + +DEFAULT_MAX_RETRY_ATTEMPTS = 3 +"""int: How many times to retry a request.""" + + +class Response(metaclass=abc.ABCMeta): + """Asynchronous HTTP Response Interface.""" + + @property + @abc.abstractmethod + def status_code(self) -> int: + """ + The HTTP response status code. + + Returns: + int: The HTTP response status code. + + """ + raise NotImplementedError("status_code must be implemented.") + + @property + @abc.abstractmethod + def headers(self) -> Mapping[str, str]: + """The HTTP response headers. + + Returns: + Mapping[str, str]: The HTTP response headers. + """ + raise NotImplementedError("headers must be implemented.") + + @abc.abstractmethod + async def content(self, chunk_size: int) -> AsyncGenerator[bytes, None]: + """The raw response content. + + Args: + chunk_size (int): The size of each chunk. + + Yields: + AsyncGenerator[bytes, None]: An asynchronous generator yielding + response chunks as bytes. 
+ """ + raise NotImplementedError("content must be implemented.") + + @abc.abstractmethod + async def read(self) -> bytes: + """Read the entire response content as bytes. + + Returns: + bytes: The entire response content. + """ + raise NotImplementedError("read must be implemented.") + + @abc.abstractmethod + async def close(self): + """Close the response after it is fully consumed to resource.""" + raise NotImplementedError("close must be implemented.") + + +class Request(metaclass=abc.ABCMeta): + """Interface for a callable that makes HTTP requests. + + Specific transport implementations should provide an implementation of + this that adapts their specific request / response API. + + .. automethod:: __call__ + """ + + @abc.abstractmethod + async def __call__( + self, + url: str, + method: str, + body: Optional[bytes], + headers: Optional[Mapping[str, str]], + timeout: float, + **kwargs + ) -> Response: + """Make an HTTP request. + + Args: + url (str): The URI to be requested. + method (str): The HTTP method to use for the request. Defaults + to 'GET'. + body (Optional[bytes]): The payload / body in HTTP request. + headers (Mapping[str, str]): Request headers. + timeout (float): The number of seconds to wait for a + response from the server. If not specified or if None, the + transport-specific default timeout will be used. + kwargs: Additional arguments passed on to the transport's + request method. + + Returns: + google.auth.aio.transport.Response: The HTTP response. + + Raises: + google.auth.exceptions.TransportError: If any exception occurred. + """ + # pylint: disable=redundant-returns-doc, missing-raises-doc + # (pylint doesn't play well with abstract docstrings.) + raise NotImplementedError("__call__ must be implemented.") + + async def close(self) -> None: + """ + Close the underlying session. + """ + raise NotImplementedError("close must be implemented.") diff --git a/.venv/lib/python3.12/site-packages/google/auth/aio/transport/aiohttp.py b/.venv/lib/python3.12/site-packages/google/auth/aio/transport/aiohttp.py new file mode 100644 index 00000000..074d1491 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/aio/transport/aiohttp.py @@ -0,0 +1,184 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Transport adapter for Asynchronous HTTP Requests based on aiohttp. +""" + +import asyncio +from typing import AsyncGenerator, Mapping, Optional + +try: + import aiohttp # type: ignore +except ImportError as caught_exc: # pragma: NO COVER + raise ImportError( + "The aiohttp library is not installed from please install the aiohttp package to use the aiohttp transport." + ) from caught_exc + +from google.auth import _helpers +from google.auth import exceptions +from google.auth.aio import transport + + +class Response(transport.Response): + """ + Represents an HTTP response and its data. It is returned by ``google.auth.aio.transport.sessions.AsyncAuthorizedSession``. 
+
+    Args:
+        response (aiohttp.ClientResponse): An instance of aiohttp.ClientResponse.
+
+    Attributes:
+        status_code (int): The HTTP status code of the response.
+        headers (Mapping[str, str]): The HTTP headers of the response.
+    """
+
+    def __init__(self, response: aiohttp.ClientResponse):
+        self._response = response
+
+    @property
+    @_helpers.copy_docstring(transport.Response)
+    def status_code(self) -> int:
+        return self._response.status
+
+    @property
+    @_helpers.copy_docstring(transport.Response)
+    def headers(self) -> Mapping[str, str]:
+        return {key: value for key, value in self._response.headers.items()}
+
+    @_helpers.copy_docstring(transport.Response)
+    async def content(self, chunk_size: int = 1024) -> AsyncGenerator[bytes, None]:
+        try:
+            async for chunk in self._response.content.iter_chunked(
+                chunk_size
+            ):  # pragma: no branch
+                yield chunk
+        except aiohttp.ClientPayloadError as exc:
+            raise exceptions.ResponseError(
+                "Failed to read from the payload stream."
+            ) from exc
+
+    @_helpers.copy_docstring(transport.Response)
+    async def read(self) -> bytes:
+        try:
+            return await self._response.read()
+        except aiohttp.ClientResponseError as exc:
+            raise exceptions.ResponseError("Failed to read the response body.") from exc
+
+    @_helpers.copy_docstring(transport.Response)
+    async def close(self):
+        self._response.close()
+
+
+class Request(transport.Request):
+    """Asynchronous aiohttp request adapter.
+
+    This class is used internally for making requests using aiohttp
+    in a consistent way. If you use :class:`google.auth.aio.transport.sessions.AsyncAuthorizedSession`
+    you do not need to construct or use this class directly.
+
+    This class can be useful if you want to configure a Request callable
+    with a custom ``aiohttp.ClientSession`` in :class:`AsyncAuthorizedSession` or if
+    you want to manually refresh a :class:`~google.auth.aio.credentials.Credentials` instance::
+
+        import aiohttp
+        import google.auth.aio.transport.aiohttp
+
+        # Default example:
+        request = google.auth.aio.transport.aiohttp.Request()
+        await credentials.refresh(request)
+
+        # Custom aiohttp Session Example:
+        session = aiohttp.ClientSession(auto_decompress=False)
+        request = google.auth.aio.transport.aiohttp.Request(session=session)
+        auth_session = google.auth.aio.transport.sessions.AsyncAuthorizedSession(
+            credentials, auth_request=request)
+
+    Args:
+        session (aiohttp.ClientSession): An instance of :class:`aiohttp.ClientSession` used
+            to make HTTP requests. If not specified, a session will be created.
+
+    .. automethod:: __call__
+    """
+
+    def __init__(self, session: aiohttp.ClientSession = None):
+        self._session = session
+        self._closed = False
+
+    async def __call__(
+        self,
+        url: str,
+        method: str = "GET",
+        body: Optional[bytes] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        timeout: float = transport._DEFAULT_TIMEOUT_SECONDS,
+        **kwargs,
+    ) -> transport.Response:
+        """
+        Make an HTTP request using aiohttp.
+
+        Args:
+            url (str): The URL to be requested.
+            method (Optional[str]):
+                The HTTP method to use for the request. Defaults to 'GET'.
+            body (Optional[bytes]):
+                The payload or body of the HTTP request.
+            headers (Optional[Mapping[str, str]]):
+                Request headers.
+            timeout (float): The number of seconds to wait for a
+                response from the server. If not specified or if None, the
+                requests default timeout will be used.
+            kwargs: Additional arguments passed through to the underlying
+                aiohttp :meth:`aiohttp.ClientSession.request` method.
+
+        Returns:
+            google.auth.aio.transport.Response: The HTTP response.
+ + Raises: + - google.auth.exceptions.TransportError: If the request fails or if the session is closed. + - google.auth.exceptions.TimeoutError: If the request times out. + """ + + try: + if self._closed: + raise exceptions.TransportError("session is closed.") + + if not self._session: + self._session = aiohttp.ClientSession() + + client_timeout = aiohttp.ClientTimeout(total=timeout) + response = await self._session.request( + method, + url, + data=body, + headers=headers, + timeout=client_timeout, + **kwargs, + ) + return Response(response) + + except aiohttp.ClientError as caught_exc: + client_exc = exceptions.TransportError(f"Failed to send request to {url}.") + raise client_exc from caught_exc + + except asyncio.TimeoutError as caught_exc: + timeout_exc = exceptions.TimeoutError( + f"Request timed out after {timeout} seconds." + ) + raise timeout_exc from caught_exc + + async def close(self) -> None: + """ + Close the underlying aiohttp session to release the acquired resources. + """ + if not self._closed and self._session: + await self._session.close() + self._closed = True diff --git a/.venv/lib/python3.12/site-packages/google/auth/aio/transport/sessions.py b/.venv/lib/python3.12/site-packages/google/auth/aio/transport/sessions.py new file mode 100644 index 00000000..fea7cbbb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/aio/transport/sessions.py @@ -0,0 +1,268 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +from contextlib import asynccontextmanager +import functools +import time +from typing import Mapping, Optional + +from google.auth import _exponential_backoff, exceptions +from google.auth.aio import transport +from google.auth.aio.credentials import Credentials +from google.auth.exceptions import TimeoutError + +try: + from google.auth.aio.transport.aiohttp import Request as AiohttpRequest + + AIOHTTP_INSTALLED = True +except ImportError: # pragma: NO COVER + AIOHTTP_INSTALLED = False + + +@asynccontextmanager +async def timeout_guard(timeout): + """ + timeout_guard is an asynchronous context manager to apply a timeout to an asynchronous block of code. + + Args: + timeout (float): The time in seconds before the context manager times out. + + Raises: + google.auth.exceptions.TimeoutError: If the code within the context exceeds the provided timeout. + + Usage: + async with timeout_guard(10) as with_timeout: + await with_timeout(async_function()) + """ + start = time.monotonic() + total_timeout = timeout + + def _remaining_time(): + elapsed = time.monotonic() - start + remaining = total_timeout - elapsed + if remaining <= 0: + raise TimeoutError( + f"Context manager exceeded the configured timeout of {total_timeout}s." 
+ ) + return remaining + + async def with_timeout(coro): + try: + remaining = _remaining_time() + response = await asyncio.wait_for(coro, remaining) + return response + except (asyncio.TimeoutError, TimeoutError) as e: + raise TimeoutError( + f"The operation {coro} exceeded the configured timeout of {total_timeout}s." + ) from e + + try: + yield with_timeout + + finally: + _remaining_time() + + +class AsyncAuthorizedSession: + """This is an asynchronous implementation of :class:`google.auth.requests.AuthorizedSession` class. + We utilize an instance of a class that implements :class:`google.auth.aio.transport.Request` configured + by the caller or otherwise default to `google.auth.aio.transport.aiohttp.Request` if the external aiohttp + package is installed. + + A Requests Session class with credentials. + + This class is used to perform asynchronous requests to API endpoints that require + authorization:: + + import aiohttp + from google.auth.aio.transport import sessions + + async with sessions.AsyncAuthorizedSession(credentials) as authed_session: + response = await authed_session.request( + 'GET', 'https://www.googleapis.com/storage/v1/b') + + The underlying :meth:`request` implementation handles adding the + credentials' headers to the request and refreshing credentials as needed. + + Args: + credentials (google.auth.aio.credentials.Credentials): + The credentials to add to the request. + auth_request (Optional[google.auth.aio.transport.Request]): + An instance of a class that implements + :class:`~google.auth.aio.transport.Request` used to make requests + and refresh credentials. If not passed, + an instance of :class:`~google.auth.aio.transport.aiohttp.Request` + is created. + + Raises: + - google.auth.exceptions.TransportError: If `auth_request` is `None` + and the external package `aiohttp` is not installed. + - google.auth.exceptions.InvalidType: If the provided credentials are + not of type `google.auth.aio.credentials.Credentials`. + """ + + def __init__( + self, credentials: Credentials, auth_request: Optional[transport.Request] = None + ): + if not isinstance(credentials, Credentials): + raise exceptions.InvalidType( + f"The configured credentials of type {type(credentials)} are invalid and must be of type `google.auth.aio.credentials.Credentials`" + ) + self._credentials = credentials + _auth_request = auth_request + if not _auth_request and AIOHTTP_INSTALLED: + _auth_request = AiohttpRequest() + if _auth_request is None: + raise exceptions.TransportError( + "`auth_request` must either be configured or the external package `aiohttp` must be installed to use the default value." + ) + self._auth_request = _auth_request + + async def request( + self, + method: str, + url: str, + data: Optional[bytes] = None, + headers: Optional[Mapping[str, str]] = None, + max_allowed_time: float = transport._DEFAULT_TIMEOUT_SECONDS, + timeout: float = transport._DEFAULT_TIMEOUT_SECONDS, + **kwargs, + ) -> transport.Response: + """ + Args: + method (str): The http method used to make the request. + url (str): The URI to be requested. + data (Optional[bytes]): The payload or body in HTTP request. + headers (Optional[Mapping[str, str]]): Request headers. + timeout (float): + The amount of time in seconds to wait for the server response + with each individual request. + max_allowed_time (float): + If the method runs longer than this, a ``Timeout`` exception is + automatically raised. 
Unlike the ``timeout`` parameter, this
+                value applies to the total method execution time, even if
+                multiple requests are made under the hood.
+
+                Mind that it is not guaranteed that the timeout error is raised
+                at ``max_allowed_time``. It might take longer, for example, if
+                an underlying request takes a lot of time, but the request
+                itself does not timeout, e.g. if a large file is being
+                transmitted. The timeout error will be raised after such a
+                request completes.
+
+        Returns:
+            google.auth.aio.transport.Response: The HTTP response.
+
+        Raises:
+            google.auth.exceptions.TimeoutError: If the method does not complete within
+                the configured `max_allowed_time` or the request exceeds the configured
+                `timeout`.
+        """
+
+        retries = _exponential_backoff.AsyncExponentialBackoff(
+            total_attempts=transport.DEFAULT_MAX_RETRY_ATTEMPTS
+        )
+        async with timeout_guard(max_allowed_time) as with_timeout:
+            await with_timeout(
+                # Note: before_request will attempt to refresh credentials if expired.
+                self._credentials.before_request(
+                    self._auth_request, method, url, headers
+                )
+            )
+            # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+            # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+            async for _ in retries:  # pragma: no branch
+                response = await with_timeout(
+                    self._auth_request(url, method, data, headers, timeout, **kwargs)
+                )
+                if response.status_code not in transport.DEFAULT_RETRYABLE_STATUS_CODES:
+                    break
+        return response
+
+    @functools.wraps(request)
+    async def get(
+        self,
+        url: str,
+        data: Optional[bytes] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        max_allowed_time: float = transport._DEFAULT_TIMEOUT_SECONDS,
+        timeout: float = transport._DEFAULT_TIMEOUT_SECONDS,
+        **kwargs,
+    ) -> transport.Response:
+        return await self.request(
+            "GET", url, data, headers, max_allowed_time, timeout, **kwargs
+        )
+
+    @functools.wraps(request)
+    async def post(
+        self,
+        url: str,
+        data: Optional[bytes] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        max_allowed_time: float = transport._DEFAULT_TIMEOUT_SECONDS,
+        timeout: float = transport._DEFAULT_TIMEOUT_SECONDS,
+        **kwargs,
+    ) -> transport.Response:
+        return await self.request(
+            "POST", url, data, headers, max_allowed_time, timeout, **kwargs
+        )
+
+    @functools.wraps(request)
+    async def put(
+        self,
+        url: str,
+        data: Optional[bytes] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        max_allowed_time: float = transport._DEFAULT_TIMEOUT_SECONDS,
+        timeout: float = transport._DEFAULT_TIMEOUT_SECONDS,
+        **kwargs,
+    ) -> transport.Response:
+        return await self.request(
+            "PUT", url, data, headers, max_allowed_time, timeout, **kwargs
+        )
+
+    @functools.wraps(request)
+    async def patch(
+        self,
+        url: str,
+        data: Optional[bytes] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        max_allowed_time: float = transport._DEFAULT_TIMEOUT_SECONDS,
+        timeout: float = transport._DEFAULT_TIMEOUT_SECONDS,
+        **kwargs,
+    ) -> transport.Response:
+        return await self.request(
+            "PATCH", url, data, headers, max_allowed_time, timeout, **kwargs
+        )
+
+    @functools.wraps(request)
+    async def delete(
+        self,
+        url: str,
+        data: Optional[bytes] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        max_allowed_time: float = transport._DEFAULT_TIMEOUT_SECONDS,
+        timeout: float = transport._DEFAULT_TIMEOUT_SECONDS,
+        **kwargs,
+    ) -> transport.Response:
+        return await self.request(
+            "DELETE", url, data, headers, max_allowed_time, timeout, **kwargs
+        )
+
async def close(self) -> None: + """ + Close the underlying auth request session. + """ + await self._auth_request.close() diff --git a/.venv/lib/python3.12/site-packages/google/auth/api_key.py b/.venv/lib/python3.12/site-packages/google/auth/api_key.py new file mode 100644 index 00000000..4fdf7f27 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/api_key.py @@ -0,0 +1,76 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google API key support. +This module provides authentication using the `API key`_. +.. _API key: + https://cloud.google.com/docs/authentication/api-keys/ +""" + +from google.auth import _helpers +from google.auth import credentials +from google.auth import exceptions + + +class Credentials(credentials.Credentials): + """API key credentials. + These credentials use API key to provide authorization to applications. + """ + + def __init__(self, token): + """ + Args: + token (str): API key string + Raises: + ValueError: If the provided API key is not a non-empty string. + """ + super(Credentials, self).__init__() + if not token: + raise exceptions.InvalidValue("Token must be a non-empty API key string") + self.token = token + + @property + def expired(self): + return False + + @property + def valid(self): + return True + + @_helpers.copy_docstring(credentials.Credentials) + def refresh(self, request): + return + + def apply(self, headers, token=None): + """Apply the API key token to the x-goog-api-key header. + Args: + headers (Mapping): The HTTP request headers. + token (Optional[str]): If specified, overrides the current access + token. + """ + headers["x-goog-api-key"] = token or self.token + + def before_request(self, request, method, url, headers): + """Performs credential-specific before request logic. + Refreshes the credentials if necessary, then calls :meth:`apply` to + apply the token to the x-goog-api-key header. + Args: + request (google.auth.transport.Request): The object used to make + HTTP requests. + method (str): The request's HTTP method or the RPC method being + invoked. + url (str): The request's URI or the RPC service's URI. + headers (Mapping): The request's headers. + """ + self.apply(headers) diff --git a/.venv/lib/python3.12/site-packages/google/auth/app_engine.py b/.venv/lib/python3.12/site-packages/google/auth/app_engine.py new file mode 100644 index 00000000..7083ee61 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/app_engine.py @@ -0,0 +1,180 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google App Engine standard environment support. + +This module provides authentication and signing for applications running on App +Engine in the standard environment using the `App Identity API`_. + + +.. _App Identity API: + https://cloud.google.com/appengine/docs/python/appidentity/ +""" + +import datetime + +from google.auth import _helpers +from google.auth import credentials +from google.auth import crypt +from google.auth import exceptions + +# pytype: disable=import-error +try: + from google.appengine.api import app_identity # type: ignore +except ImportError: + app_identity = None # type: ignore +# pytype: enable=import-error + + +class Signer(crypt.Signer): + """Signs messages using the App Engine App Identity service. + + This can be used in place of :class:`google.auth.crypt.Signer` when + running in the App Engine standard environment. + """ + + @property + def key_id(self): + """Optional[str]: The key ID used to identify this private key. + + .. warning:: + This is always ``None``. The key ID used by App Engine can not + be reliably determined ahead of time. + """ + return None + + @_helpers.copy_docstring(crypt.Signer) + def sign(self, message): + message = _helpers.to_bytes(message) + _, signature = app_identity.sign_blob(message) + return signature + + +def get_project_id(): + """Gets the project ID for the current App Engine application. + + Returns: + str: The project ID + + Raises: + google.auth.exceptions.OSError: If the App Engine APIs are unavailable. + """ + # pylint: disable=missing-raises-doc + # Pylint rightfully thinks google.auth.exceptions.OSError is OSError, but doesn't + # realize it's a valid alias. + if app_identity is None: + raise exceptions.OSError("The App Engine APIs are not available.") + return app_identity.get_application_id() + + +class Credentials( + credentials.Scoped, credentials.Signing, credentials.CredentialsWithQuotaProject +): + """App Engine standard environment credentials. + + These credentials use the App Engine App Identity API to obtain access + tokens. + """ + + def __init__( + self, + scopes=None, + default_scopes=None, + service_account_id=None, + quota_project_id=None, + ): + """ + Args: + scopes (Sequence[str]): Scopes to request from the App Identity + API. + default_scopes (Sequence[str]): Default scopes passed by a + Google client library. Use 'scopes' for user-defined scopes. + service_account_id (str): The service account ID passed into + :func:`google.appengine.api.app_identity.get_access_token`. + If not specified, the default application service account + ID will be used. + quota_project_id (Optional[str]): The project ID used for quota + and billing. + + Raises: + google.auth.exceptions.OSError: If the App Engine APIs are unavailable. + """ + # pylint: disable=missing-raises-doc + # Pylint rightfully thinks google.auth.exceptions.OSError is OSError, but doesn't + # realize it's a valid alias. 
+        if app_identity is None:
+            raise exceptions.OSError("The App Engine APIs are not available.")
+
+        super(Credentials, self).__init__()
+        self._scopes = scopes
+        self._default_scopes = default_scopes
+        self._service_account_id = service_account_id
+        self._signer = Signer()
+        self._quota_project_id = quota_project_id
+
+    @_helpers.copy_docstring(credentials.Credentials)
+    def refresh(self, request):
+        scopes = self._scopes if self._scopes is not None else self._default_scopes
+        # pylint: disable=unused-argument
+        token, ttl = app_identity.get_access_token(scopes, self._service_account_id)
+        expiry = datetime.datetime.utcfromtimestamp(ttl)
+
+        self.token, self.expiry = token, expiry
+
+    @property
+    def service_account_email(self):
+        """The service account email."""
+        if self._service_account_id is None:
+            self._service_account_id = app_identity.get_service_account_name()
+        return self._service_account_id
+
+    @property
+    def requires_scopes(self):
+        """Checks if the credentials require scopes.
+
+        Returns:
+            bool: True if there are no scopes set, otherwise False.
+        """
+        return not self._scopes and not self._default_scopes
+
+    @_helpers.copy_docstring(credentials.Scoped)
+    def with_scopes(self, scopes, default_scopes=None):
+        return self.__class__(
+            scopes=scopes,
+            default_scopes=default_scopes,
+            service_account_id=self._service_account_id,
+            quota_project_id=self.quota_project_id,
+        )
+
+    @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+    def with_quota_project(self, quota_project_id):
+        return self.__class__(
+            scopes=self._scopes,
+            service_account_id=self._service_account_id,
+            quota_project_id=quota_project_id,
+        )
+
+    @_helpers.copy_docstring(credentials.Signing)
+    def sign_bytes(self, message):
+        return self._signer.sign(message)
+
+    @property  # type: ignore
+    @_helpers.copy_docstring(credentials.Signing)
+    def signer_email(self):
+        return self.service_account_email
+
+    @property  # type: ignore
+    @_helpers.copy_docstring(credentials.Signing)
+    def signer(self):
+        return self._signer
diff --git a/.venv/lib/python3.12/site-packages/google/auth/aws.py b/.venv/lib/python3.12/site-packages/google/auth/aws.py
new file mode 100644
index 00000000..28c065d3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/auth/aws.py
@@ -0,0 +1,861 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""AWS Credentials and AWS Signature V4 Request Signer.
+
+This module provides credentials to access Google Cloud resources from Amazon
+Web Services (AWS) workloads. These credentials are recommended over the
+use of service account credentials in AWS as they do not involve the management
+of long-lived service account private keys.
+
+AWS Credentials are initialized using external_account arguments which are
+typically loaded from the external credentials JSON file.
+
+This module also provides a definition for an abstract AWS security credentials supplier.
+This supplier can be implemented to return valid AWS security credentials and an AWS region +and used to create AWS credentials. The credentials will then call the +supplier instead of using pre-defined methods such as calling the EC2 metadata endpoints. + +This module also provides a basic implementation of the +`AWS Signature Version 4`_ request signing algorithm. + +AWS Credentials use serialized signed requests to the +`AWS STS GetCallerIdentity`_ API that can be exchanged for Google access tokens +via the GCP STS endpoint. + +.. _AWS Signature Version 4: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html +.. _AWS STS GetCallerIdentity: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity.html +""" + +import abc +from dataclasses import dataclass +import hashlib +import hmac +import http.client as http_client +import json +import os +import posixpath +import re +from typing import Optional +import urllib +from urllib.parse import urljoin + +from google.auth import _helpers +from google.auth import environment_vars +from google.auth import exceptions +from google.auth import external_account + +# AWS Signature Version 4 signing algorithm identifier. +_AWS_ALGORITHM = "AWS4-HMAC-SHA256" +# The termination string for the AWS credential scope value as defined in +# https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html +_AWS_REQUEST_TYPE = "aws4_request" +# The AWS authorization header name for the security session token if available. +_AWS_SECURITY_TOKEN_HEADER = "x-amz-security-token" +# The AWS authorization header name for the auto-generated date. +_AWS_DATE_HEADER = "x-amz-date" +# The default AWS regional credential verification URL. +_DEFAULT_AWS_REGIONAL_CREDENTIAL_VERIFICATION_URL = ( + "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" +) +# IMDSV2 session token lifetime. This is set to a low value because the session token is used immediately. +_IMDSV2_SESSION_TOKEN_TTL_SECONDS = "300" + + +class RequestSigner(object): + """Implements an AWS request signer based on the AWS Signature Version 4 signing + process. + https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html + """ + + def __init__(self, region_name): + """Instantiates an AWS request signer used to compute authenticated signed + requests to AWS APIs based on the AWS Signature Version 4 signing process. + + Args: + region_name (str): The AWS region to use. + """ + + self._region_name = region_name + + def get_request_options( + self, + aws_security_credentials, + url, + method, + request_payload="", + additional_headers={}, + ): + """Generates the signed request for the provided HTTP request for calling + an AWS API. This follows the steps described at: + https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html + + Args: + aws_security_credentials (AWSSecurityCredentials): The AWS security credentials. + url (str): The AWS service URL containing the canonical URI and + query string. + method (str): The HTTP method used to call this API. + request_payload (Optional[str]): The optional request payload if + available. + additional_headers (Optional[Mapping[str, str]]): The optional + additional headers needed for the requested AWS API. + + Returns: + Mapping[str, str]: The AWS signed request dictionary object. + """ + + additional_headers = additional_headers or {} + + uri = urllib.parse.urlparse(url) + # Normalize the URL path. This is needed for the canonical_uri. 
+ # os.path.normpath can't be used since it normalizes "/" paths + # to "\\" in Windows OS. + normalized_uri = urllib.parse.urlparse( + urljoin(url, posixpath.normpath(uri.path)) + ) + # Validate provided URL. + if not uri.hostname or uri.scheme != "https": + raise exceptions.InvalidResource("Invalid AWS service URL") + + header_map = _generate_authentication_header_map( + host=uri.hostname, + canonical_uri=normalized_uri.path or "/", + canonical_querystring=_get_canonical_querystring(uri.query), + method=method, + region=self._region_name, + aws_security_credentials=aws_security_credentials, + request_payload=request_payload, + additional_headers=additional_headers, + ) + headers = { + "Authorization": header_map.get("authorization_header"), + "host": uri.hostname, + } + # Add x-amz-date if available. + if "amz_date" in header_map: + headers[_AWS_DATE_HEADER] = header_map.get("amz_date") + # Append additional optional headers, eg. X-Amz-Target, Content-Type, etc. + for key in additional_headers: + headers[key] = additional_headers[key] + + # Add session token if available. + if aws_security_credentials.session_token is not None: + headers[_AWS_SECURITY_TOKEN_HEADER] = aws_security_credentials.session_token + + signed_request = {"url": url, "method": method, "headers": headers} + if request_payload: + signed_request["data"] = request_payload + return signed_request + + +def _get_canonical_querystring(query): + """Generates the canonical query string given a raw query string. + Logic is based on + https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + + Args: + query (str): The raw query string. + + Returns: + str: The canonical query string. + """ + # Parse raw query string. + querystring = urllib.parse.parse_qs(query) + querystring_encoded_map = {} + for key in querystring: + quote_key = urllib.parse.quote(key, safe="-_.~") + # URI encode key. + querystring_encoded_map[quote_key] = [] + for item in querystring[key]: + # For each key, URI encode all values for that key. + querystring_encoded_map[quote_key].append( + urllib.parse.quote(item, safe="-_.~") + ) + # Sort values for each key. + querystring_encoded_map[quote_key].sort() + # Sort keys. + sorted_keys = list(querystring_encoded_map.keys()) + sorted_keys.sort() + # Reconstruct the query string. Preserve keys with multiple values. + querystring_encoded_pairs = [] + for key in sorted_keys: + for item in querystring_encoded_map[key]: + querystring_encoded_pairs.append("{}={}".format(key, item)) + return "&".join(querystring_encoded_pairs) + + +def _sign(key, msg): + """Creates the HMAC-SHA256 hash of the provided message using the provided + key. + + Args: + key (str): The HMAC-SHA256 key to use. + msg (str): The message to hash. + + Returns: + str: The computed hash bytes. + """ + return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest() + + +def _get_signing_key(key, date_stamp, region_name, service_name): + """Calculates the signing key used to calculate the signature for + AWS Signature Version 4 based on: + https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html + + Args: + key (str): The AWS secret access key. + date_stamp (str): The '%Y%m%d' date format. + region_name (str): The AWS region. + service_name (str): The AWS service name, eg. sts. + + Returns: + str: The signing key bytes. 
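+
+    Illustrative derivation chain (mirroring the implementation below)::
+
+        k_date    = HMAC-SHA256(b"AWS4" + key, date_stamp)
+        k_region  = HMAC-SHA256(k_date, region_name)
+        k_service = HMAC-SHA256(k_region, service_name)
+        k_signing = HMAC-SHA256(k_service, "aws4_request")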
+ """ + k_date = _sign(("AWS4" + key).encode("utf-8"), date_stamp) + k_region = _sign(k_date, region_name) + k_service = _sign(k_region, service_name) + k_signing = _sign(k_service, "aws4_request") + return k_signing + + +def _generate_authentication_header_map( + host, + canonical_uri, + canonical_querystring, + method, + region, + aws_security_credentials, + request_payload="", + additional_headers={}, +): + """Generates the authentication header map needed for generating the AWS + Signature Version 4 signed request. + + Args: + host (str): The AWS service URL hostname. + canonical_uri (str): The AWS service URL path name. + canonical_querystring (str): The AWS service URL query string. + method (str): The HTTP method used to call this API. + region (str): The AWS region. + aws_security_credentials (AWSSecurityCredentials): The AWS security credentials. + request_payload (Optional[str]): The optional request payload if + available. + additional_headers (Optional[Mapping[str, str]]): The optional + additional headers needed for the requested AWS API. + + Returns: + Mapping[str, str]: The AWS authentication header dictionary object. + This contains the x-amz-date and authorization header information. + """ + # iam.amazonaws.com host => iam service. + # sts.us-east-2.amazonaws.com host => sts service. + service_name = host.split(".")[0] + + current_time = _helpers.utcnow() + amz_date = current_time.strftime("%Y%m%dT%H%M%SZ") + date_stamp = current_time.strftime("%Y%m%d") + + # Change all additional headers to be lower case. + full_headers = {} + for key in additional_headers: + full_headers[key.lower()] = additional_headers[key] + # Add AWS session token if available. + if aws_security_credentials.session_token is not None: + full_headers[ + _AWS_SECURITY_TOKEN_HEADER + ] = aws_security_credentials.session_token + + # Required headers + full_headers["host"] = host + # Do not use generated x-amz-date if the date header is provided. + # Previously the date was not fixed with x-amz- and could be provided + # manually. + # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-header-value-trim.req + if "date" not in full_headers: + full_headers[_AWS_DATE_HEADER] = amz_date + + # Header keys need to be sorted alphabetically. 
+ canonical_headers = "" + header_keys = list(full_headers.keys()) + header_keys.sort() + for key in header_keys: + canonical_headers = "{}{}:{}\n".format( + canonical_headers, key, full_headers[key] + ) + signed_headers = ";".join(header_keys) + + payload_hash = hashlib.sha256((request_payload or "").encode("utf-8")).hexdigest() + + # https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + canonical_request = "{}\n{}\n{}\n{}\n{}\n{}".format( + method, + canonical_uri, + canonical_querystring, + canonical_headers, + signed_headers, + payload_hash, + ) + + credential_scope = "{}/{}/{}/{}".format( + date_stamp, region, service_name, _AWS_REQUEST_TYPE + ) + + # https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + string_to_sign = "{}\n{}\n{}\n{}".format( + _AWS_ALGORITHM, + amz_date, + credential_scope, + hashlib.sha256(canonical_request.encode("utf-8")).hexdigest(), + ) + + # https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html + signing_key = _get_signing_key( + aws_security_credentials.secret_access_key, date_stamp, region, service_name + ) + signature = hmac.new( + signing_key, string_to_sign.encode("utf-8"), hashlib.sha256 + ).hexdigest() + + # https://docs.aws.amazon.com/general/latest/gr/sigv4-add-signature-to-request.html + authorization_header = "{} Credential={}/{}, SignedHeaders={}, Signature={}".format( + _AWS_ALGORITHM, + aws_security_credentials.access_key_id, + credential_scope, + signed_headers, + signature, + ) + + authentication_header = {"authorization_header": authorization_header} + # Do not use generated x-amz-date if the date header is provided. + if "date" not in full_headers: + authentication_header["amz_date"] = amz_date + return authentication_header + + +@dataclass +class AwsSecurityCredentials: + """A class that models AWS security credentials with an optional session token. + + Attributes: + access_key_id (str): The AWS security credentials access key id. + secret_access_key (str): The AWS security credentials secret access key. + session_token (Optional[str]): The optional AWS security credentials session token. This should be set when using temporary credentials. + """ + + access_key_id: str + secret_access_key: str + session_token: Optional[str] = None + + +class AwsSecurityCredentialsSupplier(metaclass=abc.ABCMeta): + """Base class for AWS security credential suppliers. This can be implemented with custom logic to retrieve + AWS security credentials to exchange for a Google Cloud access token. The AWS external account credential does + not cache the AWS security credentials, so caching logic should be added in the implementation. + """ + + @abc.abstractmethod + def get_aws_security_credentials(self, context, request): + """Returns the AWS security credentials for the requested context. + + .. warning: This is not cached by the calling Google credential, so caching logic should be implemented in the supplier. + + Args: + context (google.auth.externalaccount.SupplierContext): The context object + containing information about the requested audience and subject token type. + request (google.auth.transport.Request): The object used to make + HTTP requests. + + Raises: + google.auth.exceptions.RefreshError: If an error is encountered during + security credential retrieval logic. + + Returns: + AwsSecurityCredentials: The requested AWS security credentials. 
+ """ + raise NotImplementedError("") + + @abc.abstractmethod + def get_aws_region(self, context, request): + """Returns the AWS region for the requested context. + + Args: + context (google.auth.externalaccount.SupplierContext): The context object + containing information about the requested audience and subject token type. + request (google.auth.transport.Request): The object used to make + HTTP requests. + + Raises: + google.auth.exceptions.RefreshError: If an error is encountered during + region retrieval logic. + + Returns: + str: The AWS region. + """ + raise NotImplementedError("") + + +class _DefaultAwsSecurityCredentialsSupplier(AwsSecurityCredentialsSupplier): + """Default implementation of AWS security credentials supplier. Supports retrieving + credentials and region via EC2 metadata endpoints and environment variables. + """ + + def __init__(self, credential_source): + self._region_url = credential_source.get("region_url") + self._security_credentials_url = credential_source.get("url") + self._imdsv2_session_token_url = credential_source.get( + "imdsv2_session_token_url" + ) + + @_helpers.copy_docstring(AwsSecurityCredentialsSupplier) + def get_aws_security_credentials(self, context, request): + + # Check environment variables for permanent credentials first. + # https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html + env_aws_access_key_id = os.environ.get(environment_vars.AWS_ACCESS_KEY_ID) + env_aws_secret_access_key = os.environ.get( + environment_vars.AWS_SECRET_ACCESS_KEY + ) + # This is normally not available for permanent credentials. + env_aws_session_token = os.environ.get(environment_vars.AWS_SESSION_TOKEN) + if env_aws_access_key_id and env_aws_secret_access_key: + return AwsSecurityCredentials( + env_aws_access_key_id, env_aws_secret_access_key, env_aws_session_token + ) + + imdsv2_session_token = self._get_imdsv2_session_token(request) + role_name = self._get_metadata_role_name(request, imdsv2_session_token) + + # Get security credentials. + credentials = self._get_metadata_security_credentials( + request, role_name, imdsv2_session_token + ) + + return AwsSecurityCredentials( + credentials.get("AccessKeyId"), + credentials.get("SecretAccessKey"), + credentials.get("Token"), + ) + + @_helpers.copy_docstring(AwsSecurityCredentialsSupplier) + def get_aws_region(self, context, request): + # The AWS metadata server is not available in some AWS environments + # such as AWS lambda. Instead, it is available via environment + # variable. + env_aws_region = os.environ.get(environment_vars.AWS_REGION) + if env_aws_region is not None: + return env_aws_region + + env_aws_region = os.environ.get(environment_vars.AWS_DEFAULT_REGION) + if env_aws_region is not None: + return env_aws_region + + if not self._region_url: + raise exceptions.RefreshError("Unable to determine AWS region") + + headers = None + imdsv2_session_token = self._get_imdsv2_session_token(request) + if imdsv2_session_token is not None: + headers = {"X-aws-ec2-metadata-token": imdsv2_session_token} + + response = request(url=self._region_url, method="GET", headers=headers) + + # Support both string and bytes type response.data. + response_body = ( + response.data.decode("utf-8") + if hasattr(response.data, "decode") + else response.data + ) + + if response.status != http_client.OK: + raise exceptions.RefreshError( + "Unable to retrieve AWS region: {}".format(response_body) + ) + + # This endpoint will return the region in format: us-east-2b. + # Only the us-east-2 part should be used. 
+ return response_body[:-1] + + def _get_imdsv2_session_token(self, request): + if request is not None and self._imdsv2_session_token_url is not None: + headers = { + "X-aws-ec2-metadata-token-ttl-seconds": _IMDSV2_SESSION_TOKEN_TTL_SECONDS + } + + imdsv2_session_token_response = request( + url=self._imdsv2_session_token_url, method="PUT", headers=headers + ) + + if imdsv2_session_token_response.status != http_client.OK: + raise exceptions.RefreshError( + "Unable to retrieve AWS Session Token: {}".format( + imdsv2_session_token_response.data + ) + ) + + return imdsv2_session_token_response.data + else: + return None + + def _get_metadata_security_credentials( + self, request, role_name, imdsv2_session_token + ): + """Retrieves the AWS security credentials required for signing AWS + requests from the AWS metadata server. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + role_name (str): The AWS role name required by the AWS metadata + server security_credentials endpoint in order to return the + credentials. + imdsv2_session_token (str): The AWS IMDSv2 session token to be added as a + header in the requests to AWS metadata endpoint. + + Returns: + Mapping[str, str]: The AWS metadata server security credentials + response. + + Raises: + google.auth.exceptions.RefreshError: If an error occurs while + retrieving the AWS security credentials. + """ + headers = {"Content-Type": "application/json"} + if imdsv2_session_token is not None: + headers["X-aws-ec2-metadata-token"] = imdsv2_session_token + + response = request( + url="{}/{}".format(self._security_credentials_url, role_name), + method="GET", + headers=headers, + ) + + # support both string and bytes type response.data + response_body = ( + response.data.decode("utf-8") + if hasattr(response.data, "decode") + else response.data + ) + + if response.status != http_client.OK: + raise exceptions.RefreshError( + "Unable to retrieve AWS security credentials: {}".format(response_body) + ) + + credentials_response = json.loads(response_body) + + return credentials_response + + def _get_metadata_role_name(self, request, imdsv2_session_token): + """Retrieves the AWS role currently attached to the current AWS + workload by querying the AWS metadata server. This is needed for the + AWS metadata server security credentials endpoint in order to retrieve + the AWS security credentials needed to sign requests to AWS APIs. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + imdsv2_session_token (str): The AWS IMDSv2 session token to be added as a + header in the requests to AWS metadata endpoint. + + Returns: + str: The AWS role name. + + Raises: + google.auth.exceptions.RefreshError: If an error occurs while + retrieving the AWS role name. 
+ """ + if self._security_credentials_url is None: + raise exceptions.RefreshError( + "Unable to determine the AWS metadata server security credentials endpoint" + ) + + headers = None + if imdsv2_session_token is not None: + headers = {"X-aws-ec2-metadata-token": imdsv2_session_token} + + response = request( + url=self._security_credentials_url, method="GET", headers=headers + ) + + # support both string and bytes type response.data + response_body = ( + response.data.decode("utf-8") + if hasattr(response.data, "decode") + else response.data + ) + + if response.status != http_client.OK: + raise exceptions.RefreshError( + "Unable to retrieve AWS role name {}".format(response_body) + ) + + return response_body + + +class Credentials(external_account.Credentials): + """AWS external account credentials. + This is used to exchange serialized AWS signature v4 signed requests to + AWS STS GetCallerIdentity service for Google access tokens. + """ + + def __init__( + self, + audience, + subject_token_type, + token_url=external_account._DEFAULT_TOKEN_URL, + credential_source=None, + aws_security_credentials_supplier=None, + *args, + **kwargs + ): + """Instantiates an AWS workload external account credentials object. + + Args: + audience (str): The STS audience field. + subject_token_type (str): The subject token type based on the Oauth2.0 token exchange spec. + Expected values include:: + + “urn:ietf:params:aws:token-type:aws4_request” + + token_url (Optional [str]): The STS endpoint URL. If not provided, will default to "https://sts.googleapis.com/v1/token". + credential_source (Optional [Mapping]): The credential source dictionary used + to provide instructions on how to retrieve external credential to be exchanged for Google access tokens. + Either a credential source or an AWS security credentials supplier must be provided. + + Example credential_source for AWS credential:: + + { + "environment_id": "aws1", + "regional_cred_verification_url": "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15", + "region_url": "http://169.254.169.254/latest/meta-data/placement/availability-zone", + "url": "http://169.254.169.254/latest/meta-data/iam/security-credentials", + imdsv2_session_token_url": "http://169.254.169.254/latest/api/token" + } + + aws_security_credentials_supplier (Optional [AwsSecurityCredentialsSupplier]): Optional AWS security credentials supplier. + This will be called to supply valid AWS security credentails which will then + be exchanged for Google access tokens. Either an AWS security credentials supplier + or a credential source must be provided. + args (List): Optional positional arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method. + kwargs (Mapping): Optional keyword arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method. + + Raises: + google.auth.exceptions.RefreshError: If an error is encountered during + access token retrieval logic. + ValueError: For invalid parameters. + + .. note:: Typically one of the helper constructors + :meth:`from_file` or + :meth:`from_info` are used instead of calling the constructor directly. 
+ """ + super(Credentials, self).__init__( + audience=audience, + subject_token_type=subject_token_type, + token_url=token_url, + credential_source=credential_source, + *args, + **kwargs + ) + if credential_source is None and aws_security_credentials_supplier is None: + raise exceptions.InvalidValue( + "A valid credential source or AWS security credentials supplier must be provided." + ) + if ( + credential_source is not None + and aws_security_credentials_supplier is not None + ): + raise exceptions.InvalidValue( + "AWS credential cannot have both a credential source and an AWS security credentials supplier." + ) + + if aws_security_credentials_supplier: + self._aws_security_credentials_supplier = aws_security_credentials_supplier + # The regional cred verification URL would normally be provided through the credential source. So set it to the default one here. + self._cred_verification_url = ( + _DEFAULT_AWS_REGIONAL_CREDENTIAL_VERIFICATION_URL + ) + else: + environment_id = credential_source.get("environment_id") or "" + self._aws_security_credentials_supplier = _DefaultAwsSecurityCredentialsSupplier( + credential_source + ) + self._cred_verification_url = credential_source.get( + "regional_cred_verification_url" + ) + + # Get the environment ID, i.e. "aws1". Currently, only one version supported (1). + matches = re.match(r"^(aws)([\d]+)$", environment_id) + if matches: + env_id, env_version = matches.groups() + else: + env_id, env_version = (None, None) + + if env_id != "aws" or self._cred_verification_url is None: + raise exceptions.InvalidResource( + "No valid AWS 'credential_source' provided" + ) + elif env_version is None or int(env_version) != 1: + raise exceptions.InvalidValue( + "aws version '{}' is not supported in the current build.".format( + env_version + ) + ) + + self._target_resource = audience + self._request_signer = None + + def retrieve_subject_token(self, request): + """Retrieves the subject token using the credential_source object. + The subject token is a serialized `AWS GetCallerIdentity signed request`_. + + The logic is summarized as: + + Retrieve the AWS region from the AWS_REGION or AWS_DEFAULT_REGION + environment variable or from the AWS metadata server availability-zone + if not found in the environment variable. + + Check AWS credentials in environment variables. If not found, retrieve + from the AWS metadata server security-credentials endpoint. + + When retrieving AWS credentials from the metadata server + security-credentials endpoint, the AWS role needs to be determined by + calling the security-credentials endpoint without any argument. Then the + credentials can be retrieved via: security-credentials/role_name + + Generate the signed request to AWS STS GetCallerIdentity action. + + Inject x-goog-cloud-target-resource into header and serialize the + signed request. This will be the subject-token to pass to GCP STS. + + .. _AWS GetCallerIdentity signed request: + https://cloud.google.com/iam/docs/access-resources-aws#exchange-token + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + Returns: + str: The retrieved subject token. + """ + + # Initialize the request signer if not yet initialized after determining + # the current AWS region. 
+ if self._request_signer is None: + self._region = self._aws_security_credentials_supplier.get_aws_region( + self._supplier_context, request + ) + self._request_signer = RequestSigner(self._region) + + # Retrieve the AWS security credentials needed to generate the signed + # request. + aws_security_credentials = self._aws_security_credentials_supplier.get_aws_security_credentials( + self._supplier_context, request + ) + # Generate the signed request to AWS STS GetCallerIdentity API. + # Use the required regional endpoint. Otherwise, the request will fail. + request_options = self._request_signer.get_request_options( + aws_security_credentials, + self._cred_verification_url.replace("{region}", self._region), + "POST", + ) + # The GCP STS endpoint expects the headers to be formatted as: + # [ + # {key: 'x-amz-date', value: '...'}, + # {key: 'Authorization', value: '...'}, + # ... + # ] + # And then serialized as: + # quote(json.dumps({ + # url: '...', + # method: 'POST', + # headers: [{key: 'x-amz-date', value: '...'}, ...] + # })) + request_headers = request_options.get("headers") + # The full, canonical resource name of the workload identity pool + # provider, with or without the HTTPS prefix. + # Including this header as part of the signature is recommended to + # ensure data integrity. + request_headers["x-goog-cloud-target-resource"] = self._target_resource + + # Serialize AWS signed request. + aws_signed_req = {} + aws_signed_req["url"] = request_options.get("url") + aws_signed_req["method"] = request_options.get("method") + aws_signed_req["headers"] = [] + # Reformat header to GCP STS expected format. + for key in request_headers.keys(): + aws_signed_req["headers"].append( + {"key": key, "value": request_headers[key]} + ) + + return urllib.parse.quote( + json.dumps(aws_signed_req, separators=(",", ":"), sort_keys=True) + ) + + def _create_default_metrics_options(self): + metrics_options = super(Credentials, self)._create_default_metrics_options() + metrics_options["source"] = "aws" + if self._has_custom_supplier(): + metrics_options["source"] = "programmatic" + return metrics_options + + def _has_custom_supplier(self): + return self._credential_source is None + + def _constructor_args(self): + args = super(Credentials, self)._constructor_args() + # If a custom supplier was used, append it to the args dict. + if self._has_custom_supplier(): + args.update( + { + "aws_security_credentials_supplier": self._aws_security_credentials_supplier + } + ) + return args + + @classmethod + def from_info(cls, info, **kwargs): + """Creates an AWS Credentials instance from parsed external account info. + + Args: + info (Mapping[str, str]): The AWS external account info in Google + format. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.aws.Credentials: The constructed credentials. + + Raises: + ValueError: For invalid parameters. + """ + aws_security_credentials_supplier = info.get( + "aws_security_credentials_supplier" + ) + kwargs.update( + {"aws_security_credentials_supplier": aws_security_credentials_supplier} + ) + return super(Credentials, cls).from_info(info, **kwargs) + + @classmethod + def from_file(cls, filename, **kwargs): + """Creates an AWS Credentials instance from an external account json file. + + Args: + filename (str): The path to the AWS external account json file. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.aws.Credentials: The constructed credentials. 
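+
+        Example (illustrative; the file name is a placeholder)::
+
+            credentials = Credentials.from_file(
+                "aws_external_account.json",
+                scopes=["https://www.googleapis.com/auth/cloud-platform"],
+            )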
+ """ + return super(Credentials, cls).from_file(filename, **kwargs) diff --git a/.venv/lib/python3.12/site-packages/google/auth/compute_engine/__init__.py b/.venv/lib/python3.12/site-packages/google/auth/compute_engine/__init__.py new file mode 100644 index 00000000..7e1206fc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/compute_engine/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Compute Engine authentication.""" + +from google.auth.compute_engine._metadata import detect_gce_residency_linux +from google.auth.compute_engine.credentials import Credentials +from google.auth.compute_engine.credentials import IDTokenCredentials + + +__all__ = ["Credentials", "IDTokenCredentials", "detect_gce_residency_linux"] diff --git a/.venv/lib/python3.12/site-packages/google/auth/compute_engine/_metadata.py b/.venv/lib/python3.12/site-packages/google/auth/compute_engine/_metadata.py new file mode 100644 index 00000000..06f99de0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/compute_engine/_metadata.py @@ -0,0 +1,375 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provides helper methods for talking to the Compute Engine metadata server. + +See https://cloud.google.com/compute/docs/metadata for more details. +""" + +import datetime +import http.client as http_client +import json +import logging +import os +from urllib.parse import urljoin + +from google.auth import _helpers +from google.auth import environment_vars +from google.auth import exceptions +from google.auth import metrics +from google.auth import transport +from google.auth._exponential_backoff import ExponentialBackoff + +_LOGGER = logging.getLogger(__name__) + +# Environment variable GCE_METADATA_HOST is originally named +# GCE_METADATA_ROOT. For compatibility reasons, here it checks +# the new variable first; if not set, the system falls back +# to the old variable. +_GCE_METADATA_HOST = os.getenv(environment_vars.GCE_METADATA_HOST, None) +if not _GCE_METADATA_HOST: + _GCE_METADATA_HOST = os.getenv( + environment_vars.GCE_METADATA_ROOT, "metadata.google.internal" + ) +_METADATA_ROOT = "http://{}/computeMetadata/v1/".format(_GCE_METADATA_HOST) + +# This is used to ping the metadata server, it avoids the cost of a DNS +# lookup. 
+_METADATA_IP_ROOT = "http://{}".format( + os.getenv(environment_vars.GCE_METADATA_IP, "169.254.169.254") +) +_METADATA_FLAVOR_HEADER = "metadata-flavor" +_METADATA_FLAVOR_VALUE = "Google" +_METADATA_HEADERS = {_METADATA_FLAVOR_HEADER: _METADATA_FLAVOR_VALUE} + +# Timeout in seconds to wait for the GCE metadata server when detecting the +# GCE environment. +try: + _METADATA_DEFAULT_TIMEOUT = int(os.getenv("GCE_METADATA_TIMEOUT", 3)) +except ValueError: # pragma: NO COVER + _METADATA_DEFAULT_TIMEOUT = 3 + +# Detect GCE Residency +_GOOGLE = "Google" +_GCE_PRODUCT_NAME_FILE = "/sys/class/dmi/id/product_name" + + +def is_on_gce(request): + """Checks to see if the code runs on Google Compute Engine + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + + Returns: + bool: True if the code runs on Google Compute Engine, False otherwise. + """ + if ping(request): + return True + + if os.name == "nt": + # TODO: implement GCE residency detection on Windows + return False + + # Detect GCE residency on Linux + return detect_gce_residency_linux() + + +def detect_gce_residency_linux(): + """Detect Google Compute Engine residency by smbios check on Linux + + Returns: + bool: True if the GCE product name file is detected, False otherwise. + """ + try: + with open(_GCE_PRODUCT_NAME_FILE, "r") as file_obj: + content = file_obj.read().strip() + + except Exception: + return False + + return content.startswith(_GOOGLE) + + +def ping(request, timeout=_METADATA_DEFAULT_TIMEOUT, retry_count=3): + """Checks to see if the metadata server is available. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + timeout (int): How long to wait for the metadata server to respond. + retry_count (int): How many times to attempt connecting to metadata + server using above timeout. + + Returns: + bool: True if the metadata server is reachable, False otherwise. + """ + # NOTE: The explicit ``timeout`` is a workaround. The underlying + # issue is that resolving an unknown host on some networks will take + # 20-30 seconds; making this timeout short fixes the issue, but + # could lead to false negatives in the event that we are on GCE, but + # the metadata resolution was particularly slow. The latter case is + # "unlikely". + headers = _METADATA_HEADERS.copy() + headers[metrics.API_CLIENT_HEADER] = metrics.mds_ping() + + backoff = ExponentialBackoff(total_attempts=retry_count) + + for attempt in backoff: + try: + response = request( + url=_METADATA_IP_ROOT, method="GET", headers=headers, timeout=timeout + ) + + metadata_flavor = response.headers.get(_METADATA_FLAVOR_HEADER) + return ( + response.status == http_client.OK + and metadata_flavor == _METADATA_FLAVOR_VALUE + ) + + except exceptions.TransportError as e: + _LOGGER.warning( + "Compute Engine Metadata server unavailable on " + "attempt %s of %s. Reason: %s", + attempt, + retry_count, + e, + ) + + return False + + +def get( + request, + path, + root=_METADATA_ROOT, + params=None, + recursive=False, + retry_count=5, + headers=None, + return_none_for_not_found_error=False, +): + """Fetch a resource from the metadata server. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + path (str): The resource to retrieve. For example, + ``'instance/service-accounts/default'``. + root (str): The full path to the metadata server root. + params (Optional[Mapping[str, str]]): A mapping of query parameter + keys to values. 
+ recursive (bool): Whether to do a recursive query of metadata. See + https://cloud.google.com/compute/docs/metadata#aggcontents for more + details. + retry_count (int): How many times to attempt connecting to metadata + server using above timeout. + headers (Optional[Mapping[str, str]]): Headers for the request. + return_none_for_not_found_error (Optional[bool]): If True, returns None + for 404 error instead of throwing an exception. + + Returns: + Union[Mapping, str]: If the metadata server returns JSON, a mapping of + the decoded JSON is returned. Otherwise, the response content is + returned as a string. + + Raises: + google.auth.exceptions.TransportError: if an error occurred while + retrieving metadata. + """ + base_url = urljoin(root, path) + query_params = {} if params is None else params + + headers_to_use = _METADATA_HEADERS.copy() + if headers: + headers_to_use.update(headers) + + if recursive: + query_params["recursive"] = "true" + + url = _helpers.update_query(base_url, query_params) + + backoff = ExponentialBackoff(total_attempts=retry_count) + failure_reason = None + for attempt in backoff: + try: + response = request(url=url, method="GET", headers=headers_to_use) + if response.status in transport.DEFAULT_RETRYABLE_STATUS_CODES: + _LOGGER.warning( + "Compute Engine Metadata server unavailable on " + "attempt %s of %s. Response status: %s", + attempt, + retry_count, + response.status, + ) + failure_reason = ( + response.data.decode("utf-8") + if hasattr(response.data, "decode") + else response.data + ) + continue + else: + break + + except exceptions.TransportError as e: + _LOGGER.warning( + "Compute Engine Metadata server unavailable on " + "attempt %s of %s. Reason: %s", + attempt, + retry_count, + e, + ) + failure_reason = e + else: + raise exceptions.TransportError( + "Failed to retrieve {} from the Google Compute Engine " + "metadata service. Compute Engine Metadata server unavailable due to {}".format( + url, failure_reason + ) + ) + + content = _helpers.from_bytes(response.data) + + if response.status == http_client.NOT_FOUND and return_none_for_not_found_error: + return None + + if response.status == http_client.OK: + if ( + _helpers.parse_content_type(response.headers["content-type"]) + == "application/json" + ): + try: + return json.loads(content) + except ValueError as caught_exc: + new_exc = exceptions.TransportError( + "Received invalid JSON from the Google Compute Engine " + "metadata service: {:.20}".format(content) + ) + raise new_exc from caught_exc + else: + return content + + raise exceptions.TransportError( + "Failed to retrieve {} from the Google Compute Engine " + "metadata service. Status: {} Response:\n{}".format( + url, response.status, response.data + ), + response, + ) + + +def get_project_id(request): + """Get the Google Cloud Project ID from the metadata server. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + + Returns: + str: The project ID + + Raises: + google.auth.exceptions.TransportError: if an error occurred while + retrieving metadata. + """ + return get(request, "project/project-id") + + +def get_universe_domain(request): + """Get the universe domain value from the metadata server. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + + Returns: + str: The universe domain value. 
If the universe domain endpoint is
+            not found, return the default value, which is googleapis.com
+
+    Raises:
+        google.auth.exceptions.TransportError: if an error other than
+            404 occurs while retrieving metadata.
+    """
+    universe_domain = get(
+        request, "universe/universe-domain", return_none_for_not_found_error=True
+    )
+    if not universe_domain:
+        return "googleapis.com"
+    return universe_domain
+
+
+def get_service_account_info(request, service_account="default"):
+    """Get information about a service account from the metadata server.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        service_account (str): The string 'default' or a service account email
+            address. This determines the service account for which to acquire
+            information.
+
+    Returns:
+        Mapping: The service account's information, for example::
+
+            {
+                'email': '...',
+                'scopes': ['scope', ...],
+                'aliases': ['default', '...']
+            }
+
+    Raises:
+        google.auth.exceptions.TransportError: if an error occurred while
+            retrieving metadata.
+    """
+    path = "instance/service-accounts/{0}/".format(service_account)
+    # See https://cloud.google.com/compute/docs/metadata#aggcontents
+    # for more on the use of 'recursive'.
+    return get(request, path, params={"recursive": "true"})
+
+
+def get_service_account_token(request, service_account="default", scopes=None):
+    """Get the OAuth 2.0 access token for a service account.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        service_account (str): The string 'default' or a service account email
+            address. This determines the service account for which to acquire
+            an access token.
+        scopes (Optional[Union[str, List[str]]]): Optional string or list of
+            strings with auth scopes.
+    Returns:
+        Tuple[str, datetime]: The access token and its expiration.
+
+    Raises:
+        google.auth.exceptions.TransportError: if an error occurred while
+            retrieving metadata.
+    """
+    if scopes:
+        if not isinstance(scopes, str):
+            scopes = ",".join(scopes)
+        params = {"scopes": scopes}
+    else:
+        params = None
+
+    metrics_header = {
+        metrics.API_CLIENT_HEADER: metrics.token_request_access_token_mds()
+    }
+
+    path = "instance/service-accounts/{0}/token".format(service_account)
+    token_json = get(request, path, params=params, headers=metrics_header)
+    token_expiry = _helpers.utcnow() + datetime.timedelta(
+        seconds=token_json["expires_in"]
+    )
+    return token_json["access_token"], token_expiry
diff --git a/.venv/lib/python3.12/site-packages/google/auth/compute_engine/credentials.py b/.venv/lib/python3.12/site-packages/google/auth/compute_engine/credentials.py
new file mode 100644
index 00000000..f0126c0a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/auth/compute_engine/credentials.py
@@ -0,0 +1,496 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Compute Engine credentials.
+
+This module provides authentication for an application running on Google
+Compute Engine using the Compute Engine metadata server.
+
+"""
+
+import datetime
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import iam
+from google.auth import jwt
+from google.auth import metrics
+from google.auth.compute_engine import _metadata
+from google.oauth2 import _client
+
+
+class Credentials(
+    credentials.Scoped,
+    credentials.CredentialsWithQuotaProject,
+    credentials.CredentialsWithUniverseDomain,
+):
+    """Compute Engine Credentials.
+
+    These credentials use the Google Compute Engine metadata server to obtain
+    OAuth 2.0 access tokens associated with the instance's service account,
+    and are also used for Cloud Run, Flex and App Engine (except for the Python
+    2.7 runtime, which is supported only on older versions of this library).
+
+    For more information about Compute Engine authentication, including how
+    to configure scopes, see the `Compute Engine authentication
+    documentation`_.
+
+    .. note:: On Compute Engine the metadata server ignores requested scopes.
+        On Cloud Run, Flex and App Engine the server honours requested scopes.
+
+    .. _Compute Engine authentication documentation:
+        https://cloud.google.com/compute/docs/authentication#using
+    """
+
+    def __init__(
+        self,
+        service_account_email="default",
+        quota_project_id=None,
+        scopes=None,
+        default_scopes=None,
+        universe_domain=None,
+    ):
+        """
+        Args:
+            service_account_email (str): The service account email to use, or
+                'default'. A Compute Engine instance may have multiple service
+                accounts.
+            quota_project_id (Optional[str]): The project ID used for quota and
+                billing.
+            scopes (Optional[Sequence[str]]): The list of scopes for the credentials.
+            default_scopes (Optional[Sequence[str]]): Default scopes passed by a
+                Google client library. Use 'scopes' for user-defined scopes.
+            universe_domain (Optional[str]): The universe domain. If not
+                provided or None, the credential will attempt to fetch the value
+                from the metadata server. If the metadata server doesn't have a
+                universe domain endpoint, the default googleapis.com will be used.
+        """
+        super(Credentials, self).__init__()
+        self._service_account_email = service_account_email
+        self._quota_project_id = quota_project_id
+        self._scopes = scopes
+        self._default_scopes = default_scopes
+        self._universe_domain_cached = False
+        if universe_domain:
+            self._universe_domain = universe_domain
+            self._universe_domain_cached = True
+
+    def _retrieve_info(self, request):
+        """Retrieve information about the service account.
+
+        Updates the scopes and retrieves the full service account email.
+
+        Args:
+            request (google.auth.transport.Request): The object used to make
+                HTTP requests.
+        """
+        info = _metadata.get_service_account_info(
+            request, service_account=self._service_account_email
+        )
+
+        self._service_account_email = info["email"]
+
+        # Don't override scopes requested by the user.
+        if self._scopes is None:
+            self._scopes = info["scopes"]
+
+    def _metric_header_for_usage(self):
+        return metrics.CRED_TYPE_SA_MDS
+
+    def refresh(self, request):
+        """Refresh the access token and scopes.
+
+        Args:
+            request (google.auth.transport.Request): The object used to make
+                HTTP requests.
+
+        Raises:
+            google.auth.exceptions.RefreshError: If the Compute Engine metadata
+                service can't be reached or if the instance has no credentials.
+ """ + scopes = self._scopes if self._scopes is not None else self._default_scopes + try: + self._retrieve_info(request) + self.token, self.expiry = _metadata.get_service_account_token( + request, service_account=self._service_account_email, scopes=scopes + ) + except exceptions.TransportError as caught_exc: + new_exc = exceptions.RefreshError(caught_exc) + raise new_exc from caught_exc + + @property + def service_account_email(self): + """The service account email. + + .. note:: This is not guaranteed to be set until :meth:`refresh` has been + called. + """ + return self._service_account_email + + @property + def requires_scopes(self): + return not self._scopes + + @property + def universe_domain(self): + if self._universe_domain_cached: + return self._universe_domain + + from google.auth.transport import requests as google_auth_requests + + self._universe_domain = _metadata.get_universe_domain( + google_auth_requests.Request() + ) + self._universe_domain_cached = True + return self._universe_domain + + @_helpers.copy_docstring(credentials.Credentials) + def get_cred_info(self): + return { + "credential_source": "metadata server", + "credential_type": "VM credentials", + "principal": self.service_account_email, + } + + @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + creds = self.__class__( + service_account_email=self._service_account_email, + quota_project_id=quota_project_id, + scopes=self._scopes, + default_scopes=self._default_scopes, + ) + creds._universe_domain = self._universe_domain + creds._universe_domain_cached = self._universe_domain_cached + return creds + + @_helpers.copy_docstring(credentials.Scoped) + def with_scopes(self, scopes, default_scopes=None): + # Compute Engine credentials can not be scoped (the metadata service + # ignores the scopes parameter). App Engine, Cloud Run and Flex support + # requesting scopes. + creds = self.__class__( + scopes=scopes, + default_scopes=default_scopes, + service_account_email=self._service_account_email, + quota_project_id=self._quota_project_id, + ) + creds._universe_domain = self._universe_domain + creds._universe_domain_cached = self._universe_domain_cached + return creds + + @_helpers.copy_docstring(credentials.CredentialsWithUniverseDomain) + def with_universe_domain(self, universe_domain): + return self.__class__( + scopes=self._scopes, + default_scopes=self._default_scopes, + service_account_email=self._service_account_email, + quota_project_id=self._quota_project_id, + universe_domain=universe_domain, + ) + + +_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds +_DEFAULT_TOKEN_URI = "https://www.googleapis.com/oauth2/v4/token" + + +class IDTokenCredentials( + credentials.CredentialsWithQuotaProject, + credentials.Signing, + credentials.CredentialsWithTokenUri, +): + """Open ID Connect ID Token-based service account credentials. + + These credentials relies on the default service account of a GCE instance. + + ID token can be requested from `GCE metadata server identity endpoint`_, IAM + token endpoint or other token endpoints you specify. If metadata server + identity endpoint is not used, the GCE instance must have been started with + a service account that has access to the IAM Cloud API. + + .. 
_GCE metadata server identity endpoint: + https://cloud.google.com/compute/docs/instances/verifying-instance-identity + """ + + def __init__( + self, + request, + target_audience, + token_uri=None, + additional_claims=None, + service_account_email=None, + signer=None, + use_metadata_identity_endpoint=False, + quota_project_id=None, + ): + """ + Args: + request (google.auth.transport.Request): The object used to make + HTTP requests. + target_audience (str): The intended audience for these credentials, + used when requesting the ID Token. The ID Token's ``aud`` claim + will be set to this string. + token_uri (str): The OAuth 2.0 Token URI. + additional_claims (Mapping[str, str]): Any additional claims for + the JWT assertion used in the authorization grant. + service_account_email (str): Optional explicit service account to + use to sign JWT tokens. + By default, this is the default GCE service account. + signer (google.auth.crypt.Signer): The signer used to sign JWTs. + In case the signer is specified, the request argument will be + ignored. + use_metadata_identity_endpoint (bool): Whether to use GCE metadata + identity endpoint. For backward compatibility the default value + is False. If set to True, ``token_uri``, ``additional_claims``, + ``service_account_email``, ``signer`` argument should not be set; + otherwise ValueError will be raised. + quota_project_id (Optional[str]): The project ID used for quota and + billing. + + Raises: + ValueError: + If ``use_metadata_identity_endpoint`` is set to True, and one of + ``token_uri``, ``additional_claims``, ``service_account_email``, + ``signer`` arguments is set. + """ + super(IDTokenCredentials, self).__init__() + + self._quota_project_id = quota_project_id + self._use_metadata_identity_endpoint = use_metadata_identity_endpoint + self._target_audience = target_audience + + if use_metadata_identity_endpoint: + if token_uri or additional_claims or service_account_email or signer: + raise exceptions.MalformedError( + "If use_metadata_identity_endpoint is set, token_uri, " + "additional_claims, service_account_email, signer arguments" + " must not be set" + ) + self._token_uri = None + self._additional_claims = None + self._signer = None + + if service_account_email is None: + sa_info = _metadata.get_service_account_info(request) + self._service_account_email = sa_info["email"] + else: + self._service_account_email = service_account_email + + if not use_metadata_identity_endpoint: + if signer is None: + signer = iam.Signer( + request=request, + credentials=Credentials(), + service_account_email=self._service_account_email, + ) + self._signer = signer + self._token_uri = token_uri or _DEFAULT_TOKEN_URI + + if additional_claims is not None: + self._additional_claims = additional_claims + else: + self._additional_claims = {} + + def with_target_audience(self, target_audience): + """Create a copy of these credentials with the specified target + audience. + Args: + target_audience (str): The intended audience for these credentials, + used when requesting the ID Token. + Returns: + google.auth.service_account.IDTokenCredentials: A new credentials + instance. 
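+
+        Example (illustrative; ``id_creds`` is an existing instance)::
+
+            new_creds = id_creds.with_target_audience("https://pubsub.googleapis.com")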
+ """ + # since the signer is already instantiated, + # the request is not needed + if self._use_metadata_identity_endpoint: + return self.__class__( + None, + target_audience=target_audience, + use_metadata_identity_endpoint=True, + quota_project_id=self._quota_project_id, + ) + else: + return self.__class__( + None, + service_account_email=self._service_account_email, + token_uri=self._token_uri, + target_audience=target_audience, + additional_claims=self._additional_claims.copy(), + signer=self.signer, + use_metadata_identity_endpoint=False, + quota_project_id=self._quota_project_id, + ) + + @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + + # since the signer is already instantiated, + # the request is not needed + if self._use_metadata_identity_endpoint: + return self.__class__( + None, + target_audience=self._target_audience, + use_metadata_identity_endpoint=True, + quota_project_id=quota_project_id, + ) + else: + return self.__class__( + None, + service_account_email=self._service_account_email, + token_uri=self._token_uri, + target_audience=self._target_audience, + additional_claims=self._additional_claims.copy(), + signer=self.signer, + use_metadata_identity_endpoint=False, + quota_project_id=quota_project_id, + ) + + @_helpers.copy_docstring(credentials.CredentialsWithTokenUri) + def with_token_uri(self, token_uri): + + # since the signer is already instantiated, + # the request is not needed + if self._use_metadata_identity_endpoint: + raise exceptions.MalformedError( + "If use_metadata_identity_endpoint is set, token_uri" " must not be set" + ) + else: + return self.__class__( + None, + service_account_email=self._service_account_email, + token_uri=token_uri, + target_audience=self._target_audience, + additional_claims=self._additional_claims.copy(), + signer=self.signer, + use_metadata_identity_endpoint=False, + quota_project_id=self.quota_project_id, + ) + + def _make_authorization_grant_assertion(self): + """Create the OAuth 2.0 assertion. + This assertion is used during the OAuth 2.0 grant to acquire an + ID token. + Returns: + bytes: The authorization grant assertion. + """ + now = _helpers.utcnow() + lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS) + expiry = now + lifetime + + payload = { + "iat": _helpers.datetime_to_secs(now), + "exp": _helpers.datetime_to_secs(expiry), + # The issuer must be the service account email. + "iss": self.service_account_email, + # The audience must be the auth token endpoint's URI + "aud": self._token_uri, + # The target audience specifies which service the ID token is + # intended for. + "target_audience": self._target_audience, + } + + payload.update(self._additional_claims) + + token = jwt.encode(self._signer, payload) + + return token + + def _call_metadata_identity_endpoint(self, request): + """Request ID token from metadata identity endpoint. + + Args: + request (google.auth.transport.Request): The object used to make + HTTP requests. + + Returns: + Tuple[str, datetime.datetime]: The ID token and the expiry of the ID token. + + Raises: + google.auth.exceptions.RefreshError: If the Compute Engine metadata + service can't be reached or if the instance has no credentials. + ValueError: If extracting expiry from the obtained ID token fails. 
+ """ + try: + path = "instance/service-accounts/default/identity" + params = {"audience": self._target_audience, "format": "full"} + metrics_header = { + metrics.API_CLIENT_HEADER: metrics.token_request_id_token_mds() + } + id_token = _metadata.get( + request, path, params=params, headers=metrics_header + ) + except exceptions.TransportError as caught_exc: + new_exc = exceptions.RefreshError(caught_exc) + raise new_exc from caught_exc + + _, payload, _, _ = jwt._unverified_decode(id_token) + return id_token, datetime.datetime.utcfromtimestamp(payload["exp"]) + + def refresh(self, request): + """Refreshes the ID token. + + Args: + request (google.auth.transport.Request): The object used to make + HTTP requests. + + Raises: + google.auth.exceptions.RefreshError: If the credentials could + not be refreshed. + ValueError: If extracting expiry from the obtained ID token fails. + """ + if self._use_metadata_identity_endpoint: + self.token, self.expiry = self._call_metadata_identity_endpoint(request) + else: + assertion = self._make_authorization_grant_assertion() + access_token, expiry, _ = _client.id_token_jwt_grant( + request, self._token_uri, assertion + ) + self.token = access_token + self.expiry = expiry + + @property # type: ignore + @_helpers.copy_docstring(credentials.Signing) + def signer(self): + return self._signer + + def sign_bytes(self, message): + """Signs the given message. + + Args: + message (bytes): The message to sign. + + Returns: + bytes: The message's cryptographic signature. + + Raises: + ValueError: + Signer is not available if metadata identity endpoint is used. + """ + if self._use_metadata_identity_endpoint: + raise exceptions.InvalidOperation( + "Signer is not available if metadata identity endpoint is used" + ) + return self._signer.sign(message) + + @property + def service_account_email(self): + """The service account email.""" + return self._service_account_email + + @property + def signer_email(self): + return self._service_account_email diff --git a/.venv/lib/python3.12/site-packages/google/auth/credentials.py b/.venv/lib/python3.12/site-packages/google/auth/credentials.py new file mode 100644 index 00000000..2c67e044 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/credentials.py @@ -0,0 +1,522 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Interfaces for credentials.""" + +import abc +from enum import Enum +import os + +from google.auth import _helpers, environment_vars +from google.auth import exceptions +from google.auth import metrics +from google.auth._credentials_base import _BaseCredentials +from google.auth._refresh_worker import RefreshThreadManager + +DEFAULT_UNIVERSE_DOMAIN = "googleapis.com" + + +class Credentials(_BaseCredentials): + """Base class for all credentials. + + All credentials have a :attr:`token` that is used for authentication and + may also optionally set an :attr:`expiry` to indicate when the token will + no longer be valid. 
+
+    Most credentials will be :attr:`invalid` until :meth:`refresh` is called.
+    Credentials can do this automatically before the first HTTP request in
+    :meth:`before_request`.
+
+    Although the token and expiration will change as the credentials are
+    :meth:`refreshed <refresh>` and used, credentials should be considered
+    immutable. Various credentials will accept configuration such as private
+    keys, scopes, and other options. These options are not changeable after
+    construction. Some classes will provide mechanisms to copy the credentials
+    with modifications such as :meth:`ScopedCredentials.with_scopes`.
+    """
+
+    def __init__(self):
+        super(Credentials, self).__init__()
+
+        self.expiry = None
+        """Optional[datetime]: When the token expires and is no longer valid.
+        If this is None, the token is assumed to never expire."""
+        self._quota_project_id = None
+        """Optional[str]: Project to use for quota and billing purposes."""
+        self._trust_boundary = None
+        """Optional[dict]: Cache of a trust boundary response which has a list
+        of allowed regions and an encoded string representation of credentials
+        trust boundary."""
+        self._universe_domain = DEFAULT_UNIVERSE_DOMAIN
+        """Optional[str]: The universe domain value, default is googleapis.com
+        """
+
+        self._use_non_blocking_refresh = False
+        self._refresh_worker = RefreshThreadManager()
+
+    @property
+    def expired(self):
+        """Checks if the credentials are expired.
+
+        Note that credentials can be invalid but not expired because
+        credentials with :attr:`expiry` set to None are considered to never
+        expire.
+
+        .. deprecated:: v2.24.0
+            Prefer checking :attr:`token_state` instead.
+        """
+        if not self.expiry:
+            return False
+        # Remove some threshold from expiry to err on the side of reporting
+        # expiration early so that we avoid the 401-refresh-retry loop.
+        skewed_expiry = self.expiry - _helpers.REFRESH_THRESHOLD
+        return _helpers.utcnow() >= skewed_expiry
+
+    @property
+    def valid(self):
+        """Checks the validity of the credentials.
+
+        This is True if the credentials have a :attr:`token` and the token
+        is not :attr:`expired`.
+
+        .. deprecated:: v2.24.0
+            Prefer checking :attr:`token_state` instead.
+        """
+        return self.token is not None and not self.expired
+
+    @property
+    def token_state(self):
+        """
+        See :obj:`TokenState`.
+        """
+        if self.token is None:
+            return TokenState.INVALID
+
+        # Credentials that can't expire are always treated as fresh.
+        if self.expiry is None:
+            return TokenState.FRESH
+
+        expired = _helpers.utcnow() >= self.expiry
+        if expired:
+            return TokenState.INVALID
+
+        is_stale = _helpers.utcnow() >= (self.expiry - _helpers.REFRESH_THRESHOLD)
+        if is_stale:
+            return TokenState.STALE
+
+        return TokenState.FRESH
+
+    @property
+    def quota_project_id(self):
+        """Project to use for quota and billing purposes."""
+        return self._quota_project_id
+
+    @property
+    def universe_domain(self):
+        """The universe domain value."""
+        return self._universe_domain
+
+    def get_cred_info(self):
+        """The credential information JSON.
+
+        The credential information will be added to auth-related error
+        messages by the client library.
+
+        Returns:
+            Mapping[str, str]: The credential information JSON.
+        """
+        return None
+
+    @abc.abstractmethod
+    def refresh(self, request):
+        """Refreshes the access token.
+
+        Args:
+            request (google.auth.transport.Request): The object used to make
+                HTTP requests.
+
+        Raises:
+            google.auth.exceptions.RefreshError: If the credentials could
+                not be refreshed.
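+
+        Example (a sketch; ``credentials`` is assumed to be an instance of a
+        concrete subclass)::
+
+            from google.auth.transport.requests import Request
+
+            credentials.refresh(Request())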
+ """ + # pylint: disable=missing-raises-doc + # (pylint doesn't recognize that this is abstract) + raise NotImplementedError("Refresh must be implemented") + + def _metric_header_for_usage(self): + """The x-goog-api-client header for token usage metric. + + This header will be added to the API service requests in before_request + method. For example, "cred-type/sa-jwt" means service account self + signed jwt access token is used in the API service request + authorization header. Children credentials classes need to override + this method to provide the header value, if the token usage metric is + needed. + + Returns: + str: The x-goog-api-client header value. + """ + return None + + def apply(self, headers, token=None): + """Apply the token to the authentication header. + + Args: + headers (Mapping): The HTTP request headers. + token (Optional[str]): If specified, overrides the current access + token. + """ + self._apply(headers, token=token) + """Trust boundary value will be a cached value from global lookup. + + The response of trust boundary will be a list of regions and a hex + encoded representation. + + An example of global lookup response: + { + "locations": [ + "us-central1", "us-east1", "europe-west1", "asia-east1" + ] + "encoded_locations": "0xA30" + } + """ + if self._trust_boundary is not None: + headers["x-allowed-locations"] = self._trust_boundary["encoded_locations"] + if self.quota_project_id: + headers["x-goog-user-project"] = self.quota_project_id + + def _blocking_refresh(self, request): + if not self.valid: + self.refresh(request) + + def _non_blocking_refresh(self, request): + use_blocking_refresh_fallback = False + + if self.token_state == TokenState.STALE: + use_blocking_refresh_fallback = not self._refresh_worker.start_refresh( + self, request + ) + + if self.token_state == TokenState.INVALID or use_blocking_refresh_fallback: + self.refresh(request) + # If the blocking refresh succeeds then we can clear the error info + # on the background refresh worker, and perform refreshes in a + # background thread. + self._refresh_worker.clear_error() + + def before_request(self, request, method, url, headers): + """Performs credential-specific before request logic. + + Refreshes the credentials if necessary, then calls :meth:`apply` to + apply the token to the authentication header. + + Args: + request (google.auth.transport.Request): The object used to make + HTTP requests. + method (str): The request's HTTP method or the RPC method being + invoked. + url (str): The request's URI or the RPC service's URI. + headers (Mapping): The request's headers. + """ + # pylint: disable=unused-argument + # (Subclasses may use these arguments to ascertain information about + # the http request.) + if self._use_non_blocking_refresh: + self._non_blocking_refresh(request) + else: + self._blocking_refresh(request) + + metrics.add_metric_header(headers, self._metric_header_for_usage()) + self.apply(headers) + + def with_non_blocking_refresh(self): + self._use_non_blocking_refresh = True + + +class CredentialsWithQuotaProject(Credentials): + """Abstract base for credentials supporting ``with_quota_project`` factory""" + + def with_quota_project(self, quota_project_id): + """Returns a copy of these credentials with a modified quota project. + + Args: + quota_project_id (str): The project to use for quota and + billing purposes + + Returns: + google.auth.credentials.Credentials: A new credentials instance. 
+ """ + raise NotImplementedError("This credential does not support quota project.") + + def with_quota_project_from_environment(self): + quota_from_env = os.environ.get(environment_vars.GOOGLE_CLOUD_QUOTA_PROJECT) + if quota_from_env: + return self.with_quota_project(quota_from_env) + return self + + +class CredentialsWithTokenUri(Credentials): + """Abstract base for credentials supporting ``with_token_uri`` factory""" + + def with_token_uri(self, token_uri): + """Returns a copy of these credentials with a modified token uri. + + Args: + token_uri (str): The uri to use for fetching/exchanging tokens + + Returns: + google.auth.credentials.Credentials: A new credentials instance. + """ + raise NotImplementedError("This credential does not use token uri.") + + +class CredentialsWithUniverseDomain(Credentials): + """Abstract base for credentials supporting ``with_universe_domain`` factory""" + + def with_universe_domain(self, universe_domain): + """Returns a copy of these credentials with a modified universe domain. + + Args: + universe_domain (str): The universe domain to use + + Returns: + google.auth.credentials.Credentials: A new credentials instance. + """ + raise NotImplementedError( + "This credential does not support with_universe_domain." + ) + + +class AnonymousCredentials(Credentials): + """Credentials that do not provide any authentication information. + + These are useful in the case of services that support anonymous access or + local service emulators that do not use credentials. + """ + + @property + def expired(self): + """Returns `False`, anonymous credentials never expire.""" + return False + + @property + def valid(self): + """Returns `True`, anonymous credentials are always valid.""" + return True + + def refresh(self, request): + """Raises :class:``InvalidOperation``, anonymous credentials cannot be + refreshed.""" + raise exceptions.InvalidOperation("Anonymous credentials cannot be refreshed.") + + def apply(self, headers, token=None): + """Anonymous credentials do nothing to the request. + + The optional ``token`` argument is not supported. + + Raises: + google.auth.exceptions.InvalidValue: If a token was specified. + """ + if token is not None: + raise exceptions.InvalidValue("Anonymous credentials don't support tokens.") + + def before_request(self, request, method, url, headers): + """Anonymous credentials do nothing to the request.""" + + +class ReadOnlyScoped(metaclass=abc.ABCMeta): + """Interface for credentials whose scopes can be queried. + + OAuth 2.0-based credentials allow limiting access using scopes as described + in `RFC6749 Section 3.3`_. + If a credential class implements this interface then the credentials either + use scopes in their implementation. + + Some credentials require scopes in order to obtain a token. You can check + if scoping is necessary with :attr:`requires_scopes`:: + + if credentials.requires_scopes: + # Scoping is required. + credentials = credentials.with_scopes(scopes=['one', 'two']) + + Credentials that require scopes must either be constructed with scopes:: + + credentials = SomeScopedCredentials(scopes=['one', 'two']) + + Or must copy an existing instance using :meth:`with_scopes`:: + + scoped_credentials = credentials.with_scopes(scopes=['one', 'two']) + + Some credentials have scopes but do not allow or require scopes to be set, + these credentials can be used as-is. + + .. 
_RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3 + """ + + def __init__(self): + super(ReadOnlyScoped, self).__init__() + self._scopes = None + self._default_scopes = None + + @property + def scopes(self): + """Sequence[str]: the credentials' current set of scopes.""" + return self._scopes + + @property + def default_scopes(self): + """Sequence[str]: the credentials' current set of default scopes.""" + return self._default_scopes + + @abc.abstractproperty + def requires_scopes(self): + """True if these credentials require scopes to obtain an access token. + """ + return False + + def has_scopes(self, scopes): + """Checks if the credentials have the given scopes. + + .. warning: This method is not guaranteed to be accurate if the + credentials are :attr:`~Credentials.invalid`. + + Args: + scopes (Sequence[str]): The list of scopes to check. + + Returns: + bool: True if the credentials have the given scopes. + """ + credential_scopes = ( + self._scopes if self._scopes is not None else self._default_scopes + ) + return set(scopes).issubset(set(credential_scopes or [])) + + +class Scoped(ReadOnlyScoped): + """Interface for credentials whose scopes can be replaced while copying. + + OAuth 2.0-based credentials allow limiting access using scopes as described + in `RFC6749 Section 3.3`_. + If a credential class implements this interface then the credentials either + use scopes in their implementation. + + Some credentials require scopes in order to obtain a token. You can check + if scoping is necessary with :attr:`requires_scopes`:: + + if credentials.requires_scopes: + # Scoping is required. + credentials = credentials.create_scoped(['one', 'two']) + + Credentials that require scopes must either be constructed with scopes:: + + credentials = SomeScopedCredentials(scopes=['one', 'two']) + + Or must copy an existing instance using :meth:`with_scopes`:: + + scoped_credentials = credentials.with_scopes(scopes=['one', 'two']) + + Some credentials have scopes but do not allow or require scopes to be set, + these credentials can be used as-is. + + .. _RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3 + """ + + @abc.abstractmethod + def with_scopes(self, scopes, default_scopes=None): + """Create a copy of these credentials with the specified scopes. + + Args: + scopes (Sequence[str]): The list of scopes to attach to the + current credentials. + + Raises: + NotImplementedError: If the credentials' scopes can not be changed. + This can be avoided by checking :attr:`requires_scopes` before + calling this method. + """ + raise NotImplementedError("This class does not require scoping.") + + +def with_scopes_if_required(credentials, scopes, default_scopes=None): + """Creates a copy of the credentials with scopes if scoping is required. + + This helper function is useful when you do not know (or care to know) the + specific type of credentials you are using (such as when you use + :func:`google.auth.default`). This function will call + :meth:`Scoped.with_scopes` if the credentials are scoped credentials and if + the credentials require scoping. Otherwise, it will return the credentials + as-is. + + Args: + credentials (google.auth.credentials.Credentials): The credentials to + scope if necessary. + scopes (Sequence[str]): The list of scopes to use. + default_scopes (Sequence[str]): Default scopes passed by a + Google client library. Use 'scopes' for user-defined scopes. 
+ + Returns: + google.auth.credentials.Credentials: Either a new set of scoped + credentials, or the passed in credentials instance if no scoping + was required. + """ + if isinstance(credentials, Scoped) and credentials.requires_scopes: + return credentials.with_scopes(scopes, default_scopes=default_scopes) + else: + return credentials + + +class Signing(metaclass=abc.ABCMeta): + """Interface for credentials that can cryptographically sign messages.""" + + @abc.abstractmethod + def sign_bytes(self, message): + """Signs the given message. + + Args: + message (bytes): The message to sign. + + Returns: + bytes: The message's cryptographic signature. + """ + # pylint: disable=missing-raises-doc,redundant-returns-doc + # (pylint doesn't recognize that this is abstract) + raise NotImplementedError("Sign bytes must be implemented.") + + @abc.abstractproperty + def signer_email(self): + """Optional[str]: An email address that identifies the signer.""" + # pylint: disable=missing-raises-doc + # (pylint doesn't recognize that this is abstract) + raise NotImplementedError("Signer email must be implemented.") + + @abc.abstractproperty + def signer(self): + """google.auth.crypt.Signer: The signer used to sign bytes.""" + # pylint: disable=missing-raises-doc + # (pylint doesn't recognize that this is abstract) + raise NotImplementedError("Signer must be implemented.") + + +class TokenState(Enum): + """ + Tracks the state of a token. + FRESH: The token is valid. It is not expired or close to expired, or the token has no expiry. + STALE: The token is close to expired, and should be refreshed. The token can be used normally. + INVALID: The token is expired or invalid. The token cannot be used for a normal operation. + """ + + FRESH = 1 + STALE = 2 + INVALID = 3 diff --git a/.venv/lib/python3.12/site-packages/google/auth/crypt/__init__.py b/.venv/lib/python3.12/site-packages/google/auth/crypt/__init__.py new file mode 100644 index 00000000..6d147e70 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/crypt/__init__.py @@ -0,0 +1,98 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Cryptography helpers for verifying and signing messages. + +The simplest way to verify signatures is using :func:`verify_signature`:: + + cert = open('certs.pem').read() + valid = crypt.verify_signature(message, signature, cert) + +If you're going to verify many messages with the same certificate, you can use +:class:`RSAVerifier`:: + + cert = open('certs.pem').read() + verifier = crypt.RSAVerifier.from_string(cert) + valid = verifier.verify(message, signature) + +To sign messages use :class:`RSASigner` with a private key:: + + private_key = open('private_key.pem').read() + signer = crypt.RSASigner.from_string(private_key) + signature = signer.sign(message) + +The code above also works for :class:`ES256Signer` and :class:`ES256Verifier`. +Note that these two classes are only available if your `cryptography` dependency +version is at least 1.4.0. 
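+
+A minimal ES256 sketch (assuming ``ec_private_key.pem`` holds a P-256 private
+key in PEM format)::
+
+    private_key = open('ec_private_key.pem').read()
+    signer = crypt.ES256Signer.from_string(private_key)
+    signature = signer.sign(message)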
+""" + +from google.auth.crypt import base +from google.auth.crypt import rsa + +try: + from google.auth.crypt import es256 +except ImportError: # pragma: NO COVER + es256 = None # type: ignore + +if es256 is not None: # pragma: NO COVER + __all__ = [ + "ES256Signer", + "ES256Verifier", + "RSASigner", + "RSAVerifier", + "Signer", + "Verifier", + ] +else: # pragma: NO COVER + __all__ = ["RSASigner", "RSAVerifier", "Signer", "Verifier"] + + +# Aliases to maintain the v1.0.0 interface, as the crypt module was split +# into submodules. +Signer = base.Signer +Verifier = base.Verifier +RSASigner = rsa.RSASigner +RSAVerifier = rsa.RSAVerifier + +if es256 is not None: # pragma: NO COVER + ES256Signer = es256.ES256Signer + ES256Verifier = es256.ES256Verifier + + +def verify_signature(message, signature, certs, verifier_cls=rsa.RSAVerifier): + """Verify an RSA or ECDSA cryptographic signature. + + Checks that the provided ``signature`` was generated from ``bytes`` using + the private key associated with the ``cert``. + + Args: + message (Union[str, bytes]): The plaintext message. + signature (Union[str, bytes]): The cryptographic signature to check. + certs (Union[Sequence, str, bytes]): The certificate or certificates + to use to check the signature. + verifier_cls (Optional[~google.auth.crypt.base.Signer]): Which verifier + class to use for verification. This can be used to select different + algorithms, such as RSA or ECDSA. Default value is :class:`RSAVerifier`. + + Returns: + bool: True if the signature is valid, otherwise False. + """ + if isinstance(certs, (str, bytes)): + certs = [certs] + + for cert in certs: + verifier = verifier_cls.from_string(cert) + if verifier.verify(message, signature): + return True + return False diff --git a/.venv/lib/python3.12/site-packages/google/auth/crypt/_cryptography_rsa.py b/.venv/lib/python3.12/site-packages/google/auth/crypt/_cryptography_rsa.py new file mode 100644 index 00000000..1a3e9ff5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/crypt/_cryptography_rsa.py @@ -0,0 +1,151 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""RSA verifier and signer that use the ``cryptography`` library. + +This is a much faster implementation than the default (in +``google.auth.crypt._python_rsa``), which depends on the pure-Python +``rsa`` library. +""" + +import cryptography.exceptions +from cryptography.hazmat import backends +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import padding +import cryptography.x509 + +from google.auth import _helpers +from google.auth.crypt import base + +_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----" +_BACKEND = backends.default_backend() +_PADDING = padding.PKCS1v15() +_SHA256 = hashes.SHA256() + + +class RSAVerifier(base.Verifier): + """Verifies RSA cryptographic signatures using public keys. 
+ + Args: + public_key ( + cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey): + The public key used to verify signatures. + """ + + def __init__(self, public_key): + self._pubkey = public_key + + @_helpers.copy_docstring(base.Verifier) + def verify(self, message, signature): + message = _helpers.to_bytes(message) + try: + self._pubkey.verify(signature, message, _PADDING, _SHA256) + return True + except (ValueError, cryptography.exceptions.InvalidSignature): + return False + + @classmethod + def from_string(cls, public_key): + """Construct an Verifier instance from a public key or public + certificate string. + + Args: + public_key (Union[str, bytes]): The public key in PEM format or the + x509 public key certificate. + + Returns: + Verifier: The constructed verifier. + + Raises: + ValueError: If the public key can't be parsed. + """ + public_key_data = _helpers.to_bytes(public_key) + + if _CERTIFICATE_MARKER in public_key_data: + cert = cryptography.x509.load_pem_x509_certificate( + public_key_data, _BACKEND + ) + pubkey = cert.public_key() + + else: + pubkey = serialization.load_pem_public_key(public_key_data, _BACKEND) + + return cls(pubkey) + + +class RSASigner(base.Signer, base.FromServiceAccountMixin): + """Signs messages with an RSA private key. + + Args: + private_key ( + cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey): + The private key to sign with. + key_id (str): Optional key ID used to identify this private key. This + can be useful to associate the private key with its associated + public key or certificate. + """ + + def __init__(self, private_key, key_id=None): + self._key = private_key + self._key_id = key_id + + @property # type: ignore + @_helpers.copy_docstring(base.Signer) + def key_id(self): + return self._key_id + + @_helpers.copy_docstring(base.Signer) + def sign(self, message): + message = _helpers.to_bytes(message) + return self._key.sign(message, _PADDING, _SHA256) + + @classmethod + def from_string(cls, key, key_id=None): + """Construct a RSASigner from a private key in PEM format. + + Args: + key (Union[bytes, str]): Private key in PEM format. + key_id (str): An optional key id used to identify the private key. + + Returns: + google.auth.crypt._cryptography_rsa.RSASigner: The + constructed signer. + + Raises: + ValueError: If ``key`` is not ``bytes`` or ``str`` (unicode). + UnicodeDecodeError: If ``key`` is ``bytes`` but cannot be decoded + into a UTF-8 ``str``. + ValueError: If ``cryptography`` "Could not deserialize key data." 
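+
+        Example (a sketch; ``pem_data`` is assumed to hold an RSA private
+        key in PEM format)::
+
+            signer = RSASigner.from_string(pem_data, key_id='my-key-id')
+            signature = signer.sign(b'payload')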
+ """ + key = _helpers.to_bytes(key) + private_key = serialization.load_pem_private_key( + key, password=None, backend=_BACKEND + ) + return cls(private_key, key_id=key_id) + + def __getstate__(self): + """Pickle helper that serializes the _key attribute.""" + state = self.__dict__.copy() + state["_key"] = self._key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.NoEncryption(), + ) + return state + + def __setstate__(self, state): + """Pickle helper that deserializes the _key attribute.""" + state["_key"] = serialization.load_pem_private_key(state["_key"], None) + self.__dict__.update(state) diff --git a/.venv/lib/python3.12/site-packages/google/auth/crypt/_helpers.py b/.venv/lib/python3.12/site-packages/google/auth/crypt/_helpers.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/crypt/_helpers.py diff --git a/.venv/lib/python3.12/site-packages/google/auth/crypt/_python_rsa.py b/.venv/lib/python3.12/site-packages/google/auth/crypt/_python_rsa.py new file mode 100644 index 00000000..e553c25e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/crypt/_python_rsa.py @@ -0,0 +1,175 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Pure-Python RSA cryptography implementation. + +Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages +to parse PEM files storing PKCS#1 or PKCS#8 keys as well as +certificates. There is no support for p12 files. +""" + +from __future__ import absolute_import + +import io + +from pyasn1.codec.der import decoder # type: ignore +from pyasn1_modules import pem # type: ignore +from pyasn1_modules.rfc2459 import Certificate # type: ignore +from pyasn1_modules.rfc5208 import PrivateKeyInfo # type: ignore +import rsa # type: ignore + +from google.auth import _helpers +from google.auth import exceptions +from google.auth.crypt import base + +_POW2 = (128, 64, 32, 16, 8, 4, 2, 1) +_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----" +_PKCS1_MARKER = ("-----BEGIN RSA PRIVATE KEY-----", "-----END RSA PRIVATE KEY-----") +_PKCS8_MARKER = ("-----BEGIN PRIVATE KEY-----", "-----END PRIVATE KEY-----") +_PKCS8_SPEC = PrivateKeyInfo() + + +def _bit_list_to_bytes(bit_list): + """Converts an iterable of 1s and 0s to bytes. + + Combines the list 8 at a time, treating each group of 8 bits + as a single byte. + + Args: + bit_list (Sequence): Sequence of 1s and 0s. + + Returns: + bytes: The decoded bytes. + """ + num_bits = len(bit_list) + byte_vals = bytearray() + for start in range(0, num_bits, 8): + curr_bits = bit_list[start : start + 8] + char_val = sum(val * digit for val, digit in zip(_POW2, curr_bits)) + byte_vals.append(char_val) + return bytes(byte_vals) + + +class RSAVerifier(base.Verifier): + """Verifies RSA cryptographic signatures using public keys. + + Args: + public_key (rsa.key.PublicKey): The public key used to verify + signatures. 
+ """ + + def __init__(self, public_key): + self._pubkey = public_key + + @_helpers.copy_docstring(base.Verifier) + def verify(self, message, signature): + message = _helpers.to_bytes(message) + try: + return rsa.pkcs1.verify(message, signature, self._pubkey) + except (ValueError, rsa.pkcs1.VerificationError): + return False + + @classmethod + def from_string(cls, public_key): + """Construct an Verifier instance from a public key or public + certificate string. + + Args: + public_key (Union[str, bytes]): The public key in PEM format or the + x509 public key certificate. + + Returns: + google.auth.crypt._python_rsa.RSAVerifier: The constructed verifier. + + Raises: + ValueError: If the public_key can't be parsed. + """ + public_key = _helpers.to_bytes(public_key) + is_x509_cert = _CERTIFICATE_MARKER in public_key + + # If this is a certificate, extract the public key info. + if is_x509_cert: + der = rsa.pem.load_pem(public_key, "CERTIFICATE") + asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate()) + if remaining != b"": + raise exceptions.InvalidValue("Unused bytes", remaining) + + cert_info = asn1_cert["tbsCertificate"]["subjectPublicKeyInfo"] + key_bytes = _bit_list_to_bytes(cert_info["subjectPublicKey"]) + pubkey = rsa.PublicKey.load_pkcs1(key_bytes, "DER") + else: + pubkey = rsa.PublicKey.load_pkcs1(public_key, "PEM") + return cls(pubkey) + + +class RSASigner(base.Signer, base.FromServiceAccountMixin): + """Signs messages with an RSA private key. + + Args: + private_key (rsa.key.PrivateKey): The private key to sign with. + key_id (str): Optional key ID used to identify this private key. This + can be useful to associate the private key with its associated + public key or certificate. + """ + + def __init__(self, private_key, key_id=None): + self._key = private_key + self._key_id = key_id + + @property # type: ignore + @_helpers.copy_docstring(base.Signer) + def key_id(self): + return self._key_id + + @_helpers.copy_docstring(base.Signer) + def sign(self, message): + message = _helpers.to_bytes(message) + return rsa.pkcs1.sign(message, self._key, "SHA-256") + + @classmethod + def from_string(cls, key, key_id=None): + """Construct an Signer instance from a private key in PEM format. + + Args: + key (str): Private key in PEM format. + key_id (str): An optional key id used to identify the private key. + + Returns: + google.auth.crypt.Signer: The constructed signer. + + Raises: + ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in + PEM format. + """ + key = _helpers.from_bytes(key) # PEM expects str in Python 3 + marker_id, key_bytes = pem.readPemBlocksFromFile( + io.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER + ) + + # Key is in pkcs1 format. + if marker_id == 0: + private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format="DER") + # Key is in pkcs8. 
+ elif marker_id == 1: + key_info, remaining = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC) + if remaining != b"": + raise exceptions.InvalidValue("Unused bytes", remaining) + private_key_info = key_info.getComponentByName("privateKey") + private_key = rsa.key.PrivateKey.load_pkcs1( + private_key_info.asOctets(), format="DER" + ) + else: + raise exceptions.MalformedError("No key could be detected.") + + return cls(private_key, key_id=key_id) diff --git a/.venv/lib/python3.12/site-packages/google/auth/crypt/base.py b/.venv/lib/python3.12/site-packages/google/auth/crypt/base.py new file mode 100644 index 00000000..ad871c31 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/crypt/base.py @@ -0,0 +1,127 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base classes for cryptographic signers and verifiers.""" + +import abc +import io +import json + +from google.auth import exceptions + +_JSON_FILE_PRIVATE_KEY = "private_key" +_JSON_FILE_PRIVATE_KEY_ID = "private_key_id" + + +class Verifier(metaclass=abc.ABCMeta): + """Abstract base class for crytographic signature verifiers.""" + + @abc.abstractmethod + def verify(self, message, signature): + """Verifies a message against a cryptographic signature. + + Args: + message (Union[str, bytes]): The message to verify. + signature (Union[str, bytes]): The cryptography signature to check. + + Returns: + bool: True if message was signed by the private key associated + with the public key that this object was constructed with. + """ + # pylint: disable=missing-raises-doc,redundant-returns-doc + # (pylint doesn't recognize that this is abstract) + raise NotImplementedError("Verify must be implemented") + + +class Signer(metaclass=abc.ABCMeta): + """Abstract base class for cryptographic signers.""" + + @abc.abstractproperty + def key_id(self): + """Optional[str]: The key ID used to identify this private key.""" + raise NotImplementedError("Key id must be implemented") + + @abc.abstractmethod + def sign(self, message): + """Signs a message. + + Args: + message (Union[str, bytes]): The message to be signed. + + Returns: + bytes: The signature of the message. + """ + # pylint: disable=missing-raises-doc,redundant-returns-doc + # (pylint doesn't recognize that this is abstract) + raise NotImplementedError("Sign must be implemented") + + +class FromServiceAccountMixin(metaclass=abc.ABCMeta): + """Mix-in to enable factory constructors for a Signer.""" + + @abc.abstractmethod + def from_string(cls, key, key_id=None): + """Construct an Signer instance from a private key string. + + Args: + key (str): Private key as a string. + key_id (str): An optional key id used to identify the private key. + + Returns: + google.auth.crypt.Signer: The constructed signer. + + Raises: + ValueError: If the key cannot be parsed. 
+ """ + raise NotImplementedError("from_string must be implemented") + + @classmethod + def from_service_account_info(cls, info): + """Creates a Signer instance instance from a dictionary containing + service account info in Google format. + + Args: + info (Mapping[str, str]): The service account info in Google + format. + + Returns: + google.auth.crypt.Signer: The constructed signer. + + Raises: + ValueError: If the info is not in the expected format. + """ + if _JSON_FILE_PRIVATE_KEY not in info: + raise exceptions.MalformedError( + "The private_key field was not found in the service account " "info." + ) + + return cls.from_string( + info[_JSON_FILE_PRIVATE_KEY], info.get(_JSON_FILE_PRIVATE_KEY_ID) + ) + + @classmethod + def from_service_account_file(cls, filename): + """Creates a Signer instance from a service account .json file + in Google format. + + Args: + filename (str): The path to the service account .json file. + + Returns: + google.auth.crypt.Signer: The constructed signer. + """ + with io.open(filename, "r", encoding="utf-8") as json_file: + data = json.load(json_file) + + return cls.from_service_account_info(data) diff --git a/.venv/lib/python3.12/site-packages/google/auth/crypt/es256.py b/.venv/lib/python3.12/site-packages/google/auth/crypt/es256.py new file mode 100644 index 00000000..820e4bec --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/crypt/es256.py @@ -0,0 +1,175 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ECDSA (ES256) verifier and signer that use the ``cryptography`` library. +""" + +from cryptography import utils # type: ignore +import cryptography.exceptions +from cryptography.hazmat import backends +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import ec +from cryptography.hazmat.primitives.asymmetric import padding +from cryptography.hazmat.primitives.asymmetric.utils import decode_dss_signature +from cryptography.hazmat.primitives.asymmetric.utils import encode_dss_signature +import cryptography.x509 + +from google.auth import _helpers +from google.auth.crypt import base + + +_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----" +_BACKEND = backends.default_backend() +_PADDING = padding.PKCS1v15() + + +class ES256Verifier(base.Verifier): + """Verifies ECDSA cryptographic signatures using public keys. + + Args: + public_key ( + cryptography.hazmat.primitives.asymmetric.ec.ECDSAPublicKey): + The public key used to verify signatures. + """ + + def __init__(self, public_key): + self._pubkey = public_key + + @_helpers.copy_docstring(base.Verifier) + def verify(self, message, signature): + # First convert (r||s) raw signature to ASN1 encoded signature. 
+ sig_bytes = _helpers.to_bytes(signature) + if len(sig_bytes) != 64: + return False + r = ( + int.from_bytes(sig_bytes[:32], byteorder="big") + if _helpers.is_python_3() + else utils.int_from_bytes(sig_bytes[:32], byteorder="big") + ) + s = ( + int.from_bytes(sig_bytes[32:], byteorder="big") + if _helpers.is_python_3() + else utils.int_from_bytes(sig_bytes[32:], byteorder="big") + ) + asn1_sig = encode_dss_signature(r, s) + + message = _helpers.to_bytes(message) + try: + self._pubkey.verify(asn1_sig, message, ec.ECDSA(hashes.SHA256())) + return True + except (ValueError, cryptography.exceptions.InvalidSignature): + return False + + @classmethod + def from_string(cls, public_key): + """Construct an Verifier instance from a public key or public + certificate string. + + Args: + public_key (Union[str, bytes]): The public key in PEM format or the + x509 public key certificate. + + Returns: + Verifier: The constructed verifier. + + Raises: + ValueError: If the public key can't be parsed. + """ + public_key_data = _helpers.to_bytes(public_key) + + if _CERTIFICATE_MARKER in public_key_data: + cert = cryptography.x509.load_pem_x509_certificate( + public_key_data, _BACKEND + ) + pubkey = cert.public_key() + + else: + pubkey = serialization.load_pem_public_key(public_key_data, _BACKEND) + + return cls(pubkey) + + +class ES256Signer(base.Signer, base.FromServiceAccountMixin): + """Signs messages with an ECDSA private key. + + Args: + private_key ( + cryptography.hazmat.primitives.asymmetric.ec.ECDSAPrivateKey): + The private key to sign with. + key_id (str): Optional key ID used to identify this private key. This + can be useful to associate the private key with its associated + public key or certificate. + """ + + def __init__(self, private_key, key_id=None): + self._key = private_key + self._key_id = key_id + + @property # type: ignore + @_helpers.copy_docstring(base.Signer) + def key_id(self): + return self._key_id + + @_helpers.copy_docstring(base.Signer) + def sign(self, message): + message = _helpers.to_bytes(message) + asn1_signature = self._key.sign(message, ec.ECDSA(hashes.SHA256())) + + # Convert ASN1 encoded signature to (r||s) raw signature. + (r, s) = decode_dss_signature(asn1_signature) + return ( + (r.to_bytes(32, byteorder="big") + s.to_bytes(32, byteorder="big")) + if _helpers.is_python_3() + else (utils.int_to_bytes(r, 32) + utils.int_to_bytes(s, 32)) + ) + + @classmethod + def from_string(cls, key, key_id=None): + """Construct a RSASigner from a private key in PEM format. + + Args: + key (Union[bytes, str]): Private key in PEM format. + key_id (str): An optional key id used to identify the private key. + + Returns: + google.auth.crypt._cryptography_rsa.RSASigner: The + constructed signer. + + Raises: + ValueError: If ``key`` is not ``bytes`` or ``str`` (unicode). + UnicodeDecodeError: If ``key`` is ``bytes`` but cannot be decoded + into a UTF-8 ``str``. + ValueError: If ``cryptography`` "Could not deserialize key data." 
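+
+        Example (a sketch; ``ec_pem`` is assumed to hold an EC P-256 private
+        key in PEM format)::
+
+            signer = ES256Signer.from_string(ec_pem, key_id='es-key-1')
+            signature = signer.sign(b'payload')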
+ """ + key = _helpers.to_bytes(key) + private_key = serialization.load_pem_private_key( + key, password=None, backend=_BACKEND + ) + return cls(private_key, key_id=key_id) + + def __getstate__(self): + """Pickle helper that serializes the _key attribute.""" + state = self.__dict__.copy() + state["_key"] = self._key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.NoEncryption(), + ) + return state + + def __setstate__(self, state): + """Pickle helper that deserializes the _key attribute.""" + state["_key"] = serialization.load_pem_private_key(state["_key"], None) + self.__dict__.update(state) diff --git a/.venv/lib/python3.12/site-packages/google/auth/crypt/rsa.py b/.venv/lib/python3.12/site-packages/google/auth/crypt/rsa.py new file mode 100644 index 00000000..ed842d1e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/crypt/rsa.py @@ -0,0 +1,30 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""RSA cryptography signer and verifier.""" + + +try: + # Prefer cryptograph-based RSA implementation. + from google.auth.crypt import _cryptography_rsa + + RSASigner = _cryptography_rsa.RSASigner + RSAVerifier = _cryptography_rsa.RSAVerifier +except ImportError: # pragma: NO COVER + # Fallback to pure-python RSA implementation if cryptography is + # unavailable. + from google.auth.crypt import _python_rsa + + RSASigner = _python_rsa.RSASigner # type: ignore + RSAVerifier = _python_rsa.RSAVerifier # type: ignore diff --git a/.venv/lib/python3.12/site-packages/google/auth/downscoped.py b/.venv/lib/python3.12/site-packages/google/auth/downscoped.py new file mode 100644 index 00000000..ea75be90 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/downscoped.py @@ -0,0 +1,512 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Downscoping with Credential Access Boundaries + +This module provides the ability to downscope credentials using +`Downscoping with Credential Access Boundaries`_. This is useful to restrict the +Identity and Access Management (IAM) permissions that a short-lived credential +can use. + +To downscope permissions of a source credential, a Credential Access Boundary +that specifies which resources the new credential can access, as well as +an upper bound on the permissions that are available on each resource, has to +be defined. 
A downscoped credential can then be instantiated using the source +credential and the Credential Access Boundary. + +The common pattern of usage is to have a token broker with elevated access +generate these downscoped credentials from higher access source credentials and +pass the downscoped short-lived access tokens to a token consumer via some +secure authenticated channel for limited access to Google Cloud Storage +resources. + +For example, a token broker can be set up on a server in a private network. +Various workloads (token consumers) in the same network will send authenticated +requests to that broker for downscoped tokens to access or modify specific google +cloud storage buckets. + +The broker will instantiate downscoped credentials instances that can be used to +generate short lived downscoped access tokens that can be passed to the token +consumer. These downscoped access tokens can be injected by the consumer into +google.oauth2.Credentials and used to initialize a storage client instance to +access Google Cloud Storage resources with restricted access. + +Note: Only Cloud Storage supports Credential Access Boundaries. Other Google +Cloud services do not support this feature. + +.. _Downscoping with Credential Access Boundaries: https://cloud.google.com/iam/docs/downscoping-short-lived-credentials +""" + +import datetime + +from google.auth import _helpers +from google.auth import credentials +from google.auth import exceptions +from google.oauth2 import sts + +# The maximum number of access boundary rules a Credential Access Boundary can +# contain. +_MAX_ACCESS_BOUNDARY_RULES_COUNT = 10 +# The token exchange grant_type used for exchanging credentials. +_STS_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange" +# The token exchange requested_token_type. This is always an access_token. +_STS_REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token" +# The STS token URL used to exchanged a short lived access token for a downscoped one. +_STS_TOKEN_URL_PATTERN = "https://sts.{}/v1/token" +# The subject token type to use when exchanging a short lived access token for a +# downscoped token. +_STS_SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token" + + +class CredentialAccessBoundary(object): + """Defines a Credential Access Boundary which contains a list of access boundary + rules. Each rule contains information on the resource that the rule applies to, + the upper bound of the permissions that are available on that resource and an + optional condition to further restrict permissions. + """ + + def __init__(self, rules=[]): + """Instantiates a Credential Access Boundary. A Credential Access Boundary + can contain up to 10 access boundary rules. + + Args: + rules (Sequence[google.auth.downscoped.AccessBoundaryRule]): The list of + access boundary rules limiting the access that a downscoped credential + will have. + Raises: + InvalidType: If any of the rules are not a valid type. + InvalidValue: If the provided rules exceed the maximum allowed. + """ + self.rules = rules + + @property + def rules(self): + """Returns the list of access boundary rules defined on the Credential + Access Boundary. + + Returns: + Tuple[google.auth.downscoped.AccessBoundaryRule, ...]: The list of access + boundary rules defined on the Credential Access Boundary. These are returned + as an immutable tuple to prevent modification. 
+ """ + return tuple(self._rules) + + @rules.setter + def rules(self, value): + """Updates the current rules on the Credential Access Boundary. This will overwrite + the existing set of rules. + + Args: + value (Sequence[google.auth.downscoped.AccessBoundaryRule]): The list of + access boundary rules limiting the access that a downscoped credential + will have. + Raises: + InvalidType: If any of the rules are not a valid type. + InvalidValue: If the provided rules exceed the maximum allowed. + """ + if len(value) > _MAX_ACCESS_BOUNDARY_RULES_COUNT: + raise exceptions.InvalidValue( + "Credential access boundary rules can have a maximum of {} rules.".format( + _MAX_ACCESS_BOUNDARY_RULES_COUNT + ) + ) + for access_boundary_rule in value: + if not isinstance(access_boundary_rule, AccessBoundaryRule): + raise exceptions.InvalidType( + "List of rules provided do not contain a valid 'google.auth.downscoped.AccessBoundaryRule'." + ) + # Make a copy of the original list. + self._rules = list(value) + + def add_rule(self, rule): + """Adds a single access boundary rule to the existing rules. + + Args: + rule (google.auth.downscoped.AccessBoundaryRule): The access boundary rule, + limiting the access that a downscoped credential will have, to be added to + the existing rules. + Raises: + InvalidType: If any of the rules are not a valid type. + InvalidValue: If the provided rules exceed the maximum allowed. + """ + if len(self.rules) == _MAX_ACCESS_BOUNDARY_RULES_COUNT: + raise exceptions.InvalidValue( + "Credential access boundary rules can have a maximum of {} rules.".format( + _MAX_ACCESS_BOUNDARY_RULES_COUNT + ) + ) + if not isinstance(rule, AccessBoundaryRule): + raise exceptions.InvalidType( + "The provided rule does not contain a valid 'google.auth.downscoped.AccessBoundaryRule'." + ) + self._rules.append(rule) + + def to_json(self): + """Generates the dictionary representation of the Credential Access Boundary. + This uses the format expected by the Security Token Service API as documented in + `Defining a Credential Access Boundary`_. + + .. _Defining a Credential Access Boundary: + https://cloud.google.com/iam/docs/downscoping-short-lived-credentials#define-boundary + + Returns: + Mapping: Credential Access Boundary Rule represented in a dictionary object. + """ + rules = [] + for access_boundary_rule in self.rules: + rules.append(access_boundary_rule.to_json()) + + return {"accessBoundary": {"accessBoundaryRules": rules}} + + +class AccessBoundaryRule(object): + """Defines an access boundary rule which contains information on the resource that + the rule applies to, the upper bound of the permissions that are available on that + resource and an optional condition to further restrict permissions. + """ + + def __init__( + self, available_resource, available_permissions, availability_condition=None + ): + """Instantiates a single access boundary rule. + + Args: + available_resource (str): The full resource name of the Cloud Storage bucket + that the rule applies to. Use the format + "//storage.googleapis.com/projects/_/buckets/bucket-name". + available_permissions (Sequence[str]): A list defining the upper bound that + the downscoped token will have on the available permissions for the + resource. Each value is the identifier for an IAM predefined role or + custom role, with the prefix "inRole:". For example: + "inRole:roles/storage.objectViewer". + Only the permissions in these roles will be available. 
+ availability_condition (Optional[google.auth.downscoped.AvailabilityCondition]): + Optional condition that restricts the availability of permissions to + specific Cloud Storage objects. + + Raises: + InvalidType: If any of the parameters are not of the expected types. + InvalidValue: If any of the parameters are not of the expected values. + """ + self.available_resource = available_resource + self.available_permissions = available_permissions + self.availability_condition = availability_condition + + @property + def available_resource(self): + """Returns the current available resource. + + Returns: + str: The current available resource. + """ + return self._available_resource + + @available_resource.setter + def available_resource(self, value): + """Updates the current available resource. + + Args: + value (str): The updated value of the available resource. + + Raises: + google.auth.exceptions.InvalidType: If the value is not a string. + """ + if not isinstance(value, str): + raise exceptions.InvalidType( + "The provided available_resource is not a string." + ) + self._available_resource = value + + @property + def available_permissions(self): + """Returns the current available permissions. + + Returns: + Tuple[str, ...]: The current available permissions. These are returned + as an immutable tuple to prevent modification. + """ + return tuple(self._available_permissions) + + @available_permissions.setter + def available_permissions(self, value): + """Updates the current available permissions. + + Args: + value (Sequence[str]): The updated value of the available permissions. + + Raises: + InvalidType: If the value is not a list of strings. + InvalidValue: If the value is not valid. + """ + for available_permission in value: + if not isinstance(available_permission, str): + raise exceptions.InvalidType( + "Provided available_permissions are not a list of strings." + ) + if available_permission.find("inRole:") != 0: + raise exceptions.InvalidValue( + "available_permissions must be prefixed with 'inRole:'." + ) + # Make a copy of the original list. + self._available_permissions = list(value) + + @property + def availability_condition(self): + """Returns the current availability condition. + + Returns: + Optional[google.auth.downscoped.AvailabilityCondition]: The current + availability condition. + """ + return self._availability_condition + + @availability_condition.setter + def availability_condition(self, value): + """Updates the current availability condition. + + Args: + value (Optional[google.auth.downscoped.AvailabilityCondition]): The updated + value of the availability condition. + + Raises: + google.auth.exceptions.InvalidType: If the value is not of type google.auth.downscoped.AvailabilityCondition + or None. + """ + if not isinstance(value, AvailabilityCondition) and value is not None: + raise exceptions.InvalidType( + "The provided availability_condition is not a 'google.auth.downscoped.AvailabilityCondition' or None." + ) + self._availability_condition = value + + def to_json(self): + """Generates the dictionary representation of the access boundary rule. + This uses the format expected by the Security Token Service API as documented in + `Defining a Credential Access Boundary`_. + + .. _Defining a Credential Access Boundary: + https://cloud.google.com/iam/docs/downscoping-short-lived-credentials#define-boundary + + Returns: + Mapping: The access boundary rule represented in a dictionary object. 
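+
+        Example output (a sketch for a rule granting the
+        ``storage.objectViewer`` role on ``bucket-name``;
+        ``availabilityCondition`` is added only when one is set)::
+
+            {
+                "availablePermissions": ["inRole:roles/storage.objectViewer"],
+                "availableResource": "//storage.googleapis.com/projects/_/buckets/bucket-name"
+            }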
+ """ + json = { + "availablePermissions": list(self.available_permissions), + "availableResource": self.available_resource, + } + if self.availability_condition: + json["availabilityCondition"] = self.availability_condition.to_json() + return json + + +class AvailabilityCondition(object): + """An optional condition that can be used as part of a Credential Access Boundary + to further restrict permissions.""" + + def __init__(self, expression, title=None, description=None): + """Instantiates an availability condition using the provided expression and + optional title or description. + + Args: + expression (str): A condition expression that specifies the Cloud Storage + objects where permissions are available. For example, this expression + makes permissions available for objects whose name starts with "customer-a": + "resource.name.startsWith('projects/_/buckets/example-bucket/objects/customer-a')" + title (Optional[str]): An optional short string that identifies the purpose of + the condition. + description (Optional[str]): Optional details about the purpose of the condition. + + Raises: + InvalidType: If any of the parameters are not of the expected types. + InvalidValue: If any of the parameters are not of the expected values. + """ + self.expression = expression + self.title = title + self.description = description + + @property + def expression(self): + """Returns the current condition expression. + + Returns: + str: The current conditon expression. + """ + return self._expression + + @expression.setter + def expression(self, value): + """Updates the current condition expression. + + Args: + value (str): The updated value of the condition expression. + + Raises: + google.auth.exceptions.InvalidType: If the value is not of type string. + """ + if not isinstance(value, str): + raise exceptions.InvalidType("The provided expression is not a string.") + self._expression = value + + @property + def title(self): + """Returns the current title. + + Returns: + Optional[str]: The current title. + """ + return self._title + + @title.setter + def title(self, value): + """Updates the current title. + + Args: + value (Optional[str]): The updated value of the title. + + Raises: + google.auth.exceptions.InvalidType: If the value is not of type string or None. + """ + if not isinstance(value, str) and value is not None: + raise exceptions.InvalidType("The provided title is not a string or None.") + self._title = value + + @property + def description(self): + """Returns the current description. + + Returns: + Optional[str]: The current description. + """ + return self._description + + @description.setter + def description(self, value): + """Updates the current description. + + Args: + value (Optional[str]): The updated value of the description. + + Raises: + google.auth.exceptions.InvalidType: If the value is not of type string or None. + """ + if not isinstance(value, str) and value is not None: + raise exceptions.InvalidType( + "The provided description is not a string or None." + ) + self._description = value + + def to_json(self): + """Generates the dictionary representation of the availability condition. + This uses the format expected by the Security Token Service API as documented in + `Defining a Credential Access Boundary`_. + + .. _Defining a Credential Access Boundary: + https://cloud.google.com/iam/docs/downscoping-short-lived-credentials#define-boundary + + Returns: + Mapping[str, str]: The availability condition represented in a dictionary + object. 
+ """ + json = {"expression": self.expression} + if self.title: + json["title"] = self.title + if self.description: + json["description"] = self.description + return json + + +class Credentials(credentials.CredentialsWithQuotaProject): + """Defines a set of Google credentials that are downscoped from an existing set + of Google OAuth2 credentials. This is useful to restrict the Identity and Access + Management (IAM) permissions that a short-lived credential can use. + The common pattern of usage is to have a token broker with elevated access + generate these downscoped credentials from higher access source credentials and + pass the downscoped short-lived access tokens to a token consumer via some + secure authenticated channel for limited access to Google Cloud Storage + resources. + """ + + def __init__( + self, + source_credentials, + credential_access_boundary, + quota_project_id=None, + universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN, + ): + """Instantiates a downscoped credentials object using the provided source + credentials and credential access boundary rules. + To downscope permissions of a source credential, a Credential Access Boundary + that specifies which resources the new credential can access, as well as an + upper bound on the permissions that are available on each resource, has to be + defined. A downscoped credential can then be instantiated using the source + credential and the Credential Access Boundary. + + Args: + source_credentials (google.auth.credentials.Credentials): The source credentials + to be downscoped based on the provided Credential Access Boundary rules. + credential_access_boundary (google.auth.downscoped.CredentialAccessBoundary): + The Credential Access Boundary which contains a list of access boundary + rules. Each rule contains information on the resource that the rule applies to, + the upper bound of the permissions that are available on that resource and an + optional condition to further restrict permissions. + quota_project_id (Optional[str]): The optional quota project ID. + universe_domain (Optional[str]): The universe domain value, default is googleapis.com + Raises: + google.auth.exceptions.RefreshError: If the source credentials + return an error on token refresh. + google.auth.exceptions.OAuthError: If the STS token exchange + endpoint returned an error during downscoped token generation. + """ + + super(Credentials, self).__init__() + self._source_credentials = source_credentials + self._credential_access_boundary = credential_access_boundary + self._quota_project_id = quota_project_id + self._universe_domain = universe_domain or credentials.DEFAULT_UNIVERSE_DOMAIN + self._sts_client = sts.Client( + _STS_TOKEN_URL_PATTERN.format(self.universe_domain) + ) + + @_helpers.copy_docstring(credentials.Credentials) + def refresh(self, request): + # Generate an access token from the source credentials. + self._source_credentials.refresh(request) + now = _helpers.utcnow() + # Exchange the access token for a downscoped access token. + response_data = self._sts_client.exchange_token( + request=request, + grant_type=_STS_GRANT_TYPE, + subject_token=self._source_credentials.token, + subject_token_type=_STS_SUBJECT_TOKEN_TYPE, + requested_token_type=_STS_REQUESTED_TOKEN_TYPE, + additional_options=self._credential_access_boundary.to_json(), + ) + self.token = response_data.get("access_token") + # For downscoping CAB flow, the STS endpoint may not return the expiration + # field for some flows. 
The generated downscoped token should always have
+        # the same expiration time as the source credentials. When no expires_in
+        # field is returned in the response, we can just get the expiration time
+        # from the source credentials.
+        if response_data.get("expires_in"):
+            lifetime = datetime.timedelta(seconds=response_data.get("expires_in"))
+            self.expiry = now + lifetime
+        else:
+            self.expiry = self._source_credentials.expiry
+
+    @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+    def with_quota_project(self, quota_project_id):
+        return self.__class__(
+            self._source_credentials,
+            self._credential_access_boundary,
+            quota_project_id=quota_project_id,
+        )
diff --git a/.venv/lib/python3.12/site-packages/google/auth/environment_vars.py b/.venv/lib/python3.12/site-packages/google/auth/environment_vars.py
new file mode 100644
index 00000000..81f31571
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/auth/environment_vars.py
@@ -0,0 +1,84 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Environment variables used by :mod:`google.auth`."""
+
+
+PROJECT = "GOOGLE_CLOUD_PROJECT"
+"""Environment variable defining the default project.
+
+This is used by :func:`google.auth.default` to explicitly set a project ID. This
+environment variable is also used by the Google Cloud Python Library.
+"""
+
+LEGACY_PROJECT = "GCLOUD_PROJECT"
+"""Previously used environment variable defining the default project.
+
+This environment variable is used instead of the current one in some
+situations (such as Google App Engine).
+"""
+
+GOOGLE_CLOUD_QUOTA_PROJECT = "GOOGLE_CLOUD_QUOTA_PROJECT"
+"""Environment variable defining the project to be used for
+quota and billing."""
+
+CREDENTIALS = "GOOGLE_APPLICATION_CREDENTIALS"
+"""Environment variable defining the location of Google application default
+credentials."""
+
+# The environment variable name which can replace ~/.config if set.
+CLOUD_SDK_CONFIG_DIR = "CLOUDSDK_CONFIG"
+"""Environment variable defining the location of Google Cloud SDK's config
+files."""
+
+# These two variables allow for customization of the addresses used when
+# contacting the GCE metadata service.
+GCE_METADATA_HOST = "GCE_METADATA_HOST"
+"""Environment variable providing an alternate hostname or host:port to be
+used for GCE metadata requests.
+
+This environment variable was originally named GCE_METADATA_ROOT. The system will
+check this environment variable first; should there be no value present,
+the system will fall back to the old variable.
+"""
+
+GCE_METADATA_ROOT = "GCE_METADATA_ROOT"
+"""Old environment variable for GCE_METADATA_HOST."""
+
+GCE_METADATA_IP = "GCE_METADATA_IP"
+"""Environment variable providing an alternate ip:port to be used for ip-only
+GCE metadata requests."""
+
+GOOGLE_API_USE_CLIENT_CERTIFICATE = "GOOGLE_API_USE_CLIENT_CERTIFICATE"
+"""Environment variable controlling whether to use a client certificate or not.
+
+The default value is false. Users have to explicitly set this value to true
+in order to use a client certificate to establish a mutual TLS channel."""
+
+LEGACY_APPENGINE_RUNTIME = "APPENGINE_RUNTIME"
+"""Gen1 environment variable defining the App Engine Runtime.
+
+Used to distinguish between GAE gen1 and GAE gen2+.
+"""
+
+# AWS environment variables used with AWS workload identity pools to retrieve
+# AWS security credentials and the AWS region needed to create serialized,
+# signed requests to the AWS STS GetCallerIdentity API that can be exchanged
+# for Google access tokens via the GCP STS endpoint.
+# When not available, the AWS metadata server is used to retrieve these values.
+AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID"
+AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY"
+AWS_SESSION_TOKEN = "AWS_SESSION_TOKEN"
+AWS_REGION = "AWS_REGION"
+AWS_DEFAULT_REGION = "AWS_DEFAULT_REGION"
diff --git a/.venv/lib/python3.12/site-packages/google/auth/exceptions.py b/.venv/lib/python3.12/site-packages/google/auth/exceptions.py
new file mode 100644
index 00000000..feb9f741
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/auth/exceptions.py
@@ -0,0 +1,108 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Exceptions used in the google.auth package."""
+
+
+class GoogleAuthError(Exception):
+    """Base class for all google.auth errors."""
+
+    def __init__(self, *args, **kwargs):
+        super(GoogleAuthError, self).__init__(*args)
+        retryable = kwargs.get("retryable", False)
+        self._retryable = retryable
+
+    @property
+    def retryable(self):
+        return self._retryable
+
+
+class TransportError(GoogleAuthError):
+    """Used to indicate an error occurred during an HTTP request."""
+
+
+class RefreshError(GoogleAuthError):
+    """Used to indicate that refreshing the credentials' access token
+    failed."""
+
+
+class UserAccessTokenError(GoogleAuthError):
+    """Used to indicate the ``gcloud auth print-access-token`` command failed."""
+
+
+class DefaultCredentialsError(GoogleAuthError):
+    """Used to indicate that acquiring default credentials failed."""
+
+
+class MutualTLSChannelError(GoogleAuthError):
+    """Used to indicate that mutual TLS channel creation failed, or that the
+    mutual TLS channel credentials are missing or invalid."""
+
+
+class ClientCertError(GoogleAuthError):
+    """Used to indicate that the client certificate is missing or invalid."""
+
+    @property
+    def retryable(self):
+        return False
+
+
+class OAuthError(GoogleAuthError):
+    """Used to indicate an error occurred during an OAuth-related HTTP
+    request."""
+
+
+class ReauthFailError(RefreshError):
+    """An exception for when reauth failed."""
+
+    def __init__(self, message=None, **kwargs):
+        super(ReauthFailError, self).__init__(
+            "Reauthentication failed.
{0}".format(message), **kwargs + ) + + +class ReauthSamlChallengeFailError(ReauthFailError): + """An exception for SAML reauth challenge failures.""" + + +class MalformedError(DefaultCredentialsError, ValueError): + """An exception for malformed data.""" + + +class InvalidResource(DefaultCredentialsError, ValueError): + """An exception for URL error.""" + + +class InvalidOperation(DefaultCredentialsError, ValueError): + """An exception for invalid operation.""" + + +class InvalidValue(DefaultCredentialsError, ValueError): + """Used to wrap general ValueError of python.""" + + +class InvalidType(DefaultCredentialsError, TypeError): + """Used to wrap general TypeError of python.""" + + +class OSError(DefaultCredentialsError, EnvironmentError): + """Used to wrap EnvironmentError(OSError after python3.3).""" + + +class TimeoutError(GoogleAuthError): + """Used to indicate a timeout error occurred during an HTTP request.""" + + +class ResponseError(GoogleAuthError): + """Used to indicate an error occurred when reading an HTTP response.""" diff --git a/.venv/lib/python3.12/site-packages/google/auth/external_account.py b/.venv/lib/python3.12/site-packages/google/auth/external_account.py new file mode 100644 index 00000000..161e6c50 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/external_account.py @@ -0,0 +1,628 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""External Account Credentials. + +This module provides credentials that exchange workload identity pool external +credentials for Google access tokens. This facilitates accessing Google Cloud +Platform resources from on-prem and non-Google Cloud platforms (e.g. AWS, +Microsoft Azure, OIDC identity providers), using native credentials retrieved +from the current environment without the need to copy, save and manage +long-lived service account credentials. + +Specifically, this is intended to use access tokens acquired using the GCP STS +token exchange endpoint following the `OAuth 2.0 Token Exchange`_ spec. + +.. _OAuth 2.0 Token Exchange: https://tools.ietf.org/html/rfc8693 +""" + +import abc +import copy +from dataclasses import dataclass +import datetime +import functools +import io +import json +import re + +from google.auth import _helpers +from google.auth import credentials +from google.auth import exceptions +from google.auth import impersonated_credentials +from google.auth import metrics +from google.oauth2 import sts +from google.oauth2 import utils + +# External account JSON type identifier. +_EXTERNAL_ACCOUNT_JSON_TYPE = "external_account" +# The token exchange grant_type used for exchanging credentials. +_STS_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange" +# The token exchange requested_token_type. This is always an access_token. +_STS_REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token" +# Cloud resource manager URL used to retrieve project information. 
+_CLOUD_RESOURCE_MANAGER = "https://cloudresourcemanager.googleapis.com/v1/projects/"
+# Default Google STS token URL.
+_DEFAULT_TOKEN_URL = "https://sts.{universe_domain}/v1/token"
+
+
+@dataclass
+class SupplierContext:
+    """A context class that contains information about the requested third party credential that is passed
+    to AWS security credential and subject token suppliers.
+
+    Attributes:
+        subject_token_type (str): The requested subject token type based on the OAuth 2.0 token exchange spec.
+            Expected values include::
+
+                “urn:ietf:params:oauth:token-type:jwt”
+                “urn:ietf:params:oauth:token-type:id-token”
+                “urn:ietf:params:oauth:token-type:saml2”
+                “urn:ietf:params:aws:token-type:aws4_request”
+
+        audience (str): The requested audience for the subject token.
+    """
+
+    subject_token_type: str
+    audience: str
+
+
+class Credentials(
+    credentials.Scoped,
+    credentials.CredentialsWithQuotaProject,
+    credentials.CredentialsWithTokenUri,
+    metaclass=abc.ABCMeta,
+):
+    """Base class for all external account credentials.
+
+    This is used to instantiate Credentials for exchanging external account
+    credentials for Google access tokens and authorizing requests to Google APIs.
+    The base class implements the common logic for exchanging external account
+    credentials for Google access tokens.
+    """
+
+    def __init__(
+        self,
+        audience,
+        subject_token_type,
+        token_url,
+        credential_source,
+        service_account_impersonation_url=None,
+        service_account_impersonation_options=None,
+        client_id=None,
+        client_secret=None,
+        token_info_url=None,
+        quota_project_id=None,
+        scopes=None,
+        default_scopes=None,
+        workforce_pool_user_project=None,
+        universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN,
+        trust_boundary=None,
+    ):
+        """Instantiates an external account credentials object.
+
+        Args:
+            audience (str): The STS audience field.
+            subject_token_type (str): The subject token type based on the OAuth 2.0 token exchange spec.
+                Expected values include::
+
+                    “urn:ietf:params:oauth:token-type:jwt”
+                    “urn:ietf:params:oauth:token-type:id-token”
+                    “urn:ietf:params:oauth:token-type:saml2”
+                    “urn:ietf:params:aws:token-type:aws4_request”
+
+            token_url (str): The STS endpoint URL.
+            credential_source (Mapping): The credential source dictionary.
+            service_account_impersonation_url (Optional[str]): The optional service account
+                impersonation generateAccessToken URL.
+            service_account_impersonation_options (Optional[Mapping]): The optional
+                service account impersonation options, such as the token lifetime.
+            client_id (Optional[str]): The optional client ID.
+            client_secret (Optional[str]): The optional client secret.
+            token_info_url (Optional[str]): The optional STS endpoint URL for token introspection.
+            quota_project_id (Optional[str]): The optional quota project ID.
+            scopes (Optional[Sequence[str]]): Optional scopes to request during the
+                authorization grant.
+            default_scopes (Optional[Sequence[str]]): Default scopes passed by a
+                Google client library. Use 'scopes' for user-defined scopes.
+            workforce_pool_user_project (Optional[str]): The optional workforce pool user
+                project number when the credential corresponds to a workforce pool and not
+                a workload identity pool. The underlying principal must still have
+                serviceusage.services.use IAM permission to use the project for
+                billing/quota.
+            universe_domain (str): The universe domain. The default universe
+                domain is googleapis.com.
+            trust_boundary (str): String representation of trust boundary meta.
+        Raises:
+            google.auth.exceptions.RefreshError: If the generateAccessToken
+                endpoint returned an error.
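+
+        .. note:: When ``token_url`` is left at the default
+            ``https://sts.{universe_domain}/v1/token`` pattern, the constructor
+            substitutes the ``{universe_domain}`` placeholder with the configured
+            universe domain, e.g. producing ``https://sts.googleapis.com/v1/token``
+            for the default ``googleapis.com`` universe.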
+ """ + super(Credentials, self).__init__() + self._audience = audience + self._subject_token_type = subject_token_type + self._universe_domain = universe_domain + self._token_url = token_url + if self._token_url == _DEFAULT_TOKEN_URL: + self._token_url = self._token_url.replace( + "{universe_domain}", self._universe_domain + ) + self._token_info_url = token_info_url + self._credential_source = credential_source + self._service_account_impersonation_url = service_account_impersonation_url + self._service_account_impersonation_options = ( + service_account_impersonation_options or {} + ) + self._client_id = client_id + self._client_secret = client_secret + self._quota_project_id = quota_project_id + self._scopes = scopes + self._default_scopes = default_scopes + self._workforce_pool_user_project = workforce_pool_user_project + self._trust_boundary = { + "locations": [], + "encoded_locations": "0x0", + } # expose a placeholder trust boundary value. + + if self._client_id: + self._client_auth = utils.ClientAuthentication( + utils.ClientAuthType.basic, self._client_id, self._client_secret + ) + else: + self._client_auth = None + self._sts_client = sts.Client(self._token_url, self._client_auth) + + self._metrics_options = self._create_default_metrics_options() + + self._impersonated_credentials = None + self._project_id = None + self._supplier_context = SupplierContext( + self._subject_token_type, self._audience + ) + self._cred_file_path = None + + if not self.is_workforce_pool and self._workforce_pool_user_project: + # Workload identity pools do not support workforce pool user projects. + raise exceptions.InvalidValue( + "workforce_pool_user_project should not be set for non-workforce pool " + "credentials" + ) + + @property + def info(self): + """Generates the dictionary representation of the current credentials. + + Returns: + Mapping: The dictionary representation of the credentials. This is the + reverse of "from_info" defined on the subclasses of this class. It is + useful for serializing the current credentials so it can deserialized + later. + """ + config_info = self._constructor_args() + config_info.update( + type=_EXTERNAL_ACCOUNT_JSON_TYPE, + service_account_impersonation=config_info.pop( + "service_account_impersonation_options", None + ), + ) + config_info.pop("scopes", None) + config_info.pop("default_scopes", None) + return {key: value for key, value in config_info.items() if value is not None} + + def _constructor_args(self): + args = { + "audience": self._audience, + "subject_token_type": self._subject_token_type, + "token_url": self._token_url, + "token_info_url": self._token_info_url, + "service_account_impersonation_url": self._service_account_impersonation_url, + "service_account_impersonation_options": copy.deepcopy( + self._service_account_impersonation_options + ) + or None, + "credential_source": copy.deepcopy(self._credential_source), + "quota_project_id": self._quota_project_id, + "client_id": self._client_id, + "client_secret": self._client_secret, + "workforce_pool_user_project": self._workforce_pool_user_project, + "scopes": self._scopes, + "default_scopes": self._default_scopes, + "universe_domain": self._universe_domain, + } + if not self.is_workforce_pool: + args.pop("workforce_pool_user_project") + return args + + @property + def service_account_email(self): + """Returns the service account email if service account impersonation is used. + + Returns: + Optional[str]: The service account email if impersonation is used. Otherwise + None is returned. 
+ """ + if self._service_account_impersonation_url: + # Parse email from URL. The formal looks as follows: + # https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/name@project-id.iam.gserviceaccount.com:generateAccessToken + url = self._service_account_impersonation_url + start_index = url.rfind("/") + end_index = url.find(":generateAccessToken") + if start_index != -1 and end_index != -1 and start_index < end_index: + start_index = start_index + 1 + return url[start_index:end_index] + return None + + @property + def is_user(self): + """Returns whether the credentials represent a user (True) or workload (False). + Workloads behave similarly to service accounts. Currently workloads will use + service account impersonation but will eventually not require impersonation. + As a result, this property is more reliable than the service account email + property in determining if the credentials represent a user or workload. + + Returns: + bool: True if the credentials represent a user. False if they represent a + workload. + """ + # If service account impersonation is used, the credentials will always represent a + # service account. + if self._service_account_impersonation_url: + return False + return self.is_workforce_pool + + @property + def is_workforce_pool(self): + """Returns whether the credentials represent a workforce pool (True) or + workload (False) based on the credentials' audience. + + This will also return True for impersonated workforce pool credentials. + + Returns: + bool: True if the credentials represent a workforce pool. False if they + represent a workload. + """ + # Workforce pools representing users have the following audience format: + # //iam.googleapis.com/locations/$location/workforcePools/$poolId/providers/$providerId + p = re.compile(r"//iam\.googleapis\.com/locations/[^/]+/workforcePools/") + return p.match(self._audience or "") is not None + + @property + def requires_scopes(self): + """Checks if the credentials requires scopes. + + Returns: + bool: True if there are no scopes set otherwise False. + """ + return not self._scopes and not self._default_scopes + + @property + def project_number(self): + """Optional[str]: The project number corresponding to the workload identity pool.""" + + # STS audience pattern: + # //iam.googleapis.com/projects/$PROJECT_NUMBER/locations/... 
+ components = self._audience.split("/") + try: + project_index = components.index("projects") + if project_index + 1 < len(components): + return components[project_index + 1] or None + except ValueError: + return None + + @property + def token_info_url(self): + """Optional[str]: The STS token introspection endpoint.""" + + return self._token_info_url + + @_helpers.copy_docstring(credentials.Credentials) + def get_cred_info(self): + if self._cred_file_path: + cred_info_json = { + "credential_source": self._cred_file_path, + "credential_type": "external account credentials", + } + if self.service_account_email: + cred_info_json["principal"] = self.service_account_email + return cred_info_json + return None + + @_helpers.copy_docstring(credentials.Scoped) + def with_scopes(self, scopes, default_scopes=None): + kwargs = self._constructor_args() + kwargs.update(scopes=scopes, default_scopes=default_scopes) + scoped = self.__class__(**kwargs) + scoped._cred_file_path = self._cred_file_path + scoped._metrics_options = self._metrics_options + return scoped + + @abc.abstractmethod + def retrieve_subject_token(self, request): + """Retrieves the subject token using the credential_source object. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + Returns: + str: The retrieved subject token. + """ + # pylint: disable=missing-raises-doc + # (pylint doesn't recognize that this is abstract) + raise NotImplementedError("retrieve_subject_token must be implemented") + + def get_project_id(self, request): + """Retrieves the project ID corresponding to the workload identity or workforce pool. + For workforce pool credentials, it returns the project ID corresponding to + the workforce_pool_user_project. + + When not determinable, None is returned. + + This is introduced to support the current pattern of using the Auth library: + + credentials, project_id = google.auth.default() + + The resource may not have permission (resourcemanager.projects.get) to + call this API or the required scopes may not be selected: + https://cloud.google.com/resource-manager/reference/rest/v1/projects/get#authorization-scopes + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + Returns: + Optional[str]: The project ID corresponding to the workload identity pool + or workforce pool if determinable. + """ + if self._project_id: + # If already retrieved, return the cached project ID value. + return self._project_id + scopes = self._scopes if self._scopes is not None else self._default_scopes + # Scopes are required in order to retrieve a valid access token. + project_number = self.project_number or self._workforce_pool_user_project + if project_number and scopes: + headers = {} + url = _CLOUD_RESOURCE_MANAGER + project_number + self.before_request(request, "GET", url, headers) + response = request(url=url, method="GET", headers=headers) + + response_body = ( + response.data.decode("utf-8") + if hasattr(response.data, "decode") + else response.data + ) + response_data = json.loads(response_body) + + if response.status == 200: + # Cache result as this field is immutable. + self._project_id = response_data.get("projectId") + return self._project_id + + return None + + @_helpers.copy_docstring(credentials.Credentials) + def refresh(self, request): + scopes = self._scopes if self._scopes is not None else self._default_scopes + + # Inject client certificate into request. 
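+        # (Only credential types configured for mTLS, such as X509 workload
+        # identity federation, return True from _mtls_required below.)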
+ if self._mtls_required(): + request = functools.partial( + request, cert=self._get_mtls_cert_and_key_paths() + ) + + if self._should_initialize_impersonated_credentials(): + self._impersonated_credentials = self._initialize_impersonated_credentials() + + if self._impersonated_credentials: + self._impersonated_credentials.refresh(request) + self.token = self._impersonated_credentials.token + self.expiry = self._impersonated_credentials.expiry + else: + now = _helpers.utcnow() + additional_options = None + # Do not pass workforce_pool_user_project when client authentication + # is used. The client ID is sufficient for determining the user project. + if self._workforce_pool_user_project and not self._client_id: + additional_options = {"userProject": self._workforce_pool_user_project} + additional_headers = { + metrics.API_CLIENT_HEADER: metrics.byoid_metrics_header( + self._metrics_options + ) + } + response_data = self._sts_client.exchange_token( + request=request, + grant_type=_STS_GRANT_TYPE, + subject_token=self.retrieve_subject_token(request), + subject_token_type=self._subject_token_type, + audience=self._audience, + scopes=scopes, + requested_token_type=_STS_REQUESTED_TOKEN_TYPE, + additional_options=additional_options, + additional_headers=additional_headers, + ) + self.token = response_data.get("access_token") + expires_in = response_data.get("expires_in") + # Some services do not respect the OAUTH2.0 RFC and send expires_in as a + # JSON String. + if isinstance(expires_in, str): + expires_in = int(expires_in) + + lifetime = datetime.timedelta(seconds=expires_in) + + self.expiry = now + lifetime + + def _make_copy(self): + kwargs = self._constructor_args() + new_cred = self.__class__(**kwargs) + new_cred._cred_file_path = self._cred_file_path + new_cred._metrics_options = self._metrics_options + return new_cred + + @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + # Return copy of instance with the provided quota project ID. + cred = self._make_copy() + cred._quota_project_id = quota_project_id + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithTokenUri) + def with_token_uri(self, token_uri): + cred = self._make_copy() + cred._token_url = token_uri + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithUniverseDomain) + def with_universe_domain(self, universe_domain): + cred = self._make_copy() + cred._universe_domain = universe_domain + return cred + + def _should_initialize_impersonated_credentials(self): + return ( + self._service_account_impersonation_url is not None + and self._impersonated_credentials is None + ) + + def _initialize_impersonated_credentials(self): + """Generates an impersonated credentials. + + For more details, see `projects.serviceAccounts.generateAccessToken`_. + + .. _projects.serviceAccounts.generateAccessToken: https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/generateAccessToken + + Returns: + impersonated_credentials.Credential: The impersonated credentials + object. + + Raises: + google.auth.exceptions.RefreshError: If the generateAccessToken + endpoint returned an error. + """ + # Return copy of instance with no service account impersonation. 
+ kwargs = self._constructor_args() + kwargs.update( + service_account_impersonation_url=None, + service_account_impersonation_options={}, + ) + source_credentials = self.__class__(**kwargs) + source_credentials._metrics_options = self._metrics_options + + # Determine target_principal. + target_principal = self.service_account_email + if not target_principal: + raise exceptions.RefreshError( + "Unable to determine target principal from service account impersonation URL." + ) + + scopes = self._scopes if self._scopes is not None else self._default_scopes + # Initialize and return impersonated credentials. + return impersonated_credentials.Credentials( + source_credentials=source_credentials, + target_principal=target_principal, + target_scopes=scopes, + quota_project_id=self._quota_project_id, + iam_endpoint_override=self._service_account_impersonation_url, + lifetime=self._service_account_impersonation_options.get( + "token_lifetime_seconds" + ), + ) + + def _create_default_metrics_options(self): + metrics_options = {} + if self._service_account_impersonation_url: + metrics_options["sa-impersonation"] = "true" + else: + metrics_options["sa-impersonation"] = "false" + if self._service_account_impersonation_options.get("token_lifetime_seconds"): + metrics_options["config-lifetime"] = "true" + else: + metrics_options["config-lifetime"] = "false" + + return metrics_options + + def _mtls_required(self): + """Returns a boolean representing whether the current credential is configured + for mTLS and should add a certificate to the outgoing calls to the sts and service + account impersonation endpoint. + + Returns: + bool: True if the credential is configured for mTLS, False if it is not. + """ + return False + + def _get_mtls_cert_and_key_paths(self): + """Gets the file locations for a certificate and private key file + to be used for configuring mTLS for the sts and service account + impersonation calls. Currently only expected to return a value when using + X509 workload identity federation. + + Returns: + Tuple[str, str]: The cert and key file locations as strings in a tuple. + + Raises: + NotImplementedError: When the current credential is not configured for + mTLS. + """ + raise NotImplementedError( + "_get_mtls_cert_and_key_location must be implemented." + ) + + @classmethod + def from_info(cls, info, **kwargs): + """Creates a Credentials instance from parsed external account info. + + Args: + info (Mapping[str, str]): The external account info in Google + format. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.identity_pool.Credentials: The constructed + credentials. + + Raises: + InvalidValue: For invalid parameters. 
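+
+            The mapping is typically parsed from an external account JSON file;
+            a minimal sketch of the expected keys (all values are placeholders)::
+
+                {
+                    "type": "external_account",
+                    "audience": "//iam.googleapis.com/projects/.../providers/...",
+                    "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
+                    "token_url": "https://sts.googleapis.com/v1/token",
+                    "credential_source": {"file": "/path/to/token.txt"}
+                }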
+ """ + return cls( + audience=info.get("audience"), + subject_token_type=info.get("subject_token_type"), + token_url=info.get("token_url"), + token_info_url=info.get("token_info_url"), + service_account_impersonation_url=info.get( + "service_account_impersonation_url" + ), + service_account_impersonation_options=info.get( + "service_account_impersonation" + ) + or {}, + client_id=info.get("client_id"), + client_secret=info.get("client_secret"), + credential_source=info.get("credential_source"), + quota_project_id=info.get("quota_project_id"), + workforce_pool_user_project=info.get("workforce_pool_user_project"), + universe_domain=info.get( + "universe_domain", credentials.DEFAULT_UNIVERSE_DOMAIN + ), + **kwargs + ) + + @classmethod + def from_file(cls, filename, **kwargs): + """Creates a Credentials instance from an external account json file. + + Args: + filename (str): The path to the external account json file. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.identity_pool.Credentials: The constructed + credentials. + """ + with io.open(filename, "r", encoding="utf-8") as json_file: + data = json.load(json_file) + return cls.from_info(data, **kwargs) diff --git a/.venv/lib/python3.12/site-packages/google/auth/external_account_authorized_user.py b/.venv/lib/python3.12/site-packages/google/auth/external_account_authorized_user.py new file mode 100644 index 00000000..4d0c3c68 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/external_account_authorized_user.py @@ -0,0 +1,380 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""External Account Authorized User Credentials. +This module provides credentials based on OAuth 2.0 access and refresh tokens. +These credentials usually access resources on behalf of a user (resource +owner). + +Specifically, these are sourced using external identities via Workforce Identity Federation. + +Obtaining the initial access and refresh token can be done through the Google Cloud CLI. + +Example credential: +{ + "type": "external_account_authorized_user", + "audience": "//iam.googleapis.com/locations/global/workforcePools/$WORKFORCE_POOL_ID/providers/$PROVIDER_ID", + "refresh_token": "refreshToken", + "token_url": "https://sts.googleapis.com/v1/oauth/token", + "token_info_url": "https://sts.googleapis.com/v1/instrospect", + "client_id": "clientId", + "client_secret": "clientSecret" +} +""" + +import datetime +import io +import json + +from google.auth import _helpers +from google.auth import credentials +from google.auth import exceptions +from google.oauth2 import sts +from google.oauth2 import utils + +_EXTERNAL_ACCOUNT_AUTHORIZED_USER_JSON_TYPE = "external_account_authorized_user" + + +class Credentials( + credentials.CredentialsWithQuotaProject, + credentials.ReadOnlyScoped, + credentials.CredentialsWithTokenUri, +): + """Credentials for External Account Authorized Users. 
+
+    This is used to instantiate Credentials for exchanging refresh tokens from
+    authorized users for Google access tokens and authorizing requests to Google
+    APIs.
+
+    The credentials are considered immutable. If you want to modify the
+    quota project, use `with_quota_project` and if you want to modify the token
+    uri, use `with_token_uri`.
+    """
+
+    def __init__(
+        self,
+        token=None,
+        expiry=None,
+        refresh_token=None,
+        audience=None,
+        client_id=None,
+        client_secret=None,
+        token_url=None,
+        token_info_url=None,
+        revoke_url=None,
+        scopes=None,
+        quota_project_id=None,
+        universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN,
+    ):
+        """Instantiates an external account authorized user credentials object.
+
+        Args:
+            token (str): The OAuth 2.0 access token. Can be None if refresh information
+                is provided.
+            expiry (datetime.datetime): The optional expiration datetime of the OAuth 2.0 access
+                token.
+            refresh_token (str): The optional OAuth 2.0 refresh token. If specified,
+                credentials can be refreshed.
+            audience (str): The optional STS audience which contains the resource name for the workforce
+                pool and the provider identifier in that pool.
+            client_id (str): The OAuth 2.0 client ID. Must be specified for refresh; can be left as
+                None if the token can not be refreshed.
+            client_secret (str): The OAuth 2.0 client secret. Must be specified for refresh; can be
+                left as None if the token can not be refreshed.
+            token_url (str): The optional STS token exchange endpoint for refresh. Must be specified for
+                refresh; can be left as None if the token can not be refreshed.
+            token_info_url (str): The optional STS endpoint URL for token introspection.
+            revoke_url (str): The optional STS endpoint URL for revoking tokens.
+            scopes (Sequence[str]): The optional OAuth 2.0 permission scopes.
+            quota_project_id (str): The optional project ID used for quota and billing.
+                This project may be different from the project used to
+                create the credentials.
+            universe_domain (Optional[str]): The universe domain. The default value
+                is googleapis.com.
+
+        Returns:
+            google.auth.external_account_authorized_user.Credentials: The
+            constructed credentials.
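+
+        Example (a sketch; all values are placeholders, mirroring the example
+        credential in the module docstring)::
+
+            credentials = Credentials(
+                audience="//iam.googleapis.com/locations/global/workforcePools/pool-id/providers/provider-id",
+                refresh_token="refreshToken",
+                token_url="https://sts.googleapis.com/v1/oauth/token",
+                client_id="clientId",
+                client_secret="clientSecret",
+            )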
+ """ + config_info = self.constructor_args() + config_info.update(type=_EXTERNAL_ACCOUNT_AUTHORIZED_USER_JSON_TYPE) + if config_info["expiry"]: + config_info["expiry"] = config_info["expiry"].isoformat() + "Z" + + return {key: value for key, value in config_info.items() if value is not None} + + def constructor_args(self): + return { + "audience": self._audience, + "refresh_token": self._refresh_token, + "token_url": self._token_url, + "token_info_url": self._token_info_url, + "client_id": self._client_id, + "client_secret": self._client_secret, + "token": self.token, + "expiry": self.expiry, + "revoke_url": self._revoke_url, + "scopes": self._scopes, + "quota_project_id": self._quota_project_id, + "universe_domain": self._universe_domain, + } + + @property + def scopes(self): + """Optional[str]: The OAuth 2.0 permission scopes.""" + return self._scopes + + @property + def requires_scopes(self): + """ False: OAuth 2.0 credentials have their scopes set when + the initial token is requested and can not be changed.""" + return False + + @property + def client_id(self): + """Optional[str]: The OAuth 2.0 client ID.""" + return self._client_id + + @property + def client_secret(self): + """Optional[str]: The OAuth 2.0 client secret.""" + return self._client_secret + + @property + def audience(self): + """Optional[str]: The STS audience which contains the resource name for the + workforce pool and the provider identifier in that pool.""" + return self._audience + + @property + def refresh_token(self): + """Optional[str]: The OAuth 2.0 refresh token.""" + return self._refresh_token + + @property + def token_url(self): + """Optional[str]: The STS token exchange endpoint for refresh.""" + return self._token_url + + @property + def token_info_url(self): + """Optional[str]: The STS endpoint for token info.""" + return self._token_info_url + + @property + def revoke_url(self): + """Optional[str]: The STS endpoint for token revocation.""" + return self._revoke_url + + @property + def is_user(self): + """ True: This credential always represents a user.""" + return True + + @property + def can_refresh(self): + return all( + (self._refresh_token, self._token_url, self._client_id, self._client_secret) + ) + + def get_project_id(self, request=None): + """Retrieves the project ID corresponding to the workload identity or workforce pool. + For workforce pool credentials, it returns the project ID corresponding to + the workforce_pool_user_project. + + When not determinable, None is returned. + + Args: + request (google.auth.transport.requests.Request): Request object. + Unused here, but passed from _default.default(). + + Return: + str: project ID is not determinable for this credential type so it returns None + """ + + return None + + def to_json(self, strip=None): + """Utility function that creates a JSON representation of this + credential. + Args: + strip (Sequence[str]): Optional list of members to exclude from the + generated JSON. + Returns: + str: A JSON representation of this instance. When converted into + a dictionary, it can be passed to from_info() + to create a new instance. + """ + strip = strip if strip else [] + return json.dumps({k: v for (k, v) in self.info.items() if k not in strip}) + + def refresh(self, request): + """Refreshes the access token. + + Args: + request (google.auth.transport.Request): The object used to make + HTTP requests. + + Raises: + google.auth.exceptions.RefreshError: If the credentials could + not be refreshed. 
+ """ + if not self.can_refresh: + raise exceptions.RefreshError( + "The credentials do not contain the necessary fields need to " + "refresh the access token. You must specify refresh_token, " + "token_url, client_id, and client_secret." + ) + + now = _helpers.utcnow() + response_data = self._make_sts_request(request) + + self.token = response_data.get("access_token") + + lifetime = datetime.timedelta(seconds=response_data.get("expires_in")) + self.expiry = now + lifetime + + if "refresh_token" in response_data: + self._refresh_token = response_data["refresh_token"] + + def _make_sts_request(self, request): + return self._sts_client.refresh_token(request, self._refresh_token) + + @_helpers.copy_docstring(credentials.Credentials) + def get_cred_info(self): + if self._cred_file_path: + return { + "credential_source": self._cred_file_path, + "credential_type": "external account authorized user credentials", + } + return None + + def _make_copy(self): + kwargs = self.constructor_args() + cred = self.__class__(**kwargs) + cred._cred_file_path = self._cred_file_path + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + cred = self._make_copy() + cred._quota_project_id = quota_project_id + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithTokenUri) + def with_token_uri(self, token_uri): + cred = self._make_copy() + cred._token_url = token_uri + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithUniverseDomain) + def with_universe_domain(self, universe_domain): + cred = self._make_copy() + cred._universe_domain = universe_domain + return cred + + @classmethod + def from_info(cls, info, **kwargs): + """Creates a Credentials instance from parsed external account info. + + Args: + info (Mapping[str, str]): The external account info in Google + format. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.external_account_authorized_user.Credentials: The + constructed credentials. + + Raises: + ValueError: For invalid parameters. + """ + expiry = info.get("expiry") + if expiry: + expiry = datetime.datetime.strptime( + expiry.rstrip("Z").split(".")[0], "%Y-%m-%dT%H:%M:%S" + ) + return cls( + audience=info.get("audience"), + refresh_token=info.get("refresh_token"), + token_url=info.get("token_url"), + token_info_url=info.get("token_info_url"), + client_id=info.get("client_id"), + client_secret=info.get("client_secret"), + token=info.get("token"), + expiry=expiry, + revoke_url=info.get("revoke_url"), + quota_project_id=info.get("quota_project_id"), + scopes=info.get("scopes"), + universe_domain=info.get( + "universe_domain", credentials.DEFAULT_UNIVERSE_DOMAIN + ), + **kwargs + ) + + @classmethod + def from_file(cls, filename, **kwargs): + """Creates a Credentials instance from an external account json file. + + Args: + filename (str): The path to the external account json file. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.external_account_authorized_user.Credentials: The + constructed credentials. 
+ """ + with io.open(filename, "r", encoding="utf-8") as json_file: + data = json.load(json_file) + return cls.from_info(data, **kwargs) diff --git a/.venv/lib/python3.12/site-packages/google/auth/iam.py b/.venv/lib/python3.12/site-packages/google/auth/iam.py new file mode 100644 index 00000000..1e4cdffe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/iam.py @@ -0,0 +1,136 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for using the Google `Cloud Identity and Access Management (IAM) +API`_'s auth-related functionality. + +.. _Cloud Identity and Access Management (IAM) API: + https://cloud.google.com/iam/docs/ +""" + +import base64 +import http.client as http_client +import json + +from google.auth import _exponential_backoff +from google.auth import _helpers +from google.auth import credentials +from google.auth import crypt +from google.auth import exceptions + +IAM_RETRY_CODES = { + http_client.INTERNAL_SERVER_ERROR, + http_client.BAD_GATEWAY, + http_client.SERVICE_UNAVAILABLE, + http_client.GATEWAY_TIMEOUT, +} + +_IAM_SCOPE = ["https://www.googleapis.com/auth/iam"] + +_IAM_ENDPOINT = ( + "https://iamcredentials.googleapis.com/v1/projects/-" + + "/serviceAccounts/{}:generateAccessToken" +) + +_IAM_SIGN_ENDPOINT = ( + "https://iamcredentials.googleapis.com/v1/projects/-" + + "/serviceAccounts/{}:signBlob" +) + +_IAM_SIGNJWT_ENDPOINT = ( + "https://iamcredentials.googleapis.com/v1/projects/-" + + "/serviceAccounts/{}:signJwt" +) + +_IAM_IDTOKEN_ENDPOINT = ( + "https://iamcredentials.googleapis.com/v1/" + + "projects/-/serviceAccounts/{}:generateIdToken" +) + + +class Signer(crypt.Signer): + """Signs messages using the IAM `signBlob API`_. + + This is useful when you need to sign bytes but do not have access to the + credential's private key file. + + .. _signBlob API: + https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts + /signBlob + """ + + def __init__(self, request, credentials, service_account_email): + """ + Args: + request (google.auth.transport.Request): The object used to make + HTTP requests. + credentials (google.auth.credentials.Credentials): The credentials + that will be used to authenticate the request to the IAM API. + The credentials must have of one the following scopes: + + - https://www.googleapis.com/auth/iam + - https://www.googleapis.com/auth/cloud-platform + service_account_email (str): The service account email identifying + which service account to use to sign bytes. Often, this can + be the same as the service account email in the given + credentials. 
+ """ + self._request = request + self._credentials = credentials + self._service_account_email = service_account_email + + def _make_signing_request(self, message): + """Makes a request to the API signBlob API.""" + message = _helpers.to_bytes(message) + + method = "POST" + url = _IAM_SIGN_ENDPOINT.replace( + credentials.DEFAULT_UNIVERSE_DOMAIN, self._credentials.universe_domain + ).format(self._service_account_email) + headers = {"Content-Type": "application/json"} + body = json.dumps( + {"payload": base64.b64encode(message).decode("utf-8")} + ).encode("utf-8") + + retries = _exponential_backoff.ExponentialBackoff() + for _ in retries: + self._credentials.before_request(self._request, method, url, headers) + + response = self._request(url=url, method=method, body=body, headers=headers) + + if response.status in IAM_RETRY_CODES: + continue + + if response.status != http_client.OK: + raise exceptions.TransportError( + "Error calling the IAM signBlob API: {}".format(response.data) + ) + + return json.loads(response.data.decode("utf-8")) + raise exceptions.TransportError("exhausted signBlob endpoint retries") + + @property + def key_id(self): + """Optional[str]: The key ID used to identify this private key. + + .. warning:: + This is always ``None``. The key ID used by IAM can not + be reliably determined ahead of time. + """ + return None + + @_helpers.copy_docstring(crypt.Signer) + def sign(self, message): + response = self._make_signing_request(message) + return base64.b64decode(response["signedBlob"]) diff --git a/.venv/lib/python3.12/site-packages/google/auth/identity_pool.py b/.venv/lib/python3.12/site-packages/google/auth/identity_pool.py new file mode 100644 index 00000000..47f9a557 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/identity_pool.py @@ -0,0 +1,439 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Identity Pool Credentials. + +This module provides credentials to access Google Cloud resources from on-prem +or non-Google Cloud platforms which support external credentials (e.g. OIDC ID +tokens) retrieved from local file locations or local servers. This includes +Microsoft Azure and OIDC identity providers (e.g. K8s workloads registered with +Hub with Hub workload identity enabled). + +These credentials are recommended over the use of service account credentials +in on-prem/non-Google Cloud platforms as they do not involve the management of +long-live service account private keys. + +Identity Pool Credentials are initialized using external_account +arguments which are typically loaded from an external credentials file or +an external credentials URL. + +This module also provides a definition for an abstract subject token supplier. +This supplier can be implemented to return a valid OIDC or SAML2.0 subject token +and used to create Identity Pool credentials. The credentials will then call the +supplier instead of using pre-defined methods such as reading a local file or +calling a URL. 
+""" + +try: + from collections.abc import Mapping +# Python 2.7 compatibility +except ImportError: # pragma: NO COVER + from collections import Mapping # type: ignore +import abc +import json +import os +from typing import NamedTuple + +from google.auth import _helpers +from google.auth import exceptions +from google.auth import external_account +from google.auth.transport import _mtls_helper + + +class SubjectTokenSupplier(metaclass=abc.ABCMeta): + """Base class for subject token suppliers. This can be implemented with custom logic to retrieve + a subject token to exchange for a Google Cloud access token when using Workload or + Workforce Identity Federation. The identity pool credential does not cache the subject token, + so caching logic should be added in the implementation. + """ + + @abc.abstractmethod + def get_subject_token(self, context, request): + """Returns the requested subject token. The subject token must be valid. + + .. warning: This is not cached by the calling Google credential, so caching logic should be implemented in the supplier. + + Args: + context (google.auth.externalaccount.SupplierContext): The context object + containing information about the requested audience and subject token type. + request (google.auth.transport.Request): The object used to make + HTTP requests. + + Raises: + google.auth.exceptions.RefreshError: If an error is encountered during + subject token retrieval logic. + + Returns: + str: The requested subject token string. + """ + raise NotImplementedError("") + + +class _TokenContent(NamedTuple): + """Models the token content response from file and url internal suppliers. + Attributes: + content (str): The string content of the file or URL response. + location (str): The location the content was retrieved from. This will either be a file location or a URL. 
+ """ + + content: str + location: str + + +class _FileSupplier(SubjectTokenSupplier): + """ Internal implementation of subject token supplier which supports reading a subject token from a file.""" + + def __init__(self, path, format_type, subject_token_field_name): + self._path = path + self._format_type = format_type + self._subject_token_field_name = subject_token_field_name + + @_helpers.copy_docstring(SubjectTokenSupplier) + def get_subject_token(self, context, request): + if not os.path.exists(self._path): + raise exceptions.RefreshError("File '{}' was not found.".format(self._path)) + + with open(self._path, "r", encoding="utf-8") as file_obj: + token_content = _TokenContent(file_obj.read(), self._path) + + return _parse_token_data( + token_content, self._format_type, self._subject_token_field_name + ) + + +class _UrlSupplier(SubjectTokenSupplier): + """ Internal implementation of subject token supplier which supports retrieving a subject token by calling a URL endpoint.""" + + def __init__(self, url, format_type, subject_token_field_name, headers): + self._url = url + self._format_type = format_type + self._subject_token_field_name = subject_token_field_name + self._headers = headers + + @_helpers.copy_docstring(SubjectTokenSupplier) + def get_subject_token(self, context, request): + response = request(url=self._url, method="GET", headers=self._headers) + + # support both string and bytes type response.data + response_body = ( + response.data.decode("utf-8") + if hasattr(response.data, "decode") + else response.data + ) + + if response.status != 200: + raise exceptions.RefreshError( + "Unable to retrieve Identity Pool subject token", response_body + ) + token_content = _TokenContent(response_body, self._url) + return _parse_token_data( + token_content, self._format_type, self._subject_token_field_name + ) + + +class _X509Supplier(SubjectTokenSupplier): + """Internal supplier for X509 workload credentials. This class is used internally and always returns an empty string as the subject token.""" + + @_helpers.copy_docstring(SubjectTokenSupplier) + def get_subject_token(self, context, request): + return "" + + +def _parse_token_data(token_content, format_type="text", subject_token_field_name=None): + if format_type == "text": + token = token_content.content + else: + try: + # Parse file content as JSON. + response_data = json.loads(token_content.content) + # Get the subject_token. + token = response_data[subject_token_field_name] + except (KeyError, ValueError): + raise exceptions.RefreshError( + "Unable to parse subject_token from JSON file '{}' using key '{}'".format( + token_content.location, subject_token_field_name + ) + ) + if not token: + raise exceptions.RefreshError( + "Missing subject_token in the credential_source file" + ) + return token + + +class Credentials(external_account.Credentials): + """External account credentials sourced from files and URLs.""" + + def __init__( + self, + audience, + subject_token_type, + token_url=external_account._DEFAULT_TOKEN_URL, + credential_source=None, + subject_token_supplier=None, + *args, + **kwargs + ): + """Instantiates an external account credentials object from a file/URL. + + Args: + audience (str): The STS audience field. + subject_token_type (str): The subject token type based on the Oauth2.0 token exchange spec. + Expected values include:: + + “urn:ietf:params:oauth:token-type:jwt” + “urn:ietf:params:oauth:token-type:id-token” + “urn:ietf:params:oauth:token-type:saml2” + + token_url (Optional [str]): The STS endpoint URL. 
If not provided, will default to "https://sts.googleapis.com/v1/token". + credential_source (Optional [Mapping]): The credential source dictionary used to + provide instructions on how to retrieve external credential to be + exchanged for Google access tokens. Either a credential source or + a subject token supplier must be provided. + + Example credential_source for url-sourced credential:: + + { + "url": "http://www.example.com", + "format": { + "type": "json", + "subject_token_field_name": "access_token", + }, + "headers": {"foo": "bar"}, + } + + Example credential_source for file-sourced credential:: + + { + "file": "/path/to/token/file.txt" + } + subject_token_supplier (Optional [SubjectTokenSupplier]): Optional subject token supplier. + This will be called to supply a valid subject token which will then + be exchanged for Google access tokens. Either a subject token supplier + or a credential source must be provided. + args (List): Optional positional arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method. + kwargs (Mapping): Optional keyword arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method. + + Raises: + google.auth.exceptions.RefreshError: If an error is encountered during + access token retrieval logic. + ValueError: For invalid parameters. + + .. note:: Typically one of the helper constructors + :meth:`from_file` or + :meth:`from_info` are used instead of calling the constructor directly. + """ + + super(Credentials, self).__init__( + audience=audience, + subject_token_type=subject_token_type, + token_url=token_url, + credential_source=credential_source, + *args, + **kwargs + ) + if credential_source is None and subject_token_supplier is None: + raise exceptions.InvalidValue( + "A valid credential source or a subject token supplier must be provided." + ) + if credential_source is not None and subject_token_supplier is not None: + raise exceptions.InvalidValue( + "Identity pool credential cannot have both a credential source and a subject token supplier." + ) + + if subject_token_supplier is not None: + self._subject_token_supplier = subject_token_supplier + self._credential_source_file = None + self._credential_source_url = None + self._credential_source_certificate = None + else: + if not isinstance(credential_source, Mapping): + self._credential_source_executable = None + raise exceptions.MalformedError( + "Invalid credential_source. The credential_source is not a dict." + ) + self._credential_source_file = credential_source.get("file") + self._credential_source_url = credential_source.get("url") + self._credential_source_certificate = credential_source.get("certificate") + + # environment_id is only supported in AWS or dedicated future external + # account credentials. + if "environment_id" in credential_source: + raise exceptions.MalformedError( + "Invalid Identity Pool credential_source field 'environment_id'" + ) + + # check that only one of file, url, or certificate are provided. 
+ self._validate_single_source() + + if self._credential_source_certificate: + self._validate_certificate_config() + else: + self._validate_file_or_url_config(credential_source) + + if self._credential_source_file: + self._subject_token_supplier = _FileSupplier( + self._credential_source_file, + self._credential_source_format_type, + self._credential_source_field_name, + ) + elif self._credential_source_url: + self._subject_token_supplier = _UrlSupplier( + self._credential_source_url, + self._credential_source_format_type, + self._credential_source_field_name, + self._credential_source_headers, + ) + else: # self._credential_source_certificate + self._subject_token_supplier = _X509Supplier() + + @_helpers.copy_docstring(external_account.Credentials) + def retrieve_subject_token(self, request): + return self._subject_token_supplier.get_subject_token( + self._supplier_context, request + ) + + def _get_mtls_cert_and_key_paths(self): + if self._credential_source_certificate is None: + raise exceptions.RefreshError( + 'The credential is not configured to use mtls requests. The credential should include a "certificate" section in the credential source.' + ) + else: + return _mtls_helper._get_workload_cert_and_key_paths( + self._certificate_config_location + ) + + def _mtls_required(self): + return self._credential_source_certificate is not None + + def _create_default_metrics_options(self): + metrics_options = super(Credentials, self)._create_default_metrics_options() + # Check that credential source is a dict before checking for credential type. This check needs to be done + # here because the external_account credential constructor needs to pass the metrics options to the + # impersonated credential object before the identity_pool credentials are validated. + if isinstance(self._credential_source, Mapping): + if self._credential_source.get("file"): + metrics_options["source"] = "file" + elif self._credential_source.get("url"): + metrics_options["source"] = "url" + else: + metrics_options["source"] = "x509" + else: + metrics_options["source"] = "programmatic" + return metrics_options + + def _has_custom_supplier(self): + return self._credential_source is None + + def _constructor_args(self): + args = super(Credentials, self)._constructor_args() + # If a custom supplier was used, append it to the args dict. + if self._has_custom_supplier(): + args.update({"subject_token_supplier": self._subject_token_supplier}) + return args + + def _validate_certificate_config(self): + self._certificate_config_location = self._credential_source_certificate.get( + "certificate_config_location" + ) + use_default = self._credential_source_certificate.get( + "use_default_certificate_config" + ) + if self._certificate_config_location and use_default: + raise exceptions.MalformedError( + "Invalid certificate configuration, certificate_config_location cannot be specified when use_default_certificate_config = true." + ) + if not self._certificate_config_location and not use_default: + raise exceptions.MalformedError( + "Invalid certificate configuration, use_default_certificate_config should be true if no certificate_config_location is provided." + ) + + def _validate_file_or_url_config(self, credential_source): + self._credential_source_headers = credential_source.get("headers") + credential_source_format = credential_source.get("format", {}) + # Get credential_source format type. When not provided, this + # defaults to text. 
+ self._credential_source_format_type = ( + credential_source_format.get("type") or "text" + ) + if self._credential_source_format_type not in ["text", "json"]: + raise exceptions.MalformedError( + "Invalid credential_source format '{}'".format( + self._credential_source_format_type + ) + ) + # For JSON types, get the required subject_token field name. + if self._credential_source_format_type == "json": + self._credential_source_field_name = credential_source_format.get( + "subject_token_field_name" + ) + if self._credential_source_field_name is None: + raise exceptions.MalformedError( + "Missing subject_token_field_name for JSON credential_source format" + ) + else: + self._credential_source_field_name = None + + def _validate_single_source(self): + credential_sources = [ + self._credential_source_file, + self._credential_source_url, + self._credential_source_certificate, + ] + valid_credential_sources = list( + filter(lambda source: source is not None, credential_sources) + ) + + if len(valid_credential_sources) > 1: + raise exceptions.MalformedError( + "Ambiguous credential_source. 'file', 'url', and 'certificate' are mutually exclusive.." + ) + if len(valid_credential_sources) != 1: + raise exceptions.MalformedError( + "Missing credential_source. A 'file', 'url', or 'certificate' must be provided." + ) + + @classmethod + def from_info(cls, info, **kwargs): + """Creates an Identity Pool Credentials instance from parsed external account info. + + Args: + info (Mapping[str, str]): The Identity Pool external account info in Google + format. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.identity_pool.Credentials: The constructed + credentials. + + Raises: + ValueError: For invalid parameters. + """ + subject_token_supplier = info.get("subject_token_supplier") + kwargs.update({"subject_token_supplier": subject_token_supplier}) + return super(Credentials, cls).from_info(info, **kwargs) + + @classmethod + def from_file(cls, filename, **kwargs): + """Creates an IdentityPool Credentials instance from an external account json file. + + Args: + filename (str): The path to the IdentityPool external account json file. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.identity_pool.Credentials: The constructed + credentials. + """ + return super(Credentials, cls).from_file(filename, **kwargs) diff --git a/.venv/lib/python3.12/site-packages/google/auth/impersonated_credentials.py b/.venv/lib/python3.12/site-packages/google/auth/impersonated_credentials.py new file mode 100644 index 00000000..ed7e3f00 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/impersonated_credentials.py @@ -0,0 +1,579 @@ +# Copyright 2018 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Impersonated credentials. + +This module provides authentication for applications where local credentials +impersonates a remote service account using `IAM Credentials API`_. 
+ +This class can be used to impersonate a service account as long as the original +Credential object has the "Service Account Token Creator" role on the target +service account. + + .. _IAM Credentials API: + https://cloud.google.com/iam/credentials/reference/rest/ +""" + +import base64 +import copy +from datetime import datetime +import http.client as http_client +import json + +from google.auth import _exponential_backoff +from google.auth import _helpers +from google.auth import credentials +from google.auth import exceptions +from google.auth import iam +from google.auth import jwt +from google.auth import metrics +from google.oauth2 import _client + + +_REFRESH_ERROR = "Unable to acquire impersonated credentials" + +_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds + +_GOOGLE_OAUTH2_TOKEN_ENDPOINT = "https://oauth2.googleapis.com/token" + + +def _make_iam_token_request( + request, + principal, + headers, + body, + universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN, + iam_endpoint_override=None, +): + """Makes a request to the Google Cloud IAM service for an access token. + Args: + request (Request): The Request object to use. + principal (str): The principal to request an access token for. + headers (Mapping[str, str]): Map of headers to transmit. + body (Mapping[str, str]): JSON Payload body for the iamcredentials + API call. + iam_endpoint_override (Optiona[str]): The full IAM endpoint override + with the target_principal embedded. This is useful when supporting + impersonation with regional endpoints. + + Raises: + google.auth.exceptions.TransportError: Raised if there is an underlying + HTTP connection error + google.auth.exceptions.RefreshError: Raised if the impersonated + credentials are not available. Common reasons are + `iamcredentials.googleapis.com` is not enabled or the + `Service Account Token Creator` is not assigned + """ + iam_endpoint = iam_endpoint_override or iam._IAM_ENDPOINT.replace( + credentials.DEFAULT_UNIVERSE_DOMAIN, universe_domain + ).format(principal) + + body = json.dumps(body).encode("utf-8") + + response = request(url=iam_endpoint, method="POST", headers=headers, body=body) + + # support both string and bytes type response.data + response_body = ( + response.data.decode("utf-8") + if hasattr(response.data, "decode") + else response.data + ) + + if response.status != http_client.OK: + raise exceptions.RefreshError(_REFRESH_ERROR, response_body) + + try: + token_response = json.loads(response_body) + token = token_response["accessToken"] + expiry = datetime.strptime(token_response["expireTime"], "%Y-%m-%dT%H:%M:%SZ") + + return token, expiry + + except (KeyError, ValueError) as caught_exc: + new_exc = exceptions.RefreshError( + "{}: No access token or invalid expiration in response.".format( + _REFRESH_ERROR + ), + response_body, + ) + raise new_exc from caught_exc + + +class Credentials( + credentials.Scoped, credentials.CredentialsWithQuotaProject, credentials.Signing +): + """This module defines impersonated credentials which are essentially + impersonated identities. + + Impersonated Credentials allows credentials issued to a user or + service account to impersonate another. The target service account must + grant the originating credential principal the + `Service Account Token Creator`_ IAM role: + + For more information about Token Creator IAM role and + IAMCredentials API, see + `Creating Short-Lived Service Account Credentials`_. + + .. 
_Service Account Token Creator:
+ https://cloud.google.com/iam/docs/service-accounts#the_service_account_token_creator_role
+
+ .. _Creating Short-Lived Service Account Credentials:
+ https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials
+
+ Usage:
+
+ First grant source_credentials the `Service Account Token Creator`
+ role on the target account to impersonate. In this example, the
+ service account represented by svc_account.json has the
+ token creator role on
+ `impersonated-account@_project_.iam.gserviceaccount.com`.
+
+ Enable the IAMCredentials API on the source project:
+ `gcloud services enable iamcredentials.googleapis.com`.
+
+ Initialize a source credential which does not have access to
+ list buckets::
+
+ from google.oauth2 import service_account
+
+ target_scopes = [
+ 'https://www.googleapis.com/auth/devstorage.read_only']
+
+ source_credentials = (
+ service_account.Credentials.from_service_account_file(
+ '/path/to/svc_account.json',
+ scopes=target_scopes))
+
+ Now use the source credentials to acquire credentials to impersonate
+ another service account::
+
+ from google.auth import impersonated_credentials
+
+ target_credentials = impersonated_credentials.Credentials(
+ source_credentials=source_credentials,
+ target_principal='impersonated-account@_project_.iam.gserviceaccount.com',
+ target_scopes = target_scopes,
+ lifetime=500)
+
+ Resource access is granted::
+
+ client = storage.Client(credentials=target_credentials)
+ buckets = client.list_buckets(project='your_project')
+ for bucket in buckets:
+ print(bucket.name)
+ """
+
+ def __init__(
+ self,
+ source_credentials,
+ target_principal,
+ target_scopes,
+ delegates=None,
+ subject=None,
+ lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,
+ quota_project_id=None,
+ iam_endpoint_override=None,
+ ):
+ """
+ Args:
+ source_credentials (google.auth.Credentials): The source credential
+ used to acquire the impersonated credentials.
+ target_principal (str): The service account to impersonate.
+ target_scopes (Sequence[str]): Scopes to request during the
+ authorization grant.
+ delegates (Sequence[str]): The chained list of delegates required
+ to grant the final access_token. If set, the sequence of
+ identities must have "Service Account Token Creator" capability
+ granted to the preceding identity. For example, if set to
+ [serviceAccountB, serviceAccountC], the source_credential
+ must have the Token Creator role on serviceAccountB.
+ serviceAccountB must have the Token Creator on
+ serviceAccountC.
+ Finally, C must have Token Creator on target_principal.
+ If left unset, source_credential must have that role on
+ target_principal.
+ lifetime (int): Number of seconds the delegated credential should
+ be valid for (up to 3600).
+ quota_project_id (Optional[str]): The project ID used for quota and billing.
+ This project may be different from the project used to
+ create the credentials.
+ iam_endpoint_override (Optional[str]): The full IAM endpoint override
+ with the target_principal embedded. This is useful when supporting
+ impersonation with regional endpoints.
+ subject (Optional[str]): sub field of a JWT. This field should only be set
+ if you wish to impersonate as a user. This feature is useful when
+ using domain wide delegation.
+ """
+
+ super(Credentials, self).__init__()
+
+ self._source_credentials = copy.copy(source_credentials)
+ # Service account source credentials must have the _IAM_SCOPE
+ # added to refresh correctly.
User credentials cannot have + # their original scopes modified. + if isinstance(self._source_credentials, credentials.Scoped): + self._source_credentials = self._source_credentials.with_scopes( + iam._IAM_SCOPE + ) + # If the source credential is service account and self signed jwt + # is needed, we need to create a jwt credential inside it + if ( + hasattr(self._source_credentials, "_create_self_signed_jwt") + and self._source_credentials._always_use_jwt_access + ): + self._source_credentials._create_self_signed_jwt(None) + + self._universe_domain = source_credentials.universe_domain + self._target_principal = target_principal + self._target_scopes = target_scopes + self._delegates = delegates + self._subject = subject + self._lifetime = lifetime or _DEFAULT_TOKEN_LIFETIME_SECS + self.token = None + self.expiry = _helpers.utcnow() + self._quota_project_id = quota_project_id + self._iam_endpoint_override = iam_endpoint_override + self._cred_file_path = None + + def _metric_header_for_usage(self): + return metrics.CRED_TYPE_SA_IMPERSONATE + + @_helpers.copy_docstring(credentials.Credentials) + def refresh(self, request): + self._update_token(request) + + def _update_token(self, request): + """Updates credentials with a new access_token representing + the impersonated account. + + Args: + request (google.auth.transport.requests.Request): Request object + to use for refreshing credentials. + """ + + # Refresh our source credentials if it is not valid. + if ( + self._source_credentials.token_state == credentials.TokenState.STALE + or self._source_credentials.token_state == credentials.TokenState.INVALID + ): + self._source_credentials.refresh(request) + + body = { + "delegates": self._delegates, + "scope": self._target_scopes, + "lifetime": str(self._lifetime) + "s", + } + + headers = { + "Content-Type": "application/json", + metrics.API_CLIENT_HEADER: metrics.token_request_access_token_impersonate(), + } + + # Apply the source credentials authentication info. + self._source_credentials.apply(headers) + + # If a subject is specified a domain-wide delegation auth-flow is initiated + # to impersonate as the provided subject (user). 
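+ # In that flow the IAM signJwt endpoint signs an assertion as the
+ # target service account, and the assertion is then exchanged for an
+ # access token at the OAuth 2.0 token endpoint; otherwise the
+ # generateAccessToken request below is used.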
+ if self._subject: + if self.universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN: + raise exceptions.GoogleAuthError( + "Domain-wide delegation is not supported in universes other " + + "than googleapis.com" + ) + + now = _helpers.utcnow() + payload = { + "iss": self._target_principal, + "scope": _helpers.scopes_to_string(self._target_scopes or ()), + "sub": self._subject, + "aud": _GOOGLE_OAUTH2_TOKEN_ENDPOINT, + "iat": _helpers.datetime_to_secs(now), + "exp": _helpers.datetime_to_secs(now) + _DEFAULT_TOKEN_LIFETIME_SECS, + } + + assertion = _sign_jwt_request( + request=request, + principal=self._target_principal, + headers=headers, + payload=payload, + delegates=self._delegates, + ) + + self.token, self.expiry, _ = _client.jwt_grant( + request, _GOOGLE_OAUTH2_TOKEN_ENDPOINT, assertion + ) + + return + + self.token, self.expiry = _make_iam_token_request( + request=request, + principal=self._target_principal, + headers=headers, + body=body, + universe_domain=self.universe_domain, + iam_endpoint_override=self._iam_endpoint_override, + ) + + def sign_bytes(self, message): + from google.auth.transport.requests import AuthorizedSession + + iam_sign_endpoint = iam._IAM_SIGN_ENDPOINT.replace( + credentials.DEFAULT_UNIVERSE_DOMAIN, self.universe_domain + ).format(self._target_principal) + + body = { + "payload": base64.b64encode(message).decode("utf-8"), + "delegates": self._delegates, + } + + headers = {"Content-Type": "application/json"} + + authed_session = AuthorizedSession(self._source_credentials) + + try: + retries = _exponential_backoff.ExponentialBackoff() + for _ in retries: + response = authed_session.post( + url=iam_sign_endpoint, headers=headers, json=body + ) + if response.status_code in iam.IAM_RETRY_CODES: + continue + if response.status_code != http_client.OK: + raise exceptions.TransportError( + "Error calling sign_bytes: {}".format(response.json()) + ) + + return base64.b64decode(response.json()["signedBlob"]) + finally: + authed_session.close() + raise exceptions.TransportError("exhausted signBlob endpoint retries") + + @property + def signer_email(self): + return self._target_principal + + @property + def service_account_email(self): + return self._target_principal + + @property + def signer(self): + return self + + @property + def requires_scopes(self): + return not self._target_scopes + + @_helpers.copy_docstring(credentials.Credentials) + def get_cred_info(self): + if self._cred_file_path: + return { + "credential_source": self._cred_file_path, + "credential_type": "impersonated credentials", + "principal": self._target_principal, + } + return None + + def _make_copy(self): + cred = self.__class__( + self._source_credentials, + target_principal=self._target_principal, + target_scopes=self._target_scopes, + delegates=self._delegates, + lifetime=self._lifetime, + quota_project_id=self._quota_project_id, + iam_endpoint_override=self._iam_endpoint_override, + ) + cred._cred_file_path = self._cred_file_path + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + cred = self._make_copy() + cred._quota_project_id = quota_project_id + return cred + + @_helpers.copy_docstring(credentials.Scoped) + def with_scopes(self, scopes, default_scopes=None): + cred = self._make_copy() + cred._target_scopes = scopes or default_scopes + return cred + + +class IDTokenCredentials(credentials.CredentialsWithQuotaProject): + """Open ID Connect ID Token-based service account credentials. 
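+
+ A minimal illustrative sketch (an editor's example, not upstream
+ documentation; ``target_credentials`` and ``request`` are assumed to
+ exist and the audience URL is an assumption)::
+
+     id_creds = impersonated_credentials.IDTokenCredentials(
+         target_credentials,
+         target_audience="https://example.com",
+         include_email=True)
+     id_creds.refresh(request)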
+ + """ + + def __init__( + self, + target_credentials, + target_audience=None, + include_email=False, + quota_project_id=None, + ): + """ + Args: + target_credentials (google.auth.Credentials): The target + credential used as to acquire the id tokens for. + target_audience (string): Audience to issue the token for. + include_email (bool): Include email in IdToken + quota_project_id (Optional[str]): The project ID used for + quota and billing. + """ + super(IDTokenCredentials, self).__init__() + + if not isinstance(target_credentials, Credentials): + raise exceptions.GoogleAuthError( + "Provided Credential must be " "impersonated_credentials" + ) + self._target_credentials = target_credentials + self._target_audience = target_audience + self._include_email = include_email + self._quota_project_id = quota_project_id + + def from_credentials(self, target_credentials, target_audience=None): + return self.__class__( + target_credentials=target_credentials, + target_audience=target_audience, + include_email=self._include_email, + quota_project_id=self._quota_project_id, + ) + + def with_target_audience(self, target_audience): + return self.__class__( + target_credentials=self._target_credentials, + target_audience=target_audience, + include_email=self._include_email, + quota_project_id=self._quota_project_id, + ) + + def with_include_email(self, include_email): + return self.__class__( + target_credentials=self._target_credentials, + target_audience=self._target_audience, + include_email=include_email, + quota_project_id=self._quota_project_id, + ) + + @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + return self.__class__( + target_credentials=self._target_credentials, + target_audience=self._target_audience, + include_email=self._include_email, + quota_project_id=quota_project_id, + ) + + @_helpers.copy_docstring(credentials.Credentials) + def refresh(self, request): + from google.auth.transport.requests import AuthorizedSession + + iam_sign_endpoint = iam._IAM_IDTOKEN_ENDPOINT.replace( + credentials.DEFAULT_UNIVERSE_DOMAIN, + self._target_credentials.universe_domain, + ).format(self._target_credentials.signer_email) + + body = { + "audience": self._target_audience, + "delegates": self._target_credentials._delegates, + "includeEmail": self._include_email, + } + + headers = { + "Content-Type": "application/json", + metrics.API_CLIENT_HEADER: metrics.token_request_id_token_impersonate(), + } + + authed_session = AuthorizedSession( + self._target_credentials._source_credentials, auth_request=request + ) + + try: + response = authed_session.post( + url=iam_sign_endpoint, + headers=headers, + data=json.dumps(body).encode("utf-8"), + ) + finally: + authed_session.close() + + if response.status_code != http_client.OK: + raise exceptions.RefreshError( + "Error getting ID token: {}".format(response.json()) + ) + + id_token = response.json()["token"] + self.token = id_token + self.expiry = datetime.utcfromtimestamp( + jwt.decode(id_token, verify=False)["exp"] + ) + + +def _sign_jwt_request(request, principal, headers, payload, delegates=[]): + """Makes a request to the Google Cloud IAM service to sign a JWT using a + service account's system-managed private key. + Args: + request (Request): The Request object to use. + principal (str): The principal to request an access token for. + headers (Mapping[str, str]): Map of headers to transmit. + payload (Mapping[str, str]): The JWT payload to sign. 
Must be a + serialized JSON object that contains a JWT Claims Set. + delegates (Sequence[str]): The chained list of delegates required + to grant the final access_token. If set, the sequence of + identities must have "Service Account Token Creator" capability + granted to the prceeding identity. For example, if set to + [serviceAccountB, serviceAccountC], the source_credential + must have the Token Creator role on serviceAccountB. + serviceAccountB must have the Token Creator on + serviceAccountC. + Finally, C must have Token Creator on target_principal. + If left unset, source_credential must have that role on + target_principal. + + Raises: + google.auth.exceptions.TransportError: Raised if there is an underlying + HTTP connection error + google.auth.exceptions.RefreshError: Raised if the impersonated + credentials are not available. Common reasons are + `iamcredentials.googleapis.com` is not enabled or the + `Service Account Token Creator` is not assigned + """ + iam_endpoint = iam._IAM_SIGNJWT_ENDPOINT.format(principal) + + body = {"delegates": delegates, "payload": json.dumps(payload)} + body = json.dumps(body).encode("utf-8") + + response = request(url=iam_endpoint, method="POST", headers=headers, body=body) + + # support both string and bytes type response.data + response_body = ( + response.data.decode("utf-8") + if hasattr(response.data, "decode") + else response.data + ) + + if response.status != http_client.OK: + raise exceptions.RefreshError(_REFRESH_ERROR, response_body) + + try: + jwt_response = json.loads(response_body) + signed_jwt = jwt_response["signedJwt"] + return signed_jwt + + except (KeyError, ValueError) as caught_exc: + new_exc = exceptions.RefreshError( + "{}: No signed JWT in response.".format(_REFRESH_ERROR), response_body + ) + raise new_exc from caught_exc diff --git a/.venv/lib/python3.12/site-packages/google/auth/jwt.py b/.venv/lib/python3.12/site-packages/google/auth/jwt.py new file mode 100644 index 00000000..1ebd565d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/jwt.py @@ -0,0 +1,878 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""JSON Web Tokens + +Provides support for creating (encoding) and verifying (decoding) JWTs, +especially JWTs generated and consumed by Google infrastructure. + +See `rfc7519`_ for more details on JWTs. + +To encode a JWT use :func:`encode`:: + + from google.auth import crypt + from google.auth import jwt + + signer = crypt.Signer(private_key) + payload = {'some': 'payload'} + encoded = jwt.encode(signer, payload) + +To decode a JWT and verify claims use :func:`decode`:: + + claims = jwt.decode(encoded, certs=public_certs) + +You can also skip verification:: + + claims = jwt.decode(encoded, verify=False) + +.. 
_rfc7519: https://tools.ietf.org/html/rfc7519 + +""" + +try: + from collections.abc import Mapping +# Python 2.7 compatibility +except ImportError: # pragma: NO COVER + from collections import Mapping # type: ignore +import copy +import datetime +import json +import urllib + +import cachetools + +from google.auth import _helpers +from google.auth import _service_account_info +from google.auth import crypt +from google.auth import exceptions +import google.auth.credentials + +try: + from google.auth.crypt import es256 +except ImportError: # pragma: NO COVER + es256 = None # type: ignore + +_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds +_DEFAULT_MAX_CACHE_SIZE = 10 +_ALGORITHM_TO_VERIFIER_CLASS = {"RS256": crypt.RSAVerifier} +_CRYPTOGRAPHY_BASED_ALGORITHMS = frozenset(["ES256"]) + +if es256 is not None: # pragma: NO COVER + _ALGORITHM_TO_VERIFIER_CLASS["ES256"] = es256.ES256Verifier # type: ignore + + +def encode(signer, payload, header=None, key_id=None): + """Make a signed JWT. + + Args: + signer (google.auth.crypt.Signer): The signer used to sign the JWT. + payload (Mapping[str, str]): The JWT payload. + header (Mapping[str, str]): Additional JWT header payload. + key_id (str): The key id to add to the JWT header. If the + signer has a key id it will be used as the default. If this is + specified it will override the signer's key id. + + Returns: + bytes: The encoded JWT. + """ + if header is None: + header = {} + + if key_id is None: + key_id = signer.key_id + + header.update({"typ": "JWT"}) + + if "alg" not in header: + if es256 is not None and isinstance(signer, es256.ES256Signer): + header.update({"alg": "ES256"}) + else: + header.update({"alg": "RS256"}) + + if key_id is not None: + header["kid"] = key_id + + segments = [ + _helpers.unpadded_urlsafe_b64encode(json.dumps(header).encode("utf-8")), + _helpers.unpadded_urlsafe_b64encode(json.dumps(payload).encode("utf-8")), + ] + + signing_input = b".".join(segments) + signature = signer.sign(signing_input) + segments.append(_helpers.unpadded_urlsafe_b64encode(signature)) + + return b".".join(segments) + + +def _decode_jwt_segment(encoded_section): + """Decodes a single JWT segment.""" + section_bytes = _helpers.padded_urlsafe_b64decode(encoded_section) + try: + return json.loads(section_bytes.decode("utf-8")) + except ValueError as caught_exc: + new_exc = exceptions.MalformedError( + "Can't parse segment: {0}".format(section_bytes) + ) + raise new_exc from caught_exc + + +def _unverified_decode(token): + """Decodes a token and does no verification. + + Args: + token (Union[str, bytes]): The encoded JWT. + + Returns: + Tuple[Mapping, Mapping, str, str]: header, payload, signed_section, and + signature. + + Raises: + google.auth.exceptions.MalformedError: if there are an incorrect amount of segments in the token or segments of the wrong type. + """ + token = _helpers.to_bytes(token) + + if token.count(b".") != 2: + raise exceptions.MalformedError( + "Wrong number of segments in token: {0}".format(token) + ) + + encoded_header, encoded_payload, signature = token.split(b".") + signed_section = encoded_header + b"." 
+ encoded_payload
+ signature = _helpers.padded_urlsafe_b64decode(signature)
+
+ # Parse segments
+ header = _decode_jwt_segment(encoded_header)
+ payload = _decode_jwt_segment(encoded_payload)
+
+ if not isinstance(header, Mapping):
+ raise exceptions.MalformedError(
+ "Header segment should be a JSON object: {0}".format(encoded_header)
+ )
+
+ if not isinstance(payload, Mapping):
+ raise exceptions.MalformedError(
+ "Payload segment should be a JSON object: {0}".format(encoded_payload)
+ )
+
+ return header, payload, signed_section, signature
+
+
+def decode_header(token):
+ """Return the decoded header of a token.
+
+ No verification is done. This is useful to extract the key id from
+ the header in order to acquire the appropriate certificate to verify
+ the token.
+
+ Args:
+ token (Union[str, bytes]): the encoded JWT.
+
+ Returns:
+ Mapping: The decoded JWT header.
+ """
+ header, _, _, _ = _unverified_decode(token)
+ return header
+
+
+def _verify_iat_and_exp(payload, clock_skew_in_seconds=0):
+ """Verifies the ``iat`` (Issued At) and ``exp`` (Expires) claims in a token
+ payload.
+
+ Args:
+ payload (Mapping[str, str]): The JWT payload.
+ clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+ validation.
+
+ Raises:
+ google.auth.exceptions.InvalidValue: if value validation failed.
+ google.auth.exceptions.MalformedError: if schema validation failed.
+ """
+ now = _helpers.datetime_to_secs(_helpers.utcnow())
+
+ # Make sure the iat and exp claims are present.
+ for key in ("iat", "exp"):
+ if key not in payload:
+ raise exceptions.MalformedError(
+ "Token does not contain required claim {}".format(key)
+ )
+
+ # Make sure the token wasn't issued in the future.
+ iat = payload["iat"]
+ # Err on the side of accepting a token that is slightly early to account
+ # for clock skew.
+ earliest = iat - clock_skew_in_seconds
+ if now < earliest:
+ raise exceptions.InvalidValue(
+ "Token used too early, {} < {}. Check that your computer's clock is set correctly.".format(
+ now, iat
+ )
+ )
+
+ # Make sure the token hasn't expired.
+ exp = payload["exp"]
+ # Err on the side of accepting a token that is slightly out of date
+ # to account for clock skew.
+ latest = exp + clock_skew_in_seconds
+ if latest < now:
+ raise exceptions.InvalidValue("Token expired, {} < {}".format(latest, now))
+
+
+def decode(token, certs=None, verify=True, audience=None, clock_skew_in_seconds=0):
+ """Decode and verify a JWT.
+
+ Args:
+ token (str): The encoded JWT.
+ certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The
+ certificate used to validate the JWT signature. If bytes or string,
+ it must be the public key certificate in PEM format. If a mapping,
+ it must be a mapping of key IDs to public key certificates in PEM
+ format. The mapping must contain the same key ID that's specified
+ in the token's header.
+ verify (bool): Whether to perform signature and claim validation.
+ Verification is done by default.
+ audience (str or list): The audience claim, 'aud', that this JWT should
+ contain. Or a list of audience claims. If None then the JWT's 'aud'
+ parameter is not verified.
+ clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+ validation.
+
+ Returns:
+ Mapping[str, str]: The deserialized JSON payload in the JWT.
+
+ Raises:
+ google.auth.exceptions.InvalidValue: if value validation failed.
+ google.auth.exceptions.MalformedError: if schema validation failed.
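+
+ Example (an editor's sketch; ``token`` and ``certs`` are assumed to be
+ supplied by the caller)::
+
+     claims = decode(token, certs=certs, audience="my-audience",
+                     clock_skew_in_seconds=10)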
+ """ + header, payload, signed_section, signature = _unverified_decode(token) + + if not verify: + return payload + + # Pluck the key id and algorithm from the header and make sure we have + # a verifier that can support it. + key_alg = header.get("alg") + key_id = header.get("kid") + + try: + verifier_cls = _ALGORITHM_TO_VERIFIER_CLASS[key_alg] + except KeyError as exc: + if key_alg in _CRYPTOGRAPHY_BASED_ALGORITHMS: + raise exceptions.InvalidValue( + "The key algorithm {} requires the cryptography package to be installed.".format( + key_alg + ) + ) from exc + else: + raise exceptions.InvalidValue( + "Unsupported signature algorithm {}".format(key_alg) + ) from exc + # If certs is specified as a dictionary of key IDs to certificates, then + # use the certificate identified by the key ID in the token header. + if isinstance(certs, Mapping): + if key_id: + if key_id not in certs: + raise exceptions.MalformedError( + "Certificate for key id {} not found.".format(key_id) + ) + certs_to_check = [certs[key_id]] + # If there's no key id in the header, check against all of the certs. + else: + certs_to_check = certs.values() + else: + certs_to_check = certs + + # Verify that the signature matches the message. + if not crypt.verify_signature( + signed_section, signature, certs_to_check, verifier_cls + ): + raise exceptions.MalformedError("Could not verify token signature.") + + # Verify the issued at and created times in the payload. + _verify_iat_and_exp(payload, clock_skew_in_seconds) + + # Check audience. + if audience is not None: + claim_audience = payload.get("aud") + if isinstance(audience, str): + audience = [audience] + if claim_audience not in audience: + raise exceptions.InvalidValue( + "Token has wrong audience {}, expected one of {}".format( + claim_audience, audience + ) + ) + + return payload + + +class Credentials( + google.auth.credentials.Signing, google.auth.credentials.CredentialsWithQuotaProject +): + """Credentials that use a JWT as the bearer token. + + These credentials require an "audience" claim. This claim identifies the + intended recipient of the bearer token. + + The constructor arguments determine the claims for the JWT that is + sent with requests. Usually, you'll construct these credentials with + one of the helper constructors as shown in the next section. + + To create JWT credentials using a Google service account private key + JSON file:: + + audience = 'https://pubsub.googleapis.com/google.pubsub.v1.Publisher' + credentials = jwt.Credentials.from_service_account_file( + 'service-account.json', + audience=audience) + + If you already have the service account file loaded and parsed:: + + service_account_info = json.load(open('service_account.json')) + credentials = jwt.Credentials.from_service_account_info( + service_account_info, + audience=audience) + + Both helper methods pass on arguments to the constructor, so you can + specify the JWT claims:: + + credentials = jwt.Credentials.from_service_account_file( + 'service-account.json', + audience=audience, + additional_claims={'meta': 'data'}) + + You can also construct the credentials directly if you have a + :class:`~google.auth.crypt.Signer` instance:: + + credentials = jwt.Credentials( + signer, + issuer='your-issuer', + subject='your-subject', + audience=audience) + + The claims are considered immutable. 
If you want to modify the claims, + you can easily create another instance using :meth:`with_claims`:: + + new_audience = ( + 'https://pubsub.googleapis.com/google.pubsub.v1.Subscriber') + new_credentials = credentials.with_claims(audience=new_audience) + """ + + def __init__( + self, + signer, + issuer, + subject, + audience, + additional_claims=None, + token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS, + quota_project_id=None, + ): + """ + Args: + signer (google.auth.crypt.Signer): The signer used to sign JWTs. + issuer (str): The `iss` claim. + subject (str): The `sub` claim. + audience (str): the `aud` claim. The intended audience for the + credentials. + additional_claims (Mapping[str, str]): Any additional claims for + the JWT payload. + token_lifetime (int): The amount of time in seconds for + which the token is valid. Defaults to 1 hour. + quota_project_id (Optional[str]): The project ID used for quota + and billing. + """ + super(Credentials, self).__init__() + self._signer = signer + self._issuer = issuer + self._subject = subject + self._audience = audience + self._token_lifetime = token_lifetime + self._quota_project_id = quota_project_id + + if additional_claims is None: + additional_claims = {} + + self._additional_claims = additional_claims + + @classmethod + def _from_signer_and_info(cls, signer, info, **kwargs): + """Creates a Credentials instance from a signer and service account + info. + + Args: + signer (google.auth.crypt.Signer): The signer used to sign JWTs. + info (Mapping[str, str]): The service account info. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.jwt.Credentials: The constructed credentials. + + Raises: + google.auth.exceptions.MalformedError: If the info is not in the expected format. + """ + kwargs.setdefault("subject", info["client_email"]) + kwargs.setdefault("issuer", info["client_email"]) + return cls(signer, **kwargs) + + @classmethod + def from_service_account_info(cls, info, **kwargs): + """Creates an Credentials instance from a dictionary. + + Args: + info (Mapping[str, str]): The service account info in Google + format. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.jwt.Credentials: The constructed credentials. + + Raises: + google.auth.exceptions.MalformedError: If the info is not in the expected format. + """ + signer = _service_account_info.from_dict(info, require=["client_email"]) + return cls._from_signer_and_info(signer, info, **kwargs) + + @classmethod + def from_service_account_file(cls, filename, **kwargs): + """Creates a Credentials instance from a service account .json file + in Google format. + + Args: + filename (str): The path to the service account .json file. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.jwt.Credentials: The constructed credentials. + """ + info, signer = _service_account_info.from_filename( + filename, require=["client_email"] + ) + return cls._from_signer_and_info(signer, info, **kwargs) + + @classmethod + def from_signing_credentials(cls, credentials, audience, **kwargs): + """Creates a new :class:`google.auth.jwt.Credentials` instance from an + existing :class:`google.auth.credentials.Signing` instance. + + The new instance will use the same signer as the existing instance and + will use the existing instance's signer email as the issuer and + subject by default. 
+ + Example:: + + svc_creds = service_account.Credentials.from_service_account_file( + 'service_account.json') + audience = ( + 'https://pubsub.googleapis.com/google.pubsub.v1.Publisher') + jwt_creds = jwt.Credentials.from_signing_credentials( + svc_creds, audience=audience) + + Args: + credentials (google.auth.credentials.Signing): The credentials to + use to construct the new credentials. + audience (str): the `aud` claim. The intended audience for the + credentials. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.jwt.Credentials: A new Credentials instance. + """ + kwargs.setdefault("issuer", credentials.signer_email) + kwargs.setdefault("subject", credentials.signer_email) + return cls(credentials.signer, audience=audience, **kwargs) + + def with_claims( + self, issuer=None, subject=None, audience=None, additional_claims=None + ): + """Returns a copy of these credentials with modified claims. + + Args: + issuer (str): The `iss` claim. If unspecified the current issuer + claim will be used. + subject (str): The `sub` claim. If unspecified the current subject + claim will be used. + audience (str): the `aud` claim. If unspecified the current + audience claim will be used. + additional_claims (Mapping[str, str]): Any additional claims for + the JWT payload. This will be merged with the current + additional claims. + + Returns: + google.auth.jwt.Credentials: A new credentials instance. + """ + new_additional_claims = copy.deepcopy(self._additional_claims) + new_additional_claims.update(additional_claims or {}) + + return self.__class__( + self._signer, + issuer=issuer if issuer is not None else self._issuer, + subject=subject if subject is not None else self._subject, + audience=audience if audience is not None else self._audience, + additional_claims=new_additional_claims, + quota_project_id=self._quota_project_id, + ) + + @_helpers.copy_docstring(google.auth.credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + return self.__class__( + self._signer, + issuer=self._issuer, + subject=self._subject, + audience=self._audience, + additional_claims=self._additional_claims, + quota_project_id=quota_project_id, + ) + + def _make_jwt(self): + """Make a signed JWT. + + Returns: + Tuple[bytes, datetime]: The encoded JWT and the expiration. + """ + now = _helpers.utcnow() + lifetime = datetime.timedelta(seconds=self._token_lifetime) + expiry = now + lifetime + + payload = { + "iss": self._issuer, + "sub": self._subject, + "iat": _helpers.datetime_to_secs(now), + "exp": _helpers.datetime_to_secs(expiry), + } + if self._audience: + payload["aud"] = self._audience + + payload.update(self._additional_claims) + + jwt = encode(self._signer, payload) + + return jwt, expiry + + def refresh(self, request): + """Refreshes the access token. + + Args: + request (Any): Unused. + """ + # pylint: disable=unused-argument + # (pylint doesn't correctly recognize overridden methods.) 
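+ # Refreshing is purely local: a new JWT is signed and cached together
+ # with its expiry; no network request is made.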
+ self.token, self.expiry = self._make_jwt() + + @_helpers.copy_docstring(google.auth.credentials.Signing) + def sign_bytes(self, message): + return self._signer.sign(message) + + @property # type: ignore + @_helpers.copy_docstring(google.auth.credentials.Signing) + def signer_email(self): + return self._issuer + + @property # type: ignore + @_helpers.copy_docstring(google.auth.credentials.Signing) + def signer(self): + return self._signer + + @property # type: ignore + def additional_claims(self): + """ Additional claims the JWT object was created with.""" + return self._additional_claims + + +class OnDemandCredentials( + google.auth.credentials.Signing, google.auth.credentials.CredentialsWithQuotaProject +): + """On-demand JWT credentials. + + Like :class:`Credentials`, this class uses a JWT as the bearer token for + authentication. However, this class does not require the audience at + construction time. Instead, it will generate a new token on-demand for + each request using the request URI as the audience. It caches tokens + so that multiple requests to the same URI do not incur the overhead + of generating a new token every time. + + This behavior is especially useful for `gRPC`_ clients. A gRPC service may + have multiple audience and gRPC clients may not know all of the audiences + required for accessing a particular service. With these credentials, + no knowledge of the audiences is required ahead of time. + + .. _grpc: http://www.grpc.io/ + """ + + def __init__( + self, + signer, + issuer, + subject, + additional_claims=None, + token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS, + max_cache_size=_DEFAULT_MAX_CACHE_SIZE, + quota_project_id=None, + ): + """ + Args: + signer (google.auth.crypt.Signer): The signer used to sign JWTs. + issuer (str): The `iss` claim. + subject (str): The `sub` claim. + additional_claims (Mapping[str, str]): Any additional claims for + the JWT payload. + token_lifetime (int): The amount of time in seconds for + which the token is valid. Defaults to 1 hour. + max_cache_size (int): The maximum number of JWT tokens to keep in + cache. Tokens are cached using :class:`cachetools.LRUCache`. + quota_project_id (Optional[str]): The project ID used for quota + and billing. + + """ + super(OnDemandCredentials, self).__init__() + self._signer = signer + self._issuer = issuer + self._subject = subject + self._token_lifetime = token_lifetime + self._quota_project_id = quota_project_id + + if additional_claims is None: + additional_claims = {} + + self._additional_claims = additional_claims + self._cache = cachetools.LRUCache(maxsize=max_cache_size) + + @classmethod + def _from_signer_and_info(cls, signer, info, **kwargs): + """Creates an OnDemandCredentials instance from a signer and service + account info. + + Args: + signer (google.auth.crypt.Signer): The signer used to sign JWTs. + info (Mapping[str, str]): The service account info. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.jwt.OnDemandCredentials: The constructed credentials. + + Raises: + google.auth.exceptions.MalformedError: If the info is not in the expected format. + """ + kwargs.setdefault("subject", info["client_email"]) + kwargs.setdefault("issuer", info["client_email"]) + return cls(signer, **kwargs) + + @classmethod + def from_service_account_info(cls, info, **kwargs): + """Creates an OnDemandCredentials instance from a dictionary. + + Args: + info (Mapping[str, str]): The service account info in Google + format. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.jwt.OnDemandCredentials: The constructed credentials. + + Raises: + google.auth.exceptions.MalformedError: If the info is not in the expected format. + """ + signer = _service_account_info.from_dict(info, require=["client_email"]) + return cls._from_signer_and_info(signer, info, **kwargs) + + @classmethod + def from_service_account_file(cls, filename, **kwargs): + """Creates an OnDemandCredentials instance from a service account .json + file in Google format. + + Args: + filename (str): The path to the service account .json file. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.jwt.OnDemandCredentials: The constructed credentials. + """ + info, signer = _service_account_info.from_filename( + filename, require=["client_email"] + ) + return cls._from_signer_and_info(signer, info, **kwargs) + + @classmethod + def from_signing_credentials(cls, credentials, **kwargs): + """Creates a new :class:`google.auth.jwt.OnDemandCredentials` instance + from an existing :class:`google.auth.credentials.Signing` instance. + + The new instance will use the same signer as the existing instance and + will use the existing instance's signer email as the issuer and + subject by default. + + Example:: + + svc_creds = service_account.Credentials.from_service_account_file( + 'service_account.json') + jwt_creds = jwt.OnDemandCredentials.from_signing_credentials( + svc_creds) + + Args: + credentials (google.auth.credentials.Signing): The credentials to + use to construct the new credentials. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.jwt.Credentials: A new Credentials instance. + """ + kwargs.setdefault("issuer", credentials.signer_email) + kwargs.setdefault("subject", credentials.signer_email) + return cls(credentials.signer, **kwargs) + + def with_claims(self, issuer=None, subject=None, additional_claims=None): + """Returns a copy of these credentials with modified claims. + + Args: + issuer (str): The `iss` claim. If unspecified the current issuer + claim will be used. + subject (str): The `sub` claim. If unspecified the current subject + claim will be used. + additional_claims (Mapping[str, str]): Any additional claims for + the JWT payload. This will be merged with the current + additional claims. + + Returns: + google.auth.jwt.OnDemandCredentials: A new credentials instance. + """ + new_additional_claims = copy.deepcopy(self._additional_claims) + new_additional_claims.update(additional_claims or {}) + + return self.__class__( + self._signer, + issuer=issuer if issuer is not None else self._issuer, + subject=subject if subject is not None else self._subject, + additional_claims=new_additional_claims, + max_cache_size=self._cache.maxsize, + quota_project_id=self._quota_project_id, + ) + + @_helpers.copy_docstring(google.auth.credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + + return self.__class__( + self._signer, + issuer=self._issuer, + subject=self._subject, + additional_claims=self._additional_claims, + max_cache_size=self._cache.maxsize, + quota_project_id=quota_project_id, + ) + + @property + def valid(self): + """Checks the validity of the credentials. + + These credentials are always valid because it generates tokens on + demand. + """ + return True + + def _make_jwt_for_audience(self, audience): + """Make a new JWT for the given audience. + + Args: + audience (str): The intended audience. 
+ + Returns: + Tuple[bytes, datetime]: The encoded JWT and the expiration. + """ + now = _helpers.utcnow() + lifetime = datetime.timedelta(seconds=self._token_lifetime) + expiry = now + lifetime + + payload = { + "iss": self._issuer, + "sub": self._subject, + "iat": _helpers.datetime_to_secs(now), + "exp": _helpers.datetime_to_secs(expiry), + "aud": audience, + } + + payload.update(self._additional_claims) + + jwt = encode(self._signer, payload) + + return jwt, expiry + + def _get_jwt_for_audience(self, audience): + """Get a JWT For a given audience. + + If there is already an existing, non-expired token in the cache for + the audience, that token is used. Otherwise, a new token will be + created. + + Args: + audience (str): The intended audience. + + Returns: + bytes: The encoded JWT. + """ + token, expiry = self._cache.get(audience, (None, None)) + + if token is None or expiry < _helpers.utcnow(): + token, expiry = self._make_jwt_for_audience(audience) + self._cache[audience] = token, expiry + + return token + + def refresh(self, request): + """Raises an exception, these credentials can not be directly + refreshed. + + Args: + request (Any): Unused. + + Raises: + google.auth.RefreshError + """ + # pylint: disable=unused-argument + # (pylint doesn't correctly recognize overridden methods.) + raise exceptions.RefreshError( + "OnDemandCredentials can not be directly refreshed." + ) + + def before_request(self, request, method, url, headers): + """Performs credential-specific before request logic. + + Args: + request (Any): Unused. JWT credentials do not need to make an + HTTP request to refresh. + method (str): The request's HTTP method. + url (str): The request's URI. This is used as the audience claim + when generating the JWT. + headers (Mapping): The request's headers. + """ + # pylint: disable=unused-argument + # (pylint doesn't correctly recognize overridden methods.) + parts = urllib.parse.urlsplit(url) + # Strip query string and fragment + audience = urllib.parse.urlunsplit( + (parts.scheme, parts.netloc, parts.path, "", "") + ) + token = self._get_jwt_for_audience(audience) + self.apply(headers, token=token) + + @_helpers.copy_docstring(google.auth.credentials.Signing) + def sign_bytes(self, message): + return self._signer.sign(message) + + @property # type: ignore + @_helpers.copy_docstring(google.auth.credentials.Signing) + def signer_email(self): + return self._issuer + + @property # type: ignore + @_helpers.copy_docstring(google.auth.credentials.Signing) + def signer(self): + return self._signer diff --git a/.venv/lib/python3.12/site-packages/google/auth/metrics.py b/.venv/lib/python3.12/site-packages/google/auth/metrics.py new file mode 100644 index 00000000..11e4b077 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/metrics.py @@ -0,0 +1,154 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" We use x-goog-api-client header to report metrics. 
This module provides +the constants and helper methods to construct x-goog-api-client header. +""" + +import platform + +from google.auth import version + + +API_CLIENT_HEADER = "x-goog-api-client" + +# BYOID Specific consts +BYOID_HEADER_SECTION = "google-byoid-sdk" + +# Auth request type +REQUEST_TYPE_ACCESS_TOKEN = "auth-request-type/at" +REQUEST_TYPE_ID_TOKEN = "auth-request-type/it" +REQUEST_TYPE_MDS_PING = "auth-request-type/mds" +REQUEST_TYPE_REAUTH_START = "auth-request-type/re-start" +REQUEST_TYPE_REAUTH_CONTINUE = "auth-request-type/re-cont" + +# Credential type +CRED_TYPE_USER = "cred-type/u" +CRED_TYPE_SA_ASSERTION = "cred-type/sa" +CRED_TYPE_SA_JWT = "cred-type/jwt" +CRED_TYPE_SA_MDS = "cred-type/mds" +CRED_TYPE_SA_IMPERSONATE = "cred-type/imp" + + +# Versions +def python_and_auth_lib_version(): + return "gl-python/{} auth/{}".format(platform.python_version(), version.__version__) + + +# Token request metric header values + +# x-goog-api-client header value for access token request via metadata server. +# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/mds" +def token_request_access_token_mds(): + return "{} {} {}".format( + python_and_auth_lib_version(), REQUEST_TYPE_ACCESS_TOKEN, CRED_TYPE_SA_MDS + ) + + +# x-goog-api-client header value for ID token request via metadata server. +# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/mds" +def token_request_id_token_mds(): + return "{} {} {}".format( + python_and_auth_lib_version(), REQUEST_TYPE_ID_TOKEN, CRED_TYPE_SA_MDS + ) + + +# x-goog-api-client header value for impersonated credentials access token request. +# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/imp" +def token_request_access_token_impersonate(): + return "{} {} {}".format( + python_and_auth_lib_version(), + REQUEST_TYPE_ACCESS_TOKEN, + CRED_TYPE_SA_IMPERSONATE, + ) + + +# x-goog-api-client header value for impersonated credentials ID token request. +# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/imp" +def token_request_id_token_impersonate(): + return "{} {} {}".format( + python_and_auth_lib_version(), REQUEST_TYPE_ID_TOKEN, CRED_TYPE_SA_IMPERSONATE + ) + + +# x-goog-api-client header value for service account credentials access token +# request (assertion flow). +# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/sa" +def token_request_access_token_sa_assertion(): + return "{} {} {}".format( + python_and_auth_lib_version(), REQUEST_TYPE_ACCESS_TOKEN, CRED_TYPE_SA_ASSERTION + ) + + +# x-goog-api-client header value for service account credentials ID token +# request (assertion flow). +# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/sa" +def token_request_id_token_sa_assertion(): + return "{} {} {}".format( + python_and_auth_lib_version(), REQUEST_TYPE_ID_TOKEN, CRED_TYPE_SA_ASSERTION + ) + + +# x-goog-api-client header value for user credentials token request. +# Example: "gl-python/3.7 auth/1.1 cred-type/u" +def token_request_user(): + return "{} {}".format(python_and_auth_lib_version(), CRED_TYPE_USER) + + +# Miscellenous metrics + +# x-goog-api-client header value for metadata server ping. +# Example: "gl-python/3.7 auth/1.1 auth-request-type/mds" +def mds_ping(): + return "{} {}".format(python_and_auth_lib_version(), REQUEST_TYPE_MDS_PING) + + +# x-goog-api-client header value for reauth start endpoint calls. 
+# Example: "gl-python/3.7 auth/1.1 auth-request-type/re-start" +def reauth_start(): + return "{} {}".format(python_and_auth_lib_version(), REQUEST_TYPE_REAUTH_START) + + +# x-goog-api-client header value for reauth continue endpoint calls. +# Example: "gl-python/3.7 auth/1.1 cred-type/re-cont" +def reauth_continue(): + return "{} {}".format(python_and_auth_lib_version(), REQUEST_TYPE_REAUTH_CONTINUE) + + +# x-goog-api-client header value for BYOID calls to the Security Token Service exchange token endpoint. +# Example: "gl-python/3.7 auth/1.1 google-byoid-sdk source/aws sa-impersonation/true sa-impersonation/true" +def byoid_metrics_header(metrics_options): + header = "{} {}".format(python_and_auth_lib_version(), BYOID_HEADER_SECTION) + for key, value in metrics_options.items(): + header = "{} {}/{}".format(header, key, value) + return header + + +def add_metric_header(headers, metric_header_value): + """Add x-goog-api-client header with the given value. + + Args: + headers (Mapping[str, str]): The headers to which we will add the + metric header. + metric_header_value (Optional[str]): If value is None, do nothing; + if headers already has a x-goog-api-client header, append the value + to the existing header; otherwise add a new x-goog-api-client + header with the given value. + """ + if not metric_header_value: + return + if API_CLIENT_HEADER not in headers: + headers[API_CLIENT_HEADER] = metric_header_value + else: + headers[API_CLIENT_HEADER] += " " + metric_header_value diff --git a/.venv/lib/python3.12/site-packages/google/auth/pluggable.py b/.venv/lib/python3.12/site-packages/google/auth/pluggable.py new file mode 100644 index 00000000..d725188f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/pluggable.py @@ -0,0 +1,429 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Pluggable Credentials. +Pluggable Credentials are initialized using external_account arguments which +are typically loaded from third-party executables. Unlike other +credentials that can be initialized with a list of explicit arguments, secrets +or credentials, external account clients use the environment and hints/guidelines +provided by the external_account JSON file to retrieve credentials and exchange +them for Google access tokens. + +Example credential_source for pluggable credential: +{ + "executable": { + "command": "/path/to/get/credentials.sh --arg1=value1 --arg2=value2", + "timeout_millis": 5000, + "output_file": "/path/to/generated/cached/credentials" + } +} +""" + +try: + from collections.abc import Mapping +# Python 2.7 compatibility +except ImportError: # pragma: NO COVER + from collections import Mapping # type: ignore +import json +import os +import subprocess +import sys +import time + +from google.auth import _helpers +from google.auth import exceptions +from google.auth import external_account + +# The max supported executable spec version. 
+EXECUTABLE_SUPPORTED_MAX_VERSION = 1
+
+EXECUTABLE_TIMEOUT_MILLIS_DEFAULT = 30 * 1000  # 30 seconds
+EXECUTABLE_TIMEOUT_MILLIS_LOWER_BOUND = 5 * 1000  # 5 seconds
+EXECUTABLE_TIMEOUT_MILLIS_UPPER_BOUND = 120 * 1000  # 2 minutes
+
+EXECUTABLE_INTERACTIVE_TIMEOUT_MILLIS_LOWER_BOUND = 30 * 1000  # 30 seconds
+EXECUTABLE_INTERACTIVE_TIMEOUT_MILLIS_UPPER_BOUND = 30 * 60 * 1000  # 30 minutes
+
+
+class Credentials(external_account.Credentials):
+    """External account credentials sourced from executables."""
+
+    def __init__(
+        self,
+        audience,
+        subject_token_type,
+        token_url,
+        credential_source,
+        *args,
+        **kwargs
+    ):
+        """Instantiates an external account credentials object from an executable.
+
+        Args:
+            audience (str): The STS audience field.
+            subject_token_type (str): The subject token type.
+            token_url (str): The STS endpoint URL.
+            credential_source (Mapping): The credential source dictionary used to
+                provide instructions on how to retrieve the external credential to be
+                exchanged for Google access tokens.
+
+                Example credential_source for pluggable credential:
+
+                {
+                    "executable": {
+                        "command": "/path/to/get/credentials.sh --arg1=value1 --arg2=value2",
+                        "timeout_millis": 5000,
+                        "output_file": "/path/to/generated/cached/credentials"
+                    }
+                }
+            args (List): Optional positional arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method.
+            kwargs (Mapping): Optional keyword arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method.
+
+        Raises:
+            google.auth.exceptions.RefreshError: If an error is encountered during
+                access token retrieval logic.
+            google.auth.exceptions.InvalidValue: For invalid parameters.
+            google.auth.exceptions.MalformedError: For invalid parameters.
+
+        .. note:: Typically one of the helper constructors
+            :meth:`from_file` or
+            :meth:`from_info` is used instead of calling the constructor directly.
+        """
+
+        self.interactive = kwargs.pop("interactive", False)
+        super(Credentials, self).__init__(
+            audience=audience,
+            subject_token_type=subject_token_type,
+            token_url=token_url,
+            credential_source=credential_source,
+            *args,
+            **kwargs
+        )
+        if not isinstance(credential_source, Mapping):
+            self._credential_source_executable = None
+            raise exceptions.MalformedError(
+                "Missing credential_source. The credential_source is not a dict."
+            )
+        self._credential_source_executable = credential_source.get("executable")
+        if not self._credential_source_executable:
+            raise exceptions.MalformedError(
+                "Missing credential_source. An 'executable' must be provided."
+            )
+        self._credential_source_executable_command = self._credential_source_executable.get(
+            "command"
+        )
+        self._credential_source_executable_timeout_millis = self._credential_source_executable.get(
+            "timeout_millis"
+        )
+        self._credential_source_executable_interactive_timeout_millis = self._credential_source_executable.get(
+            "interactive_timeout_millis"
+        )
+        self._credential_source_executable_output_file = self._credential_source_executable.get(
+            "output_file"
+        )
+
+        # Dummy value. This variable is only used via injection, not exposed to the constructor.
+        self._tokeninfo_username = ""
+
+        if not self._credential_source_executable_command:
+            raise exceptions.MalformedError(
+                "Missing command field. Executable command must be provided."
+            )
+        if not self._credential_source_executable_timeout_millis:
+            self._credential_source_executable_timeout_millis = (
+                EXECUTABLE_TIMEOUT_MILLIS_DEFAULT
+            )
+        elif (
+            self._credential_source_executable_timeout_millis
+            < EXECUTABLE_TIMEOUT_MILLIS_LOWER_BOUND
+            or self._credential_source_executable_timeout_millis
+            > EXECUTABLE_TIMEOUT_MILLIS_UPPER_BOUND
+        ):
+            raise exceptions.InvalidValue("Timeout must be between 5 and 120 seconds.")
+
+        if self._credential_source_executable_interactive_timeout_millis:
+            if (
+                self._credential_source_executable_interactive_timeout_millis
+                < EXECUTABLE_INTERACTIVE_TIMEOUT_MILLIS_LOWER_BOUND
+                or self._credential_source_executable_interactive_timeout_millis
+                > EXECUTABLE_INTERACTIVE_TIMEOUT_MILLIS_UPPER_BOUND
+            ):
+                raise exceptions.InvalidValue(
+                    "Interactive timeout must be between 30 seconds and 30 minutes."
+                )
+
+    @_helpers.copy_docstring(external_account.Credentials)
+    def retrieve_subject_token(self, request):
+        self._validate_running_mode()
+
+        # Check output file.
+        if self._credential_source_executable_output_file is not None:
+            try:
+                with open(
+                    self._credential_source_executable_output_file, encoding="utf-8"
+                ) as output_file:
+                    response = json.load(output_file)
+            except Exception:
+                pass
+            else:
+                try:
+                    # If the cached response is expired, _parse_subject_token raises an
+                    # error, which is ignored here so that the executable is called again.
+                    subject_token = self._parse_subject_token(response)
+                    if (
+                        "expiration_time" not in response
+                    ):  # Always treat missing expiration_time as expired and proceed to executable run.
+                        raise exceptions.RefreshError
+                except (exceptions.MalformedError, exceptions.InvalidValue):
+                    raise
+                except exceptions.RefreshError:
+                    pass
+                else:
+                    return subject_token
+
+        if not _helpers.is_python_3():
+            raise exceptions.RefreshError(
+                "Pluggable auth is only supported for python 3.7+"
+            )
+
+        # Inject env vars.
+        env = os.environ.copy()
+        self._inject_env_variables(env)
+        env["GOOGLE_EXTERNAL_ACCOUNT_REVOKE"] = "0"
+
+        # Run executable.
+        exe_timeout = (
+            self._credential_source_executable_interactive_timeout_millis / 1000
+            if self.interactive
+            else self._credential_source_executable_timeout_millis / 1000
+        )
+        exe_stdin = sys.stdin if self.interactive else None
+        exe_stdout = sys.stdout if self.interactive else subprocess.PIPE
+        exe_stderr = sys.stdout if self.interactive else subprocess.STDOUT
+
+        result = subprocess.run(
+            self._credential_source_executable_command.split(),
+            timeout=exe_timeout,
+            stdin=exe_stdin,
+            stdout=exe_stdout,
+            stderr=exe_stderr,
+            env=env,
+        )
+        if result.returncode != 0:
+            raise exceptions.RefreshError(
+                "Executable exited with non-zero return code {}. Error: {}".format(
+                    result.returncode, result.stdout
+                )
+            )
+
+        # Handle executable output.
+        response = json.loads(result.stdout.decode("utf-8")) if result.stdout else None
+        if not response and self._credential_source_executable_output_file is not None:
+            response = json.load(
+                open(self._credential_source_executable_output_file, encoding="utf-8")
+            )
+
+        subject_token = self._parse_subject_token(response)
+        return subject_token
+
+    def revoke(self, request):
+        """Revokes the subject token using the credential_source object.
+
+        Args:
+            request (google.auth.transport.Request): A callable used to make
+                HTTP requests.
+        Raises:
+            google.auth.exceptions.RefreshError: If the executable revocation
+                was not properly executed.
+ + """ + if not self.interactive: + raise exceptions.InvalidValue( + "Revoke is only enabled under interactive mode." + ) + self._validate_running_mode() + + if not _helpers.is_python_3(): + raise exceptions.RefreshError( + "Pluggable auth is only supported for python 3.7+" + ) + + # Inject variables + env = os.environ.copy() + self._inject_env_variables(env) + env["GOOGLE_EXTERNAL_ACCOUNT_REVOKE"] = "1" + + # Run executable + result = subprocess.run( + self._credential_source_executable_command.split(), + timeout=self._credential_source_executable_interactive_timeout_millis + / 1000, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + env=env, + ) + + if result.returncode != 0: + raise exceptions.RefreshError( + "Auth revoke failed on executable. Exit with non-zero return code {}. Error: {}".format( + result.returncode, result.stdout + ) + ) + + response = json.loads(result.stdout.decode("utf-8")) + self._validate_revoke_response(response) + + @property + def external_account_id(self): + """Returns the external account identifier. + + When service account impersonation is used the identifier is the service + account email. + + Without service account impersonation, this returns None, unless it is + being used by the Google Cloud CLI which populates this field. + """ + + return self.service_account_email or self._tokeninfo_username + + @classmethod + def from_info(cls, info, **kwargs): + """Creates a Pluggable Credentials instance from parsed external account info. + + Args: + info (Mapping[str, str]): The Pluggable external account info in Google + format. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.pluggable.Credentials: The constructed + credentials. + + Raises: + google.auth.exceptions.InvalidValue: For invalid parameters. + google.auth.exceptions.MalformedError: For invalid parameters. + """ + return super(Credentials, cls).from_info(info, **kwargs) + + @classmethod + def from_file(cls, filename, **kwargs): + """Creates an Pluggable Credentials instance from an external account json file. + + Args: + filename (str): The path to the Pluggable external account json file. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.pluggable.Credentials: The constructed + credentials. + """ + return super(Credentials, cls).from_file(filename, **kwargs) + + def _inject_env_variables(self, env): + env["GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE"] = self._audience + env["GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE"] = self._subject_token_type + env["GOOGLE_EXTERNAL_ACCOUNT_ID"] = self.external_account_id + env["GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE"] = "1" if self.interactive else "0" + + if self._service_account_impersonation_url is not None: + env[ + "GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL" + ] = self.service_account_email + if self._credential_source_executable_output_file is not None: + env[ + "GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE" + ] = self._credential_source_executable_output_file + + def _parse_subject_token(self, response): + self._validate_response_schema(response) + if not response["success"]: + if "code" not in response or "message" not in response: + raise exceptions.MalformedError( + "Error code and message fields are required in the response." 
+ ) + raise exceptions.RefreshError( + "Executable returned unsuccessful response: code: {}, message: {}.".format( + response["code"], response["message"] + ) + ) + if "expiration_time" in response and response["expiration_time"] < time.time(): + raise exceptions.RefreshError( + "The token returned by the executable is expired." + ) + if "token_type" not in response: + raise exceptions.MalformedError( + "The executable response is missing the token_type field." + ) + if ( + response["token_type"] == "urn:ietf:params:oauth:token-type:jwt" + or response["token_type"] == "urn:ietf:params:oauth:token-type:id_token" + ): # OIDC + return response["id_token"] + elif response["token_type"] == "urn:ietf:params:oauth:token-type:saml2": # SAML + return response["saml_response"] + else: + raise exceptions.RefreshError("Executable returned unsupported token type.") + + def _validate_revoke_response(self, response): + self._validate_response_schema(response) + if not response["success"]: + raise exceptions.RefreshError("Revoke failed with unsuccessful response.") + + def _validate_response_schema(self, response): + if "version" not in response: + raise exceptions.MalformedError( + "The executable response is missing the version field." + ) + if response["version"] > EXECUTABLE_SUPPORTED_MAX_VERSION: + raise exceptions.RefreshError( + "Executable returned unsupported version {}.".format( + response["version"] + ) + ) + + if "success" not in response: + raise exceptions.MalformedError( + "The executable response is missing the success field." + ) + + def _validate_running_mode(self): + env_allow_executables = os.environ.get( + "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES" + ) + if env_allow_executables != "1": + raise exceptions.MalformedError( + "Executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run." + ) + + if self.interactive and not self._credential_source_executable_output_file: + raise exceptions.MalformedError( + "An output_file must be specified in the credential configuration for interactive mode." + ) + + if ( + self.interactive + and not self._credential_source_executable_interactive_timeout_millis + ): + raise exceptions.InvalidOperation( + "Interactive mode cannot run without an interactive timeout." + ) + + if self.interactive and not self.is_workforce_pool: + raise exceptions.InvalidValue( + "Interactive mode is only enabled for workforce pool." + ) + + def _create_default_metrics_options(self): + metrics_options = super(Credentials, self)._create_default_metrics_options() + metrics_options["source"] = "executable" + return metrics_options diff --git a/.venv/lib/python3.12/site-packages/google/auth/py.typed b/.venv/lib/python3.12/site-packages/google/auth/py.typed new file mode 100644 index 00000000..aa7b6892 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561.
+# The google-auth package uses inline types.
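Before moving on to the transport modules, a minimal usage sketch for the pluggable credentials above; the config file name is hypothetical, and the environment variable opt-in is required by `_validate_running_mode`:

    import os
    from google.auth import pluggable

    # Executables must be explicitly allowed before pluggable auth will run.
    os.environ["GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES"] = "1"

    # Hypothetical external_account config containing an "executable" credential_source.
    credentials = pluggable.Credentials.from_file("byoid_config.json")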
diff --git a/.venv/lib/python3.12/site-packages/google/auth/transport/__init__.py b/.venv/lib/python3.12/site-packages/google/auth/transport/__init__.py new file mode 100644 index 00000000..724568e5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/transport/__init__.py @@ -0,0 +1,103 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Transport - HTTP client library support. + +:mod:`google.auth` is designed to work with various HTTP client libraries such +as urllib3 and requests. In order to work across these libraries with different +interfaces some abstraction is needed. + +This module provides two interfaces that are implemented by transport adapters +to support HTTP libraries. :class:`Request` defines the interface expected by +:mod:`google.auth` to make requests. :class:`Response` defines the interface +for the return value of :class:`Request`. +""" + +import abc +import http.client as http_client + +DEFAULT_RETRYABLE_STATUS_CODES = ( + http_client.INTERNAL_SERVER_ERROR, + http_client.SERVICE_UNAVAILABLE, + http_client.REQUEST_TIMEOUT, + http_client.TOO_MANY_REQUESTS, +) +"""Sequence[int]: HTTP status codes indicating a request can be retried. +""" + + +DEFAULT_REFRESH_STATUS_CODES = (http_client.UNAUTHORIZED,) +"""Sequence[int]: Which HTTP status code indicate that credentials should be +refreshed. +""" + +DEFAULT_MAX_REFRESH_ATTEMPTS = 2 +"""int: How many times to refresh the credentials and retry a request.""" + + +class Response(metaclass=abc.ABCMeta): + """HTTP Response data.""" + + @abc.abstractproperty + def status(self): + """int: The HTTP status code.""" + raise NotImplementedError("status must be implemented.") + + @abc.abstractproperty + def headers(self): + """Mapping[str, str]: The HTTP response headers.""" + raise NotImplementedError("headers must be implemented.") + + @abc.abstractproperty + def data(self): + """bytes: The response body.""" + raise NotImplementedError("data must be implemented.") + + +class Request(metaclass=abc.ABCMeta): + """Interface for a callable that makes HTTP requests. + + Specific transport implementations should provide an implementation of + this that adapts their specific request / response API. + + .. automethod:: __call__ + """ + + @abc.abstractmethod + def __call__( + self, url, method="GET", body=None, headers=None, timeout=None, **kwargs + ): + """Make an HTTP request. + + Args: + url (str): The URI to be requested. + method (str): The HTTP method to use for the request. Defaults + to 'GET'. + body (bytes): The payload / body in HTTP request. + headers (Mapping[str, str]): Request headers. + timeout (Optional[int]): The number of seconds to wait for a + response from the server. If not specified or if None, the + transport-specific default timeout will be used. + kwargs: Additionally arguments passed on to the transport's + request method. + + Returns: + Response: The HTTP response. + + Raises: + google.auth.exceptions.TransportError: If any exception occurred. 
+ """ + # pylint: disable=redundant-returns-doc, missing-raises-doc + # (pylint doesn't play well with abstract docstrings.) + raise NotImplementedError("__call__ must be implemented.") diff --git a/.venv/lib/python3.12/site-packages/google/auth/transport/_aiohttp_requests.py b/.venv/lib/python3.12/site-packages/google/auth/transport/_aiohttp_requests.py new file mode 100644 index 00000000..bc4d9dc6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/transport/_aiohttp_requests.py @@ -0,0 +1,391 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Transport adapter for Async HTTP (aiohttp). + +NOTE: This async support is experimental and marked internal. This surface may +change in minor releases. +""" + +from __future__ import absolute_import + +import asyncio +import functools + +import aiohttp # type: ignore +import urllib3 # type: ignore + +from google.auth import exceptions +from google.auth import transport +from google.auth.transport import requests + +# Timeout can be re-defined depending on async requirement. Currently made 60s more than +# sync timeout. +_DEFAULT_TIMEOUT = 180 # in seconds + + +class _CombinedResponse(transport.Response): + """ + In order to more closely resemble the `requests` interface, where a raw + and deflated content could be accessed at once, this class lazily reads the + stream in `transport.Response` so both return forms can be used. + + The gzip and deflate transfer-encodings are automatically decoded for you + because the default parameter for autodecompress into the ClientSession is set + to False, and therefore we add this class to act as a wrapper for a user to be + able to access both the raw and decoded response bodies - mirroring the sync + implementation. + """ + + def __init__(self, response): + self._response = response + self._raw_content = None + + def _is_compressed(self): + headers = self._response.headers + return "Content-Encoding" in headers and ( + headers["Content-Encoding"] == "gzip" + or headers["Content-Encoding"] == "deflate" + ) + + @property + def status(self): + return self._response.status + + @property + def headers(self): + return self._response.headers + + @property + def data(self): + return self._response.content + + async def raw_content(self): + if self._raw_content is None: + self._raw_content = await self._response.content.read() + return self._raw_content + + async def content(self): + # Load raw_content if necessary + await self.raw_content() + if self._is_compressed(): + decoder = urllib3.response.MultiDecoder( + self._response.headers["Content-Encoding"] + ) + decompressed = decoder.decompress(self._raw_content) + return decompressed + + return self._raw_content + + +class _Response(transport.Response): + """ + Requests transport response adapter. + + Args: + response (requests.Response): The raw Requests response. 
+ """ + + def __init__(self, response): + self._response = response + + @property + def status(self): + return self._response.status + + @property + def headers(self): + return self._response.headers + + @property + def data(self): + return self._response.content + + +class Request(transport.Request): + """Requests request adapter. + + This class is used internally for making requests using asyncio transports + in a consistent way. If you use :class:`AuthorizedSession` you do not need + to construct or use this class directly. + + This class can be useful if you want to manually refresh a + :class:`~google.auth.credentials.Credentials` instance:: + + import google.auth.transport.aiohttp_requests + + request = google.auth.transport.aiohttp_requests.Request() + + credentials.refresh(request) + + Args: + session (aiohttp.ClientSession): An instance :class:`aiohttp.ClientSession` used + to make HTTP requests. If not specified, a session will be created. + + .. automethod:: __call__ + """ + + def __init__(self, session=None): + # TODO: Use auto_decompress property for aiohttp 3.7+ + if session is not None and session._auto_decompress: + raise exceptions.InvalidOperation( + "Client sessions with auto_decompress=True are not supported." + ) + self.session = session + + async def __call__( + self, + url, + method="GET", + body=None, + headers=None, + timeout=_DEFAULT_TIMEOUT, + **kwargs, + ): + """ + Make an HTTP request using aiohttp. + + Args: + url (str): The URL to be requested. + method (Optional[str]): + The HTTP method to use for the request. Defaults to 'GET'. + body (Optional[bytes]): + The payload or body in HTTP request. + headers (Optional[Mapping[str, str]]): + Request headers. + timeout (Optional[int]): The number of seconds to wait for a + response from the server. If not specified or if None, the + requests default timeout will be used. + kwargs: Additional arguments passed through to the underlying + requests :meth:`requests.Session.request` method. + + Returns: + google.auth.transport.Response: The HTTP response. + + Raises: + google.auth.exceptions.TransportError: If any exception occurred. + """ + + try: + if self.session is None: # pragma: NO COVER + self.session = aiohttp.ClientSession( + auto_decompress=False + ) # pragma: NO COVER + requests._LOGGER.debug("Making request: %s %s", method, url) + response = await self.session.request( + method, url, data=body, headers=headers, timeout=timeout, **kwargs + ) + return _CombinedResponse(response) + + except aiohttp.ClientError as caught_exc: + new_exc = exceptions.TransportError(caught_exc) + raise new_exc from caught_exc + + except asyncio.TimeoutError as caught_exc: + new_exc = exceptions.TransportError(caught_exc) + raise new_exc from caught_exc + + +class AuthorizedSession(aiohttp.ClientSession): + """This is an async implementation of the Authorized Session class. We utilize an + aiohttp transport instance, and the interface mirrors the google.auth.transport.requests + Authorized Session class, except for the change in the transport used in the async use case. + + A Requests Session class with credentials. 
+ + This class is used to perform requests to API endpoints that require + authorization:: + + from google.auth.transport import aiohttp_requests + + async with aiohttp_requests.AuthorizedSession(credentials) as authed_session: + response = await authed_session.request( + 'GET', 'https://www.googleapis.com/storage/v1/b') + + The underlying :meth:`request` implementation handles adding the + credentials' headers to the request and refreshing credentials as needed. + + Args: + credentials (google.auth._credentials_async.Credentials): + The credentials to add to the request. + refresh_status_codes (Sequence[int]): Which HTTP status codes indicate + that credentials should be refreshed and the request should be + retried. + max_refresh_attempts (int): The maximum number of times to attempt to + refresh the credentials and retry the request. + refresh_timeout (Optional[int]): The timeout value in seconds for + credential refresh HTTP requests. + auth_request (google.auth.transport.aiohttp_requests.Request): + (Optional) An instance of + :class:`~google.auth.transport.aiohttp_requests.Request` used when + refreshing credentials. If not passed, + an instance of :class:`~google.auth.transport.aiohttp_requests.Request` + is created. + kwargs: Additional arguments passed through to the underlying + ClientSession :meth:`aiohttp.ClientSession` object. + """ + + def __init__( + self, + credentials, + refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES, + max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS, + refresh_timeout=None, + auth_request=None, + auto_decompress=False, + **kwargs, + ): + super(AuthorizedSession, self).__init__(**kwargs) + self.credentials = credentials + self._refresh_status_codes = refresh_status_codes + self._max_refresh_attempts = max_refresh_attempts + self._refresh_timeout = refresh_timeout + self._is_mtls = False + self._auth_request = auth_request + self._auth_request_session = None + self._loop = asyncio.get_event_loop() + self._refresh_lock = asyncio.Lock() + self._auto_decompress = auto_decompress + + async def request( + self, + method, + url, + data=None, + headers=None, + max_allowed_time=None, + timeout=_DEFAULT_TIMEOUT, + auto_decompress=False, + **kwargs, + ): + + """Implementation of Authorized Session aiohttp request. + + Args: + method (str): + The http request method used (e.g. GET, PUT, DELETE) + url (str): + The url at which the http request is sent. + data (Optional[dict]): Dictionary, list of tuples, bytes, or file-like + object to send in the body of the Request. + headers (Optional[dict]): Dictionary of HTTP Headers to send with the + Request. + timeout (Optional[Union[float, aiohttp.ClientTimeout]]): + The amount of time in seconds to wait for the server response + with each individual request. Can also be passed as an + ``aiohttp.ClientTimeout`` object. + max_allowed_time (Optional[float]): + If the method runs longer than this, a ``Timeout`` exception is + automatically raised. Unlike the ``timeout`` parameter, this + value applies to the total method execution time, even if + multiple requests are made under the hood. + + Mind that it is not guaranteed that the timeout error is raised + at ``max_allowed_time``. It might take longer, for example, if + an underlying request takes a lot of time, but the request + itself does not timeout, e.g. if a large file is being + transmitted. The timout error will be raised after such + request completes. 
+ """ + # Headers come in as bytes which isn't expected behavior, the resumable + # media libraries in some cases expect a str type for the header values, + # but sometimes the operations return these in bytes types. + if headers: + for key in headers.keys(): + if type(headers[key]) is bytes: + headers[key] = headers[key].decode("utf-8") + + async with aiohttp.ClientSession( + auto_decompress=self._auto_decompress, + trust_env=kwargs.get("trust_env", False), + ) as self._auth_request_session: + auth_request = Request(self._auth_request_session) + self._auth_request = auth_request + + # Use a kwarg for this instead of an attribute to maintain + # thread-safety. + _credential_refresh_attempt = kwargs.pop("_credential_refresh_attempt", 0) + # Make a copy of the headers. They will be modified by the credentials + # and we want to pass the original headers if we recurse. + request_headers = headers.copy() if headers is not None else {} + + # Do not apply the timeout unconditionally in order to not override the + # _auth_request's default timeout. + auth_request = ( + self._auth_request + if timeout is None + else functools.partial(self._auth_request, timeout=timeout) + ) + + remaining_time = max_allowed_time + + with requests.TimeoutGuard(remaining_time, asyncio.TimeoutError) as guard: + await self.credentials.before_request( + auth_request, method, url, request_headers + ) + + with requests.TimeoutGuard(remaining_time, asyncio.TimeoutError) as guard: + response = await super(AuthorizedSession, self).request( + method, + url, + data=data, + headers=request_headers, + timeout=timeout, + **kwargs, + ) + + remaining_time = guard.remaining_timeout + + if ( + response.status in self._refresh_status_codes + and _credential_refresh_attempt < self._max_refresh_attempts + ): + + requests._LOGGER.info( + "Refreshing credentials due to a %s response. Attempt %s/%s.", + response.status, + _credential_refresh_attempt + 1, + self._max_refresh_attempts, + ) + + # Do not apply the timeout unconditionally in order to not override the + # _auth_request's default timeout. + auth_request = ( + self._auth_request + if timeout is None + else functools.partial(self._auth_request, timeout=timeout) + ) + + with requests.TimeoutGuard( + remaining_time, asyncio.TimeoutError + ) as guard: + async with self._refresh_lock: + await self._loop.run_in_executor( + None, self.credentials.refresh, auth_request + ) + + remaining_time = guard.remaining_timeout + + return await self.request( + method, + url, + data=data, + headers=headers, + max_allowed_time=remaining_time, + timeout=timeout, + _credential_refresh_attempt=_credential_refresh_attempt + 1, + **kwargs, + ) + + return response diff --git a/.venv/lib/python3.12/site-packages/google/auth/transport/_custom_tls_signer.py b/.venv/lib/python3.12/site-packages/google/auth/transport/_custom_tls_signer.py new file mode 100644 index 00000000..9279158d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/transport/_custom_tls_signer.py @@ -0,0 +1,283 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Code for configuring client side TLS to offload the signing operation to +signing libraries. +""" + +import ctypes +import json +import logging +import os +import sys + +import cffi # type: ignore + +from google.auth import exceptions + +_LOGGER = logging.getLogger(__name__) + +# C++ offload lib requires google-auth lib to provide the following callback: +# using SignFunc = int (*)(unsigned char *sig, size_t *sig_len, +# const unsigned char *tbs, size_t tbs_len) +# The bytes to be signed and the length are provided via `tbs` and `tbs_len`, +# the callback computes the signature, and write the signature and its length +# into `sig` and `sig_len`. +# If the signing is successful, the callback returns 1, otherwise it returns 0. +SIGN_CALLBACK_CTYPE = ctypes.CFUNCTYPE( + ctypes.c_int, # return type + ctypes.POINTER(ctypes.c_ubyte), # sig + ctypes.POINTER(ctypes.c_size_t), # sig_len + ctypes.POINTER(ctypes.c_ubyte), # tbs + ctypes.c_size_t, # tbs_len +) + + +# Cast SSL_CTX* to void* +def _cast_ssl_ctx_to_void_p_pyopenssl(ssl_ctx): + return ctypes.cast(int(cffi.FFI().cast("intptr_t", ssl_ctx)), ctypes.c_void_p) + + +# Cast SSL_CTX* to void* +def _cast_ssl_ctx_to_void_p_stdlib(context): + return ctypes.c_void_p.from_address( + id(context) + ctypes.sizeof(ctypes.c_void_p) * 2 + ) + + +# Load offload library and set up the function types. +def load_offload_lib(offload_lib_path): + _LOGGER.debug("loading offload library from %s", offload_lib_path) + + # winmode parameter is only available for python 3.8+. + lib = ( + ctypes.CDLL(offload_lib_path, winmode=0) + if sys.version_info >= (3, 8) and os.name == "nt" + else ctypes.CDLL(offload_lib_path) + ) + + # Set up types for: + # int ConfigureSslContext(SignFunc sign_func, const char *cert, SSL_CTX *ctx) + lib.ConfigureSslContext.argtypes = [ + SIGN_CALLBACK_CTYPE, + ctypes.c_char_p, + ctypes.c_void_p, + ] + lib.ConfigureSslContext.restype = ctypes.c_int + + return lib + + +# Load signer library and set up the function types. +# See: https://github.com/googleapis/enterprise-certificate-proxy/blob/main/cshared/main.go +def load_signer_lib(signer_lib_path): + _LOGGER.debug("loading signer library from %s", signer_lib_path) + + # winmode parameter is only available for python 3.8+. + lib = ( + ctypes.CDLL(signer_lib_path, winmode=0) + if sys.version_info >= (3, 8) and os.name == "nt" + else ctypes.CDLL(signer_lib_path) + ) + + # Set up types for: + # func GetCertPemForPython(configFilePath *C.char, certHolder *byte, certHolderLen int) + lib.GetCertPemForPython.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int] + # Returns: certLen + lib.GetCertPemForPython.restype = ctypes.c_int + + # Set up types for: + # func SignForPython(configFilePath *C.char, digest *byte, digestLen int, + # sigHolder *byte, sigHolderLen int) + lib.SignForPython.argtypes = [ + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.c_int, + ctypes.c_char_p, + ctypes.c_int, + ] + # Returns: the signature length + lib.SignForPython.restype = ctypes.c_int + + return lib + + +def load_provider_lib(provider_lib_path): + _LOGGER.debug("loading provider library from %s", provider_lib_path) + + # winmode parameter is only available for python 3.8+. 
+    lib = (
+        ctypes.CDLL(provider_lib_path, winmode=0)
+        if sys.version_info >= (3, 8) and os.name == "nt"
+        else ctypes.CDLL(provider_lib_path)
+    )
+
+    lib.ECP_attach_to_ctx.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
+    lib.ECP_attach_to_ctx.restype = ctypes.c_int
+
+    return lib
+
+
+# Computes SHA256 hash.
+def _compute_sha256_digest(to_be_signed, to_be_signed_len):
+    from cryptography.hazmat.primitives import hashes
+
+    data = ctypes.string_at(to_be_signed, to_be_signed_len)
+    hash = hashes.Hash(hashes.SHA256())
+    hash.update(data)
+    return hash.finalize()
+
+
+# Create the signing callback. The actual signing work is done by the
+# `SignForPython` method from the signer lib.
+def get_sign_callback(signer_lib, config_file_path):
+    def sign_callback(sig, sig_len, tbs, tbs_len):
+        _LOGGER.debug("calling sign callback...")
+
+        digest = _compute_sha256_digest(tbs, tbs_len)
+        digestArray = ctypes.c_char * len(digest)
+
+        # Reserve 2000 bytes for the signature; this should be more than enough.
+        # An RSA signature is 256 bytes, an EC signature is 70~72.
+        sig_holder_len = 2000
+        sig_holder = ctypes.create_string_buffer(sig_holder_len)
+
+        signature_len = signer_lib.SignForPython(
+            config_file_path.encode(),  # configFilePath
+            digestArray.from_buffer(bytearray(digest)),  # digest
+            len(digest),  # digestLen
+            sig_holder,  # sigHolder
+            sig_holder_len,  # sigHolderLen
+        )
+
+        if signature_len == 0:
+            # Signing failed, return 0.
+            return 0
+
+        sig_len[0] = signature_len
+        bs = bytearray(sig_holder)
+        for i in range(signature_len):
+            sig[i] = bs[i]
+
+        return 1
+
+    return SIGN_CALLBACK_CTYPE(sign_callback)
+
+
+# Obtain the certificate bytes by calling the `GetCertPemForPython` method from
+# the signer lib. The method is called twice: the first call computes the
+# cert length, then we create a buffer to hold the cert and call it again to
+# fill the buffer.
+def get_cert(signer_lib, config_file_path):
+    # First call to calculate the cert length.
+    cert_len = signer_lib.GetCertPemForPython(
+        config_file_path.encode(),  # configFilePath
+        None,  # certHolder
+        0,  # certHolderLen
+    )
+    if cert_len == 0:
+        raise exceptions.MutualTLSChannelError("failed to get certificate")
+
+    # Then we create an array to hold the cert, and call again to fill the cert.
+    cert_holder = ctypes.create_string_buffer(cert_len)
+    signer_lib.GetCertPemForPython(
+        config_file_path.encode(),  # configFilePath
+        cert_holder,  # certHolder
+        cert_len,  # certHolderLen
+    )
+    return bytes(cert_holder)
+
+
+class CustomTlsSigner(object):
+    def __init__(self, enterprise_cert_file_path):
+        """
+        This class loads the offload and signer libraries, calls APIs from
+        these libraries to obtain the cert and a signing callback, and attaches
+        them to the SSL context. The cert and the signing callback will be used
+        for client authentication in the TLS handshake.
+
+        Args:
+            enterprise_cert_file_path (str): the path to an enterprise cert JSON
+                file. The file should contain the following field:
+
+                {
+                    "libs": {
+                        "ecp_client": "...",
+                        "tls_offload": "..."
+ } + } + """ + self._enterprise_cert_file_path = enterprise_cert_file_path + self._cert = None + self._sign_callback = None + self._provider_lib = None + + def load_libraries(self): + with open(self._enterprise_cert_file_path, "r") as f: + enterprise_cert_json = json.load(f) + libs = enterprise_cert_json.get("libs", {}) + + signer_library = libs.get("ecp_client", None) + offload_library = libs.get("tls_offload", None) + provider_library = libs.get("ecp_provider", None) + + # Using newer provider implementation. This is mutually exclusive to the + # offload implementation. + if provider_library: + self._provider_lib = load_provider_lib(provider_library) + return + + # Using old offload implementation + if offload_library and signer_library: + self._offload_lib = load_offload_lib(offload_library) + self._signer_lib = load_signer_lib(signer_library) + self.set_up_custom_key() + return + + raise exceptions.MutualTLSChannelError("enterprise cert file is invalid") + + def set_up_custom_key(self): + # We need to keep a reference of the cert and sign callback so it won't + # be garbage collected, otherwise it will crash when used by signer lib. + self._cert = get_cert(self._signer_lib, self._enterprise_cert_file_path) + self._sign_callback = get_sign_callback( + self._signer_lib, self._enterprise_cert_file_path + ) + + def should_use_provider(self): + if self._provider_lib: + return True + return False + + def attach_to_ssl_context(self, ctx): + if self.should_use_provider(): + if not self._provider_lib.ECP_attach_to_ctx( + _cast_ssl_ctx_to_void_p_stdlib(ctx), + self._enterprise_cert_file_path.encode("ascii"), + ): + raise exceptions.MutualTLSChannelError( + "failed to configure ECP Provider SSL context" + ) + elif self._offload_lib and self._signer_lib: + if not self._offload_lib.ConfigureSslContext( + self._sign_callback, + ctypes.c_char_p(self._cert), + _cast_ssl_ctx_to_void_p_pyopenssl(ctx._ctx._context), + ): + raise exceptions.MutualTLSChannelError( + "failed to configure ECP Offload SSL context" + ) + else: + raise exceptions.MutualTLSChannelError("Invalid ECP configuration.") diff --git a/.venv/lib/python3.12/site-packages/google/auth/transport/_http_client.py b/.venv/lib/python3.12/site-packages/google/auth/transport/_http_client.py new file mode 100644 index 00000000..cec0ab73 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/transport/_http_client.py @@ -0,0 +1,113 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Transport adapter for http.client, for internal use only.""" + +import http.client as http_client +import logging +import socket +import urllib + +from google.auth import exceptions +from google.auth import transport + +_LOGGER = logging.getLogger(__name__) + + +class Response(transport.Response): + """http.client transport response adapter. + + Args: + response (http.client.HTTPResponse): The raw http client response. 
+ """ + + def __init__(self, response): + self._status = response.status + self._headers = {key.lower(): value for key, value in response.getheaders()} + self._data = response.read() + + @property + def status(self): + return self._status + + @property + def headers(self): + return self._headers + + @property + def data(self): + return self._data + + +class Request(transport.Request): + """http.client transport request adapter.""" + + def __call__( + self, url, method="GET", body=None, headers=None, timeout=None, **kwargs + ): + """Make an HTTP request using http.client. + + Args: + url (str): The URI to be requested. + method (str): The HTTP method to use for the request. Defaults + to 'GET'. + body (bytes): The payload / body in HTTP request. + headers (Mapping): Request headers. + timeout (Optional(int)): The number of seconds to wait for a + response from the server. If not specified or if None, the + socket global default timeout will be used. + kwargs: Additional arguments passed throught to the underlying + :meth:`~http.client.HTTPConnection.request` method. + + Returns: + Response: The HTTP response. + + Raises: + google.auth.exceptions.TransportError: If any exception occurred. + """ + # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client. + if timeout is None: + timeout = socket._GLOBAL_DEFAULT_TIMEOUT + + # http.client doesn't allow None as the headers argument. + if headers is None: + headers = {} + + # http.client needs the host and path parts specified separately. + parts = urllib.parse.urlsplit(url) + path = urllib.parse.urlunsplit( + ("", "", parts.path, parts.query, parts.fragment) + ) + + if parts.scheme != "http": + raise exceptions.TransportError( + "http.client transport only supports the http scheme, {}" + "was specified".format(parts.scheme) + ) + + connection = http_client.HTTPConnection(parts.netloc, timeout=timeout) + + try: + _LOGGER.debug("Making request: %s %s", method, url) + + connection.request(method, path, body=body, headers=headers, **kwargs) + response = connection.getresponse() + return Response(response) + + except (http_client.HTTPException, socket.error) as caught_exc: + new_exc = exceptions.TransportError(caught_exc) + raise new_exc from caught_exc + + finally: + connection.close() diff --git a/.venv/lib/python3.12/site-packages/google/auth/transport/_mtls_helper.py b/.venv/lib/python3.12/site-packages/google/auth/transport/_mtls_helper.py new file mode 100644 index 00000000..68568dd6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/transport/_mtls_helper.py @@ -0,0 +1,407 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Helper functions for getting mTLS cert and key.""" + +import json +import logging +from os import environ, path +import re +import subprocess + +from google.auth import exceptions + +CONTEXT_AWARE_METADATA_PATH = "~/.secureConnect/context_aware_metadata.json" +CERTIFICATE_CONFIGURATION_DEFAULT_PATH = "~/.config/gcloud/certificate_config.json" +_CERTIFICATE_CONFIGURATION_ENV = "GOOGLE_API_CERTIFICATE_CONFIG" +_CERT_PROVIDER_COMMAND = "cert_provider_command" +_CERT_REGEX = re.compile( + b"-----BEGIN CERTIFICATE-----.+-----END CERTIFICATE-----\r?\n?", re.DOTALL +) + +# support various format of key files, e.g. +# "-----BEGIN PRIVATE KEY-----...", +# "-----BEGIN EC PRIVATE KEY-----...", +# "-----BEGIN RSA PRIVATE KEY-----..." +# "-----BEGIN ENCRYPTED PRIVATE KEY-----" +_KEY_REGEX = re.compile( + b"-----BEGIN [A-Z ]*PRIVATE KEY-----.+-----END [A-Z ]*PRIVATE KEY-----\r?\n?", + re.DOTALL, +) + +_LOGGER = logging.getLogger(__name__) + + +_PASSPHRASE_REGEX = re.compile( + b"-----BEGIN PASSPHRASE-----(.+)-----END PASSPHRASE-----", re.DOTALL +) + + +def _check_config_path(config_path): + """Checks for config file path. If it exists, returns the absolute path with user expansion; + otherwise returns None. + + Args: + config_path (str): The config file path for either context_aware_metadata.json or certificate_config.json for example + + Returns: + str: absolute path if exists and None otherwise. + """ + config_path = path.expanduser(config_path) + if not path.exists(config_path): + _LOGGER.debug("%s is not found.", config_path) + return None + return config_path + + +def _load_json_file(path): + """Reads and loads JSON from the given path. Used to read both X509 workload certificate and + secure connect configurations. + + Args: + path (str): the path to read from. + + Returns: + Dict[str, str]: The JSON stored at the file. + + Raises: + google.auth.exceptions.ClientCertError: If failed to parse the file as JSON. + """ + try: + with open(path) as f: + json_data = json.load(f) + except ValueError as caught_exc: + new_exc = exceptions.ClientCertError(caught_exc) + raise new_exc from caught_exc + + return json_data + + +def _get_workload_cert_and_key(certificate_config_path=None): + """Read the workload identity cert and key files specified in the certificate config provided. + If no config path is provided, check the environment variable: "GOOGLE_API_CERTIFICATE_CONFIG" + first, then the well known gcloud location: "~/.config/gcloud/certificate_config.json". + + Args: + certificate_config_path (string): The certificate config path. If no path is provided, + the environment variable will be checked first, then the well known gcloud location. + + Returns: + Tuple[Optional[bytes], Optional[bytes]]: client certificate bytes in PEM format and key + bytes in PEM format. + + Raises: + google.auth.exceptions.ClientCertError: if problems occurs when retrieving + the certificate or key information. + """ + + cert_path, key_path = _get_workload_cert_and_key_paths(certificate_config_path) + + if cert_path is None and key_path is None: + return None, None + + return _read_cert_and_key_files(cert_path, key_path) + + +def _get_cert_config_path(certificate_config_path=None): + """Get the certificate configuration path based on the following order: + + 1: Explicit override, if set + 2: Environment variable, if set + 3: Well-known location + + Returns "None" if the selected config file does not exist. + + Args: + certificate_config_path (string): The certificate config path. 
If provided, the well known + location and environment variable will be ignored. + + Returns: + The absolute path of the certificate config file, and None if the file does not exist. + """ + + if certificate_config_path is None: + env_path = environ.get(_CERTIFICATE_CONFIGURATION_ENV, None) + if env_path is not None and env_path != "": + certificate_config_path = env_path + else: + certificate_config_path = CERTIFICATE_CONFIGURATION_DEFAULT_PATH + + certificate_config_path = path.expanduser(certificate_config_path) + if not path.exists(certificate_config_path): + return None + return certificate_config_path + + +def _get_workload_cert_and_key_paths(config_path): + absolute_path = _get_cert_config_path(config_path) + if absolute_path is None: + return None, None + + data = _load_json_file(absolute_path) + + if "cert_configs" not in data: + raise exceptions.ClientCertError( + 'Certificate config file {} is in an invalid format, a "cert configs" object is expected'.format( + absolute_path + ) + ) + cert_configs = data["cert_configs"] + + if "workload" not in cert_configs: + raise exceptions.ClientCertError( + 'Certificate config file {} is in an invalid format, a "workload" cert config is expected'.format( + absolute_path + ) + ) + workload = cert_configs["workload"] + + if "cert_path" not in workload: + raise exceptions.ClientCertError( + 'Certificate config file {} is in an invalid format, a "cert_path" is expected in the workload cert config'.format( + absolute_path + ) + ) + cert_path = workload["cert_path"] + + if "key_path" not in workload: + raise exceptions.ClientCertError( + 'Certificate config file {} is in an invalid format, a "key_path" is expected in the workload cert config'.format( + absolute_path + ) + ) + key_path = workload["key_path"] + + return cert_path, key_path + + +def _read_cert_and_key_files(cert_path, key_path): + cert_data = _read_cert_file(cert_path) + key_data = _read_key_file(key_path) + + return cert_data, key_data + + +def _read_cert_file(cert_path): + with open(cert_path, "rb") as cert_file: + cert_data = cert_file.read() + + cert_match = re.findall(_CERT_REGEX, cert_data) + if len(cert_match) != 1: + raise exceptions.ClientCertError( + "Certificate file {} is in an invalid format, a single PEM formatted certificate is expected".format( + cert_path + ) + ) + return cert_match[0] + + +def _read_key_file(key_path): + with open(key_path, "rb") as key_file: + key_data = key_file.read() + + key_match = re.findall(_KEY_REGEX, key_data) + if len(key_match) != 1: + raise exceptions.ClientCertError( + "Private key file {} is in an invalid format, a single PEM formatted private key is expected".format( + key_path + ) + ) + + return key_match[0] + + +def _run_cert_provider_command(command, expect_encrypted_key=False): + """Run the provided command, and return client side mTLS cert, key and + passphrase. + + Args: + command (List[str]): cert provider command. + expect_encrypted_key (bool): If encrypted private key is expected. + + Returns: + Tuple[bytes, bytes, bytes]: client certificate bytes in PEM format, key + bytes in PEM format and passphrase bytes. + + Raises: + google.auth.exceptions.ClientCertError: if problems occurs when running + the cert provider command or generating cert, key and passphrase. 
+ """ + try: + process = subprocess.Popen( + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + stdout, stderr = process.communicate() + except OSError as caught_exc: + new_exc = exceptions.ClientCertError(caught_exc) + raise new_exc from caught_exc + + # Check cert provider command execution error. + if process.returncode != 0: + raise exceptions.ClientCertError( + "Cert provider command returns non-zero status code %s" % process.returncode + ) + + # Extract certificate (chain), key and passphrase. + cert_match = re.findall(_CERT_REGEX, stdout) + if len(cert_match) != 1: + raise exceptions.ClientCertError("Client SSL certificate is missing or invalid") + key_match = re.findall(_KEY_REGEX, stdout) + if len(key_match) != 1: + raise exceptions.ClientCertError("Client SSL key is missing or invalid") + passphrase_match = re.findall(_PASSPHRASE_REGEX, stdout) + + if expect_encrypted_key: + if len(passphrase_match) != 1: + raise exceptions.ClientCertError("Passphrase is missing or invalid") + if b"ENCRYPTED" not in key_match[0]: + raise exceptions.ClientCertError("Encrypted private key is expected") + return cert_match[0], key_match[0], passphrase_match[0].strip() + + if b"ENCRYPTED" in key_match[0]: + raise exceptions.ClientCertError("Encrypted private key is not expected") + if len(passphrase_match) > 0: + raise exceptions.ClientCertError("Passphrase is not expected") + return cert_match[0], key_match[0], None + + +def get_client_ssl_credentials( + generate_encrypted_key=False, + context_aware_metadata_path=CONTEXT_AWARE_METADATA_PATH, + certificate_config_path=CERTIFICATE_CONFIGURATION_DEFAULT_PATH, +): + """Returns the client side certificate, private key and passphrase. + + We look for certificates and keys with the following order of priority: + 1. Certificate and key specified by certificate_config.json. + Currently, only X.509 workload certificates are supported. + 2. Certificate and key specified by context aware metadata (i.e. SecureConnect). + + Args: + generate_encrypted_key (bool): If set to True, encrypted private key + and passphrase will be generated; otherwise, unencrypted private key + will be generated and passphrase will be None. This option only + affects keys obtained via context_aware_metadata.json. + context_aware_metadata_path (str): The context_aware_metadata.json file path. + certificate_config_path (str): The certificate_config.json file path. + + Returns: + Tuple[bool, bytes, bytes, bytes]: + A boolean indicating if cert, key and passphrase are obtained, the + cert bytes and key bytes both in PEM format, and passphrase bytes. + + Raises: + google.auth.exceptions.ClientCertError: if problems occurs when getting + the cert, key and passphrase. + """ + + # 1. Check for certificate config json. + cert_config_path = _check_config_path(certificate_config_path) + if cert_config_path: + # Attempt to retrieve X.509 Workload cert and key. + cert, key = _get_workload_cert_and_key(cert_config_path) + if cert and key: + return True, cert, key, None + + # 2. Check for context aware metadata json + metadata_path = _check_config_path(context_aware_metadata_path) + + if metadata_path: + metadata_json = _load_json_file(metadata_path) + + if _CERT_PROVIDER_COMMAND not in metadata_json: + raise exceptions.ClientCertError("Cert provider command is not found") + + command = metadata_json[_CERT_PROVIDER_COMMAND] + + if generate_encrypted_key and "--with_passphrase" not in command: + command.append("--with_passphrase") + + # Execute the command. 
+        cert, key, passphrase = _run_cert_provider_command(
+            command, expect_encrypted_key=generate_encrypted_key
+        )
+        return True, cert, key, passphrase
+
+    return False, None, None, None
+
+
+def get_client_cert_and_key(client_cert_callback=None):
+    """Returns the client side certificate and private key. The function first
+    tries to get the certificate and key from client_cert_callback; if the callback
+    is None or doesn't provide the certificate and key, the function tries application
+    default SSL credentials.
+
+    Args:
+        client_cert_callback (Optional[Callable[[], (bytes, bytes)]]): An
+            optional callback which returns client certificate bytes and private
+            key bytes both in PEM format.
+
+    Returns:
+        Tuple[bool, bytes, bytes]:
+            A boolean indicating if cert and key are obtained, the cert bytes
+            and key bytes both in PEM format.
+
+    Raises:
+        google.auth.exceptions.ClientCertError: if a problem occurs when getting
+            the cert and key.
+    """
+    if client_cert_callback:
+        cert, key = client_cert_callback()
+        return True, cert, key
+
+    has_cert, cert, key, _ = get_client_ssl_credentials(generate_encrypted_key=False)
+    return has_cert, cert, key
+
+
+def decrypt_private_key(key, passphrase):
+    """A helper function to decrypt the private key with the given passphrase.
+    The google-auth library doesn't support passphrase protected private keys for
+    mutual TLS channels. This helper function can be used to decrypt a
+    passphrase protected private key in order to establish a mutual TLS channel.
+
+    For example, if you have a function which produces a client cert, passphrase
+    protected private key and passphrase, you can convert it to a client cert
+    callback function accepted by google-auth::
+
+        from google.auth.transport import _mtls_helper
+
+        def your_client_cert_function():
+            return cert, encrypted_key, passphrase
+
+        # callback accepted by google-auth for mutual TLS channel.
+        def client_cert_callback():
+            cert, encrypted_key, passphrase = your_client_cert_function()
+            decrypted_key = _mtls_helper.decrypt_private_key(encrypted_key,
+                passphrase)
+            return cert, decrypted_key
+
+    Args:
+        key (bytes): The private key bytes in PEM format.
+        passphrase (bytes): The passphrase bytes.
+
+    Returns:
+        bytes: The decrypted private key in PEM format.
+
+    Raises:
+        ImportError: If pyOpenSSL is not installed.
+        OpenSSL.crypto.Error: If there is any problem decrypting the private key.
+    """
+    from OpenSSL import crypto
+
+    # First convert encrypted_key_bytes to PKey object
+    pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key, passphrase=passphrase)
+
+    # Then dump the decrypted key bytes
+    return crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)
diff --git a/.venv/lib/python3.12/site-packages/google/auth/transport/_requests_base.py b/.venv/lib/python3.12/site-packages/google/auth/transport/_requests_base.py
new file mode 100644
index 00000000..0608223d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/auth/transport/_requests_base.py
@@ -0,0 +1,53 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport adapter for Base Requests."""
+# NOTE: The coverage for this file is temporarily disabled in `.coveragerc`
+# since it is currently unused.
+
+import abc
+
+
+_DEFAULT_TIMEOUT = 120  # in seconds
+
+
+class _BaseAuthorizedSession(metaclass=abc.ABCMeta):
+    """Base class for a Request Session with credentials. This class is intended to capture
+    the common logic between synchronous and asynchronous request sessions and is not intended to
+    be instantiated directly.
+
+    Args:
+        credentials (google.auth._credentials_base.BaseCredentials): The credentials to
+            add to the request.
+    """
+
+    def __init__(self, credentials):
+        self.credentials = credentials
+
+    @abc.abstractmethod
+    def request(
+        self,
+        method,
+        url,
+        data=None,
+        headers=None,
+        max_allowed_time=None,
+        timeout=_DEFAULT_TIMEOUT,
+        **kwargs
+    ):
+        raise NotImplementedError("Request must be implemented")
+
+    @abc.abstractmethod
+    def close(self):
+        raise NotImplementedError("Close must be implemented")
diff --git a/.venv/lib/python3.12/site-packages/google/auth/transport/grpc.py b/.venv/lib/python3.12/site-packages/google/auth/transport/grpc.py
new file mode 100644
index 00000000..1ebe1379
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/auth/transport/grpc.py
@@ -0,0 +1,343 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Authorization support for gRPC."""
+
+from __future__ import absolute_import
+
+import logging
+import os
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth.transport import _mtls_helper
+from google.oauth2 import service_account
+
+try:
+    import grpc  # type: ignore
+except ImportError as caught_exc:  # pragma: NO COVER
+    raise ImportError(
+        "gRPC is not installed, please install the grpcio package to use the gRPC transport."
+    ) from caught_exc
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class AuthMetadataPlugin(grpc.AuthMetadataPlugin):
+    """A `gRPC AuthMetadataPlugin`_ that inserts the credentials into each
+    request.
+
+    .. _gRPC AuthMetadataPlugin:
+        http://www.grpc.io/grpc/python/grpc.html#grpc.AuthMetadataPlugin
+
+    Args:
+        credentials (google.auth.credentials.Credentials): The credentials to
+            add to requests.
+        request (google.auth.transport.Request): An HTTP transport request
+            object used to refresh credentials as needed.
+        default_host (Optional[str]): A host like "pubsub.googleapis.com".
+            This is used when a self-signed JWT is created from service
+            account credentials.
+    """
+
+    def __init__(self, credentials, request, default_host=None):
+        # pylint: disable=no-value-for-parameter
+        # pylint doesn't realize that the super method takes no arguments
+        # because this class has the same name as the superclass.
+ super(AuthMetadataPlugin, self).__init__() + self._credentials = credentials + self._request = request + self._default_host = default_host + + def _get_authorization_headers(self, context): + """Gets the authorization headers for a request. + + Returns: + Sequence[Tuple[str, str]]: A list of request headers (key, value) + to add to the request. + """ + headers = {} + + # https://google.aip.dev/auth/4111 + # Attempt to use self-signed JWTs when a service account is used. + # A default host must be explicitly provided since it cannot always + # be determined from the context.service_url. + if isinstance(self._credentials, service_account.Credentials): + self._credentials._create_self_signed_jwt( + "https://{}/".format(self._default_host) if self._default_host else None + ) + + self._credentials.before_request( + self._request, context.method_name, context.service_url, headers + ) + + return list(headers.items()) + + def __call__(self, context, callback): + """Passes authorization metadata into the given callback. + + Args: + context (grpc.AuthMetadataContext): The RPC context. + callback (grpc.AuthMetadataPluginCallback): The callback that will + be invoked to pass in the authorization metadata. + """ + callback(self._get_authorization_headers(context), None) + + +def secure_authorized_channel( + credentials, + request, + target, + ssl_credentials=None, + client_cert_callback=None, + **kwargs +): + """Creates a secure authorized gRPC channel. + + This creates a channel with SSL and :class:`AuthMetadataPlugin`. This + channel can be used to create a stub that can make authorized requests. + Users can configure client certificate or rely on device certificates to + establish a mutual TLS channel, if the `GOOGLE_API_USE_CLIENT_CERTIFICATE` + variable is explicitly set to `true`. + + Example:: + + import google.auth + import google.auth.transport.grpc + import google.auth.transport.requests + from google.cloud.speech.v1 import cloud_speech_pb2 + + # Get credentials. + credentials, _ = google.auth.default() + + # Get an HTTP request function to refresh credentials. + request = google.auth.transport.requests.Request() + + # Create a channel. + channel = google.auth.transport.grpc.secure_authorized_channel( + credentials, regular_endpoint, request, + ssl_credentials=grpc.ssl_channel_credentials()) + + # Use the channel to create a stub. + cloud_speech.create_Speech_stub(channel) + + Usage: + + There are actually a couple of options to create a channel, depending on if + you want to create a regular or mutual TLS channel. 
+
+    First let's list the endpoints (regular vs mutual TLS) to choose from::
+
+        regular_endpoint = 'speech.googleapis.com:443'
+        mtls_endpoint = 'speech.mtls.googleapis.com:443'
+
+    Option 1: create a regular (non-mutual) TLS channel by explicitly setting
+    the ssl_credentials::
+
+        regular_ssl_credentials = grpc.ssl_channel_credentials()
+
+        channel = google.auth.transport.grpc.secure_authorized_channel(
+            credentials, regular_endpoint, request,
+            ssl_credentials=regular_ssl_credentials)
+
+    Option 2: create a mutual TLS channel by calling a callback which returns
+    the client side certificate and the key (note that the
+    `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable must be explicitly
+    set to `true`)::
+
+        def my_client_cert_callback():
+            code_to_load_client_cert_and_key()
+            if loaded:
+                return (pem_cert_bytes, pem_key_bytes)
+            raise MyClientCertFailureException()
+
+        try:
+            channel = google.auth.transport.grpc.secure_authorized_channel(
+                credentials, mtls_endpoint, request,
+                client_cert_callback=my_client_cert_callback)
+        except MyClientCertFailureException:
+            # handle the exception
+
+    Option 3: use application default SSL credentials. It searches and uses
+    the command in a context aware metadata file, which is available on devices
+    with endpoint verification support (note that the
+    `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable must be explicitly
+    set to `true`).
+    See https://cloud.google.com/endpoint-verification/docs/overview::
+
+        try:
+            default_ssl_credentials = SslCredentials()
+        except:
+            # Exception can be raised if the context aware metadata is malformed.
+            # See :class:`SslCredentials` for the possible exceptions.
+
+        # Choose the endpoint based on the SSL credentials type.
+        if default_ssl_credentials.is_mtls:
+            endpoint_to_use = mtls_endpoint
+        else:
+            endpoint_to_use = regular_endpoint
+        channel = google.auth.transport.grpc.secure_authorized_channel(
+            credentials, endpoint_to_use, request,
+            ssl_credentials=default_ssl_credentials)
+
+    Option 4: not setting ssl_credentials and client_cert_callback. For devices
+    without endpoint verification support, or when the
+    `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not `true`, a
+    regular TLS channel is created; otherwise, a mutual TLS channel is created.
+    However, the call should be wrapped in a try/except block in case of
+    malformed context aware metadata.
+
+    The following code uses regular_endpoint; it works the same whether the
+    created channel is regular or mutual TLS. A regular endpoint ignores the
+    client certificate and key::
+
+        channel = google.auth.transport.grpc.secure_authorized_channel(
+            credentials, regular_endpoint, request)
+
+    The following code uses mtls_endpoint; if the created channel is regular
+    and the API mtls_endpoint is configured to require client SSL credentials,
+    API calls using this channel will be rejected::
+
+        channel = google.auth.transport.grpc.secure_authorized_channel(
+            credentials, mtls_endpoint, request)
+
+    Args:
+        credentials (google.auth.credentials.Credentials): The credentials to
+            add to requests.
+        request (google.auth.transport.Request): An HTTP transport request
+            object used to refresh credentials as needed. Even though gRPC
+            is a separate transport, there's no way to refresh the credentials
+            without using a standard http transport.
+        target (str): The host and port of the service.
+        ssl_credentials (grpc.ChannelCredentials): Optional SSL channel
+            credentials. This can be used to specify different certificates.
This argument is mutually exclusive with client_cert_callback;
+            providing both will raise an exception.
+            If ssl_credentials and client_cert_callback are None, application
+            default SSL credentials are used if the `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+            environment variable is explicitly set to `true`; otherwise one-way
+            TLS credentials are used.
+        client_cert_callback (Callable[[], (bytes, bytes)]): Optional
+            callback function to obtain the client certificate and key for a mutual
+            TLS connection. This argument is mutually exclusive with
+            ssl_credentials; providing both will raise an exception.
+            This argument does nothing unless the `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+            environment variable is explicitly set to `true`.
+        kwargs: Additional arguments to pass to :func:`grpc.secure_channel`.
+
+    Returns:
+        grpc.Channel: The created gRPC channel.
+
+    Raises:
+        google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+            creation failed for any reason.
+    """
+    # Create the metadata plugin for inserting the authorization header.
+    metadata_plugin = AuthMetadataPlugin(credentials, request)
+
+    # Create a set of grpc.CallCredentials using the metadata plugin.
+    google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin)
+
+    if ssl_credentials and client_cert_callback:
+        raise exceptions.MalformedError(
+            "Received both ssl_credentials and client_cert_callback; "
+            "these are mutually exclusive."
+        )
+
+    # If SSL credentials are not explicitly set, try client_cert_callback and ADC.
+    if not ssl_credentials:
+        use_client_cert = os.getenv(
+            environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
+        )
+        if use_client_cert == "true" and client_cert_callback:
+            # Use the callback if provided.
+            cert, key = client_cert_callback()
+            ssl_credentials = grpc.ssl_channel_credentials(
+                certificate_chain=cert, private_key=key
+            )
+        elif use_client_cert == "true":
+            # Use application default SSL credentials.
+            adc_ssl_credentials = SslCredentials()
+            ssl_credentials = adc_ssl_credentials.ssl_credentials
+        else:
+            ssl_credentials = grpc.ssl_channel_credentials()
+
+    # Combine the ssl credentials and the authorization credentials.
+    composite_credentials = grpc.composite_channel_credentials(
+        ssl_credentials, google_auth_credentials
+    )
+
+    return grpc.secure_channel(target, composite_credentials, **kwargs)
+
+
+class SslCredentials:
+    """Class for application default SSL credentials.
+
+    The behavior is controlled by the `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment
+    variable, whose default value is `false`. The client certificate will not be used
+    unless the environment variable is explicitly set to `true`. See
+    https://google.aip.dev/auth/4114
+
+    If the environment variable is `true`, then for devices with endpoint verification
+    support, a device certificate will be automatically loaded and mutual TLS will
+    be established.
+    See https://cloud.google.com/endpoint-verification/docs/overview.
+    """
+
+    def __init__(self):
+        use_client_cert = os.getenv(
+            environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
+        )
+        if use_client_cert != "true":
+            self._is_mtls = False
+        else:
+            # Load client SSL credentials.
+            metadata_path = _mtls_helper._check_config_path(
+                _mtls_helper.CONTEXT_AWARE_METADATA_PATH
+            )
+            self._is_mtls = metadata_path is not None
+
+    @property
+    def ssl_credentials(self):
+        """Get the created SSL channel credentials.
+
+        For devices with endpoint verification support, if the device certificate
+        loading has any problems, corresponding exceptions will be raised.
For a device without endpoint verification support, no exceptions will be
+        raised.
+
+        Returns:
+            grpc.ChannelCredentials: The created grpc channel credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+                creation failed for any reason.
+        """
+        if self._is_mtls:
+            try:
+                _, cert, key, _ = _mtls_helper.get_client_ssl_credentials()
+                self._ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            except exceptions.ClientCertError as caught_exc:
+                new_exc = exceptions.MutualTLSChannelError(caught_exc)
+                raise new_exc from caught_exc
+        else:
+            self._ssl_credentials = grpc.ssl_channel_credentials()
+
+        return self._ssl_credentials
+
+    @property
+    def is_mtls(self):
+        """Indicates whether the created SSL channel credentials are mutual TLS."""
+        return self._is_mtls
diff --git a/.venv/lib/python3.12/site-packages/google/auth/transport/mtls.py b/.venv/lib/python3.12/site-packages/google/auth/transport/mtls.py
new file mode 100644
index 00000000..e7a7304f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/auth/transport/mtls.py
@@ -0,0 +1,112 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for mutual TLS."""
+
+from google.auth import exceptions
+from google.auth.transport import _mtls_helper
+
+
+def has_default_client_cert_source():
+    """Check if default client SSL credentials exist on the device.
+
+    Returns:
+        bool: indicating if the default client cert source exists.
+    """
+    if (
+        _mtls_helper._check_config_path(_mtls_helper.CONTEXT_AWARE_METADATA_PATH)
+        is not None
+    ):
+        return True
+    if (
+        _mtls_helper._check_config_path(
+            _mtls_helper.CERTIFICATE_CONFIGURATION_DEFAULT_PATH
+        )
+        is not None
+    ):
+        return True
+    return False
+
+
+def default_client_cert_source():
+    """Get a callback which returns the default client SSL credentials.
+
+    Returns:
+        Callable[[], [bytes, bytes]]: A callback which returns the default
+            client certificate bytes and private key bytes, both in PEM format.
+
+    Raises:
+        google.auth.exceptions.DefaultClientCertSourceError: If the default
+            client SSL credentials don't exist or are malformed.
+    """
+    if not has_default_client_cert_source():
+        raise exceptions.MutualTLSChannelError(
+            "Default client cert source doesn't exist"
+        )
+
+    def callback():
+        try:
+            _, cert_bytes, key_bytes = _mtls_helper.get_client_cert_and_key()
+        except (OSError, RuntimeError, ValueError) as caught_exc:
+            new_exc = exceptions.MutualTLSChannelError(caught_exc)
+            raise new_exc from caught_exc
+
+        return cert_bytes, key_bytes
+
+    return callback
+
+
+def default_client_encrypted_cert_source(cert_path, key_path):
+    """Get a callback which returns the default encrypted client SSL credentials.
+
+    Args:
+        cert_path (str): The cert file path. The default client certificate will
+            be written to this file when the returned callback is called.
+        key_path (str): The key file path.
The default encrypted client key will
+            be written to this file when the returned callback is called.
+
+    Returns:
+        Callable[[], [str, str, bytes]]: A callback which generates the default
+            client certificate, encrypted private key and passphrase. It writes
+            the certificate and private key into the cert_path and key_path, and
+            returns the cert_path, key_path and passphrase bytes.
+
+    Raises:
+        google.auth.exceptions.DefaultClientCertSourceError: If any problem
+            occurs when loading or saving the client certificate and key.
+    """
+    if not has_default_client_cert_source():
+        raise exceptions.MutualTLSChannelError(
+            "Default client encrypted cert source doesn't exist"
+        )
+
+    def callback():
+        try:
+            (
+                _,
+                cert_bytes,
+                key_bytes,
+                passphrase_bytes,
+            ) = _mtls_helper.get_client_ssl_credentials(generate_encrypted_key=True)
+            with open(cert_path, "wb") as cert_file:
+                cert_file.write(cert_bytes)
+            with open(key_path, "wb") as key_file:
+                key_file.write(key_bytes)
+        except (exceptions.ClientCertError, OSError) as caught_exc:
+            new_exc = exceptions.MutualTLSChannelError(caught_exc)
+            raise new_exc from caught_exc
+
+        return cert_path, key_path, passphrase_bytes
+
+    return callback
diff --git a/.venv/lib/python3.12/site-packages/google/auth/transport/requests.py b/.venv/lib/python3.12/site-packages/google/auth/transport/requests.py
new file mode 100644
index 00000000..23a69783
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/auth/transport/requests.py
@@ -0,0 +1,599 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport adapter for Requests."""
+
+from __future__ import absolute_import
+
+import functools
+import logging
+import numbers
+import os
+import time
+
+try:
+    import requests
+except ImportError as caught_exc:  # pragma: NO COVER
+    raise ImportError(
+        "The requests library is not installed, please install the requests package to use the requests transport."
+    ) from caught_exc
+import requests.adapters  # pylint: disable=ungrouped-imports
+import requests.exceptions  # pylint: disable=ungrouped-imports
+from requests.packages.urllib3.util.ssl_ import (  # type: ignore
+    create_urllib3_context,
+)  # pylint: disable=ungrouped-imports
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+import google.auth.transport._mtls_helper
+from google.oauth2 import service_account
+
+_LOGGER = logging.getLogger(__name__)
+
+_DEFAULT_TIMEOUT = 120  # in seconds
+
+
+class _Response(transport.Response):
+    """Requests transport response adapter.
+
+    Args:
+        response (requests.Response): The raw Requests response.
+ """ + + def __init__(self, response): + self._response = response + + @property + def status(self): + return self._response.status_code + + @property + def headers(self): + return self._response.headers + + @property + def data(self): + return self._response.content + + +class TimeoutGuard(object): + """A context manager raising an error if the suite execution took too long. + + Args: + timeout (Union[None, Union[float, Tuple[float, float]]]): + The maximum number of seconds a suite can run without the context + manager raising a timeout exception on exit. If passed as a tuple, + the smaller of the values is taken as a timeout. If ``None``, a + timeout error is never raised. + timeout_error_type (Optional[Exception]): + The type of the error to raise on timeout. Defaults to + :class:`requests.exceptions.Timeout`. + """ + + def __init__(self, timeout, timeout_error_type=requests.exceptions.Timeout): + self._timeout = timeout + self.remaining_timeout = timeout + self._timeout_error_type = timeout_error_type + + def __enter__(self): + self._start = time.time() + return self + + def __exit__(self, exc_type, exc_value, traceback): + if exc_value: + return # let the error bubble up automatically + + if self._timeout is None: + return # nothing to do, the timeout was not specified + + elapsed = time.time() - self._start + deadline_hit = False + + if isinstance(self._timeout, numbers.Number): + self.remaining_timeout = self._timeout - elapsed + deadline_hit = self.remaining_timeout <= 0 + else: + self.remaining_timeout = tuple(x - elapsed for x in self._timeout) + deadline_hit = min(self.remaining_timeout) <= 0 + + if deadline_hit: + raise self._timeout_error_type() + + +class Request(transport.Request): + """Requests request adapter. + + This class is used internally for making requests using various transports + in a consistent way. If you use :class:`AuthorizedSession` you do not need + to construct or use this class directly. + + This class can be useful if you want to manually refresh a + :class:`~google.auth.credentials.Credentials` instance:: + + import google.auth.transport.requests + import requests + + request = google.auth.transport.requests.Request() + + credentials.refresh(request) + + Args: + session (requests.Session): An instance :class:`requests.Session` used + to make HTTP requests. If not specified, a session will be created. + + .. automethod:: __call__ + """ + + def __init__(self, session=None): + if not session: + session = requests.Session() + + self.session = session + + def __del__(self): + try: + if hasattr(self, "session") and self.session is not None: + self.session.close() + except TypeError: + # NOTE: For certain Python binary built, the queue.Empty exception + # might not be considered a normal Python exception causing + # TypeError. + pass + + def __call__( + self, + url, + method="GET", + body=None, + headers=None, + timeout=_DEFAULT_TIMEOUT, + **kwargs + ): + """Make an HTTP request using requests. + + Args: + url (str): The URI to be requested. + method (str): The HTTP method to use for the request. Defaults + to 'GET'. + body (bytes): The payload or body in HTTP request. + headers (Mapping[str, str]): Request headers. + timeout (Optional[int]): The number of seconds to wait for a + response from the server. If not specified or if None, the + requests default timeout will be used. + kwargs: Additional arguments passed through to the underlying + requests :meth:`~requests.Session.request` method. + + Returns: + google.auth.transport.Response: The HTTP response. 
+ + Raises: + google.auth.exceptions.TransportError: If any exception occurred. + """ + try: + _LOGGER.debug("Making request: %s %s", method, url) + response = self.session.request( + method, url, data=body, headers=headers, timeout=timeout, **kwargs + ) + return _Response(response) + except requests.exceptions.RequestException as caught_exc: + new_exc = exceptions.TransportError(caught_exc) + raise new_exc from caught_exc + + +class _MutualTlsAdapter(requests.adapters.HTTPAdapter): + """ + A TransportAdapter that enables mutual TLS. + + Args: + cert (bytes): client certificate in PEM format + key (bytes): client private key in PEM format + + Raises: + ImportError: if certifi or pyOpenSSL is not installed + OpenSSL.crypto.Error: if client cert or key is invalid + """ + + def __init__(self, cert, key): + import certifi + from OpenSSL import crypto + import urllib3.contrib.pyopenssl # type: ignore + + urllib3.contrib.pyopenssl.inject_into_urllib3() + + pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key) + x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert) + + ctx_poolmanager = create_urllib3_context() + ctx_poolmanager.load_verify_locations(cafile=certifi.where()) + ctx_poolmanager._ctx.use_certificate(x509) + ctx_poolmanager._ctx.use_privatekey(pkey) + self._ctx_poolmanager = ctx_poolmanager + + ctx_proxymanager = create_urllib3_context() + ctx_proxymanager.load_verify_locations(cafile=certifi.where()) + ctx_proxymanager._ctx.use_certificate(x509) + ctx_proxymanager._ctx.use_privatekey(pkey) + self._ctx_proxymanager = ctx_proxymanager + + super(_MutualTlsAdapter, self).__init__() + + def init_poolmanager(self, *args, **kwargs): + kwargs["ssl_context"] = self._ctx_poolmanager + super(_MutualTlsAdapter, self).init_poolmanager(*args, **kwargs) + + def proxy_manager_for(self, *args, **kwargs): + kwargs["ssl_context"] = self._ctx_proxymanager + return super(_MutualTlsAdapter, self).proxy_manager_for(*args, **kwargs) + + +class _MutualTlsOffloadAdapter(requests.adapters.HTTPAdapter): + """ + A TransportAdapter that enables mutual TLS and offloads the client side + signing operation to the signing library. + + Args: + enterprise_cert_file_path (str): the path to a enterprise cert JSON + file. The file should contain the following field: + + { + "libs": { + "signer_library": "...", + "offload_library": "..." + } + } + + Raises: + ImportError: if certifi or pyOpenSSL is not installed + google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel + creation failed for any reason. 
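+
+    A hedged usage sketch (the JSON file path is hypothetical)::
+
+        session = requests.Session()
+        adapter = _MutualTlsOffloadAdapter("/path/to/enterprise_cert.json")
+        session.mount("https://", adapter)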
+ """ + + def __init__(self, enterprise_cert_file_path): + import certifi + from google.auth.transport import _custom_tls_signer + + self.signer = _custom_tls_signer.CustomTlsSigner(enterprise_cert_file_path) + self.signer.load_libraries() + + import urllib3.contrib.pyopenssl + + urllib3.contrib.pyopenssl.inject_into_urllib3() + + poolmanager = create_urllib3_context() + poolmanager.load_verify_locations(cafile=certifi.where()) + self.signer.attach_to_ssl_context(poolmanager) + self._ctx_poolmanager = poolmanager + + proxymanager = create_urllib3_context() + proxymanager.load_verify_locations(cafile=certifi.where()) + self.signer.attach_to_ssl_context(proxymanager) + self._ctx_proxymanager = proxymanager + + super(_MutualTlsOffloadAdapter, self).__init__() + + def init_poolmanager(self, *args, **kwargs): + kwargs["ssl_context"] = self._ctx_poolmanager + super(_MutualTlsOffloadAdapter, self).init_poolmanager(*args, **kwargs) + + def proxy_manager_for(self, *args, **kwargs): + kwargs["ssl_context"] = self._ctx_proxymanager + return super(_MutualTlsOffloadAdapter, self).proxy_manager_for(*args, **kwargs) + + +class AuthorizedSession(requests.Session): + """A Requests Session class with credentials. + + This class is used to perform requests to API endpoints that require + authorization:: + + from google.auth.transport.requests import AuthorizedSession + + authed_session = AuthorizedSession(credentials) + + response = authed_session.request( + 'GET', 'https://www.googleapis.com/storage/v1/b') + + + The underlying :meth:`request` implementation handles adding the + credentials' headers to the request and refreshing credentials as needed. + + This class also supports mutual TLS via :meth:`configure_mtls_channel` + method. In order to use this method, the `GOOGLE_API_USE_CLIENT_CERTIFICATE` + environment variable must be explicitly set to ``true``, otherwise it does + nothing. Assume the environment is set to ``true``, the method behaves in the + following manner: + + If client_cert_callback is provided, client certificate and private + key are loaded using the callback; if client_cert_callback is None, + application default SSL credentials will be used. Exceptions are raised if + there are problems with the certificate, private key, or the loading process, + so it should be called within a try/except block. + + First we set the environment variable to ``true``, then create an :class:`AuthorizedSession` + instance and specify the endpoints:: + + regular_endpoint = 'https://pubsub.googleapis.com/v1/projects/{my_project_id}/topics' + mtls_endpoint = 'https://pubsub.mtls.googleapis.com/v1/projects/{my_project_id}/topics' + + authed_session = AuthorizedSession(credentials) + + Now we can pass a callback to :meth:`configure_mtls_channel`:: + + def my_cert_callback(): + # some code to load client cert bytes and private key bytes, both in + # PEM format. + some_code_to_load_client_cert_and_key() + if loaded: + return cert, key + raise MyClientCertFailureException() + + # Always call configure_mtls_channel within a try/except block. + try: + authed_session.configure_mtls_channel(my_cert_callback) + except: + # handle exceptions. + + if authed_session.is_mtls: + response = authed_session.request('GET', mtls_endpoint) + else: + response = authed_session.request('GET', regular_endpoint) + + + You can alternatively use application default SSL credentials like this:: + + try: + authed_session.configure_mtls_channel() + except: + # handle exceptions. 
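+
+    A hedged sketch of bounding the total time spent on a call, including any
+    credential refresh (``max_allowed_time`` is documented on :meth:`request`,
+    and reuses the ``regular_endpoint`` defined above)::
+
+        response = authed_session.request(
+            'GET', regular_endpoint, timeout=5, max_allowed_time=30)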
+
+    Args:
+        credentials (google.auth.credentials.Credentials): The credentials to
+            add to the request.
+        refresh_status_codes (Sequence[int]): Which HTTP status codes indicate
+            that credentials should be refreshed and the request should be
+            retried.
+        max_refresh_attempts (int): The maximum number of times to attempt to
+            refresh the credentials and retry the request.
+        refresh_timeout (Optional[int]): The timeout value in seconds for
+            credential refresh HTTP requests.
+        auth_request (google.auth.transport.requests.Request):
+            (Optional) An instance of
+            :class:`~google.auth.transport.requests.Request` used when
+            refreshing credentials. If not passed,
+            an instance of :class:`~google.auth.transport.requests.Request`
+            is created.
+        default_host (Optional[str]): A host like "pubsub.googleapis.com".
+            This is used when a self-signed JWT is created from service
+            account credentials.
+    """
+
+    def __init__(
+        self,
+        credentials,
+        refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,
+        max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS,
+        refresh_timeout=None,
+        auth_request=None,
+        default_host=None,
+    ):
+        super(AuthorizedSession, self).__init__()
+        self.credentials = credentials
+        self._refresh_status_codes = refresh_status_codes
+        self._max_refresh_attempts = max_refresh_attempts
+        self._refresh_timeout = refresh_timeout
+        self._is_mtls = False
+        self._default_host = default_host
+
+        if auth_request is None:
+            self._auth_request_session = requests.Session()
+
+            # Using an adapter to make HTTP requests robust to network errors.
+            # This adapter retries HTTP requests when network errors occur
+            # and the requests seem safely retryable.
+            retry_adapter = requests.adapters.HTTPAdapter(max_retries=3)
+            self._auth_request_session.mount("https://", retry_adapter)
+
+            # Do not pass `self` as the session here, as it can lead to
+            # infinite recursion.
+            auth_request = Request(self._auth_request_session)
+        else:
+            self._auth_request_session = None
+
+        # Request instance used by internal methods (for example,
+        # credentials.refresh).
+        self._auth_request = auth_request
+
+        # https://google.aip.dev/auth/4111
+        # Attempt to use self-signed JWTs when a service account is used.
+        if isinstance(self.credentials, service_account.Credentials):
+            self.credentials._create_self_signed_jwt(
+                "https://{}/".format(self._default_host) if self._default_host else None
+            )
+
+    def configure_mtls_channel(self, client_cert_callback=None):
+        """Configure the client certificate and key for SSL connection.
+
+        The function does nothing unless `GOOGLE_API_USE_CLIENT_CERTIFICATE` is
+        explicitly set to `true`. In this case, if the client certificate and key are
+        successfully obtained (from the given client_cert_callback or from application
+        default SSL credentials), a :class:`_MutualTlsAdapter` instance will be mounted
+        to the "https://" prefix.
+
+        Args:
+            client_cert_callback (Optional[Callable[[], (bytes, bytes)]]):
+                The optional callback returns the client certificate and private
+                key bytes both in PEM format.
+                If the callback is None, application default SSL credentials
+                will be used.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+                creation failed for any reason.
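+
+        A minimal sketch (assumes ``GOOGLE_API_USE_CLIENT_CERTIFICATE=true``
+        and a callback returning PEM cert and key bytes)::
+
+            try:
+                authed_session.configure_mtls_channel(my_cert_callback)
+            except google.auth.exceptions.MutualTLSChannelError:
+                pass  # fall back to a regular TLS endpoint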
+        """
+        use_client_cert = os.getenv(
+            environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
+        )
+        if use_client_cert != "true":
+            self._is_mtls = False
+            return
+
+        try:
+            import OpenSSL
+        except ImportError as caught_exc:
+            new_exc = exceptions.MutualTLSChannelError(caught_exc)
+            raise new_exc from caught_exc
+
+        try:
+            (
+                self._is_mtls,
+                cert,
+                key,
+            ) = google.auth.transport._mtls_helper.get_client_cert_and_key(
+                client_cert_callback
+            )
+
+            if self._is_mtls:
+                mtls_adapter = _MutualTlsAdapter(cert, key)
+                self.mount("https://", mtls_adapter)
+        except (
+            exceptions.ClientCertError,
+            ImportError,
+            OpenSSL.crypto.Error,
+        ) as caught_exc:
+            new_exc = exceptions.MutualTLSChannelError(caught_exc)
+            raise new_exc from caught_exc
+
+    def request(
+        self,
+        method,
+        url,
+        data=None,
+        headers=None,
+        max_allowed_time=None,
+        timeout=_DEFAULT_TIMEOUT,
+        **kwargs
+    ):
+        """Implementation of Requests' request.
+
+        Args:
+            timeout (Optional[Union[float, Tuple[float, float]]]):
+                The amount of time in seconds to wait for the server response
+                with each individual request. Can also be passed as a tuple
+                ``(connect_timeout, read_timeout)``. See :meth:`requests.Session.request`
+                documentation for details.
+            max_allowed_time (Optional[float]):
+                If the method runs longer than this, a ``Timeout`` exception is
+                automatically raised. Unlike the ``timeout`` parameter, this
+                value applies to the total method execution time, even if
+                multiple requests are made under the hood.
+
+                Note that it is not guaranteed that the timeout error is raised
+                at ``max_allowed_time``. It might take longer, for example, if
+                an underlying request takes a lot of time, but the request
+                itself does not time out, e.g. if a large file is being
+                transmitted. The timeout error will be raised after such a
+                request completes.
+        """
+        # pylint: disable=arguments-differ
+        # Requests has a ton of arguments to request, but only two
+        # (method, url) are required. We pass through all of the other
+        # arguments to super, so no need to exhaustively list them here.
+
+        # Use a kwarg for this instead of an attribute to maintain
+        # thread-safety.
+        _credential_refresh_attempt = kwargs.pop("_credential_refresh_attempt", 0)
+
+        # Make a copy of the headers. They will be modified by the credentials
+        # and we want to pass the original headers if we recurse.
+        request_headers = headers.copy() if headers is not None else {}
+
+        # Do not apply the timeout unconditionally in order to not override the
+        # _auth_request's default timeout.
+        auth_request = (
+            self._auth_request
+            if timeout is None
+            else functools.partial(self._auth_request, timeout=timeout)
+        )
+
+        remaining_time = max_allowed_time
+
+        with TimeoutGuard(remaining_time) as guard:
+            self.credentials.before_request(auth_request, method, url, request_headers)
+        remaining_time = guard.remaining_timeout
+
+        with TimeoutGuard(remaining_time) as guard:
+            response = super(AuthorizedSession, self).request(
+                method,
+                url,
+                data=data,
+                headers=request_headers,
+                timeout=timeout,
+                **kwargs
+            )
+        remaining_time = guard.remaining_timeout
+
+        # If the response indicated that the credentials needed to be
+        # refreshed, then refresh the credentials and re-attempt the
+        # request.
+        # A stored token may expire between the time it is retrieved and
+        # the time the request is made, so we may need to try twice.
+        if (
+            response.status_code in self._refresh_status_codes
+            and _credential_refresh_attempt < self._max_refresh_attempts
+        ):
+
+            _LOGGER.info(
+                "Refreshing credentials due to a %s response. Attempt %s/%s.",
+                response.status_code,
+                _credential_refresh_attempt + 1,
+                self._max_refresh_attempts,
+            )
+
+            # Do not apply the timeout unconditionally in order to not override the
+            # _auth_request's default timeout.
+            auth_request = (
+                self._auth_request
+                if timeout is None
+                else functools.partial(self._auth_request, timeout=timeout)
+            )
+
+            with TimeoutGuard(remaining_time) as guard:
+                self.credentials.refresh(auth_request)
+            remaining_time = guard.remaining_timeout
+
+            # Recurse. Pass in the original headers, not our modified set, but
+            # do pass the adjusted max allowed time (i.e. the remaining total time).
+            return self.request(
+                method,
+                url,
+                data=data,
+                headers=headers,
+                max_allowed_time=remaining_time,
+                timeout=timeout,
+                _credential_refresh_attempt=_credential_refresh_attempt + 1,
+                **kwargs
+            )
+
+        return response
+
+    @property
+    def is_mtls(self):
+        """Indicates whether the created SSL channel is mutual TLS."""
+        return self._is_mtls
+
+    def close(self):
+        if self._auth_request_session is not None:
+            self._auth_request_session.close()
+        super(AuthorizedSession, self).close()
diff --git a/.venv/lib/python3.12/site-packages/google/auth/transport/urllib3.py b/.venv/lib/python3.12/site-packages/google/auth/transport/urllib3.py
new file mode 100644
index 00000000..63144f5f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/auth/transport/urllib3.py
@@ -0,0 +1,444 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport adapter for urllib3."""
+
+from __future__ import absolute_import
+
+import logging
+import os
+import warnings
+
+# Certifi is Mozilla's certificate bundle. Urllib3 needs a certificate bundle
+# to verify HTTPS requests, and certifi is the recommended and most reliable
+# way to get a root certificate bundle. See
+# http://urllib3.readthedocs.io/en/latest/user-guide.html#certificate-verification
+# for more details.
+try:
+    import certifi
+except ImportError:  # pragma: NO COVER
+    certifi = None  # type: ignore
+
+try:
+    import urllib3  # type: ignore
+    import urllib3.exceptions  # type: ignore
+except ImportError as caught_exc:  # pragma: NO COVER
+    raise ImportError(
+        "The urllib3 library is not installed, please install the "
+        "urllib3 package to use the urllib3 transport."
+    ) from caught_exc
+
+from packaging import version  # type: ignore
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+from google.oauth2 import service_account
+
+if version.parse(urllib3.__version__) >= version.parse("2.0.0"):  # pragma: NO COVER
+    RequestMethods = urllib3._request_methods.RequestMethods  # type: ignore
+else:  # pragma: NO COVER
+    RequestMethods = urllib3.request.RequestMethods  # type: ignore
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class _Response(transport.Response):
+    """urllib3 transport response adapter.
+
+    Args:
+        response (urllib3.response.HTTPResponse): The raw urllib3 response.
+    """
+
+    def __init__(self, response):
+        self._response = response
+
+    @property
+    def status(self):
+        return self._response.status
+
+    @property
+    def headers(self):
+        return self._response.headers
+
+    @property
+    def data(self):
+        return self._response.data
+
+
+class Request(transport.Request):
+    """urllib3 request adapter.
+
+    This class is used internally for making requests using various transports
+    in a consistent way. If you use :class:`AuthorizedHttp` you do not need
+    to construct or use this class directly.
+
+    This class can be useful if you want to manually refresh a
+    :class:`~google.auth.credentials.Credentials` instance::
+
+        import google.auth.transport.urllib3
+        import urllib3
+
+        http = urllib3.PoolManager()
+        request = google.auth.transport.urllib3.Request(http)
+
+        credentials.refresh(request)
+
+    Args:
+        http (urllib3.request.RequestMethods): An instance of any urllib3
+            class that implements :class:`~urllib3.request.RequestMethods`,
+            usually :class:`urllib3.PoolManager`.
+
+    .. automethod:: __call__
+    """
+
+    def __init__(self, http):
+        self.http = http
+
+    def __call__(
+        self, url, method="GET", body=None, headers=None, timeout=None, **kwargs
+    ):
+        """Make an HTTP request using urllib3.
+
+        Args:
+            url (str): The URI to be requested.
+            method (str): The HTTP method to use for the request. Defaults
+                to 'GET'.
+            body (bytes): The payload or body in the HTTP request.
+            headers (Mapping[str, str]): Request headers.
+            timeout (Optional[int]): The number of seconds to wait for a
+                response from the server. If not specified or if None, the
+                urllib3 default timeout will be used.
+            kwargs: Additional arguments passed through to the underlying
+                urllib3 :meth:`urlopen` method.
+
+        Returns:
+            google.auth.transport.Response: The HTTP response.
+
+        Raises:
+            google.auth.exceptions.TransportError: If any exception occurred.
+        """
+        # urllib3 uses a sentinel default value for timeout, so only set it if
+        # specified.
+        if timeout is not None:
+            kwargs["timeout"] = timeout
+
+        try:
+            _LOGGER.debug("Making request: %s %s", method, url)
+            response = self.http.request(
+                method, url, body=body, headers=headers, **kwargs
+            )
+            return _Response(response)
+        except urllib3.exceptions.HTTPError as caught_exc:
+            new_exc = exceptions.TransportError(caught_exc)
+            raise new_exc from caught_exc
+
+
+def _make_default_http():
+    if certifi is not None:
+        return urllib3.PoolManager(cert_reqs="CERT_REQUIRED", ca_certs=certifi.where())
+    else:
+        return urllib3.PoolManager()
+
+
+def _make_mutual_tls_http(cert, key):
+    """Create a mutual TLS HTTP connection with the given client cert and key.
+ See https://github.com/urllib3/urllib3/issues/474#issuecomment-253168415 + + Args: + cert (bytes): client certificate in PEM format + key (bytes): client private key in PEM format + + Returns: + urllib3.PoolManager: Mutual TLS HTTP connection. + + Raises: + ImportError: If certifi or pyOpenSSL is not installed. + OpenSSL.crypto.Error: If the cert or key is invalid. + """ + import certifi + from OpenSSL import crypto + import urllib3.contrib.pyopenssl # type: ignore + + urllib3.contrib.pyopenssl.inject_into_urllib3() + ctx = urllib3.util.ssl_.create_urllib3_context() + ctx.load_verify_locations(cafile=certifi.where()) + + pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key) + x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert) + + ctx._ctx.use_certificate(x509) + ctx._ctx.use_privatekey(pkey) + + http = urllib3.PoolManager(ssl_context=ctx) + return http + + +class AuthorizedHttp(RequestMethods): # type: ignore + """A urllib3 HTTP class with credentials. + + This class is used to perform requests to API endpoints that require + authorization:: + + from google.auth.transport.urllib3 import AuthorizedHttp + + authed_http = AuthorizedHttp(credentials) + + response = authed_http.request( + 'GET', 'https://www.googleapis.com/storage/v1/b') + + This class implements :class:`urllib3.request.RequestMethods` and can be + used just like any other :class:`urllib3.PoolManager`. + + The underlying :meth:`urlopen` implementation handles adding the + credentials' headers to the request and refreshing credentials as needed. + + This class also supports mutual TLS via :meth:`configure_mtls_channel` + method. In order to use this method, the `GOOGLE_API_USE_CLIENT_CERTIFICATE` + environment variable must be explicitly set to `true`, otherwise it does + nothing. Assume the environment is set to `true`, the method behaves in the + following manner: + If client_cert_callback is provided, client certificate and private + key are loaded using the callback; if client_cert_callback is None, + application default SSL credentials will be used. Exceptions are raised if + there are problems with the certificate, private key, or the loading process, + so it should be called within a try/except block. + + First we set the environment variable to `true`, then create an :class:`AuthorizedHttp` + instance and specify the endpoints:: + + regular_endpoint = 'https://pubsub.googleapis.com/v1/projects/{my_project_id}/topics' + mtls_endpoint = 'https://pubsub.mtls.googleapis.com/v1/projects/{my_project_id}/topics' + + authed_http = AuthorizedHttp(credentials) + + Now we can pass a callback to :meth:`configure_mtls_channel`:: + + def my_cert_callback(): + # some code to load client cert bytes and private key bytes, both in + # PEM format. + some_code_to_load_client_cert_and_key() + if loaded: + return cert, key + raise MyClientCertFailureException() + + # Always call configure_mtls_channel within a try/except block. + try: + is_mtls = authed_http.configure_mtls_channel(my_cert_callback) + except: + # handle exceptions. + + if is_mtls: + response = authed_http.request('GET', mtls_endpoint) + else: + response = authed_http.request('GET', regular_endpoint) + + You can alternatively use application default SSL credentials like this:: + + try: + is_mtls = authed_http.configure_mtls_channel() + except: + # handle exceptions. + + Args: + credentials (google.auth.credentials.Credentials): The credentials to + add to the request. + http (urllib3.PoolManager): The underlying HTTP object to + use to make requests. 
If not specified, a
+            :class:`urllib3.PoolManager` instance will be constructed with
+            sane defaults.
+        refresh_status_codes (Sequence[int]): Which HTTP status codes indicate
+            that credentials should be refreshed and the request should be
+            retried.
+        max_refresh_attempts (int): The maximum number of times to attempt to
+            refresh the credentials and retry the request.
+        default_host (Optional[str]): A host like "pubsub.googleapis.com".
+            This is used when a self-signed JWT is created from service
+            account credentials.
+    """
+
+    def __init__(
+        self,
+        credentials,
+        http=None,
+        refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,
+        max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS,
+        default_host=None,
+    ):
+        if http is None:
+            self.http = _make_default_http()
+            self._has_user_provided_http = False
+        else:
+            self.http = http
+            self._has_user_provided_http = True
+
+        self.credentials = credentials
+        self._refresh_status_codes = refresh_status_codes
+        self._max_refresh_attempts = max_refresh_attempts
+        self._default_host = default_host
+        # Request instance used by internal methods (for example,
+        # credentials.refresh).
+        self._request = Request(self.http)
+
+        # https://google.aip.dev/auth/4111
+        # Attempt to use self-signed JWTs when a service account is used.
+        if isinstance(self.credentials, service_account.Credentials):
+            self.credentials._create_self_signed_jwt(
+                "https://{}/".format(self._default_host) if self._default_host else None
+            )
+
+        super(AuthorizedHttp, self).__init__()
+
+    def configure_mtls_channel(self, client_cert_callback=None):
+        """Configures the mutual TLS channel using the given client_cert_callback or
+        application default SSL credentials. The behavior is controlled by the
+        `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable.
+        (1) If the environment variable value is `true`, the function returns True
+            if the channel is mutual TLS and False otherwise. The `http` provided
+            in the constructor will be overwritten.
+        (2) If the environment variable is not set or is `false`, the function does
+            nothing and always returns False.
+
+        Args:
+            client_cert_callback (Optional[Callable[[], (bytes, bytes)]]):
+                The optional callback returns the client certificate and private
+                key bytes both in PEM format.
+                If the callback is None, application default SSL credentials
+                will be used.
+
+        Returns:
+            True if the channel is mutual TLS and False otherwise.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+                creation failed for any reason.
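+
+        A hedged sketch of typical use (mirrors the class docstring example)::
+
+            authed_http = AuthorizedHttp(credentials)
+            try:
+                is_mtls = authed_http.configure_mtls_channel()
+            except google.auth.exceptions.MutualTLSChannelError:
+                is_mtls = False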
+ """ + use_client_cert = os.getenv( + environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false" + ) + if use_client_cert != "true": + return False + + try: + import OpenSSL + except ImportError as caught_exc: + new_exc = exceptions.MutualTLSChannelError(caught_exc) + raise new_exc from caught_exc + + try: + found_cert_key, cert, key = transport._mtls_helper.get_client_cert_and_key( + client_cert_callback + ) + + if found_cert_key: + self.http = _make_mutual_tls_http(cert, key) + else: + self.http = _make_default_http() + except ( + exceptions.ClientCertError, + ImportError, + OpenSSL.crypto.Error, + ) as caught_exc: + new_exc = exceptions.MutualTLSChannelError(caught_exc) + raise new_exc from caught_exc + + if self._has_user_provided_http: + self._has_user_provided_http = False + warnings.warn( + "`http` provided in the constructor is overwritten", UserWarning + ) + + return found_cert_key + + def urlopen(self, method, url, body=None, headers=None, **kwargs): + """Implementation of urllib3's urlopen.""" + # pylint: disable=arguments-differ + # We use kwargs to collect additional args that we don't need to + # introspect here. However, we do explicitly collect the two + # positional arguments. + + # Use a kwarg for this instead of an attribute to maintain + # thread-safety. + _credential_refresh_attempt = kwargs.pop("_credential_refresh_attempt", 0) + + if headers is None: + headers = self.headers + + # Make a copy of the headers. They will be modified by the credentials + # and we want to pass the original headers if we recurse. + request_headers = headers.copy() + + self.credentials.before_request(self._request, method, url, request_headers) + + response = self.http.urlopen( + method, url, body=body, headers=request_headers, **kwargs + ) + + # If the response indicated that the credentials needed to be + # refreshed, then refresh the credentials and re-attempt the + # request. + # A stored token may expire between the time it is retrieved and + # the time the request is made, so we may need to try twice. + # The reason urllib3's retries aren't used is because they + # don't allow you to modify the request headers. :/ + if ( + response.status in self._refresh_status_codes + and _credential_refresh_attempt < self._max_refresh_attempts + ): + + _LOGGER.info( + "Refreshing credentials due to a %s response. Attempt %s/%s.", + response.status, + _credential_refresh_attempt + 1, + self._max_refresh_attempts, + ) + + self.credentials.refresh(self._request) + + # Recurse. Pass in the original headers, not our modified set. 
+ return self.urlopen( + method, + url, + body=body, + headers=headers, + _credential_refresh_attempt=_credential_refresh_attempt + 1, + **kwargs + ) + + return response + + # Proxy methods for compliance with the urllib3.PoolManager interface + + def __enter__(self): + """Proxy to ``self.http``.""" + return self.http.__enter__() + + def __exit__(self, exc_type, exc_val, exc_tb): + """Proxy to ``self.http``.""" + return self.http.__exit__(exc_type, exc_val, exc_tb) + + def __del__(self): + if hasattr(self, "http") and self.http is not None: + self.http.clear() + + @property + def headers(self): + """Proxy to ``self.http``.""" + return self.http.headers + + @headers.setter + def headers(self, value): + """Proxy to ``self.http``.""" + self.http.headers = value diff --git a/.venv/lib/python3.12/site-packages/google/auth/version.py b/.venv/lib/python3.12/site-packages/google/auth/version.py new file mode 100644 index 00000000..41a80e6c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/auth/version.py @@ -0,0 +1,15 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "2.38.0" diff --git a/.venv/lib/python3.12/site-packages/google/genai/__init__.py b/.venv/lib/python3.12/site-packages/google/genai/__init__.py new file mode 100644 index 00000000..358b726f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/genai/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Google Gen AI SDK""" + +from .client import Client +from . import version + +__version__ = version.__version__ + +__all__ = ['Client'] diff --git a/.venv/lib/python3.12/site-packages/google/genai/_api_client.py b/.venv/lib/python3.12/site-packages/google/genai/_api_client.py new file mode 100644 index 00000000..cc0dada1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/genai/_api_client.py @@ -0,0 +1,697 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+
+"""Base client for calling HTTP APIs, sending and receiving JSON."""
+
+import asyncio
+import copy
+from dataclasses import dataclass
+import datetime
+import io
+import json
+import logging
+import os
+import sys
+from typing import Any, Optional, Tuple, TypedDict, Union
+from urllib.parse import urlparse, urlunparse
+
+import google.auth
+import google.auth.credentials
+from google.auth.transport.requests import AuthorizedSession
+from pydantic import BaseModel, ConfigDict, Field, ValidationError
+import requests
+
+from . import errors
+from . import version
+
+
+class HttpOptions(BaseModel):
+  """HTTP options for the API client."""
+  model_config = ConfigDict(extra='forbid')
+
+  base_url: Optional[str] = Field(
+      default=None,
+      description="""The base URL for the AI platform service endpoint.""",
+  )
+  api_version: Optional[str] = Field(
+      default=None,
+      description="""Specifies the version of the API to use.""",
+  )
+  headers: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""Additional HTTP headers to be sent with the request.""",
+  )
+  response_payload: Optional[dict] = Field(
+      default=None,
+      description="""If set, the response payload will be returned in the supplied dict.""",
+  )
+  timeout: Optional[Union[float, Tuple[float, float]]] = Field(
+      default=None,
+      description="""Timeout for the request in seconds.""",
+  )
+  skip_project_and_location_in_path: bool = Field(
+      default=False,
+      description="""If set to True, the project and location will not be appended to the path.""",
+  )
+
+
+class HttpOptionsDict(TypedDict):
+  """HTTP options for the API client."""
+
+  base_url: Optional[str] = None
+  """The base URL for the AI platform service endpoint."""
+  api_version: Optional[str] = None
+  """Specifies the version of the API to use."""
+  headers: Optional[dict[str, str]] = None
+  """Additional HTTP headers to be sent with the request."""
+  response_payload: Optional[dict] = None
+  """If set, the response payload will be returned in the supplied dict."""
+  timeout: Optional[Union[float, Tuple[float, float]]] = None
+  """Timeout for the request in seconds."""
+  skip_project_and_location_in_path: bool = False
+  """If set to True, the project and location will not be appended to the path."""
+
+HttpOptionsOrDict = Union[HttpOptions, HttpOptionsDict]
+
+
+def _append_library_version_headers(headers: dict[str, str]) -> None:
+  """Appends the telemetry headers to the headers dict."""
+  library_label = f'google-genai-sdk/{version.__version__}'
+  language_label = 'gl-python/' + sys.version.split()[0]
+  version_header_value = f'{library_label} {language_label}'
+  if (
+      'user-agent' in headers
+      and version_header_value not in headers['user-agent']
+  ):
+    headers['user-agent'] += f' {version_header_value}'
+  elif 'user-agent' not in headers:
+    headers['user-agent'] = version_header_value
+  if (
+      'x-goog-api-client' in headers
+      and version_header_value not in headers['x-goog-api-client']
+  ):
+    headers['x-goog-api-client'] += f' {version_header_value}'
+  elif 'x-goog-api-client' not in headers:
+    headers['x-goog-api-client'] = version_header_value
+
+
+def _patch_http_options(
+    options: HttpOptionsDict, patch_options: HttpOptionsDict
+) -> HttpOptionsDict:
+  # Use a shallow copy so we don't override the original objects.
+  copy_option = HttpOptionsDict()
+  copy_option.update(options)
+  for patch_key, patch_value in patch_options.items():
+    # If both are dicts, update the copy.
+    # This is to handle cases like merging headers.
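+    # Illustrative example (hypothetical values): patching
+    #   {'headers': {'user-agent': 'u'}} with {'headers': {'x-goog-api-key': 'k'}}
+    # yields {'headers': {'user-agent': 'u', 'x-goog-api-key': 'k'}}; the
+    # nested dict is merged rather than replaced wholesale.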
+ if isinstance(patch_value, dict) and isinstance( + copy_option.get(patch_key, None), dict + ): + copy_option[patch_key] = {} + copy_option[patch_key].update( + options[patch_key] + ) # shallow copy from original options. + copy_option[patch_key].update(patch_value) + elif patch_value is not None: # Accept empty values. + copy_option[patch_key] = patch_value + _append_library_version_headers(copy_option['headers']) + return copy_option + + +def _join_url_path(base_url: str, path: str) -> str: + parsed_base = urlparse(base_url) + base_path = parsed_base.path[:-1] if parsed_base.path.endswith('/') else parsed_base.path + path = path[1:] if path.startswith('/') else path + return urlunparse(parsed_base._replace(path=base_path + '/' + path)) + + +@dataclass +class HttpRequest: + headers: dict[str, str] + url: str + method: str + data: Union[dict[str, object], bytes] + timeout: Optional[Union[float, Tuple[float, float]]] = None + + +class HttpResponse: + + def __init__( + self, + headers: dict[str, str], + response_stream: Union[Any, str] = None, + byte_stream: Union[Any, bytes] = None, + ): + self.status_code = 200 + self.headers = headers + self.response_stream = response_stream + self.byte_stream = byte_stream + + @property + def text(self) -> str: + if not self.response_stream[0]: # Empty response + return '' + return json.loads(self.response_stream[0]) + + def segments(self): + if isinstance(self.response_stream, list): + # list of objects retrieved from replay or from non-streaming API. + for chunk in self.response_stream: + yield json.loads(chunk) if chunk else {} + elif self.response_stream is None: + yield from [] + else: + # Iterator of objects retrieved from the API. + for chunk in self.response_stream.iter_lines(): + if chunk: + # In streaming mode, the chunk of JSON is prefixed with "data:" which + # we must strip before parsing. + if chunk.startswith(b'data: '): + chunk = chunk[len(b'data: ') :] + yield json.loads(str(chunk, 'utf-8')) + + def byte_segments(self): + if isinstance(self.byte_stream, list): + # list of objects retrieved from replay or from non-streaming API. + yield from self.byte_stream + elif self.byte_stream is None: + yield from [] + else: + raise ValueError( + 'Byte segments are not supported for streaming responses.' + ) + + def copy_to_dict(self, response_payload: dict[str, object]): + for attribute in dir(self): + response_payload[attribute] = copy.deepcopy(getattr(self, attribute)) + + +class ApiClient: + """Client for calling HTTP APIs sending and receiving JSON.""" + + def __init__( + self, + vertexai: Union[bool, None] = None, + api_key: Union[str, None] = None, + credentials: google.auth.credentials.Credentials = None, + project: Union[str, None] = None, + location: Union[str, None] = None, + http_options: HttpOptionsOrDict = None, + ): + self.vertexai = vertexai + if self.vertexai is None: + if os.environ.get('GOOGLE_GENAI_USE_VERTEXAI', '0').lower() in [ + 'true', + '1', + ]: + self.vertexai = True + + # Validate explicitly set initializer values. + if (project or location) and api_key: + # API cannot consume both project/location and api_key. + raise ValueError( + 'Project/location and API key are mutually exclusive in the client initializer.' + ) + elif credentials and api_key: + # API cannot consume both credentials and api_key. + raise ValueError( + 'Credentials and API key are mutually exclusive in the client initializer.' + ) + + # Validate http_options if a dict is provided. 
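+    # (``HttpOptions`` is declared with ``extra='forbid'``, so a dict with an
+    # unrecognized key, e.g. a misspelled 'base_urll' (hypothetical), fails
+    # validation here instead of being silently ignored.)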
+    if isinstance(http_options, dict):
+      try:
+        HttpOptions.model_validate(http_options)
+      except ValidationError as e:
+        raise ValueError(f'Invalid http_options: {e}')
+    elif isinstance(http_options, HttpOptions):
+      http_options = http_options.model_dump()
+
+    # Retrieve implicitly set values from the environment.
+    env_project = os.environ.get('GOOGLE_CLOUD_PROJECT', None)
+    env_location = os.environ.get('GOOGLE_CLOUD_LOCATION', None)
+    env_api_key = os.environ.get('GOOGLE_API_KEY', None)
+    self.project = project or env_project
+    self.location = location or env_location
+    self.api_key = api_key or env_api_key
+
+    self._credentials = credentials
+    self._http_options = HttpOptionsDict()
+
+    # Handle when to use Vertex AI in express mode (api key).
+    # Explicit initializer arguments are already validated above.
+    if self.vertexai:
+      if credentials:
+        # Explicit credentials take precedence over implicit api_key.
+        logging.info(
+            'The user provided Google Cloud credentials will take precedence'
+            + ' over the API key from the environment variable.'
+        )
+        self.api_key = None
+      elif (env_location or env_project) and api_key:
+        # Explicit api_key takes precedence over implicit project/location.
+        logging.info(
+            'The user provided Vertex AI API key will take precedence over the'
+            + ' project/location from the environment variables.'
+        )
+        self.project = None
+        self.location = None
+      elif (project or location) and env_api_key:
+        # Explicit project/location takes precedence over implicit api_key.
+        logging.info(
+            'The user provided project/location will take precedence over the'
+            + ' Vertex AI API key from the environment variable.'
+        )
+        self.api_key = None
+      elif (env_location or env_project) and env_api_key:
+        # Implicit project/location takes precedence over implicit api_key.
+        logging.info(
+            'The project/location from the environment variables will take'
+            + ' precedence over the API key from the environment variables.'
+        )
+        self.api_key = None
+      if not self.project and not self.api_key:
+        self.project = google.auth.default()[1]
+      if not ((self.project and self.location) or self.api_key):
+        raise ValueError(
+            'Project and location or API key must be set when using the Vertex '
+            'AI API.'
+        )
+      if self.api_key:
+        self._http_options['base_url'] = (
+            'https://aiplatform.googleapis.com/'
+        )
+      else:
+        self._http_options['base_url'] = (
+            f'https://{self.location}-aiplatform.googleapis.com/'
+        )
+      self._http_options['api_version'] = 'v1beta1'
+    else:  # ML Dev API
+      if not self.api_key:
+        raise ValueError('API key must be set when using the Google AI API.')
+      self._http_options['base_url'] = (
+          'https://generativelanguage.googleapis.com/'
+      )
+      self._http_options['api_version'] = 'v1beta'
+    # Default options for both clients.
+    self._http_options['headers'] = {'Content-Type': 'application/json'}
+    if self.api_key and not self.vertexai:
+      self._http_options['headers']['x-goog-api-key'] = self.api_key
+    # Update the http options with the user provided http options.
+    if http_options:
+      self._http_options = _patch_http_options(self._http_options, http_options)
+    else:
+      _append_library_version_headers(self._http_options['headers'])
+
+  def _websocket_base_url(self):
+    url_parts = urlparse(self._http_options['base_url'])
+    return url_parts._replace(scheme='wss').geturl()
+
+  def _build_request(
+      self,
+      http_method: str,
+      path: str,
+      request_dict: dict[str, object],
+      http_options: HttpOptionsDict = None,
+  ) -> HttpRequest:
+    # Remove all special dict keys such as _url and _query.
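+    # Illustrative (hypothetical request dict): {'_url': {...}, 'contents': [...]}
+    # becomes {'contents': [...]}, leaving only the JSON body fields.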
+ keys_to_delete = [key for key in request_dict.keys() if key.startswith('_')] + for key in keys_to_delete: + del request_dict[key] + # patch the http options with the user provided settings. + if http_options: + patched_http_options = _patch_http_options( + self._http_options, http_options + ) + else: + patched_http_options = self._http_options + skip_project_and_location_in_path_val = patched_http_options.get( + 'skip_project_and_location_in_path', False + ) + if ( + self.vertexai + and not path.startswith('projects/') + and not skip_project_and_location_in_path_val + and not self.api_key + ): + path = f'projects/{self.project}/locations/{self.location}/' + path + elif self.vertexai and self.api_key: + path = f'{path}?key={self.api_key}' + url = _join_url_path( + patched_http_options['base_url'], + patched_http_options['api_version'] + '/' + path, + ) + return HttpRequest( + method=http_method, + url=url, + headers=patched_http_options['headers'], + data=request_dict, + timeout=patched_http_options.get('timeout', None), + ) + + def _request( + self, + http_request: HttpRequest, + stream: bool = False, + ) -> HttpResponse: + if self.vertexai and not self.api_key: + if not self._credentials: + self._credentials, _ = google.auth.default( + scopes=["https://www.googleapis.com/auth/cloud-platform"], + ) + authed_session = AuthorizedSession(self._credentials) + authed_session.stream = stream + response = authed_session.request( + http_request.method.upper(), + http_request.url, + headers=http_request.headers, + data=json.dumps(http_request.data) + if http_request.data + else None, + timeout=http_request.timeout, + ) + errors.APIError.raise_for_response(response) + return HttpResponse( + response.headers, response if stream else [response.text] + ) + else: + return self._request_unauthorized(http_request, stream) + + def _request_unauthorized( + self, + http_request: HttpRequest, + stream: bool = False, + ) -> HttpResponse: + data = None + if http_request.data: + if not isinstance(http_request.data, bytes): + data = json.dumps(http_request.data) + else: + data = http_request.data + + http_session = requests.Session() + response = http_session.request( + method=http_request.method, + url=http_request.url, + headers=http_request.headers, + data=data, + timeout=http_request.timeout, + stream=stream, + ) + errors.APIError.raise_for_response(response) + return HttpResponse( + response.headers, response if stream else [response.text] + ) + + async def _async_request( + self, http_request: HttpRequest, stream: bool = False + ): + if self.vertexai: + if not self._credentials: + self._credentials, _ = google.auth.default( + scopes=["https://www.googleapis.com/auth/cloud-platform"], + ) + return await asyncio.to_thread( + self._request, + http_request, + stream=stream, + ) + else: + return await asyncio.to_thread( + self._request, + http_request, + stream=stream, + ) + + def get_read_only_http_options(self) -> HttpOptionsDict: + copied = HttpOptionsDict() + if isinstance(self._http_options, BaseModel): + self._http_options = self._http_options.model_dump() + copied.update(self._http_options) + return copied + + def request( + self, + http_method: str, + path: str, + request_dict: dict[str, object], + http_options: HttpOptionsDict = None, + ): + http_request = self._build_request( + http_method, path, request_dict, http_options + ) + response = self._request(http_request, stream=False) + if http_options and 'response_payload' in http_options: + response.copy_to_dict(http_options['response_payload']) + 
return response.text + + def request_streamed( + self, + http_method: str, + path: str, + request_dict: dict[str, object], + http_options: HttpOptionsDict = None, + ): + http_request = self._build_request( + http_method, path, request_dict, http_options + ) + + session_response = self._request(http_request, stream=True) + if http_options and 'response_payload' in http_options: + session_response.copy_to_dict(http_options['response_payload']) + for chunk in session_response.segments(): + yield chunk + + async def async_request( + self, + http_method: str, + path: str, + request_dict: dict[str, object], + http_options: HttpOptionsDict = None, + ) -> dict[str, object]: + http_request = self._build_request( + http_method, path, request_dict, http_options + ) + + result = await self._async_request(http_request=http_request, stream=False) + if http_options and 'response_payload' in http_options: + result.copy_to_dict(http_options['response_payload']) + return result.text + + async def async_request_streamed( + self, + http_method: str, + path: str, + request_dict: dict[str, object], + http_options: HttpOptionsDict = None, + ): + http_request = self._build_request( + http_method, path, request_dict, http_options + ) + + response = await self._async_request(http_request=http_request, stream=True) + + for chunk in response.segments(): + yield chunk + if http_options and 'response_payload' in http_options: + response.copy_to_dict(http_options['response_payload']) + + def upload_file( + self, file_path: Union[str, io.IOBase], upload_url: str, upload_size: int + ) -> str: + """Transfers a file to the given URL. + + Args: + file_path: The full path to the file or a file like object inherited from + io.BytesIO. If the local file path is not found, an error will be + raised. + upload_url: The URL to upload the file to. + upload_size: The size of file content to be uploaded, this will have to + match the size requested in the resumable upload request. + + returns: + The response json object from the finalize request. + """ + if isinstance(file_path, io.IOBase): + return self._upload_fd(file_path, upload_url, upload_size) + else: + with open(file_path, 'rb') as file: + return self._upload_fd(file, upload_url, upload_size) + + def _upload_fd( + self, file: io.IOBase, upload_url: str, upload_size: int + ) -> str: + """Transfers a file to the given URL. + + Args: + file: A file like object inherited from io.BytesIO. + upload_url: The URL to upload the file to. + upload_size: The size of file content to be uploaded, this will have to + match the size requested in the resumable upload request. + + returns: + The response json object from the finalize request. + """ + offset = 0 + # Upload the file in chunks + while True: + file_chunk = file.read(1024 * 1024 * 8) # 8 MB chunk size + chunk_size = 0 + if file_chunk: + chunk_size = len(file_chunk) + upload_command = 'upload' + # If last chunk, finalize the upload. + if chunk_size + offset >= upload_size: + upload_command += ', finalize' + request = HttpRequest( + method='POST', + url=upload_url, + headers={ + 'X-Goog-Upload-Command': upload_command, + 'X-Goog-Upload-Offset': str(offset), + 'Content-Length': str(chunk_size), + }, + data=file_chunk, + ) + + response = self._request_unauthorized(request, stream=False) + offset += chunk_size + if response.headers['X-Goog-Upload-Status'] != 'active': + break # upload is complete or it has been interrupted. + + if upload_size <= offset: # Status is not finalized. 
+ raise ValueError( + 'All content has been uploaded, but the upload status is not' + f' finalized. {response.headers}, body: {response.text}' + ) + + if response.headers['X-Goog-Upload-Status'] != 'final': + raise ValueError( + 'Failed to upload file: Upload status is not finalized. headers:' + f' {response.headers}, body: {response.text}' + ) + return response.text + + def download_file(self, path: str, http_options): + """Downloads the file data. + + Args: + path: The request path with query params. + http_options: The http options to use for the request. + + returns: + The file bytes + """ + http_request = self._build_request( + 'get', path=path, request_dict={}, http_options=http_options + ) + return self._download_file_request(http_request).byte_stream[0] + + def _download_file_request( + self, + http_request: HttpRequest, + ) -> HttpResponse: + data = None + if http_request.data: + if not isinstance(http_request.data, bytes): + data = json.dumps(http_request.data, cls=RequestJsonEncoder) + else: + data = http_request.data + + http_session = requests.Session() + response = http_session.request( + method=http_request.method, + url=http_request.url, + headers=http_request.headers, + data=data, + timeout=http_request.timeout, + stream=False, + ) + + errors.APIError.raise_for_response(response) + return HttpResponse(response.headers, byte_stream=[response.content]) + + + async def async_upload_file( + self, + file_path: Union[str, io.IOBase], + upload_url: str, + upload_size: int, + ) -> str: + """Transfers a file asynchronously to the given URL. + + Args: + file_path: The full path to the file. If the local file path is not found, + an error will be raised. + upload_url: The URL to upload the file to. + upload_size: The size of file content to be uploaded, this will have to + match the size requested in the resumable upload request. + + returns: + The response json object from the finalize request. + """ + return await asyncio.to_thread( + self.upload_file, + file_path, + upload_url, + upload_size, + ) + + async def _async_upload_fd( + self, + file: io.IOBase, + upload_url: str, + upload_size: int, + ) -> str: + """Transfers a file asynchronously to the given URL. + + Args: + file: A file like object inherited from io.BytesIO. + upload_url: The URL to upload the file to. + upload_size: The size of file content to be uploaded, this will have to + match the size requested in the resumable upload request. + + returns: + The response json object from the finalize request. + """ + return await asyncio.to_thread( + self._upload_fd, + file, + upload_url, + upload_size, + ) + + async def async_download_file(self, path: str, http_options): + """Downloads the file data. + + Args: + path: The request path with query params. + http_options: The http options to use for the request. + + returns: + The file bytes + """ + return await asyncio.to_thread( + self.download_file, + path, + http_options, + ) + + # This method does nothing in the real api client. It is used in the + # replay_api_client to verify the response from the SDK method matches the + # recorded response. 
+ def _verify_response(self, response_model: BaseModel): + pass diff --git a/.venv/lib/python3.12/site-packages/google/genai/_automatic_function_calling_util.py b/.venv/lib/python3.12/site-packages/google/genai/_automatic_function_calling_util.py new file mode 100644 index 00000000..12d1df7c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/genai/_automatic_function_calling_util.py @@ -0,0 +1,294 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import inspect +import types as typing_types +from typing import Any, Callable, Literal, Union, _GenericAlias, get_args, get_origin +import pydantic +from . import types + +_py_builtin_type_to_schema_type = { + str: 'STRING', + int: 'INTEGER', + float: 'NUMBER', + bool: 'BOOLEAN', + list: 'ARRAY', + dict: 'OBJECT', +} + + +def _is_builtin_primitive_or_compound( + annotation: inspect.Parameter.annotation, +) -> bool: + return annotation in _py_builtin_type_to_schema_type.keys() + + +def _raise_for_any_of_if_mldev(schema: types.Schema): + if schema.any_of: + raise ValueError( + 'AnyOf is not supported in function declaration schema for Google AI.' + ) + + +def _raise_for_default_if_mldev(schema: types.Schema): + if schema.default is not None: + raise ValueError( + 'Default value is not supported in function declaration schema for' + ' Google AI.' + ) + + +def _raise_for_nullable_if_mldev(schema: types.Schema): + if schema.nullable: + raise ValueError( + 'Nullable is not supported in function declaration schema for' + ' Google AI.' 
+ ) + + +def _raise_if_schema_unsupported(variant: str, schema: types.Schema): + if not variant == 'VERTEX_AI': + _raise_for_any_of_if_mldev(schema) + _raise_for_default_if_mldev(schema) + _raise_for_nullable_if_mldev(schema) + + +def _is_default_value_compatible( + default_value: Any, annotation: inspect.Parameter.annotation +) -> bool: + # None type is expected to be handled external to this function + if _is_builtin_primitive_or_compound(annotation): + return isinstance(default_value, annotation) + + if ( + isinstance(annotation, _GenericAlias) + or isinstance(annotation, typing_types.GenericAlias) + or isinstance(annotation, typing_types.UnionType) + ): + origin = get_origin(annotation) + if origin in (Union, typing_types.UnionType): + return any( + _is_default_value_compatible(default_value, arg) + for arg in get_args(annotation) + ) + + if origin is dict: + return isinstance(default_value, dict) + + if origin is list: + if not isinstance(default_value, list): + return False + # most tricky case, element in list is union type + # need to apply any logic within all + # see test case test_generic_alias_complex_array_with_default_value + # a: typing.List[int | str | float | bool] + # default_value: [1, 'a', 1.1, True] + return all( + any( + _is_default_value_compatible(item, arg) + for arg in get_args(annotation) + ) + for item in default_value + ) + + if origin is Literal: + return default_value in get_args(annotation) + + # return False for any other unrecognized annotation + # let caller handle the raise + return False + + +def _parse_schema_from_parameter( + variant: str, param: inspect.Parameter, func_name: str +) -> types.Schema: + """parse schema from parameter. + + from the simplest case to the most complex case. + """ + schema = types.Schema() + default_value_error_msg = ( + f'Default value {param.default} of parameter {param} of function' + f' {func_name} is not compatible with the parameter annotation' + f' {param.annotation}.' 
+ ) + if _is_builtin_primitive_or_compound(param.annotation): + if param.default is not inspect.Parameter.empty: + if not _is_default_value_compatible(param.default, param.annotation): + raise ValueError(default_value_error_msg) + schema.default = param.default + schema.type = _py_builtin_type_to_schema_type[param.annotation] + _raise_if_schema_unsupported(variant, schema) + return schema + if ( + isinstance(param.annotation, typing_types.UnionType) + # only parse simple UnionType, example int | str | float | bool + # complex types.UnionType will be invoked in raise branch + and all( + (_is_builtin_primitive_or_compound(arg) or arg is type(None)) + for arg in get_args(param.annotation) + ) + ): + schema.type = 'OBJECT' + schema.any_of = [] + unique_types = set() + for arg in get_args(param.annotation): + if arg.__name__ == 'NoneType': # Optional type + schema.nullable = True + continue + schema_in_any_of = _parse_schema_from_parameter( + variant, + inspect.Parameter( + 'item', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=arg + ), + func_name, + ) + if ( + schema_in_any_of.model_dump_json(exclude_none=True) + not in unique_types + ): + schema.any_of.append(schema_in_any_of) + unique_types.add(schema_in_any_of.model_dump_json(exclude_none=True)) + if len(schema.any_of) == 1: # param: list | None -> Array + schema.type = schema.any_of[0].type + schema.any_of = None + if ( + param.default is not inspect.Parameter.empty + and param.default is not None + ): + if not _is_default_value_compatible(param.default, param.annotation): + raise ValueError(default_value_error_msg) + schema.default = param.default + _raise_if_schema_unsupported(variant, schema) + return schema + if isinstance(param.annotation, _GenericAlias) or isinstance( + param.annotation, typing_types.GenericAlias + ): + origin = get_origin(param.annotation) + args = get_args(param.annotation) + if origin is dict: + schema.type = 'OBJECT' + if param.default is not inspect.Parameter.empty: + if not _is_default_value_compatible(param.default, param.annotation): + raise ValueError(default_value_error_msg) + schema.default = param.default + _raise_if_schema_unsupported(variant, schema) + return schema + if origin is Literal: + if not all(isinstance(arg, str) for arg in args): + raise ValueError( + f'Literal type {param.annotation} must be a list of strings.' 
+        )
+      schema.type = 'STRING'
+      schema.enum = list(args)
+      if param.default is not inspect.Parameter.empty:
+        if not _is_default_value_compatible(param.default, param.annotation):
+          raise ValueError(default_value_error_msg)
+        schema.default = param.default
+      _raise_if_schema_unsupported(variant, schema)
+      return schema
+    if origin is list:
+      schema.type = 'ARRAY'
+      schema.items = _parse_schema_from_parameter(
+          variant,
+          inspect.Parameter(
+              'item',
+              inspect.Parameter.POSITIONAL_OR_KEYWORD,
+              annotation=args[0],
+          ),
+          func_name,
+      )
+      if param.default is not inspect.Parameter.empty:
+        if not _is_default_value_compatible(param.default, param.annotation):
+          raise ValueError(default_value_error_msg)
+        schema.default = param.default
+      _raise_if_schema_unsupported(variant, schema)
+      return schema
+    if origin is Union:
+      schema.any_of = []
+      schema.type = 'OBJECT'
+      unique_types = set()
+      for arg in args:
+        if arg.__name__ == 'NoneType':  # Optional type
+          schema.nullable = True
+          continue
+        schema_in_any_of = _parse_schema_from_parameter(
+            variant,
+            inspect.Parameter(
+                'item',
+                inspect.Parameter.POSITIONAL_OR_KEYWORD,
+                annotation=arg,
+            ),
+            func_name,
+        )
+        if (
+            schema_in_any_of.model_dump_json(exclude_none=True)
+            not in unique_types
+        ):
+          schema.any_of.append(schema_in_any_of)
+          unique_types.add(schema_in_any_of.model_dump_json(exclude_none=True))
+      if len(schema.any_of) == 1:  # param: Union[List, None] -> Array
+        schema.type = schema.any_of[0].type
+        schema.any_of = None
+      if (
+          param.default is not None
+          and param.default is not inspect.Parameter.empty
+      ):
+        if not _is_default_value_compatible(param.default, param.annotation):
+          raise ValueError(default_value_error_msg)
+        schema.default = param.default
+      _raise_if_schema_unsupported(variant, schema)
+      return schema
+    # All other generic aliases fall through to the raise at the end.
+  if (
+      inspect.isclass(param.annotation)
+      # For user-defined classes, only pydantic models are supported.
+      and issubclass(param.annotation, pydantic.BaseModel)
+  ):
+    if (
+        param.default is not inspect.Parameter.empty
+        and param.default is not None
+    ):
+      schema.default = param.default
+    schema.type = 'OBJECT'
+    schema.properties = {}
+    for field_name, field_info in param.annotation.model_fields.items():
+      schema.properties[field_name] = _parse_schema_from_parameter(
+          variant,
+          inspect.Parameter(
+              field_name,
+              inspect.Parameter.POSITIONAL_OR_KEYWORD,
+              annotation=field_info.annotation,
+          ),
+          func_name,
+      )
+    _raise_if_schema_unsupported(variant, schema)
+    return schema
+  raise ValueError(
+      f'Failed to parse the parameter {param} of function {func_name} for'
+      ' automatic function calling. Automatic function calling works best with'
+      ' simpler function signature schemas; consider manually parsing your'
+      f' function declaration for function {func_name}.'
+  )
+
+
+def _get_required_fields(schema: types.Schema) -> Union[list[str], None]:
+  if not schema.properties:
+    # Return None (not []) so an absent 'required' list stays unset downstream.
+    return None
+  return [
+      field_name
+      for field_name, field_schema in schema.properties.items()
+      if not field_schema.nullable and field_schema.default is None
+  ]
diff --git a/.venv/lib/python3.12/site-packages/google/genai/_common.py b/.venv/lib/python3.12/site-packages/google/genai/_common.py
new file mode 100644
index 00000000..3211fd40
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/genai/_common.py
@@ -0,0 +1,272 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Common utilities for the SDK.""" + +import base64 +import datetime +import enum +import typing +from typing import Union +import uuid + +import pydantic +from pydantic import alias_generators + +from . import _api_client + + +def set_value_by_path(data, keys, value): + """Examples: + + set_value_by_path({}, ['a', 'b'], v) + -> {'a': {'b': v}} + set_value_by_path({}, ['a', 'b[]', c], [v1, v2]) + -> {'a': {'b': [{'c': v1}, {'c': v2}]}} + set_value_by_path({'a': {'b': [{'c': v1}, {'c': v2}]}}, ['a', 'b[]', 'd'], v3) + -> {'a': {'b': [{'c': v1, 'd': v3}, {'c': v2, 'd': v3}]}} + """ + if value is None: + return + for i, key in enumerate(keys[:-1]): + if key.endswith('[]'): + key_name = key[:-2] + if key_name not in data: + if isinstance(value, list): + data[key_name] = [{} for _ in range(len(value))] + else: + raise ValueError( + f'value {value} must be a list given an array path {key}' + ) + if isinstance(value, list): + for j, d in enumerate(data[key_name]): + set_value_by_path(d, keys[i + 1 :], value[j]) + else: + for d in data[key_name]: + set_value_by_path(d, keys[i + 1 :], value) + return + + data = data.setdefault(key, {}) + + existing_data = data.get(keys[-1]) + # If there is an existing value, merge, not overwrite. + if existing_data is not None: + # Don't overwrite existing non-empty value with new empty value. + # This is triggered when handling tuning datasets. + if not value: + pass + # Don't fail when overwriting value with same value + elif value == existing_data: + pass + # Instead of overwriting dictionary with another dictionary, merge them. + # This is important for handling training and validation datasets in tuning. + elif isinstance(existing_data, dict) and isinstance(value, dict): + # Merging dictionaries. Consider deep merging in the future. + existing_data.update(value) + else: + raise ValueError( + f'Cannot set value for an existing key. Key: {keys[-1]};' + f' Existing value: {existing_data}; New value: {value}.' + ) + else: + data[keys[-1]] = value + + +def get_value_by_path(data: object, keys: list[str]): + """Examples: + + get_value_by_path({'a': {'b': v}}, ['a', 'b']) + -> v + get_value_by_path({'a': {'b': [{'c': v1}, {'c': v2}]}}, ['a', 'b[]', 'c']) + -> [v1, v2] + """ + if keys == ['_self']: + return data + for i, key in enumerate(keys): + if not data: + return None + if key.endswith('[]'): + key_name = key[:-2] + if key_name in data: + return [get_value_by_path(d, keys[i + 1 :]) for d in data[key_name]] + else: + return None + else: + if key in data: + data = data[key] + elif isinstance(data, BaseModel) and hasattr(data, key): + data = getattr(data, key) + else: + return None + return data + + +class BaseModule: + + def __init__(self, api_client_: _api_client.ApiClient): + self._api_client = api_client_ + + +def convert_to_dict(obj: dict[str, object]) -> dict[str, object]: + """Recursively converts a given object to a dictionary. + + If the object is a Pydantic model, it uses the model's `model_dump()` method. + + Args: + obj: The object to convert. + + Returns: + A dictionary representation of the object. 
+ """ + if isinstance(obj, pydantic.BaseModel): + return obj.model_dump(exclude_none=True) + elif isinstance(obj, dict): + return {key: convert_to_dict(value) for key, value in obj.items()} + elif isinstance(obj, list): + return [convert_to_dict(item) for item in obj] + else: + return obj + + +def _remove_extra_fields( + model: pydantic.BaseModel, response: dict[str, object] +) -> None: + """Removes extra fields from the response that are not in the model. + + Mutates the response in place. + """ + + key_values = list(response.items()) + + for key, value in key_values: + # Need to convert to snake case to match model fields names + # ex: UsageMetadata + alias_map = { + field_info.alias: key for key, field_info in model.model_fields.items() + } + + if key not in model.model_fields and key not in alias_map: + response.pop(key) + continue + + key = alias_map.get(key, key) + + annotation = model.model_fields[key].annotation + + # Get the BaseModel if Optional + if typing.get_origin(annotation) is Union: + annotation = typing.get_args(annotation)[0] + + # if dict, assume BaseModel but also check that field type is not dict + # example: FunctionCall.args + if isinstance(value, dict) and typing.get_origin(annotation) is not dict: + _remove_extra_fields(annotation, value) + elif isinstance(value, list): + for item in value: + # assume a list of dict is list of BaseModel + if isinstance(item, dict): + _remove_extra_fields(typing.get_args(annotation)[0], item) + + +class BaseModel(pydantic.BaseModel): + + model_config = pydantic.ConfigDict( + alias_generator=alias_generators.to_camel, + populate_by_name=True, + from_attributes=True, + protected_namespaces=(), + extra='forbid', + # This allows us to use arbitrary types in the model. E.g. PIL.Image. + arbitrary_types_allowed=True, + ser_json_bytes='base64', + val_json_bytes='base64', + ) + + @classmethod + def _from_response( + cls, response: dict[str, object], kwargs: dict[str, object] + ) -> 'BaseModel': + # To maintain forward compatibility, we need to remove extra fields from + # the response. + # We will provide another mechanism to allow users to access these fields. + _remove_extra_fields(cls, response) + validated_response = cls.model_validate(response) + return validated_response + + def to_json_dict(self) -> dict[str, object]: + return self.model_dump(exclude_none=True, mode='json') + + +class CaseInSensitiveEnum(str, enum.Enum): + """Case insensitive enum.""" + + @classmethod + def _missing_(cls, value): + try: + return cls[value.upper()] # Try to access directly with uppercase + except KeyError: + try: + return cls[value.lower()] # Try to access directly with lowercase + except KeyError as e: + raise ValueError(f"{value} is not a valid {cls.__name__}") from e + + +def timestamped_unique_name() -> str: + """Composes a timestamped unique name. + + Returns: + A string representing a unique name. + """ + timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S') + unique_id = uuid.uuid4().hex[0:5] + return f'{timestamp}_{unique_id}' + + +def encode_unserializable_types(data: dict[str, object]) -> dict[str, object]: + """Converts unserializable types in dict to json.dumps() compatible types. + + This function is called in models.py after calling convert_to_dict(). The + convert_to_dict() can convert pydantic object to dict. However, the input to + convert_to_dict() is dict mixed of pydantic object and nested dict(the output + of converters). 
So the dict may still contain bytes values, which fall outside the
+  `ser_json_bytes` handling of model_dump(mode='json') in `convert_to_dict`,
+  and likewise datetime values outside Pydantic's JSON-mode serialization.
+
+  Returns:
+    A dictionary with json.dumps()-incompatible types (e.g. bytes, datetime)
+    converted to compatible types (e.g. base64-encoded string, isoformat date
+    string).
+  """
+  processed_data = {}
+  if not isinstance(data, dict):
+    return data
+  for key, value in data.items():
+    if isinstance(value, bytes):
+      processed_data[key] = base64.urlsafe_b64encode(value).decode('ascii')
+    elif isinstance(value, datetime.datetime):
+      processed_data[key] = value.isoformat()
+    elif isinstance(value, dict):
+      processed_data[key] = encode_unserializable_types(value)
+    elif isinstance(value, list):
+      if all(isinstance(v, bytes) for v in value):
+        processed_data[key] = [
+            base64.urlsafe_b64encode(v).decode('ascii') for v in value
+        ]
+      # 'elif' (not 'if') so a list of bytes encoded above is not overwritten
+      # by the fallback branch below.
+      elif all(isinstance(v, datetime.datetime) for v in value):
+        processed_data[key] = [v.isoformat() for v in value]
+      else:
+        processed_data[key] = [encode_unserializable_types(v) for v in value]
+    else:
+      processed_data[key] = value
+  return processed_data
diff --git a/.venv/lib/python3.12/site-packages/google/genai/_extra_utils.py b/.venv/lib/python3.12/site-packages/google/genai/_extra_utils.py
new file mode 100644
index 00000000..db8d377b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/genai/_extra_utils.py
@@ -0,0 +1,310 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Extra utilities that depend on types and are shared between the sync and
+async modules.
+"""
+
+import inspect
+import logging
+from typing import Any, Callable, Dict, get_args, get_origin, Optional, types as typing_types, Union
+
+import pydantic
+
+from . import _common
+from . import errors
+from . import types
+
+
+_DEFAULT_MAX_REMOTE_CALLS_AFC = 10
+
+
+def format_destination(
+    src: str,
+    config: Optional[types.CreateBatchJobConfigOrDict] = None,
+) -> types.CreateBatchJobConfig:
+  """Formats the destination URI based on the source URI."""
+  config = (
+      types._CreateBatchJobParameters(config=config).config
+      or types.CreateBatchJobConfig()
+  )
+
+  unique_name = None
+  if not config.display_name:
+    unique_name = _common.timestamped_unique_name()
+    config.display_name = f'genai_batch_job_{unique_name}'
+
+  if not config.dest:
+    if src.startswith('gs://') and src.endswith('.jsonl'):
+      # If the source URI is "gs://bucket/path/to/src.jsonl", the destination
+      # URI prefix will be "gs://bucket/path/to/src/dest".
+      config.dest = f'{src[:-6]}/dest'
+    elif src.startswith('bq://'):
+      # If the source URI is "bq://project.dataset.src", the destination
+      # URI will be "bq://project.dataset.src_dest_TIMESTAMP_UUID".
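+      # Illustrative (hypothetical values): src='bq://proj.ds.table' gives
+      # dest='bq://proj.ds.table_dest_20240101120000_ab12c', where the suffix
+      # comes from _common.timestamped_unique_name().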
+ unique_name = unique_name or _common.timestamped_unique_name() + config.dest = f'{src}_dest_{unique_name}' + else: + raise ValueError(f'Unsupported source: {src}') + return config + + +def get_function_map( + config: Optional[types.GenerateContentConfigOrDict] = None, +) -> dict[str, object]: + """Returns a function map from the config.""" + config_model = ( + types.GenerateContentConfig(**config) + if config and isinstance(config, dict) + else config + ) + function_map = {} + if not config_model: + return function_map + if config_model.tools: + for tool in config_model.tools: + if callable(tool): + if inspect.iscoroutinefunction(tool): + raise errors.UnsupportedFunctionError( + f'Function {tool.__name__} is a coroutine function, which is not' + ' supported for automatic function calling. Please manually invoke' + f' {tool.__name__} to get the function response.' + ) + function_map[tool.__name__] = tool + return function_map + + +def convert_number_values_for_function_call_args( + args: Union[dict[str, object], list[object], object], +) -> Union[dict[str, object], list[object], object]: + """Converts float values with no decimal to integers.""" + if isinstance(args, float) and args.is_integer(): + return int(args) + if isinstance(args, dict): + return { + key: convert_number_values_for_function_call_args(value) + for key, value in args.items() + } + if isinstance(args, list): + return [ + convert_number_values_for_function_call_args(value) for value in args + ] + return args + + +def _is_annotation_pydantic_model(annotation: Any) -> bool: + return inspect.isclass(annotation) and issubclass( + annotation, pydantic.BaseModel + ) + + +def convert_if_exist_pydantic_model( + value: Any, annotation: Any, param_name: str, func_name: str +) -> Any: + if isinstance(value, dict) and _is_annotation_pydantic_model(annotation): + try: + return annotation(**value) + except pydantic.ValidationError as e: + raise errors.UnknownFunctionCallArgumentError( + f'Failed to parse parameter {param_name} for function' + f' {func_name} from function call part because function call argument' + f' value {value} is not compatible with parameter annotation' + f' {annotation}, due to error {e}' + ) + if isinstance(value, list) and get_origin(annotation) == list: + item_type = get_args(annotation)[0] + return [ + convert_if_exist_pydantic_model(item, item_type, param_name, func_name) + for item in value + ] + if isinstance(value, dict) and get_origin(annotation) == dict: + _, value_type = get_args(annotation) + return { + k: convert_if_exist_pydantic_model(v, value_type, param_name, func_name) + for k, v in value.items() + } + # example 1: typing.Union[int, float] + # example 2: int | float equivalent to typing.types.UnionType[int, float] + if get_origin(annotation) in (Union, typing_types.UnionType): + for arg in get_args(annotation): + if isinstance(value, arg) or ( + isinstance(value, dict) and _is_annotation_pydantic_model(arg) + ): + try: + return convert_if_exist_pydantic_model( + value, arg, param_name, func_name + ) + # do not raise here because there could be multiple pydantic model types + # in the union type. + except pydantic.ValidationError: + continue + # if none of the union type is matched, raise error + raise errors.UnknownFunctionCallArgumentError( + f'Failed to parse parameter {param_name} for function' + f' {func_name} from function call part because function call argument' + f' value {value} cannot be converted to parameter annotation' + f' {annotation}.' 
+    )
+  # The only permitted mismatch between value type and annotation is int vs.
+  # float; see convert_number_values_for_function_call_args for context.
+  if isinstance(value, int) and annotation is float:
+    return value
+  if not isinstance(value, annotation):
+    raise errors.UnknownFunctionCallArgumentError(
+        f'Failed to parse parameter {param_name} for function {func_name} from'
+        f' function call part because function call argument value {value} is'
+        f' not compatible with parameter annotation {annotation}.'
+    )
+  return value
+
+
+def invoke_function_from_dict_args(
+    args: Dict[str, Any], function_to_invoke: Callable
+) -> Any:
+  signature = inspect.signature(function_to_invoke)
+  func_name = function_to_invoke.__name__
+  converted_args = {}
+  for param_name, param in signature.parameters.items():
+    if param_name in args:
+      converted_args[param_name] = convert_if_exist_pydantic_model(
+          args[param_name],
+          param.annotation,
+          param_name,
+          func_name,
+      )
+  try:
+    return function_to_invoke(**converted_args)
+  except Exception as e:
+    raise errors.FunctionInvocationError(
+        f'Failed to invoke function {func_name} with converted arguments'
+        f' {converted_args} from model returned function call argument'
+        f' {args} because of error {e}'
+    )
+
+
+def get_function_response_parts(
+    response: types.GenerateContentResponse,
+    function_map: dict[str, object],
+) -> list[types.Part]:
+  """Returns the function response parts from the response."""
+  func_response_parts = []
+  for part in response.candidates[0].content.parts:
+    if not part.function_call:
+      continue
+    func_name = part.function_call.name
+    func = function_map[func_name]
+    args = convert_number_values_for_function_call_args(part.function_call.args)
+    try:
+      # Use a dedicated name so the `response` parameter is not shadowed.
+      function_response = {'result': invoke_function_from_dict_args(args, func)}
+    except Exception as e:  # pylint: disable=broad-except
+      function_response = {'error': str(e)}
+    func_response = types.Part.from_function_response(
+        func_name, function_response
+    )
+
+    func_response_parts.append(func_response)
+  return func_response_parts
+
+
+def should_disable_afc(
+    config: Optional[types.GenerateContentConfigOrDict] = None,
+) -> bool:
+  """Returns whether automatic function calling should be disabled."""
+  config_model = (
+      types.GenerateContentConfig(**config)
+      if config and isinstance(config, dict)
+      else config
+  )
+
+  # If max_remote_calls is less than or equal to 0, warn and disable AFC.
+  if (
+      config_model
+      and config_model.automatic_function_calling
+      and config_model.automatic_function_calling.maximum_remote_calls
+      is not None
+      and int(config_model.automatic_function_calling.maximum_remote_calls)
+      <= 0
+  ):
+    logging.warning(
+        'max_remote_calls in automatic_function_calling_config'
+        f' {config_model.automatic_function_calling.maximum_remote_calls} is'
+        ' less than or equal to 0. Disabling automatic function calling.'
+        ' Please set max_remote_calls to a positive integer.'
+    )
+    return True
+
+  # Default to enabling AFC if not specified.
+  if (
+      not config_model
+      or not config_model.automatic_function_calling
+      or config_model.automatic_function_calling.disable is None
+  ):
+    return False
+
+  if (
+      config_model.automatic_function_calling.disable
+      and config_model.automatic_function_calling.maximum_remote_calls
+      is not None
+      and int(config_model.automatic_function_calling.maximum_remote_calls) > 0
+  ):
+    logging.warning(
+        '`automatic_function_calling.disable` is set to `True`.
But' + ' `automatic_function_calling.maximum_remote_calls` is set to be a' + ' positive number' + f' {config_model.automatic_function_calling.maximum_remote_calls}.' + ' Disabling automatic function calling. If you want to enable' + ' automatic function calling, please set' + ' `automatic_function_calling.disable` to `False` or leave it unset,' + ' and set `automatic_function_calling.maximum_remote_calls` to a' + ' positive integer or leave' + ' `automatic_function_calling.maximum_remote_calls` unset.' + ) + + return config_model.automatic_function_calling.disable + + +def get_max_remote_calls_afc( + config: Optional[types.GenerateContentConfigOrDict] = None, +) -> int: + """Returns the remaining remote calls for automatic function calling.""" + if should_disable_afc(config): + raise ValueError( + 'automatic function calling is not enabled, but SDK is trying to get' + ' max remote calls.' + ) + config_model = ( + types.GenerateContentConfig(**config) + if config and isinstance(config, dict) + else config + ) + if ( + not config_model + or not config_model.automatic_function_calling + or config_model.automatic_function_calling.maximum_remote_calls is None + ): + return _DEFAULT_MAX_REMOTE_CALLS_AFC + return int(config_model.automatic_function_calling.maximum_remote_calls) + +def should_append_afc_history( + config: Optional[types.GenerateContentConfigOrDict] = None, +) -> bool: + config_model = ( + types.GenerateContentConfig(**config) + if config and isinstance(config, dict) + else config + ) + if ( + not config_model + or not config_model.automatic_function_calling + ): + return True + return not config_model.automatic_function_calling.ignore_call_history diff --git a/.venv/lib/python3.12/site-packages/google/genai/_replay_api_client.py b/.venv/lib/python3.12/site-packages/google/genai/_replay_api_client.py new file mode 100644 index 00000000..3af2336d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/genai/_replay_api_client.py @@ -0,0 +1,449 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Replay API client.""" + +import base64 +import copy +import datetime +import inspect +import io +import json +import os +import re +from typing import Any, Literal, Optional, Union + +import google.auth +from requests.exceptions import HTTPError + +from . 
import errors
+from ._api_client import ApiClient
+from ._api_client import HttpOptions
+from ._api_client import HttpRequest
+from ._api_client import HttpResponse
+from ._common import BaseModel
+
+
+def _redact_version_numbers(version_string: str) -> str:
+  """Redacts version numbers in the form x.y.z from a string."""
+  return re.sub(r'\d+\.\d+\.\d+', '{VERSION_NUMBER}', version_string)
+
+
+def _redact_language_label(language_label: str) -> str:
+  """Redacts the language label, since replay files are shared across languages."""
+  return re.sub(r'gl-python/', '{LANGUAGE_LABEL}/', language_label)
+
+
+def _redact_request_headers(headers):
+  """Redacts headers that should not be recorded."""
+  redacted_headers = {}
+  for header_name, header_value in headers.items():
+    if header_name.lower() == 'x-goog-api-key':
+      redacted_headers[header_name] = '{REDACTED}'
+    elif header_name.lower() == 'user-agent':
+      redacted_headers[header_name] = _redact_language_label(
+          _redact_version_numbers(header_value)
+      )
+    elif header_name.lower() == 'x-goog-api-client':
+      redacted_headers[header_name] = _redact_language_label(
+          _redact_version_numbers(header_value)
+      )
+    else:
+      redacted_headers[header_name] = header_value
+  return redacted_headers
+
+
+def _redact_request_url(url: str) -> str:
+  # Redact all the url parts before the resource name, so the test can work
+  # against any project, location, version, or whether it's EasyGCP.
+  result = re.sub(
+      r'.*/projects/[^/]+/locations/[^/]+/',
+      '{VERTEX_URL_PREFIX}/',
+      url,
+  )
+  result = re.sub(
+      r'.*-aiplatform.googleapis.com/[^/]+/',
+      '{VERTEX_URL_PREFIX}/',
+      result,
+  )
+  result = re.sub(
+      r'https://generativelanguage.googleapis.com/[^/]+',
+      '{MLDEV_URL_PREFIX}',
+      result,
+  )
+  return result
+
+
+def _redact_project_location_path(path: str) -> str:
+  # Redact a field in the request that is known to vary based on project and
+  # location.
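+  # Illustrative (hypothetical value):
+  #   'projects/my-proj/locations/us-central1/cachedContents/123'
+  #   -> '{PROJECT_AND_LOCATION_PATH}/cachedContents/123'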
+  if 'projects/' in path and 'locations/' in path:
+    result = re.sub(
+        r'projects/[^/]+/locations/[^/]+/',
+        '{PROJECT_AND_LOCATION_PATH}/',
+        path,
+    )
+    return result
+  else:
+    return path
+
+
+def _redact_request_body(body: dict[str, object]) -> None:
+  # Mutates the body in place, redacting project/location paths in string
+  # values.
+  for key, value in body.items():
+    if isinstance(value, str):
+      body[key] = _redact_project_location_path(value)
+
+
+def redact_http_request(http_request: HttpRequest):
+  http_request.headers = _redact_request_headers(http_request.headers)
+  http_request.url = _redact_request_url(http_request.url)
+  _redact_request_body(http_request.data)
+
+
+def _current_file_path_and_line():
+  """Returns the current file path and line number as a string."""
+  frame = inspect.currentframe().f_back.f_back
+  filepath = inspect.getfile(frame)
+  lineno = frame.f_lineno
+  return f'File: {filepath}, Line: {lineno}'
+
+
+def _debug_print(message: str):
+  print(
+      'DEBUG (test',
+      os.environ.get('PYTEST_CURRENT_TEST'),
+      ')',
+      _current_file_path_and_line(),
+      ':\n ',
+      message,
+  )
+
+
+class ReplayRequest(BaseModel):
+  """Represents a single request in a replay."""
+
+  method: str
+  url: str
+  headers: dict[str, str]
+  body_segments: list[dict[str, object]]
+
+
+class ReplayResponse(BaseModel):
+  """Represents a single response in a replay."""
+
+  status_code: int = 200
+  headers: dict[str, str]
+  body_segments: list[dict[str, object]]
+  byte_segments: Optional[list[bytes]] = None
+  sdk_response_segments: list[dict[str, object]]
+
+  def model_post_init(self, __context: Any) -> None:
+    # Remove headers that are not deterministic so the replay files don't
+    # change every time they are recorded.
+    self.headers.pop('Date', None)
+    self.headers.pop('Server-Timing', None)
+
+
+class ReplayInteraction(BaseModel):
+  """Represents a single interaction: a request and its response in a replay."""
+
+  request: ReplayRequest
+  response: ReplayResponse
+
+
+class ReplayFile(BaseModel):
+  """Represents a recorded session."""
+
+  replay_id: str
+  interactions: list[ReplayInteraction]
+
+
+class ReplayApiClient(ApiClient):
+  """For integration testing: serves recorded responses or records new ones."""
+
+  def __init__(
+      self,
+      mode: Literal['record', 'replay', 'auto', 'api'],
+      replay_id: str,
+      replays_directory: Optional[str] = None,
+      vertexai: bool = False,
+      api_key: Optional[str] = None,
+      credentials: Optional[google.auth.credentials.Credentials] = None,
+      project: Optional[str] = None,
+      location: Optional[str] = None,
+      http_options: Optional[HttpOptions] = None,
+  ):
+    super().__init__(
+        vertexai=vertexai,
+        api_key=api_key,
+        credentials=credentials,
+        project=project,
+        location=location,
+        http_options=http_options,
+    )
+    self.replays_directory = replays_directory
+    if not self.replays_directory:
+      self.replays_directory = os.environ.get(
+          'GOOGLE_GENAI_REPLAYS_DIRECTORY', None
+      )
+    # Valid modes are 'record', 'replay', 'auto', and 'api':
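+    # 'record': always call the API and write the interactions to a replay file.
+    # 'replay': never call the API; serve responses from the replay file.
+    # 'auto':   replay when a replay file exists, otherwise record one.
+    # 'api':    always call the API and leave replay files untouched.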
+ self.replay_session = None + self._mode = mode + self._replay_id = replay_id + + def initialize_replay_session(self, replay_id: str): + self._replay_id = replay_id + self._initialize_replay_session() + + def _get_replay_file_path(self): + return self._generate_file_path_from_replay_id( + self.replays_directory, self._replay_id + ) + + def _should_call_api(self): + return self._mode in ['record', 'api'] or ( + self._mode == 'auto' + and not os.path.isfile(self._get_replay_file_path()) + ) + + def _should_update_replay(self): + return self._should_call_api() and self._mode != 'api' + + def _initialize_replay_session_if_not_loaded(self): + if not self.replay_session: + self._initialize_replay_session() + + def _initialize_replay_session(self): + _debug_print('Test is using replay id: ' + self._replay_id) + self._replay_index = 0 + self._sdk_response_index = 0 + replay_file_path = self._get_replay_file_path() + # This should not be triggered from the constructor. + replay_file_exists = os.path.isfile(replay_file_path) + if self._mode == 'replay' and not replay_file_exists: + raise ValueError( + 'Replay files do not exist for replay id: ' + self._replay_id + ) + + if self._mode in ['replay', 'auto'] and replay_file_exists: + with open(replay_file_path, 'r') as f: + self.replay_session = ReplayFile.model_validate(json.loads(f.read())) + + if self._should_update_replay(): + self.replay_session = ReplayFile( + replay_id=self._replay_id, interactions=[] + ) + + def _generate_file_path_from_replay_id(self, replay_directory, replay_id): + session_parts = replay_id.split('/') + if len(session_parts) < 3: + raise ValueError( + f'{replay_id}: Session ID must be in the format of' + ' module/function/[vertex|mldev]' + ) + if replay_directory is None: + path_parts = [] + else: + path_parts = [replay_directory] + path_parts.extend(session_parts) + return os.path.join(*path_parts) + '.json' + + def close(self): + if not self._should_update_replay() or not self.replay_session: + return + replay_file_path = self._get_replay_file_path() + os.makedirs(os.path.dirname(replay_file_path), exist_ok=True) + with open(replay_file_path, 'w') as f: + f.write(self.replay_session.model_dump_json(exclude_unset=True, indent=2)) + self.replay_session = None + + def _record_interaction( + self, + http_request: HttpRequest, + http_response: Union[HttpResponse, errors.APIError, bytes], + ): + if not self._should_update_replay(): + return + redact_http_request(http_request) + request = ReplayRequest( + method=http_request.method, + url=http_request.url, + headers=http_request.headers, + body_segments=[http_request.data], + ) + if isinstance(http_response, HttpResponse): + response = ReplayResponse( + headers=dict(http_response.headers), + body_segments=list(http_response.segments()), + byte_segments=[ + seg[:100] + b'...' 
for seg in http_response.byte_segments() + ], + status_code=http_response.status_code, + sdk_response_segments=[], + ) + else: + response = ReplayResponse( + headers=dict(http_response.response.headers), + body_segments=[http_response._to_replay_record()], + status_code=http_response.code, + sdk_response_segments=[], + ) + self.replay_session.interactions.append( + ReplayInteraction(request=request, response=response) + ) + + def _match_request( + self, + http_request: HttpRequest, + interaction: ReplayInteraction, + ): + assert http_request.url == interaction.request.url + assert http_request.headers == interaction.request.headers, ( + 'Request headers mismatch:\n' + f'Actual: {http_request.headers}\n' + f'Expected: {interaction.request.headers}' + ) + assert http_request.method == interaction.request.method + + # Sanitize the request body, rewrite any fields that vary. + request_data_copy = copy.deepcopy(http_request.data) + # Both the request and recorded request must be redacted before comparing + # so that the comparison is fair. + _redact_request_body(request_data_copy) + + actual_request_body = [request_data_copy] + expected_request_body = interaction.request.body_segments + assert actual_request_body == expected_request_body, ( + 'Request body mismatch:\n' + f'Actual: {actual_request_body}\n' + f'Expected: {expected_request_body}' + ) + + def _build_response_from_replay(self, http_request: HttpRequest): + redact_http_request(http_request) + + interaction = self.replay_session.interactions[self._replay_index] + # Replay is on the right side of the assert so the diff makes more sense. + self._match_request(http_request, interaction) + self._replay_index += 1 + self._sdk_response_index = 0 + errors.APIError.raise_for_response(interaction.response) + return HttpResponse( + headers=interaction.response.headers, + response_stream=[ + json.dumps(segment) + for segment in interaction.response.body_segments + ], + byte_stream=interaction.response.byte_segments, + ) + + def _verify_response(self, response_model: BaseModel): + if self._mode == 'api': + return + # replay_index is advanced in _build_response_from_replay, so we need to -1. 
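+    # e.g. after replaying interaction k, _replay_index == k + 1, so the
+    # interaction that was just consumed sits at _replay_index - 1.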
+ interaction = self.replay_session.interactions[self._replay_index - 1] + if self._should_update_replay(): + if isinstance(response_model, list): + response_model = response_model[0] + interaction.response.sdk_response_segments.append( + response_model.model_dump(exclude_none=True) + ) + return + + if isinstance(response_model, list): + response_model = response_model[0] + print('response_model: ', response_model.model_dump(exclude_none=True)) + actual = response_model.model_dump(exclude_none=True, mode='json') + expected = interaction.response.sdk_response_segments[ + self._sdk_response_index + ] + assert ( + actual == expected + ), f'SDK response mismatch:\nActual: {actual}\nExpected: {expected}' + self._sdk_response_index += 1 + + def _request( + self, + http_request: HttpRequest, + stream: bool = False, + ) -> HttpResponse: + self._initialize_replay_session_if_not_loaded() + if self._should_call_api(): + _debug_print('api mode request: %s' % http_request) + try: + result = super()._request(http_request, stream) + except errors.APIError as e: + self._record_interaction(http_request, e) + raise e + if stream: + result_segments = [] + for segment in result.segments(): + result_segments.append(json.dumps(segment)) + result = HttpResponse(result.headers, result_segments) + self._record_interaction(http_request, result) + # Need to return a RecordedResponse that rebuilds the response + # segments since the stream has been consumed. + else: + self._record_interaction(http_request, result) + _debug_print('api mode result: %s' % result.text) + return result + else: + return self._build_response_from_replay(http_request) + + def upload_file(self, file_path: Union[str, io.IOBase], upload_url: str, upload_size: int): + if isinstance(file_path, io.IOBase): + offset = file_path.tell() + content = file_path.read() + file_path.seek(offset, os.SEEK_SET) + request = HttpRequest( + method='POST', + url='', + data={'bytes': base64.b64encode(content).decode('utf-8')}, + headers={} + ) + else: + request = HttpRequest( + method='POST', url='', data={'file_path': file_path}, headers={} + ) + if self._should_call_api(): + try: + result = super().upload_file(file_path, upload_url, upload_size) + except HTTPError as e: + result = HttpResponse( + e.response.headers, [json.dumps({'reason': e.response.reason})] + ) + result.status_code = e.response.status_code + raise e + self._record_interaction(request, HttpResponse({}, [json.dumps(result)])) + return result + else: + return self._build_response_from_replay(request).text + + def _download_file_request(self, request): + self._initialize_replay_session_if_not_loaded() + if self._should_call_api(): + try: + result = super()._download_file_request(request) + except HTTPError as e: + result = HttpResponse( + e.response.headers, [json.dumps({'reason': e.response.reason})] + ) + result.status_code = e.response.status_code + raise e + self._record_interaction(request, result) + return result + else: + return self._build_response_from_replay(request) + diff --git a/.venv/lib/python3.12/site-packages/google/genai/_test_api_client.py b/.venv/lib/python3.12/site-packages/google/genai/_test_api_client.py new file mode 100644 index 00000000..3d3bf3e2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/genai/_test_api_client.py @@ -0,0 +1,149 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import asyncio +import time +from unittest.mock import MagicMock, patch +import pytest +from .api_client import ApiClient + + +@patch('genai.api_client.ApiClient._build_request') +@patch('genai.api_client.ApiClient._request') +def test_request_streamed_non_blocking(mock_request, mock_build_request): + api_client = ApiClient(api_key='test_api_key') + http_method = 'GET' + path = 'test/path' + request_dict = {'key': 'value'} + + mock_http_request = MagicMock() + mock_build_request.return_value = mock_http_request + + def delayed_segments(): + chunks = ['{"chunk": 1}', '{"chunk": 2}', '{"chunk": 3}'] + for chunk in chunks: + time.sleep(0.1) # 100ms delay + yield chunk + + mock_response = MagicMock() + mock_response.segments.side_effect = delayed_segments + mock_request.return_value = mock_response + + chunks = [] + start_time = time.time() + for chunk in api_client.request_streamed(http_method, path, request_dict): + chunks.append(chunk) + assert len(chunks) <= 3 + end_time = time.time() + + mock_build_request.assert_called_once_with( + http_method, path, request_dict, None + ) + mock_request.assert_called_once_with(mock_http_request, stream=True) + assert chunks == ['{"chunk": 1}', '{"chunk": 2}', '{"chunk": 3}'] + assert end_time - start_time > 0.3 + + +@patch('genai.api_client.ApiClient._build_request') +@patch('genai.api_client.ApiClient._async_request') +@pytest.mark.asyncio +async def test_async_request(mock_async_request, mock_build_request): + api_client = ApiClient(api_key='test_api_key') + http_method = 'GET' + path = 'test/path' + request_dict = {'key': 'value'} + + mock_http_request = MagicMock() + mock_build_request.return_value = mock_http_request + + class MockResponse: + + def __init__(self, text): + self.text = text + + async def delayed_response(http_request, stream): + await asyncio.sleep(0.1) # 100ms delay + return MockResponse('value') + + mock_async_request.side_effect = delayed_response + + async_coroutine1 = api_client.async_request(http_method, path, request_dict) + async_coroutine2 = api_client.async_request(http_method, path, request_dict) + async_coroutine3 = api_client.async_request(http_method, path, request_dict) + + start_time = time.time() + results = await asyncio.gather( + async_coroutine1, async_coroutine2, async_coroutine3 + ) + end_time = time.time() + + mock_build_request.assert_called_with(http_method, path, request_dict, None) + assert mock_build_request.call_count == 3 + mock_async_request.assert_called_with( + http_request=mock_http_request, stream=False + ) + assert mock_async_request.call_count == 3 + assert results == ['value', 'value', 'value'] + assert 0.1 <= end_time - start_time < 0.15 + + +@patch('genai.api_client.ApiClient._build_request') +@patch('genai.api_client.ApiClient._async_request') +@pytest.mark.asyncio +async def test_async_request_streamed_non_blocking( + mock_async_request, mock_build_request +): + api_client = ApiClient(api_key='test_api_key') + http_method = 'GET' + path = 'test/path' + request_dict = {'key': 'value'} + + mock_http_request = MagicMock() + mock_build_request.return_value = mock_http_request + + 
class MockResponse:
+
+    def __init__(self, segments):
+      self._segments = segments
+
+    # We should mock an async generator here, but the source code combines
+    # sync and async streaming in one segments() method.
+    # TODO: fix the above
+    def segments(self):
+      for segment in self._segments:
+        time.sleep(0.1)  # 100ms delay
+        yield segment
+
+  async def delayed_response(http_request, stream):
+    return MockResponse(['{"chunk": 1}', '{"chunk": 2}', '{"chunk": 3}'])
+
+  mock_async_request.side_effect = delayed_response
+
+  chunks = []
+  start_time = time.time()
+  async for chunk in api_client.async_request_streamed(
+      http_method, path, request_dict
+  ):
+    chunks.append(chunk)
+    assert len(chunks) <= 3
+  end_time = time.time()
+
+  mock_build_request.assert_called_once_with(
+      http_method, path, request_dict, None
+  )
+  mock_async_request.assert_called_once_with(
+      http_request=mock_http_request, stream=True
+  )
+  assert chunks == ['{"chunk": 1}', '{"chunk": 2}', '{"chunk": 3}']
+  assert end_time - start_time > 0.3
diff --git a/.venv/lib/python3.12/site-packages/google/genai/_transformers.py b/.venv/lib/python3.12/site-packages/google/genai/_transformers.py
new file mode 100644
index 00000000..f1b392e9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/genai/_transformers.py
@@ -0,0 +1,621 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Transformers for Google GenAI SDK."""
+
+import base64
+from collections.abc import Iterable, Mapping
+import inspect
+import io
+import re
+import time
+import typing
+from typing import Any, GenericAlias, Optional, Union
+
+import PIL.Image
+import PIL.PngImagePlugin
+import pydantic
+
+from . import _api_client
+from . import types
+
+
+def _resource_name(
+    client: _api_client.ApiClient,
+    resource_name: str,
+    *,
+    collection_identifier: str,
+    collection_hierarchy_depth: int = 2,
+):
+  # pylint: disable=line-too-long
+  """Prepends resource name with project, location, collection_identifier if needed.
+
+  The collection_identifier will only be prepended if it's not present
+  and the prepending won't violate the collection hierarchy depth.
+  When the prepending condition is not met, the input resource_name is
+  returned unchanged.
+
+  Args:
+    client: The API client.
+    resource_name: The user input resource name to be completed.
+    collection_identifier: The collection identifier to be prepended. See
+      collection identifiers in https://google.aip.dev/122.
+    collection_hierarchy_depth: The collection hierarchy depth. Only set this
+      field when the resource has nested collections. For example,
+      `users/vhugo1802/events/birthday-dinner-226`, the collection_identifier
+      is `users` and collection_hierarchy_depth is 4. See nested collections
+      in https://google.aip.dev/122.
+ + Example: + + resource_name = 'cachedContents/123' + client.vertexai = True + client.project = 'bar' + client.location = 'us-west1' + _resource_name(client, 'cachedContents/123', + collection_identifier='cachedContents') + returns: 'projects/bar/locations/us-west1/cachedContents/123' + + Example: + + resource_name = 'projects/foo/locations/us-central1/cachedContents/123' + # resource_name = 'locations/us-central1/cachedContents/123' + client.vertexai = True + client.project = 'bar' + client.location = 'us-west1' + _resource_name(client, resource_name, + collection_identifier='cachedContents') + returns: 'projects/foo/locations/us-central1/cachedContents/123' + + Example: + + resource_name = '123' + # resource_name = 'cachedContents/123' + client.vertexai = False + _resource_name(client, resource_name, + collection_identifier='cachedContents') + returns 'cachedContents/123' + + Example: + resource_name = 'some/wrong/cachedContents/resource/name/123' + resource_prefix = 'cachedContents' + client.vertexai = False + # client.vertexai = True + _resource_name(client, resource_name, + collection_identifier='cachedContents') + returns: 'some/wrong/cachedContents/resource/name/123' + + Returns: + The completed resource name. + """ + should_prepend_collection_identifier = ( + not resource_name.startswith(f'{collection_identifier}/') + # Check if prepending the collection identifier won't violate the + # collection hierarchy depth. + and f'{collection_identifier}/{resource_name}'.count('/') + 1 + == collection_hierarchy_depth + ) + if client.vertexai: + if resource_name.startswith('projects/'): + return resource_name + elif resource_name.startswith('locations/'): + return f'projects/{client.project}/{resource_name}' + elif resource_name.startswith(f'{collection_identifier}/'): + return f'projects/{client.project}/locations/{client.location}/{resource_name}' + elif should_prepend_collection_identifier: + return f'projects/{client.project}/locations/{client.location}/{collection_identifier}/{resource_name}' + else: + return resource_name + else: + if should_prepend_collection_identifier: + return f'{collection_identifier}/{resource_name}' + else: + return resource_name + + +def t_model(client: _api_client.ApiClient, model: str): + if not model: + raise ValueError('model is required.') + if client.vertexai: + if ( + model.startswith('projects/') + or model.startswith('models/') + or model.startswith('publishers/') + ): + return model + elif '/' in model: + publisher, model_id = model.split('/', 1) + return f'publishers/{publisher}/models/{model_id}' + else: + return f'publishers/google/models/{model}' + else: + if model.startswith('models/'): + return model + elif model.startswith('tunedModels/'): + return model + else: + return f'models/{model}' + + +def t_models_url(api_client: _api_client.ApiClient, base_models: bool) -> str: + if api_client.vertexai: + if base_models: + return 'publishers/google/models' + else: + return 'models' + else: + if base_models: + return 'models' + else: + return 'tunedModels' + + +def t_extract_models( + api_client: _api_client.ApiClient, response: dict +) -> list[types.Model]: + if not response: + return [] + elif response.get('models') is not None: + return response.get('models') + elif response.get('tunedModels') is not None: + return response.get('tunedModels') + elif response.get('publisherModels') is not None: + return response.get('publisherModels') + else: + raise ValueError('Cannot determine the models type.') + + +def t_caches_model(api_client: 
_api_client.ApiClient, model: str):
+  model = t_model(api_client, model)
+  if not model:
+    return None
+  if model.startswith('publishers/') and api_client.vertexai:
+    # Vertex AI caches only support model names that start with 'projects/'.
+    return (
+        f'projects/{api_client.project}/locations/{api_client.location}/{model}'
+    )
+  elif model.startswith('models/') and api_client.vertexai:
+    return f'projects/{api_client.project}/locations/{api_client.location}/publishers/google/{model}'
+  else:
+    return model
+
+
+def pil_to_blob(img):
+  bytesio = io.BytesIO()
+  if isinstance(img, PIL.PngImagePlugin.PngImageFile) or img.mode == 'RGBA':
+    img.save(bytesio, format='PNG')
+    mime_type = 'image/png'
+  else:
+    img.save(bytesio, format='JPEG')
+    mime_type = 'image/jpeg'
+  bytesio.seek(0)
+  data = bytesio.read()
+  return types.Blob(mime_type=mime_type, data=data)
+
+
+PartType = Union[types.Part, types.PartDict, str, PIL.Image.Image]
+
+
+def t_part(client: _api_client.ApiClient, part: PartType) -> types.Part:
+  if not part:
+    raise ValueError('content part is required.')
+  if isinstance(part, str):
+    return types.Part(text=part)
+  if isinstance(part, PIL.Image.Image):
+    return types.Part(inline_data=pil_to_blob(part))
+  if isinstance(part, types.File):
+    if not part.uri or not part.mime_type:
+      raise ValueError('file uri and mime_type are required.')
+    return types.Part.from_uri(part.uri, part.mime_type)
+  else:
+    return part
+
+
+def t_parts(
+    client: _api_client.ApiClient, parts: Union[list, PartType]
+) -> list[types.Part]:
+  if parts is None:
+    raise ValueError('content parts are required.')
+  if isinstance(parts, list):
+    return [t_part(client, part) for part in parts]
+  else:
+    return [t_part(client, parts)]
+
+
+def t_image_predictions(
+    client: _api_client.ApiClient,
+    predictions: Optional[Iterable[Mapping[str, Any]]],
+) -> Optional[list[types.GeneratedImage]]:
+  if not predictions:
+    return None
+  images = []
+  for prediction in predictions:
+    if prediction.get('image'):
+      images.append(
+          types.GeneratedImage(
+              image=types.Image(
+                  gcs_uri=prediction['image']['gcsUri'],
+                  image_bytes=prediction['image']['imageBytes'],
+              )
+          )
+      )
+  return images
+
+
+ContentType = Union[types.Content, types.ContentDict, PartType]
+
+
+def t_content(
+    client: _api_client.ApiClient,
+    content: ContentType,
+):
+  if not content:
+    raise ValueError('content is required.')
+  if isinstance(content, types.Content):
+    return content
+  if isinstance(content, dict):
+    return types.Content.model_validate(content)
+  return types.Content(role='user', parts=t_parts(client, content))
+
+
+def t_contents_for_embed(
+    client: _api_client.ApiClient,
+    contents: Union[list[types.Content], list[types.ContentDict], ContentType],
+):
+  if client.vertexai and isinstance(contents, list):
+    # TODO: Assert that only text is supported.
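+    # Illustrative sketch (hypothetical values): for Vertex AI, a list like
+    # ['hello', types.Content(role='user', parts=[types.Part(text='world')])]
+    # is flattened to the text of each entry's first part, i.e.
+    # ['hello', 'world'].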
+    return [t_content(client, content).parts[0].text for content in contents]
+  elif client.vertexai:
+    return [t_content(client, contents).parts[0].text]
+  elif isinstance(contents, list):
+    return [t_content(client, content) for content in contents]
+  else:
+    return [t_content(client, contents)]
+
+
+def t_contents(
+    client: _api_client.ApiClient,
+    contents: Union[list[types.Content], list[types.ContentDict], ContentType],
+):
+  if not contents:
+    raise ValueError('contents are required.')
+  if isinstance(contents, list):
+    return [t_content(client, content) for content in contents]
+  else:
+    return [t_content(client, contents)]
+
+
+def process_schema(
+    data: dict[str, Any], client: Optional[_api_client.ApiClient] = None
+):
+  if isinstance(data, dict):
+    # Iterate over a copy of keys to allow deletion.
+    for key in list(data.keys()):
+      # Only delete 'title' for the Gemini API.
+      if client and not client.vertexai and key == 'title':
+        del data[key]
+      else:
+        process_schema(data[key], client)
+  elif isinstance(data, list):
+    for item in data:
+      process_schema(item, client)
+
+  return data
+
+
+def _build_schema(fname: str, fields_dict: dict[str, Any]) -> dict[str, Any]:
+  parameters = pydantic.create_model(fname, **fields_dict).model_json_schema()
+  defs = parameters.pop('$defs', {})
+
+  for _, value in defs.items():
+    unpack_defs(value, defs)
+
+  unpack_defs(parameters, defs)
+  return parameters['properties']['dummy']
+
+
+def unpack_defs(schema: dict[str, Any], defs: dict[str, Any]):
+  """Unpacks the $defs values in the schema generated by pydantic so they can be understood by the API.
+
+  Example of a schema before and after unpacking:
+  Before:
+
+  `schema`
+
+  {'properties': {
+      'dummy': {
+          'items': {
+              '$ref': '#/$defs/CountryInfo'
+          },
+          'title': 'Dummy',
+          'type': 'array'
+      }
+  },
+  'required': ['dummy'],
+  'title': 'dummy',
+  'type': 'object'}
+
+  `defs`
+
+  {'CountryInfo': {'properties': {'continent': {'title': 'Continent', 'type':
+  'string'}, 'gdp': {'title': 'Gdp', 'type': 'integer'}}, 'required':
+  ['continent', 'gdp'], 'title': 'CountryInfo', 'type': 'object'}}
+
+  After:
+
+  `schema`
+  {'properties': {
+      'continent': {'title': 'Continent', 'type': 'string'},
+      'gdp': {'title': 'Gdp', 'type': 'integer'}
+  },
+  'required': ['continent', 'gdp'],
+  'title': 'CountryInfo',
+  'type': 'object'
+  }
+  """
+  properties = schema.get('properties', None)
+  if properties is None:
+    return
+
+  for name, value in properties.items():
+    ref_key = value.get('$ref', None)
+    if ref_key is not None:
+      ref = defs[ref_key.split('defs/')[-1]]
+      unpack_defs(ref, defs)
+      properties[name] = ref
+      continue
+
+    anyof = value.get('anyOf', None)
+    if anyof is not None:
+      for i, atype in enumerate(anyof):
+        ref_key = atype.get('$ref', None)
+        if ref_key is not None:
+          ref = defs[ref_key.split('defs/')[-1]]
+          unpack_defs(ref, defs)
+          anyof[i] = ref
+      continue
+
+    items = value.get('items', None)
+    if items is not None:
+      ref_key = items.get('$ref', None)
+      if ref_key is not None:
+        ref = defs[ref_key.split('defs/')[-1]]
+        unpack_defs(ref, defs)
+        value['items'] = ref
+      continue
+
+
+def t_schema(
+    client: _api_client.ApiClient, origin: Union[types.SchemaUnionDict, Any]
+) -> Optional[types.Schema]:
+  if not origin:
+    return None
+  if isinstance(origin, dict):
+    return process_schema(origin, client)
+  if isinstance(origin, types.Schema):
+    if dict(origin) == dict(types.Schema()):
+      # The response_schema value was coerced to an empty Schema instance
+      # because it did not adhere to the Schema field annotation.
+      raise ValueError('Unsupported schema type.')
+    schema = process_schema(origin.model_dump(exclude_unset=True), client)
+    return types.Schema.model_validate(schema)
+  if isinstance(origin, GenericAlias):
+    if origin.__origin__ is list:
+      if isinstance(origin.__args__[0], typing.types.UnionType):
+        raise ValueError(f'Unsupported schema type: GenericAlias {origin}')
+      if issubclass(origin.__args__[0], pydantic.BaseModel):
+        # Handle cases where the response schema is `list[pydantic.BaseModel]`.
+        list_schema = _build_schema(
+            'dummy', {'dummy': (origin, pydantic.Field())}
+        )
+        list_schema = process_schema(list_schema, client)
+        return types.Schema.model_validate(list_schema)
+    raise ValueError(f'Unsupported schema type: GenericAlias {origin}')
+  if issubclass(origin, pydantic.BaseModel):
+    schema = process_schema(origin.model_json_schema(), client)
+    return types.Schema.model_validate(schema)
+  raise ValueError(f'Unsupported schema type: {origin}')
+
+
+def t_speech_config(
+    _: _api_client.ApiClient, origin: Union[types.SpeechConfigUnionDict, Any]
+) -> Optional[types.SpeechConfig]:
+  if not origin:
+    return None
+  if isinstance(origin, types.SpeechConfig):
+    return origin
+  if isinstance(origin, str):
+    return types.SpeechConfig(
+        voice_config=types.VoiceConfig(
+            prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name=origin)
+        )
+    )
+  if (
+      isinstance(origin, dict)
+      and 'voice_config' in origin
+      and 'prebuilt_voice_config' in origin['voice_config']
+  ):
+    return types.SpeechConfig(
+        voice_config=types.VoiceConfig(
+            prebuilt_voice_config=types.PrebuiltVoiceConfig(
+                voice_name=origin['voice_config']['prebuilt_voice_config'].get(
+                    'voice_name'
+                )
+            )
+        )
+    )
+  raise ValueError(f'Unsupported speechConfig type: {type(origin)}')
+
+
+def t_tool(client: _api_client.ApiClient, origin) -> Optional[types.Tool]:
+  if not origin:
+    return None
+  if inspect.isfunction(origin) or inspect.ismethod(origin):
+    return types.Tool(
+        function_declarations=[
+            types.FunctionDeclaration.from_callable(client, origin)
+        ]
+    )
+  else:
+    return origin
+
+
+# Only functions are supported for now.
+def t_tools(
+    client: _api_client.ApiClient, origin: list[Any]
+) -> list[types.Tool]:
+  if not origin:
+    return []
+  function_tool = types.Tool(function_declarations=[])
+  tools = []
+  for tool in origin:
+    transformed_tool = t_tool(client, tool)
+    # All functions should be merged into one tool.
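+    # Illustrative sketch (hypothetical callables): t_tools(client,
+    # [get_weather, get_time]) yields a single Tool carrying both function
+    # declarations, while pre-built Tool objects in the input are appended
+    # to the result unchanged.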
+    if transformed_tool.function_declarations:
+      function_tool.function_declarations += (
+          transformed_tool.function_declarations
+      )
+    else:
+      tools.append(transformed_tool)
+  if function_tool.function_declarations:
+    tools.append(function_tool)
+  return tools
+
+
+def t_cached_content_name(client: _api_client.ApiClient, name: str):
+  return _resource_name(client, name, collection_identifier='cachedContents')
+
+
+def t_batch_job_source(client: _api_client.ApiClient, src: str):
+  if src.startswith('gs://'):
+    return types.BatchJobSource(
+        format='jsonl',
+        gcs_uri=[src],
+    )
+  elif src.startswith('bq://'):
+    return types.BatchJobSource(
+        format='bigquery',
+        bigquery_uri=src,
+    )
+  else:
+    raise ValueError(f'Unsupported source: {src}')
+
+
+def t_batch_job_destination(client: _api_client.ApiClient, dest: str):
+  if dest.startswith('gs://'):
+    return types.BatchJobDestination(
+        format='jsonl',
+        gcs_uri=dest,
+    )
+  elif dest.startswith('bq://'):
+    return types.BatchJobDestination(
+        format='bigquery',
+        bigquery_uri=dest,
+    )
+  else:
+    raise ValueError(f'Unsupported destination: {dest}')
+
+
+def t_batch_job_name(client: _api_client.ApiClient, name: str):
+  if not client.vertexai:
+    return name
+
+  pattern = r'^projects/[^/]+/locations/[^/]+/batchPredictionJobs/[^/]+$'
+  if re.match(pattern, name):
+    return name.split('/')[-1]
+  elif name.isdigit():
+    return name
+  else:
+    raise ValueError(f'Invalid batch job name: {name}.')
+
+
+LRO_POLLING_INITIAL_DELAY_SECONDS = 1.0
+LRO_POLLING_MAXIMUM_DELAY_SECONDS = 20.0
+LRO_POLLING_TIMEOUT_SECONDS = 900.0
+LRO_POLLING_MULTIPLIER = 1.5
+
+
+def t_resolve_operation(api_client: _api_client.ApiClient, struct: dict):
+  if (name := struct.get('name')) and '/operations/' in name:
+    operation: dict[str, Any] = struct
+    total_seconds = 0.0
+    delay_seconds = LRO_POLLING_INITIAL_DELAY_SECONDS
+    while not operation.get('done'):
+      if total_seconds > LRO_POLLING_TIMEOUT_SECONDS:
+        raise RuntimeError(f'Operation {name} timed out.\n{operation}')
+      # TODO(b/374433890): Replace with LRO module once it's available.
+      operation: dict[str, Any] = api_client.request(
+          http_method='GET', path=name, request_dict={}
+      )
+      time.sleep(delay_seconds)
+      # Accumulate the time actually slept; 'total_seconds += total_seconds'
+      # would stay at 0.0 forever and never hit the timeout.
+      total_seconds += delay_seconds
+      # Exponential backoff
+      delay_seconds = min(
+          delay_seconds * LRO_POLLING_MULTIPLIER,
+          LRO_POLLING_MAXIMUM_DELAY_SECONDS,
+      )
+    if error := operation.get('error'):
+      raise RuntimeError(
+          f'Operation {name} failed with error: {error}.\n{operation}'
+      )
+    return operation.get('response')
+  else:
+    return struct
+
+
+def t_file_name(
+    api_client: _api_client.ApiClient, name: Union[str, types.File]
+):
+  # Remove the files/ prefix since it's added to the url path.
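+  # Illustrative inputs (hypothetical ids): 'files/abc123', a URI containing
+  # '.../files/abc123', or a types.File whose .name takes either form all
+  # normalize to 'abc123'; a bare id passes through unchanged.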
+  if isinstance(name, types.File):
+    name = name.name
+
+  if name is None:
+    raise ValueError('File name is required.')
+
+  if name.startswith('https://'):
+    suffix = name.split('files/')[1]
+    match = re.match('[a-z0-9]+', suffix)
+    if match is None:
+      raise ValueError(f'Could not extract file name from URI: {name}')
+    name = match.group(0)
+  elif name.startswith('files/'):
+    name = name.split('files/')[1]
+
+  return name
+
+
+def t_tuning_job_status(
+    api_client: _api_client.ApiClient, status: str
+) -> types.JobState:
+  if status == 'STATE_UNSPECIFIED':
+    return 'JOB_STATE_UNSPECIFIED'
+  elif status == 'CREATING':
+    return 'JOB_STATE_RUNNING'
+  elif status == 'ACTIVE':
+    return 'JOB_STATE_SUCCEEDED'
+  elif status == 'FAILED':
+    return 'JOB_STATE_FAILED'
+  else:
+    return status
+
+
+# Some fields don't accept url safe base64 encoding.
+# We shouldn't use this transformer if the backend adheres to Cloud Type
+# format https://cloud.google.com/docs/discovery/type-format.
+# TODO(b/389133914,b/390320301): Remove the hack after the backend fixes
+# the issue.
+def t_bytes(api_client: _api_client.ApiClient, data: bytes) -> str:
+  if not isinstance(data, bytes):
+    return data
+  return base64.b64encode(data).decode('ascii')
diff --git a/.venv/lib/python3.12/site-packages/google/genai/batches.py b/.venv/lib/python3.12/site-packages/google/genai/batches.py
new file mode 100644
index 00000000..6dd5b4c5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/genai/batches.py
@@ -0,0 +1,1293 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Code generated by the Google Gen AI SDK generator DO NOT EDIT.
+
+from typing import Optional, Union
+from urllib.parse import urlencode
+from . import _common
+from . import _extra_utils
+from . import _transformers as t
+from .
import types +from ._api_client import ApiClient +from ._common import get_value_by_path as getv +from ._common import set_value_by_path as setv +from .pagers import AsyncPager, Pager + + +def _BatchJobSource_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['format']) is not None: + raise ValueError('format parameter is not supported in Google AI.') + + if getv(from_object, ['gcs_uri']) is not None: + raise ValueError('gcs_uri parameter is not supported in Google AI.') + + if getv(from_object, ['bigquery_uri']) is not None: + raise ValueError('bigquery_uri parameter is not supported in Google AI.') + + return to_object + + +def _BatchJobSource_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['format']) is not None: + setv(to_object, ['instancesFormat'], getv(from_object, ['format'])) + + if getv(from_object, ['gcs_uri']) is not None: + setv(to_object, ['gcsSource', 'uris'], getv(from_object, ['gcs_uri'])) + + if getv(from_object, ['bigquery_uri']) is not None: + setv( + to_object, + ['bigquerySource', 'inputUri'], + getv(from_object, ['bigquery_uri']), + ) + + return to_object + + +def _BatchJobDestination_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['format']) is not None: + raise ValueError('format parameter is not supported in Google AI.') + + if getv(from_object, ['gcs_uri']) is not None: + raise ValueError('gcs_uri parameter is not supported in Google AI.') + + if getv(from_object, ['bigquery_uri']) is not None: + raise ValueError('bigquery_uri parameter is not supported in Google AI.') + + return to_object + + +def _BatchJobDestination_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['format']) is not None: + setv(to_object, ['predictionsFormat'], getv(from_object, ['format'])) + + if getv(from_object, ['gcs_uri']) is not None: + setv( + to_object, + ['gcsDestination', 'outputUriPrefix'], + getv(from_object, ['gcs_uri']), + ) + + if getv(from_object, ['bigquery_uri']) is not None: + setv( + to_object, + ['bigqueryDestination', 'outputUri'], + getv(from_object, ['bigquery_uri']), + ) + + return to_object + + +def _CreateBatchJobConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['display_name']) is not None: + setv(parent_object, ['displayName'], getv(from_object, ['display_name'])) + + if getv(from_object, ['dest']) is not None: + raise ValueError('dest parameter is not supported in Google AI.') + + return to_object + + +def _CreateBatchJobConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['display_name']) is not None: + setv(parent_object, ['displayName'], getv(from_object, ['display_name'])) + + if getv(from_object, ['dest']) is not None: + setv( + parent_object, + ['outputConfig'], + 
_BatchJobDestination_to_vertex( + api_client, + t.t_batch_job_destination(api_client, getv(from_object, ['dest'])), + to_object, + ), + ) + + return to_object + + +def _CreateBatchJobParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + raise ValueError('model parameter is not supported in Google AI.') + + if getv(from_object, ['src']) is not None: + raise ValueError('src parameter is not supported in Google AI.') + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CreateBatchJobConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _CreateBatchJobParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['src']) is not None: + setv( + to_object, + ['inputConfig'], + _BatchJobSource_to_vertex( + api_client, + t.t_batch_job_source(api_client, getv(from_object, ['src'])), + to_object, + ), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CreateBatchJobConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _GetBatchJobConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _GetBatchJobConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _GetBatchJobParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + raise ValueError('name parameter is not supported in Google AI.') + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _GetBatchJobConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _GetBatchJobParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_batch_job_name(api_client, getv(from_object, ['name'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _GetBatchJobConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _CancelBatchJobConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _CancelBatchJobConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + 
to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _CancelBatchJobParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + raise ValueError('name parameter is not supported in Google AI.') + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CancelBatchJobConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _CancelBatchJobParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_batch_job_name(api_client, getv(from_object, ['name'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CancelBatchJobConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _ListBatchJobConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['page_size']) is not None: + setv( + parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size']) + ) + + if getv(from_object, ['page_token']) is not None: + setv( + parent_object, + ['_query', 'pageToken'], + getv(from_object, ['page_token']), + ) + + if getv(from_object, ['filter']) is not None: + raise ValueError('filter parameter is not supported in Google AI.') + + return to_object + + +def _ListBatchJobConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['page_size']) is not None: + setv( + parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size']) + ) + + if getv(from_object, ['page_token']) is not None: + setv( + parent_object, + ['_query', 'pageToken'], + getv(from_object, ['page_token']), + ) + + if getv(from_object, ['filter']) is not None: + setv(parent_object, ['_query', 'filter'], getv(from_object, ['filter'])) + + return to_object + + +def _ListBatchJobParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['config']) is not None: + raise ValueError('config parameter is not supported in Google AI.') + + return to_object + + +def _ListBatchJobParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _ListBatchJobConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _DeleteBatchJobParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + raise ValueError('name parameter is not supported in Google AI.') + + 
return to_object + + +def _DeleteBatchJobParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_batch_job_name(api_client, getv(from_object, ['name'])), + ) + + return to_object + + +def _JobError_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _JobError_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['details']) is not None: + setv(to_object, ['details'], getv(from_object, ['details'])) + + if getv(from_object, ['code']) is not None: + setv(to_object, ['code'], getv(from_object, ['code'])) + + if getv(from_object, ['message']) is not None: + setv(to_object, ['message'], getv(from_object, ['message'])) + + return to_object + + +def _BatchJobSource_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _BatchJobSource_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['instancesFormat']) is not None: + setv(to_object, ['format'], getv(from_object, ['instancesFormat'])) + + if getv(from_object, ['gcsSource', 'uris']) is not None: + setv(to_object, ['gcs_uri'], getv(from_object, ['gcsSource', 'uris'])) + + if getv(from_object, ['bigquerySource', 'inputUri']) is not None: + setv( + to_object, + ['bigquery_uri'], + getv(from_object, ['bigquerySource', 'inputUri']), + ) + + return to_object + + +def _BatchJobDestination_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _BatchJobDestination_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['predictionsFormat']) is not None: + setv(to_object, ['format'], getv(from_object, ['predictionsFormat'])) + + if getv(from_object, ['gcsDestination', 'outputUriPrefix']) is not None: + setv( + to_object, + ['gcs_uri'], + getv(from_object, ['gcsDestination', 'outputUriPrefix']), + ) + + if getv(from_object, ['bigqueryDestination', 'outputUri']) is not None: + setv( + to_object, + ['bigquery_uri'], + getv(from_object, ['bigqueryDestination', 'outputUri']), + ) + + return to_object + + +def _BatchJob_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _BatchJob_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['displayName']) is not None: + setv(to_object, ['display_name'], getv(from_object, ['displayName'])) + + if getv(from_object, ['state']) is not None: + setv(to_object, ['state'], getv(from_object, ['state'])) + + if getv(from_object, ['error']) is not None: + setv( + to_object, + ['error'], + _JobError_from_vertex( + api_client, getv(from_object, ['error']), to_object + ), + ) + + if getv(from_object, ['createTime']) is 
not None: + setv(to_object, ['create_time'], getv(from_object, ['createTime'])) + + if getv(from_object, ['startTime']) is not None: + setv(to_object, ['start_time'], getv(from_object, ['startTime'])) + + if getv(from_object, ['endTime']) is not None: + setv(to_object, ['end_time'], getv(from_object, ['endTime'])) + + if getv(from_object, ['updateTime']) is not None: + setv(to_object, ['update_time'], getv(from_object, ['updateTime'])) + + if getv(from_object, ['model']) is not None: + setv(to_object, ['model'], getv(from_object, ['model'])) + + if getv(from_object, ['inputConfig']) is not None: + setv( + to_object, + ['src'], + _BatchJobSource_from_vertex( + api_client, getv(from_object, ['inputConfig']), to_object + ), + ) + + if getv(from_object, ['outputConfig']) is not None: + setv( + to_object, + ['dest'], + _BatchJobDestination_from_vertex( + api_client, getv(from_object, ['outputConfig']), to_object + ), + ) + + return to_object + + +def _ListBatchJobResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['nextPageToken']) is not None: + setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken'])) + + return to_object + + +def _ListBatchJobResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['nextPageToken']) is not None: + setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken'])) + + if getv(from_object, ['batchPredictionJobs']) is not None: + setv( + to_object, + ['batch_jobs'], + [ + _BatchJob_from_vertex(api_client, item, to_object) + for item in getv(from_object, ['batchPredictionJobs']) + ], + ) + + return to_object + + +def _DeleteResourceJob_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _DeleteResourceJob_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['done']) is not None: + setv(to_object, ['done'], getv(from_object, ['done'])) + + if getv(from_object, ['error']) is not None: + setv( + to_object, + ['error'], + _JobError_from_vertex( + api_client, getv(from_object, ['error']), to_object + ), + ) + + return to_object + + +class Batches(_common.BaseModule): + + def _create( + self, + *, + model: str, + src: str, + config: Optional[types.CreateBatchJobConfigOrDict] = None, + ) -> types.BatchJob: + parameter_model = types._CreateBatchJobParameters( + model=model, + src=src, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _CreateBatchJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'batchPredictionJobs'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
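+      # Illustration of the hack (hypothetical values): request_dict may look
+      # like {'displayName': 'job', 'inputConfig': {...},
+      # 'config': {'httpOptions': {...}}}; the SDK-level 'config' entry is
+      # popped so only wire-format fields are serialized, and 'httpOptions'
+      # is forwarded to the transport separately.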
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _BatchJob_from_vertex(self._api_client, response_dict) + else: + response_dict = _BatchJob_from_mldev(self._api_client, response_dict) + + return_value = types.BatchJob._from_response(response_dict, parameter_model) + self._api_client._verify_response(return_value) + return return_value + + def get( + self, *, name: str, config: Optional[types.GetBatchJobConfigOrDict] = None + ) -> types.BatchJob: + """Gets a batch job. + + Args: + name (str): A fully-qualified BatchJob resource name or ID. + Example: "projects/.../locations/.../batchPredictionJobs/456" or "456" + when project and location are initialized in the client. + + Returns: + A BatchJob object that contains details about the batch job. + + Usage: + + .. code-block:: python + + batch_job = client.batches.get(name='123456789') + print(f"Batch job: {batch_job.name}, state {batch_job.state}") + """ + + parameter_model = types._GetBatchJobParameters( + name=name, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _GetBatchJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _BatchJob_from_vertex(self._api_client, response_dict) + else: + response_dict = _BatchJob_from_mldev(self._api_client, response_dict) + + return_value = types.BatchJob._from_response(response_dict, parameter_model) + self._api_client._verify_response(return_value) + return return_value + + def cancel( + self, + *, + name: str, + config: Optional[types.CancelBatchJobConfigOrDict] = None, + ) -> None: + parameter_model = types._CancelBatchJobParameters( + name=name, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _CancelBatchJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'batchPredictionJobs/{name}:cancel'.format_map( + request_dict.get('_url') + ) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'post', path, request_dict, http_options + ) + + def _list( + self, *, config: types.ListBatchJobConfigOrDict + ) -> types.ListBatchJobResponse: + parameter_model = types._ListBatchJobParameters( + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _ListBatchJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'batchPredictionJobs'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _ListBatchJobResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _ListBatchJobResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.ListBatchJobResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def delete(self, *, name: str) -> types.DeleteResourceJob: + """Deletes a batch job. + + Args: + name (str): A fully-qualified BatchJob resource name or ID. + Example: "projects/.../locations/.../batchPredictionJobs/456" or "456" + when project and location are initialized in the client. + + Returns: + A DeleteResourceJob object that shows the status of the deletion. + + Usage: + + .. code-block:: python + + client.batches.delete(name='123456789') + """ + + parameter_model = types._DeleteBatchJobParameters( + name=name, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _DeleteBatchJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'delete', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _DeleteResourceJob_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _DeleteResourceJob_from_mldev( + self._api_client, response_dict + ) + + return_value = types.DeleteResourceJob._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def create( + self, + *, + model: str, + src: str, + config: Optional[types.CreateBatchJobConfigOrDict] = None, + ) -> types.BatchJob: + """Creates a batch job. 
+ + Args: + model (str): The model to use for the batch job. + src (str): The source of the batch job. Currently supports GCS URI(-s) or + BigQuery URI. Example: "gs://path/to/input/data" or + "bq://projectId.bqDatasetId.bqTableId". + config (CreateBatchJobConfig): Optional configuration for the batch job. + + Returns: + A BatchJob object that contains details about the batch job. + + Usage: + + .. code-block:: python + + batch_job = client.batches.create( + model="gemini-1.5-flash", + src="gs://path/to/input/data", + ) + print(batch_job.state) + """ + config = _extra_utils.format_destination(src, config) + return self._create(model=model, src=src, config=config) + + def list( + self, *, config: Optional[types.ListBatchJobConfigOrDict] = None + ) -> Pager[types.BatchJob]: + """Lists batch jobs. + + Args: + config (ListBatchJobConfig): Optional configuration for the list request. + + Returns: + A Pager object that contains one page of batch jobs. When iterating over + the pager, it automatically fetches the next page if there are more. + + Usage: + + .. code-block:: python + + batch_jobs = client.batches.list(config={"page_size": 10}) + for batch_job in batch_jobs: + print(f"Batch job: {batch_job.name}, state {batch_job.state}") + """ + return Pager( + 'batch_jobs', + self._list, + self._list(config=config), + config, + ) + + +class AsyncBatches(_common.BaseModule): + + async def _create( + self, + *, + model: str, + src: str, + config: Optional[types.CreateBatchJobConfigOrDict] = None, + ) -> types.BatchJob: + parameter_model = types._CreateBatchJobParameters( + model=model, + src=src, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _CreateBatchJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'batchPredictionJobs'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _BatchJob_from_vertex(self._api_client, response_dict) + else: + response_dict = _BatchJob_from_mldev(self._api_client, response_dict) + + return_value = types.BatchJob._from_response(response_dict, parameter_model) + self._api_client._verify_response(return_value) + return return_value + + async def get( + self, *, name: str, config: Optional[types.GetBatchJobConfigOrDict] = None + ) -> types.BatchJob: + """Gets a batch job. + + Args: + name (str): A fully-qualified BatchJob resource name or ID. + Example: "projects/.../locations/.../batchPredictionJobs/456" or "456" + when project and location are initialized in the client. + + Returns: + A BatchJob object that contains details about the batch job. + + Usage: + + .. 
code-block:: python + + batch_job = await client.aio.batches.get(name='123456789') + print(f"Batch job: {batch_job.name}, state {batch_job.state}") + """ + + parameter_model = types._GetBatchJobParameters( + name=name, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _GetBatchJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _BatchJob_from_vertex(self._api_client, response_dict) + else: + response_dict = _BatchJob_from_mldev(self._api_client, response_dict) + + return_value = types.BatchJob._from_response(response_dict, parameter_model) + self._api_client._verify_response(return_value) + return return_value + + async def cancel( + self, + *, + name: str, + config: Optional[types.CancelBatchJobConfigOrDict] = None, + ) -> None: + parameter_model = types._CancelBatchJobParameters( + name=name, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _CancelBatchJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'batchPredictionJobs/{name}:cancel'.format_map( + request_dict.get('_url') + ) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + async def _list( + self, *, config: types.ListBatchJobConfigOrDict + ) -> types.ListBatchJobResponse: + parameter_model = types._ListBatchJobParameters( + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _ListBatchJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'batchPredictionJobs'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
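+ # Editor's note on the two lines below (the same "pop config" pattern + # recurs throughout this module): the generated converter leaves the + # request-level config inside request_dict, so it is popped back out here, + # its httpOptions entry (if any) is forwarded separately to the transport + # call, and the rest of request_dict becomes the JSON request body.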
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _ListBatchJobResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _ListBatchJobResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.ListBatchJobResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def delete(self, *, name: str) -> types.DeleteResourceJob: + """Deletes a batch job. + + Args: + name (str): A fully-qualified BatchJob resource name or ID. + Example: "projects/.../locations/.../batchPredictionJobs/456" or "456" + when project and location are initialized in the client. + + Returns: + A DeleteResourceJob object that shows the status of the deletion. + + Usage: + + .. code-block:: python + + client.batches.delete(name='123456789') + """ + + parameter_model = types._DeleteBatchJobParameters( + name=name, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _DeleteBatchJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'delete', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _DeleteResourceJob_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _DeleteResourceJob_from_mldev( + self._api_client, response_dict + ) + + return_value = types.DeleteResourceJob._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def create( + self, + *, + model: str, + src: str, + config: Optional[types.CreateBatchJobConfigOrDict] = None, + ) -> types.BatchJob: + """Creates a batch job asynchronously. + + Args: + model (str): The model to use for the batch job. + src (str): The source of the batch job. Currently supports GCS URI(-s) or + BigQuery URI. Example: "gs://path/to/input/data" or + "bq://projectId.bqDatasetId.bqTableId". + config (CreateBatchJobConfig): Optional configuration for the batch job. + + Returns: + A BatchJob object that contains details about the batch job. + + Usage: + + .. code-block:: python + + batch_job = await client.aio.batches.create( + model="gemini-1.5-flash", + src="gs://path/to/input/data", + ) + """ + config = _extra_utils.format_destination(src, config) + return await self._create(model=model, src=src, config=config) + + async def list( + self, *, config: Optional[types.ListBatchJobConfigOrDict] = None + ) -> AsyncPager[types.BatchJob]: + """Lists batch jobs asynchronously. + + Args: + config (ListBatchJobConfig): Optional configuration for the list request. 
+ + Returns: + An AsyncPager object that contains one page of batch jobs. When iterating over + the pager, it automatically fetches the next page if there are more. + + Usage: + + .. code-block:: python + + batch_jobs_pager = await client.aio.batches.list(config={'page_size': 5}) + print(f"current page: {batch_jobs_pager.page}") + await batch_jobs_pager.next_page() + print(f"next page: {batch_jobs_pager.page}") + """ + return AsyncPager( + 'batch_jobs', + self._list, + await self._list(config=config), + config, + ) diff --git a/.venv/lib/python3.12/site-packages/google/genai/caches.py b/.venv/lib/python3.12/site-packages/google/genai/caches.py new file mode 100644 index 00000000..6d0e576b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/genai/caches.py @@ -0,0 +1,1856 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Code generated by the Google Gen AI SDK generator DO NOT EDIT. + +from typing import Optional, Union +from urllib.parse import urlencode +from . import _common +from . import _transformers as t +from . import types +from ._api_client import ApiClient +from ._common import get_value_by_path as getv +from ._common import set_value_by_path as setv +from .pagers import AsyncPager, Pager + + +def _Part_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['video_metadata']) is not None: + raise ValueError('video_metadata parameter is not supported in Google AI.') + + if getv(from_object, ['thought']) is not None: + setv(to_object, ['thought'], getv(from_object, ['thought'])) + + if getv(from_object, ['code_execution_result']) is not None: + setv( + to_object, + ['codeExecutionResult'], + getv(from_object, ['code_execution_result']), + ) + + if getv(from_object, ['executable_code']) is not None: + setv(to_object, ['executableCode'], getv(from_object, ['executable_code'])) + + if getv(from_object, ['file_data']) is not None: + setv(to_object, ['fileData'], getv(from_object, ['file_data'])) + + if getv(from_object, ['function_call']) is not None: + setv(to_object, ['functionCall'], getv(from_object, ['function_call'])) + + if getv(from_object, ['function_response']) is not None: + setv( + to_object, + ['functionResponse'], + getv(from_object, ['function_response']), + ) + + if getv(from_object, ['inline_data']) is not None: + setv(to_object, ['inlineData'], getv(from_object, ['inline_data'])) + + if getv(from_object, ['text']) is not None: + setv(to_object, ['text'], getv(from_object, ['text'])) + + return to_object + + +def _Part_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['video_metadata']) is not None: + setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata'])) + + if getv(from_object, ['thought']) is not None: + setv(to_object, ['thought'], getv(from_object, ['thought'])) + + if getv(from_object, ['code_execution_result']) is not 
None: + setv( + to_object, + ['codeExecutionResult'], + getv(from_object, ['code_execution_result']), + ) + + if getv(from_object, ['executable_code']) is not None: + setv(to_object, ['executableCode'], getv(from_object, ['executable_code'])) + + if getv(from_object, ['file_data']) is not None: + setv(to_object, ['fileData'], getv(from_object, ['file_data'])) + + if getv(from_object, ['function_call']) is not None: + setv(to_object, ['functionCall'], getv(from_object, ['function_call'])) + + if getv(from_object, ['function_response']) is not None: + setv( + to_object, + ['functionResponse'], + getv(from_object, ['function_response']), + ) + + if getv(from_object, ['inline_data']) is not None: + setv(to_object, ['inlineData'], getv(from_object, ['inline_data'])) + + if getv(from_object, ['text']) is not None: + setv(to_object, ['text'], getv(from_object, ['text'])) + + return to_object + + +def _Content_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['parts']) is not None: + setv( + to_object, + ['parts'], + [ + _Part_to_mldev(api_client, item, to_object) + for item in getv(from_object, ['parts']) + ], + ) + + if getv(from_object, ['role']) is not None: + setv(to_object, ['role'], getv(from_object, ['role'])) + + return to_object + + +def _Content_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['parts']) is not None: + setv( + to_object, + ['parts'], + [ + _Part_to_vertex(api_client, item, to_object) + for item in getv(from_object, ['parts']) + ], + ) + + if getv(from_object, ['role']) is not None: + setv(to_object, ['role'], getv(from_object, ['role'])) + + return to_object + + +def _Schema_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['min_items']) is not None: + raise ValueError('min_items parameter is not supported in Google AI.') + + if getv(from_object, ['example']) is not None: + raise ValueError('example parameter is not supported in Google AI.') + + if getv(from_object, ['property_ordering']) is not None: + raise ValueError( + 'property_ordering parameter is not supported in Google AI.' 
+ ) + + if getv(from_object, ['pattern']) is not None: + raise ValueError('pattern parameter is not supported in Google AI.') + + if getv(from_object, ['minimum']) is not None: + raise ValueError('minimum parameter is not supported in Google AI.') + + if getv(from_object, ['default']) is not None: + raise ValueError('default parameter is not supported in Google AI.') + + if getv(from_object, ['any_of']) is not None: + raise ValueError('any_of parameter is not supported in Google AI.') + + if getv(from_object, ['max_length']) is not None: + raise ValueError('max_length parameter is not supported in Google AI.') + + if getv(from_object, ['title']) is not None: + raise ValueError('title parameter is not supported in Google AI.') + + if getv(from_object, ['min_length']) is not None: + raise ValueError('min_length parameter is not supported in Google AI.') + + if getv(from_object, ['min_properties']) is not None: + raise ValueError('min_properties parameter is not supported in Google AI.') + + if getv(from_object, ['max_items']) is not None: + raise ValueError('max_items parameter is not supported in Google AI.') + + if getv(from_object, ['maximum']) is not None: + raise ValueError('maximum parameter is not supported in Google AI.') + + if getv(from_object, ['nullable']) is not None: + raise ValueError('nullable parameter is not supported in Google AI.') + + if getv(from_object, ['max_properties']) is not None: + raise ValueError('max_properties parameter is not supported in Google AI.') + + if getv(from_object, ['type']) is not None: + setv(to_object, ['type'], getv(from_object, ['type'])) + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['enum']) is not None: + setv(to_object, ['enum'], getv(from_object, ['enum'])) + + if getv(from_object, ['format']) is not None: + setv(to_object, ['format'], getv(from_object, ['format'])) + + if getv(from_object, ['items']) is not None: + setv(to_object, ['items'], getv(from_object, ['items'])) + + if getv(from_object, ['properties']) is not None: + setv(to_object, ['properties'], getv(from_object, ['properties'])) + + if getv(from_object, ['required']) is not None: + setv(to_object, ['required'], getv(from_object, ['required'])) + + return to_object + + +def _Schema_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['min_items']) is not None: + setv(to_object, ['minItems'], getv(from_object, ['min_items'])) + + if getv(from_object, ['example']) is not None: + setv(to_object, ['example'], getv(from_object, ['example'])) + + if getv(from_object, ['property_ordering']) is not None: + setv( + to_object, + ['propertyOrdering'], + getv(from_object, ['property_ordering']), + ) + + if getv(from_object, ['pattern']) is not None: + setv(to_object, ['pattern'], getv(from_object, ['pattern'])) + + if getv(from_object, ['minimum']) is not None: + setv(to_object, ['minimum'], getv(from_object, ['minimum'])) + + if getv(from_object, ['default']) is not None: + setv(to_object, ['default'], getv(from_object, ['default'])) + + if getv(from_object, ['any_of']) is not None: + setv(to_object, ['anyOf'], getv(from_object, ['any_of'])) + + if getv(from_object, ['max_length']) is not None: + setv(to_object, ['maxLength'], getv(from_object, ['max_length'])) + + if getv(from_object, ['title']) is not None: + setv(to_object, ['title'], getv(from_object, ['title'])) + + if 
getv(from_object, ['min_length']) is not None: + setv(to_object, ['minLength'], getv(from_object, ['min_length'])) + + if getv(from_object, ['min_properties']) is not None: + setv(to_object, ['minProperties'], getv(from_object, ['min_properties'])) + + if getv(from_object, ['max_items']) is not None: + setv(to_object, ['maxItems'], getv(from_object, ['max_items'])) + + if getv(from_object, ['maximum']) is not None: + setv(to_object, ['maximum'], getv(from_object, ['maximum'])) + + if getv(from_object, ['nullable']) is not None: + setv(to_object, ['nullable'], getv(from_object, ['nullable'])) + + if getv(from_object, ['max_properties']) is not None: + setv(to_object, ['maxProperties'], getv(from_object, ['max_properties'])) + + if getv(from_object, ['type']) is not None: + setv(to_object, ['type'], getv(from_object, ['type'])) + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['enum']) is not None: + setv(to_object, ['enum'], getv(from_object, ['enum'])) + + if getv(from_object, ['format']) is not None: + setv(to_object, ['format'], getv(from_object, ['format'])) + + if getv(from_object, ['items']) is not None: + setv(to_object, ['items'], getv(from_object, ['items'])) + + if getv(from_object, ['properties']) is not None: + setv(to_object, ['properties'], getv(from_object, ['properties'])) + + if getv(from_object, ['required']) is not None: + setv(to_object, ['required'], getv(from_object, ['required'])) + + return to_object + + +def _FunctionDeclaration_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['response']) is not None: + raise ValueError('response parameter is not supported in Google AI.') + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['parameters']) is not None: + setv(to_object, ['parameters'], getv(from_object, ['parameters'])) + + return to_object + + +def _FunctionDeclaration_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['response']) is not None: + setv( + to_object, + ['response'], + _Schema_to_vertex( + api_client, getv(from_object, ['response']), to_object + ), + ) + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['parameters']) is not None: + setv(to_object, ['parameters'], getv(from_object, ['parameters'])) + + return to_object + + +def _GoogleSearch_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _GoogleSearch_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _DynamicRetrievalConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['mode']) is not None: + setv(to_object, ['mode'], getv(from_object, ['mode'])) + + if 
getv(from_object, ['dynamic_threshold']) is not None: + setv( + to_object, + ['dynamicThreshold'], + getv(from_object, ['dynamic_threshold']), + ) + + return to_object + + +def _DynamicRetrievalConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['mode']) is not None: + setv(to_object, ['mode'], getv(from_object, ['mode'])) + + if getv(from_object, ['dynamic_threshold']) is not None: + setv( + to_object, + ['dynamicThreshold'], + getv(from_object, ['dynamic_threshold']), + ) + + return to_object + + +def _GoogleSearchRetrieval_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['dynamic_retrieval_config']) is not None: + setv( + to_object, + ['dynamicRetrievalConfig'], + _DynamicRetrievalConfig_to_mldev( + api_client, + getv(from_object, ['dynamic_retrieval_config']), + to_object, + ), + ) + + return to_object + + +def _GoogleSearchRetrieval_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['dynamic_retrieval_config']) is not None: + setv( + to_object, + ['dynamicRetrievalConfig'], + _DynamicRetrievalConfig_to_vertex( + api_client, + getv(from_object, ['dynamic_retrieval_config']), + to_object, + ), + ) + + return to_object + + +def _Tool_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['function_declarations']) is not None: + setv( + to_object, + ['functionDeclarations'], + [ + _FunctionDeclaration_to_mldev(api_client, item, to_object) + for item in getv(from_object, ['function_declarations']) + ], + ) + + if getv(from_object, ['retrieval']) is not None: + raise ValueError('retrieval parameter is not supported in Google AI.') + + if getv(from_object, ['google_search']) is not None: + setv( + to_object, + ['googleSearch'], + _GoogleSearch_to_mldev( + api_client, getv(from_object, ['google_search']), to_object + ), + ) + + if getv(from_object, ['google_search_retrieval']) is not None: + setv( + to_object, + ['googleSearchRetrieval'], + _GoogleSearchRetrieval_to_mldev( + api_client, + getv(from_object, ['google_search_retrieval']), + to_object, + ), + ) + + if getv(from_object, ['code_execution']) is not None: + setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) + + return to_object + + +def _Tool_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['function_declarations']) is not None: + setv( + to_object, + ['functionDeclarations'], + [ + _FunctionDeclaration_to_vertex(api_client, item, to_object) + for item in getv(from_object, ['function_declarations']) + ], + ) + + if getv(from_object, ['retrieval']) is not None: + setv(to_object, ['retrieval'], getv(from_object, ['retrieval'])) + + if getv(from_object, ['google_search']) is not None: + setv( + to_object, + ['googleSearch'], + _GoogleSearch_to_vertex( + api_client, getv(from_object, ['google_search']), to_object + ), + ) + + if getv(from_object, ['google_search_retrieval']) is not None: + setv( + to_object, + ['googleSearchRetrieval'], + _GoogleSearchRetrieval_to_vertex( + api_client, + getv(from_object, ['google_search_retrieval']), + to_object, + ), + ) + + if getv(from_object, ['code_execution']) 
is not None: + setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) + + return to_object + + +def _FunctionCallingConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['mode']) is not None: + setv(to_object, ['mode'], getv(from_object, ['mode'])) + + if getv(from_object, ['allowed_function_names']) is not None: + setv( + to_object, + ['allowedFunctionNames'], + getv(from_object, ['allowed_function_names']), + ) + + return to_object + + +def _FunctionCallingConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['mode']) is not None: + setv(to_object, ['mode'], getv(from_object, ['mode'])) + + if getv(from_object, ['allowed_function_names']) is not None: + setv( + to_object, + ['allowedFunctionNames'], + getv(from_object, ['allowed_function_names']), + ) + + return to_object + + +def _ToolConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['function_calling_config']) is not None: + setv( + to_object, + ['functionCallingConfig'], + _FunctionCallingConfig_to_mldev( + api_client, + getv(from_object, ['function_calling_config']), + to_object, + ), + ) + + return to_object + + +def _ToolConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['function_calling_config']) is not None: + setv( + to_object, + ['functionCallingConfig'], + _FunctionCallingConfig_to_vertex( + api_client, + getv(from_object, ['function_calling_config']), + to_object, + ), + ) + + return to_object + + +def _CreateCachedContentConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['ttl']) is not None: + setv(parent_object, ['ttl'], getv(from_object, ['ttl'])) + + if getv(from_object, ['expire_time']) is not None: + setv(parent_object, ['expireTime'], getv(from_object, ['expire_time'])) + + if getv(from_object, ['display_name']) is not None: + setv(parent_object, ['displayName'], getv(from_object, ['display_name'])) + + if getv(from_object, ['contents']) is not None: + setv( + parent_object, + ['contents'], + [ + _Content_to_mldev(api_client, item, to_object) + for item in t.t_contents( + api_client, getv(from_object, ['contents']) + ) + ], + ) + + if getv(from_object, ['system_instruction']) is not None: + setv( + parent_object, + ['systemInstruction'], + _Content_to_mldev( + api_client, + t.t_content(api_client, getv(from_object, ['system_instruction'])), + to_object, + ), + ) + + if getv(from_object, ['tools']) is not None: + setv( + parent_object, + ['tools'], + [ + _Tool_to_mldev(api_client, item, to_object) + for item in getv(from_object, ['tools']) + ], + ) + + if getv(from_object, ['tool_config']) is not None: + setv( + parent_object, + ['toolConfig'], + _ToolConfig_to_mldev( + api_client, getv(from_object, ['tool_config']), to_object + ), + ) + + return to_object + + +def _CreateCachedContentConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if 
getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['ttl']) is not None: + setv(parent_object, ['ttl'], getv(from_object, ['ttl'])) + + if getv(from_object, ['expire_time']) is not None: + setv(parent_object, ['expireTime'], getv(from_object, ['expire_time'])) + + if getv(from_object, ['display_name']) is not None: + setv(parent_object, ['displayName'], getv(from_object, ['display_name'])) + + if getv(from_object, ['contents']) is not None: + setv( + parent_object, + ['contents'], + [ + _Content_to_vertex(api_client, item, to_object) + for item in t.t_contents( + api_client, getv(from_object, ['contents']) + ) + ], + ) + + if getv(from_object, ['system_instruction']) is not None: + setv( + parent_object, + ['systemInstruction'], + _Content_to_vertex( + api_client, + t.t_content(api_client, getv(from_object, ['system_instruction'])), + to_object, + ), + ) + + if getv(from_object, ['tools']) is not None: + setv( + parent_object, + ['tools'], + [ + _Tool_to_vertex(api_client, item, to_object) + for item in getv(from_object, ['tools']) + ], + ) + + if getv(from_object, ['tool_config']) is not None: + setv( + parent_object, + ['toolConfig'], + _ToolConfig_to_vertex( + api_client, getv(from_object, ['tool_config']), to_object + ), + ) + + return to_object + + +def _CreateCachedContentParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['model'], + t.t_caches_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CreateCachedContentConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _CreateCachedContentParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['model'], + t.t_caches_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CreateCachedContentConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _GetCachedContentConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _GetCachedContentConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _GetCachedContentParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_cached_content_name(api_client, getv(from_object, ['name'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _GetCachedContentConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + 
return to_object + + +def _GetCachedContentParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_cached_content_name(api_client, getv(from_object, ['name'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _GetCachedContentConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _DeleteCachedContentConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _DeleteCachedContentConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _DeleteCachedContentParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_cached_content_name(api_client, getv(from_object, ['name'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _DeleteCachedContentConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _DeleteCachedContentParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_cached_content_name(api_client, getv(from_object, ['name'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _DeleteCachedContentConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _UpdateCachedContentConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['ttl']) is not None: + setv(parent_object, ['ttl'], getv(from_object, ['ttl'])) + + if getv(from_object, ['expire_time']) is not None: + setv(parent_object, ['expireTime'], getv(from_object, ['expire_time'])) + + return to_object + + +def _UpdateCachedContentConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['ttl']) is not None: + setv(parent_object, ['ttl'], getv(from_object, ['ttl'])) + + if getv(from_object, ['expire_time']) is not None: + setv(parent_object, ['expireTime'], getv(from_object, ['expire_time'])) + + return to_object + + +def _UpdateCachedContentParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, 
['name']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_cached_content_name(api_client, getv(from_object, ['name'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _UpdateCachedContentConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _UpdateCachedContentParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_cached_content_name(api_client, getv(from_object, ['name'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _UpdateCachedContentConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _ListCachedContentsConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['page_size']) is not None: + setv( + parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size']) + ) + + if getv(from_object, ['page_token']) is not None: + setv( + parent_object, + ['_query', 'pageToken'], + getv(from_object, ['page_token']), + ) + + return to_object + + +def _ListCachedContentsConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['page_size']) is not None: + setv( + parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size']) + ) + + if getv(from_object, ['page_token']) is not None: + setv( + parent_object, + ['_query', 'pageToken'], + getv(from_object, ['page_token']), + ) + + return to_object + + +def _ListCachedContentsParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _ListCachedContentsConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _ListCachedContentsParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _ListCachedContentsConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _CachedContent_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['displayName']) is not None: + setv(to_object, ['display_name'], getv(from_object, ['displayName'])) + + if getv(from_object, ['model']) is not None: + setv(to_object, ['model'], getv(from_object, ['model'])) + + if getv(from_object, ['createTime']) is not None: + setv(to_object, ['create_time'], getv(from_object, ['createTime'])) + + if getv(from_object, ['updateTime']) is not None: + setv(to_object, ['update_time'], getv(from_object, ['updateTime'])) + + if getv(from_object, ['expireTime']) is not None: + setv(to_object, ['expire_time'], getv(from_object, ['expireTime'])) + + if getv(from_object, ['usageMetadata']) is not None: + setv(to_object, ['usage_metadata'], 
getv(from_object, ['usageMetadata'])) + + return to_object + + +def _CachedContent_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['displayName']) is not None: + setv(to_object, ['display_name'], getv(from_object, ['displayName'])) + + if getv(from_object, ['model']) is not None: + setv(to_object, ['model'], getv(from_object, ['model'])) + + if getv(from_object, ['createTime']) is not None: + setv(to_object, ['create_time'], getv(from_object, ['createTime'])) + + if getv(from_object, ['updateTime']) is not None: + setv(to_object, ['update_time'], getv(from_object, ['updateTime'])) + + if getv(from_object, ['expireTime']) is not None: + setv(to_object, ['expire_time'], getv(from_object, ['expireTime'])) + + if getv(from_object, ['usageMetadata']) is not None: + setv(to_object, ['usage_metadata'], getv(from_object, ['usageMetadata'])) + + return to_object + + +def _DeleteCachedContentResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _DeleteCachedContentResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _ListCachedContentsResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['nextPageToken']) is not None: + setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken'])) + + if getv(from_object, ['cachedContents']) is not None: + setv( + to_object, + ['cached_contents'], + [ + _CachedContent_from_mldev(api_client, item, to_object) + for item in getv(from_object, ['cachedContents']) + ], + ) + + return to_object + + +def _ListCachedContentsResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['nextPageToken']) is not None: + setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken'])) + + if getv(from_object, ['cachedContents']) is not None: + setv( + to_object, + ['cached_contents'], + [ + _CachedContent_from_vertex(api_client, item, to_object) + for item in getv(from_object, ['cachedContents']) + ], + ) + + return to_object + + +class Caches(_common.BaseModule): + + def create( + self, + *, + model: str, + config: Optional[types.CreateCachedContentConfigOrDict] = None, + ) -> types.CachedContent: + """Creates cached content. + + This call initializes the cached content in data storage, and users are + charged for the cached data storage. + + Usage: + + .. code-block:: python + + contents = ...  # Initialize the content to cache. + response = client.caches.create( + model= ...,  # The publisher model ID.
+ config={ + 'contents': contents, + 'display_name': 'test cache', + 'system_instruction': 'What is the sum of the two pdfs?', + 'ttl': '86400s', + }, + ) + """ + + parameter_model = types._CreateCachedContentParameters( + model=model, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _CreateCachedContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'cachedContents'.format_map(request_dict.get('_url')) + else: + request_dict = _CreateCachedContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'cachedContents'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _CachedContent_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _CachedContent_from_mldev(self._api_client, response_dict) + + return_value = types.CachedContent._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def get( + self, + *, + name: str, + config: Optional[types.GetCachedContentConfigOrDict] = None, + ) -> types.CachedContent: + """Gets cached content configurations. + + .. code-block:: python + + client.caches.get(name= ... )  # The server-generated resource name. + """ + + parameter_model = types._GetCachedContentParameters( + name=name, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _GetCachedContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _GetCachedContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _CachedContent_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _CachedContent_from_mldev(self._api_client, response_dict) + + return_value = types.CachedContent._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def delete( + self, + *, + name: str, + config: Optional[types.DeleteCachedContentConfigOrDict] = None, + ) -> types.DeleteCachedContentResponse: + """Deletes cached content. + + Usage: + + .. code-block:: python + + client.caches.delete(name= ... )  # The server-generated resource name.
+ """ + + parameter_model = types._DeleteCachedContentParameters( + name=name, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _DeleteCachedContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _DeleteCachedContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'delete', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _DeleteCachedContentResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _DeleteCachedContentResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.DeleteCachedContentResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def update( + self, + *, + name: str, + config: Optional[types.UpdateCachedContentConfigOrDict] = None, + ) -> types.CachedContent: + """Updates cached content configurations. + + .. code-block:: python + + response = client.caches.update( + name= ...,  # The server-generated resource name. + config={ + 'ttl': '7600s', + }, + ) + """ + + parameter_model = types._UpdateCachedContentParameters( + name=name, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _UpdateCachedContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _UpdateCachedContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'patch', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _CachedContent_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _CachedContent_from_mldev(self._api_client, response_dict) + + return_value = types.CachedContent._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def _list( + self, *, config: Optional[types.ListCachedContentsConfigOrDict] = None + ) -> types.ListCachedContentsResponse: + """Lists cached content configurations. + + .. 
code-block:: python + + cached_contents = client.caches.list(config={'page_size': 2}) + for cached_content in cached_contents: + print(cached_content) + """ + + parameter_model = types._ListCachedContentsParameters( + config=config, + ) + + if self._api_client.vertexai: + request_dict = _ListCachedContentsParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'cachedContents'.format_map(request_dict.get('_url')) + else: + request_dict = _ListCachedContentsParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'cachedContents'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _ListCachedContentsResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _ListCachedContentsResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.ListCachedContentsResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def list( + self, *, config: Optional[types.ListCachedContentsConfigOrDict] = None + ) -> Pager[types.CachedContent]: + return Pager( + 'cached_contents', + self._list, + self._list(config=config), + config, + ) + + +class AsyncCaches(_common.BaseModule): + + async def create( + self, + *, + model: str, + config: Optional[types.CreateCachedContentConfigOrDict] = None, + ) -> types.CachedContent: + """Creates cached content. + + This call initializes the cached content in data storage, and users are + charged for the cached data storage. + + Usage: + + .. code-block:: python + + contents = ...  # Initialize the content to cache. + response = await client.aio.caches.create( + model= ...,  # The publisher model ID. + config={ + 'contents': contents, + 'display_name': 'test cache', + 'system_instruction': 'What is the sum of the two pdfs?', + 'ttl': '86400s', + }, + ) + """ + + parameter_model = types._CreateCachedContentParameters( + model=model, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _CreateCachedContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'cachedContents'.format_map(request_dict.get('_url')) + else: + request_dict = _CreateCachedContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'cachedContents'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
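+ # Editor's note: unlike most configs, _CreateCachedContentConfig_to_vertex + # writes contents, systemInstruction, tools, toolConfig, ttl, expireTime and + # displayName into parent_object (the request body itself), leaving only + # httpOptions under 'config' for the pop below. A hedged usage sketch for + # this async variant (the model name is a placeholder): + # + #   cache = await client.aio.caches.create( + #       model='gemini-1.5-flash-001', + #       config={'contents': contents, 'ttl': '3600s'}, + #   )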
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _CachedContent_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _CachedContent_from_mldev(self._api_client, response_dict) + + return_value = types.CachedContent._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def get( + self, + *, + name: str, + config: Optional[types.GetCachedContentConfigOrDict] = None, + ) -> types.CachedContent: + """Gets cached content configurations. + + .. code-block:: python + + await client.aio.caches.get(name= ... )  # The server-generated resource name. + """ + + parameter_model = types._GetCachedContentParameters( + name=name, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _GetCachedContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _GetCachedContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _CachedContent_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _CachedContent_from_mldev(self._api_client, response_dict) + + return_value = types.CachedContent._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def delete( + self, + *, + name: str, + config: Optional[types.DeleteCachedContentConfigOrDict] = None, + ) -> types.DeleteCachedContentResponse: + """Deletes cached content. + + Usage: + + .. code-block:: python + + await client.aio.caches.delete(name= ... )  # The server-generated resource name. + """ + + parameter_model = types._DeleteCachedContentParameters( + name=name, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _DeleteCachedContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _DeleteCachedContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
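+ # Editor's note: in get/delete/update the path template is '{name}', filled + # from request_dict['_url'], where the parameter converter stored the name + # after passing it through t.t_cached_content_name(); presumably that + # normalization is what lets a bare ID ('123456789') and a fully qualified + # 'cachedContents/123456789' resource name resolve to the same endpoint.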
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'delete', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _DeleteCachedContentResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _DeleteCachedContentResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.DeleteCachedContentResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def update( + self, + *, + name: str, + config: Optional[types.UpdateCachedContentConfigOrDict] = None, + ) -> types.CachedContent: + """Updates cached content configurations. + + .. code-block:: python + + response = await client.aio.caches.update( + name= ...,  # The server-generated resource name. + config={ + 'ttl': '7600s', + }, + ) + """ + + parameter_model = types._UpdateCachedContentParameters( + name=name, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _UpdateCachedContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _UpdateCachedContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'patch', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _CachedContent_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _CachedContent_from_mldev(self._api_client, response_dict) + + return_value = types.CachedContent._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def _list( + self, *, config: Optional[types.ListCachedContentsConfigOrDict] = None + ) -> types.ListCachedContentsResponse: + """Lists cached content configurations. + + .. code-block:: python + + cached_contents = await client.aio.caches.list(config={'page_size': 2}) + async for cached_content in cached_contents: + print(cached_content) + """ + + parameter_model = types._ListCachedContentsParameters( + config=config, + ) + + if self._api_client.vertexai: + request_dict = _ListCachedContentsParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'cachedContents'.format_map(request_dict.get('_url')) + else: + request_dict = _ListCachedContentsParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'cachedContents'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
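+ # Editor's note: page_size and page_token travel as query parameters here + # (_ListCachedContentsConfig_to_vertex writes them under '_query'), not in + # the request body. The public list() below wraps this method in an + # AsyncPager keyed on 'cached_contents'; a hedged iteration sketch: + # + #   pager = await client.aio.caches.list(config={'page_size': 2}) + #   async for cached_content in pager:  # the pager refetches via _list + #       print(cached_content.name)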
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _ListCachedContentsResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _ListCachedContentsResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.ListCachedContentsResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def list( + self, *, config: Optional[types.ListCachedContentsConfigOrDict] = None + ) -> AsyncPager[types.CachedContent]: + return AsyncPager( + 'cached_contents', + self._list, + await self._list(config=config), + config, + ) diff --git a/.venv/lib/python3.12/site-packages/google/genai/chats.py b/.venv/lib/python3.12/site-packages/google/genai/chats.py new file mode 100644 index 00000000..707b7c8d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/genai/chats.py @@ -0,0 +1,266 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Optional +from typing import Union + +from . import _transformers as t +from .models import AsyncModels, Models +from .types import Content, ContentDict, GenerateContentConfigOrDict, GenerateContentResponse, Part, PartUnionDict + + +def _validate_response(response: GenerateContentResponse) -> bool: + if not response.candidates: + return False + if not response.candidates[0].content: + return False + if not response.candidates[0].content.parts: + return False + for part in response.candidates[0].content.parts: + if part == Part(): + return False + if part.text is not None and part.text == "": + return False + return True + + +class _BaseChat: + """Base chat session.""" + + def __init__( + self, + *, + modules: Union[Models, AsyncModels], + model: str, + config: GenerateContentConfigOrDict = None, + history: list[Content], + ): + self._modules = modules + self._model = model + self._config = config + self._curated_history = history + + +class Chat(_BaseChat): + """Chat session.""" + + def send_message( + self, message: Union[list[PartUnionDict], PartUnionDict] + ) -> GenerateContentResponse: + """Sends the conversation history with the additional message and returns the model's response. + + Args: + message: The message to send to the model. + + Returns: + The model's response. + + Usage: + + .. 
code-block:: python + + chat = client.chats.create(model='gemini-1.5-flash') + response = chat.send_message('tell me a story') + """ + + input_content = t.t_content(self._modules._api_client, message) + response = self._modules.generate_content( + model=self._model, + contents=self._curated_history + [input_content], + config=self._config, + ) + if _validate_response(response): + if response.automatic_function_calling_history: + self._curated_history.extend( + response.automatic_function_calling_history + ) + else: + self._curated_history.append(input_content) + self._curated_history.append(response.candidates[0].content) + return response + + def send_message_stream( + self, message: Union[list[PartUnionDict], PartUnionDict] + ): + """Sends the conversation history with the additional message and yields the model's response in chunks. + + Args: + message: The message to send to the model. + + Yields: + The model's response in chunks. + + Usage: + + .. code-block:: python + + chat = client.chats.create(model='gemini-1.5-flash') + for chunk in chat.send_message_stream('tell me a story'): + print(chunk.text) + """ + + input_content = t.t_content(self._modules._api_client, message) + output_contents = [] + finish_reason = None + for chunk in self._modules.generate_content_stream( + model=self._model, + contents=self._curated_history + [input_content], + config=self._config, + ): + if _validate_response(chunk): + output_contents.append(chunk.candidates[0].content) + if chunk.candidates and chunk.candidates[0].finish_reason: + finish_reason = chunk.candidates[0].finish_reason + yield chunk + if output_contents and finish_reason: + self._curated_history.append(input_content) + self._curated_history.extend(output_contents) + + +class Chats: + """A util class to create chat sessions.""" + + def __init__(self, modules: Models): + self._modules = modules + + def create( + self, + *, + model: str, + config: GenerateContentConfigOrDict = None, + history: Optional[list[Content]] = None, + ) -> Chat: + """Creates a new chat session. + + Args: + model: The model to use for the chat. + config: The configuration to use for the generate content request. + history: The history to use for the chat. + + Returns: + A new chat session. + """ + return Chat( + modules=self._modules, + model=model, + config=config, + history=history if history else [], + ) + + +class AsyncChat(_BaseChat): + """Async chat session.""" + + async def send_message( + self, message: Union[list[PartUnionDict], PartUnionDict] + ) -> GenerateContentResponse: + """Sends the conversation history with the additional message and returns model's response. + + Args: + message: The message to send to the model. + + Returns: + The model's response. + + Usage: + + .. 
code-block:: python

+      chat = client.aio.chats.create(model='gemini-1.5-flash')
+      response = await chat.send_message('tell me a story')
+    """
+
+    input_content = t.t_content(self._modules._api_client, message)
+    response = await self._modules.generate_content(
+        model=self._model,
+        contents=self._curated_history + [input_content],
+        config=self._config,
+    )
+    if _validate_response(response):
+      if response.automatic_function_calling_history:
+        self._curated_history.extend(
+            response.automatic_function_calling_history
+        )
+      else:
+        self._curated_history.append(input_content)
+      self._curated_history.append(response.candidates[0].content)
+    return response
+
+  async def send_message_stream(
+      self, message: Union[list[PartUnionDict], PartUnionDict]
+  ):
+    """Sends the conversation history with the additional message and yields the model's response in chunks.
+
+    Args:
+      message: The message to send to the model.
+
+    Yields:
+      The model's response in chunks.
+
+    Usage:
+
+    .. code-block:: python
+
+      chat = client.aio.chats.create(model='gemini-1.5-flash')
+      async for chunk in chat.send_message_stream('tell me a story'):
+        print(chunk.text)
+    """
+
+    input_content = t.t_content(self._modules._api_client, message)
+    output_contents = []
+    finish_reason = None
+    async for chunk in self._modules.generate_content_stream(
+        model=self._model,
+        contents=self._curated_history + [input_content],
+        config=self._config,
+    ):
+      if _validate_response(chunk):
+        output_contents.append(chunk.candidates[0].content)
+      if chunk.candidates and chunk.candidates[0].finish_reason:
+        finish_reason = chunk.candidates[0].finish_reason
+      yield chunk
+    if output_contents and finish_reason:
+      self._curated_history.append(input_content)
+      self._curated_history.extend(output_contents)
+
+
+class AsyncChats:
+  """A util class to create async chat sessions."""
+
+  def __init__(self, modules: AsyncModels):
+    self._modules = modules
+
+  def create(
+      self,
+      *,
+      model: str,
+      config: GenerateContentConfigOrDict = None,
+      history: Optional[list[Content]] = None,
+  ) -> AsyncChat:
+    """Creates a new chat session.
+
+    Args:
+      model: The model to use for the chat.
+      config: The configuration to use for the generate content request.
+      history: The history to use for the chat.
+
+    Returns:
+      A new chat session.
+    """
+    return AsyncChat(
+        modules=self._modules,
+        model=model,
+        config=config,
+        history=history if history else [],
+    )
diff --git a/.venv/lib/python3.12/site-packages/google/genai/client.py b/.venv/lib/python3.12/site-packages/google/genai/client.py
new file mode 100644
index 00000000..f29bfe72
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/genai/client.py
@@ -0,0 +1,281 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +import os +from typing import Optional, Union + +import google.auth +import pydantic + +from ._api_client import ApiClient, HttpOptions, HttpOptionsDict +from ._replay_api_client import ReplayApiClient +from .batches import AsyncBatches, Batches +from .caches import AsyncCaches, Caches +from .chats import AsyncChats, Chats +from .files import AsyncFiles, Files +from .live import AsyncLive +from .models import AsyncModels, Models +from .tunings import AsyncTunings, Tunings + + +class AsyncClient: + """Client for making asynchronous (non-blocking) requests.""" + + def __init__(self, api_client: ApiClient): + + self._api_client = api_client + self._models = AsyncModels(self._api_client) + self._tunings = AsyncTunings(self._api_client) + self._caches = AsyncCaches(self._api_client) + self._batches = AsyncBatches(self._api_client) + self._files = AsyncFiles(self._api_client) + self._live = AsyncLive(self._api_client) + + @property + def models(self) -> AsyncModels: + return self._models + + @property + def tunings(self) -> AsyncTunings: + return self._tunings + + @property + def caches(self) -> AsyncCaches: + return self._caches + + @property + def batches(self) -> AsyncBatches: + return self._batches + + @property + def chats(self) -> AsyncChats: + return AsyncChats(modules=self.models) + + @property + def files(self) -> AsyncFiles: + return self._files + + @property + def live(self) -> AsyncLive: + return self._live + + +class DebugConfig(pydantic.BaseModel): + """Configuration options that change client network behavior when testing.""" + + client_mode: Optional[str] = pydantic.Field( + default_factory=lambda: os.getenv('GOOGLE_GENAI_CLIENT_MODE', None) + ) + + replays_directory: Optional[str] = pydantic.Field( + default_factory=lambda: os.getenv('GOOGLE_GENAI_REPLAYS_DIRECTORY', None) + ) + + replay_id: Optional[str] = pydantic.Field( + default_factory=lambda: os.getenv('GOOGLE_GENAI_REPLAY_ID', None) + ) + + +class Client: + """Client for making synchronous requests. + + Use this client to make a request to the Gemini Developer API or Vertex AI + API and then wait for the response. + + Attributes: + api_key: The `API key <https://ai.google.dev/gemini-api/docs/api-key>`_ to + use for authentication. Applies to the Gemini Developer API only. + vertexai: Indicates whether the client should use the Vertex AI + API endpoints. Defaults to False (uses Gemini Developer API endpoints). + Applies to the Vertex AI API only. + credentials: The credentials to use for authentication when calling the + Vertex AI APIs. Credentials can be obtained from environment variables and + default credentials. For more information, see + `Set up Application Default Credentials + <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_. + Applies to the Vertex AI API only. + project: The `Google Cloud project ID <https://cloud.google.com/vertex-ai/docs/start/cloud-environment>`_ to + use for quota. Can be obtained from environment variables (for example, + ``GOOGLE_CLOUD_PROJECT``). Applies to the Vertex AI API only. + location: The `location <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations>`_ + to send API requests to (for example, ``us-central1``). Can be obtained + from environment variables. Applies to the Vertex AI API only. + debug_config: Config settings that control network behavior of the client. + This is typically used when running test code. + http_options: Http options to use for the client. 
Response_payload can't be + set when passing to the client constructor. + + Usage for the Gemini Developer API: + + .. code-block:: python + + from google import genai + + client = genai.Client(api_key='my-api-key') + + Usage for the Vertex AI API: + + .. code-block:: python + + from google import genai + + client = genai.Client( + vertexai=True, project='my-project-id', location='us-central1' + ) + """ + + def __init__( + self, + *, + vertexai: Optional[bool] = None, + api_key: Optional[str] = None, + credentials: Optional[google.auth.credentials.Credentials] = None, + project: Optional[str] = None, + location: Optional[str] = None, + debug_config: Optional[DebugConfig] = None, + http_options: Optional[Union[HttpOptions, HttpOptionsDict]] = None, + ): + """Initializes the client. + + Args: + vertexai (bool): + Indicates whether the client should use the Vertex AI + API endpoints. Defaults to False (uses Gemini Developer API + endpoints). Applies to the Vertex AI API only. + api_key (str): + The `API key + <https://ai.google.dev/gemini-api/docs/api-key>`_ to use for + authentication. Applies to the Gemini Developer API only. + credentials (google.auth.credentials.Credentials): + The credentials to + use for authentication when calling the Vertex AI APIs. Credentials + can be obtained from environment variables and default credentials. + For more information, see `Set up Application Default Credentials + <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_. + Applies to the Vertex AI API only. + project (str): + The `Google Cloud project ID + <https://cloud.google.com/vertex-ai/docs/start/cloud-environment>`_ to + use for quota. Can be obtained from environment variables (for + example, ``GOOGLE_CLOUD_PROJECT``). Applies to the Vertex AI API only. + location (str): + The `location + <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations>`_ + to send API requests to (for example, ``us-central1``). Can be + obtained from environment variables. Applies to the Vertex AI API + only. + debug_config (DebugConfig): + Config settings that control network + behavior of the client. This is typically used when running test code. + http_options (Union[HttpOptions, HttpOptionsDict]): + Http options to use for the client. Response_payload can't be + set when passing to the client constructor. + """ + + self._debug_config = debug_config or DebugConfig() + + # Throw ValueError if response_payload is set in http_options due to + # unpredictable behavior when running multiple coroutines through + # client.aio. + if http_options and 'response_payload' in http_options: + raise ValueError( + 'Setting response_payload in http_options is not supported.' 
+      )
+
+    self._api_client = self._get_api_client(
+        vertexai=vertexai,
+        api_key=api_key,
+        credentials=credentials,
+        project=project,
+        location=location,
+        debug_config=self._debug_config,
+        http_options=http_options,
+    )
+
+    self._aio = AsyncClient(self._api_client)
+    self._models = Models(self._api_client)
+    self._tunings = Tunings(self._api_client)
+    self._caches = Caches(self._api_client)
+    self._batches = Batches(self._api_client)
+    self._files = Files(self._api_client)
+
+  @staticmethod
+  def _get_api_client(
+      vertexai: Optional[bool] = None,
+      api_key: Optional[str] = None,
+      credentials: Optional[google.auth.credentials.Credentials] = None,
+      project: Optional[str] = None,
+      location: Optional[str] = None,
+      debug_config: Optional[DebugConfig] = None,
+      http_options: Optional[HttpOptions] = None,
+  ):
+    if debug_config and debug_config.client_mode in [
+        'record',
+        'replay',
+        'auto',
+    ]:
+      return ReplayApiClient(
+          mode=debug_config.client_mode,
+          replay_id=debug_config.replay_id,
+          replays_directory=debug_config.replays_directory,
+          vertexai=vertexai,
+          api_key=api_key,
+          credentials=credentials,
+          project=project,
+          location=location,
+          http_options=http_options,
+      )
+
+    return ApiClient(
+        vertexai=vertexai,
+        api_key=api_key,
+        credentials=credentials,
+        project=project,
+        location=location,
+        http_options=http_options,
+    )
+
+  @property
+  def chats(self) -> Chats:
+    return Chats(modules=self.models)
+
+  @property
+  def aio(self) -> AsyncClient:
+    return self._aio
+
+  @property
+  def models(self) -> Models:
+    return self._models
+
+  @property
+  def tunings(self) -> Tunings:
+    return self._tunings
+
+  @property
+  def caches(self) -> Caches:
+    return self._caches
+
+  @property
+  def batches(self) -> Batches:
+    return self._batches
+
+  @property
+  def files(self) -> Files:
+    return self._files
+
+  @property
+  def vertexai(self) -> bool:
+    """Returns whether the client is using the Vertex AI API."""
+    return self._api_client.vertexai or False
diff --git a/.venv/lib/python3.12/site-packages/google/genai/errors.py b/.venv/lib/python3.12/site-packages/google/genai/errors.py
new file mode 100644
index 00000000..12e03dfc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/genai/errors.py
@@ -0,0 +1,130 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Error classes for the GenAI SDK."""
+
+from typing import Any, Optional, TYPE_CHECKING, Union
+
+import requests
+
+
+if TYPE_CHECKING:
+  from .replay_api_client import ReplayResponse
+
+
+class APIError(Exception):
+  """General errors raised by the GenAI API."""
+
+  code: int
+  status: Optional[str] = None
+  message: Optional[str] = None
+  response: Optional[Any] = None
+
+  def __init__(
+      self, code: int, response: Union[requests.Response, 'ReplayResponse']
+  ):
+    self.response = response
+
+    if isinstance(response, requests.Response):
+      try:
+        # do not do any extra manipulation on the response.
+        # return the raw response JSON as is.
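[Editor's aside] A hedged end-to-end sketch of constructing the client and handling the error hierarchy declared in this module; the API key, model, and prompt are placeholders, and the `except` clauses use only the classes defined here.

from google import genai
from google.genai import errors

client = genai.Client(api_key='YOUR_API_KEY')  # placeholder key
chat = client.chats.create(model='gemini-1.5-flash')

try:
  print(chat.send_message('tell me a story').text)
except errors.ClientError as e:  # 4xx responses (auth, quota, bad request)
  print('client-side problem:', e.code, e.status, e.message)
except errors.ServerError as e:  # 5xx responses; retrying is typical
  print('server-side problem:', e.code, e.status)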
+ response_json = response.json() + except requests.exceptions.JSONDecodeError: + response_json = { + 'message': response.text, + 'status': response.reason, + } + else: + response_json = response.body_segments[0].get('error', {}) + + self.details = response_json + self.message = self._get_message(response_json) + self.status = self._get_status(response_json) + self.code = code if code else self._get_code(response_json) + + super().__init__(f'{self.code} {self.status}. {self.details}') + + def _get_status(self, response_json): + return response_json.get( + 'status', response_json.get('error', {}).get('status', None) + ) + + def _get_message(self, response_json): + return response_json.get( + 'message', response_json.get('error', {}).get('message', None) + ) + + def _get_code(self, response_json): + return response_json.get( + 'code', response_json.get('error', {}).get('code', None) + ) + + def _to_replay_record(self): + """Returns a dictionary representation of the error for replay recording. + + details is not included since it may expose internal information in the + replay file. + """ + return { + 'error': { + 'code': self.code, + 'message': self.message, + 'status': self.status, + } + } + + @classmethod + def raise_for_response( + cls, response: Union[requests.Response, 'ReplayResponse'] + ): + """Raises an error with detailed error message if the response has an error status.""" + if response.status_code == 200: + return + + status_code = response.status_code + if 400 <= status_code < 500: + raise ClientError(status_code, response) + elif 500 <= status_code < 600: + raise ServerError(status_code, response) + else: + raise cls(status_code, response) + + +class ClientError(APIError): + """Client error raised by the GenAI API.""" + pass + + +class ServerError(APIError): + """Server error raised by the GenAI API.""" + pass + + +class UnknownFunctionCallArgumentError(ValueError): + """Raised when the function call argument cannot be converted to the parameter annotation.""" + + pass + + +class UnsupportedFunctionError(ValueError): + """Raised when the function is not supported.""" + + +class FunctionInvocationError(ValueError): + """Raised when the function cannot be invoked with the given arguments.""" + + pass diff --git a/.venv/lib/python3.12/site-packages/google/genai/files.py b/.venv/lib/python3.12/site-packages/google/genai/files.py new file mode 100644 index 00000000..20cf30af --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/genai/files.py @@ -0,0 +1,1417 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Code generated by the Google Gen AI SDK generator DO NOT EDIT. + +import io +import mimetypes +import os +import pathlib +from typing import Optional, Union +from urllib.parse import urlencode +from . import _common +from . import _transformers as t +from . 
import types +from ._api_client import ApiClient +from ._common import get_value_by_path as getv +from ._common import set_value_by_path as setv +from .pagers import AsyncPager, Pager + + +def _ListFilesConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['page_size']) is not None: + setv( + parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size']) + ) + + if getv(from_object, ['page_token']) is not None: + setv( + parent_object, + ['_query', 'pageToken'], + getv(from_object, ['page_token']), + ) + + return to_object + + +def _ListFilesConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['page_size']) is not None: + setv( + parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size']) + ) + + if getv(from_object, ['page_token']) is not None: + setv( + parent_object, + ['_query', 'pageToken'], + getv(from_object, ['page_token']), + ) + + return to_object + + +def _ListFilesParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _ListFilesConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _ListFilesParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['config']) is not None: + raise ValueError('config parameter is not supported in Vertex AI.') + + return to_object + + +def _FileStatus_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['details']) is not None: + setv(to_object, ['details'], getv(from_object, ['details'])) + + if getv(from_object, ['message']) is not None: + setv(to_object, ['message'], getv(from_object, ['message'])) + + if getv(from_object, ['code']) is not None: + setv(to_object, ['code'], getv(from_object, ['code'])) + + return to_object + + +def _FileStatus_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['details']) is not None: + raise ValueError('details parameter is not supported in Vertex AI.') + + if getv(from_object, ['message']) is not None: + raise ValueError('message parameter is not supported in Vertex AI.') + + if getv(from_object, ['code']) is not None: + raise ValueError('code parameter is not supported in Vertex AI.') + + return to_object + + +def _File_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['display_name']) is not None: + setv(to_object, ['displayName'], getv(from_object, ['display_name'])) + + if getv(from_object, ['mime_type']) is not None: + setv(to_object, ['mimeType'], getv(from_object, ['mime_type'])) + + 
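[Editor's aside] The generated converters in this file all follow one mechanical pattern: read a value by key path with `getv` and write it under its wire-format name with `setv`. A self-contained sketch of that path convention follows; these are illustrative re-implementations, not the SDK's actual helpers.

def get_by_path(data: dict, keys: list):
  # Walk nested dicts along `keys`; return None when any segment is missing.
  for key in keys:
    if not isinstance(data, dict) or key not in data:
      return None
    data = data[key]
  return data


def set_by_path(data: dict, keys: list, value) -> None:
  # Create intermediate dicts as needed, then assign the leaf value.
  for key in keys[:-1]:
    data = data.setdefault(key, {})
  data[keys[-1]] = value


request = {}
set_by_path(request, ['_url', 'file'], 'files/abc123')
assert get_by_path(request, ['_url', 'file']) == 'files/abc123'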
if getv(from_object, ['size_bytes']) is not None: + setv(to_object, ['sizeBytes'], getv(from_object, ['size_bytes'])) + + if getv(from_object, ['create_time']) is not None: + setv(to_object, ['createTime'], getv(from_object, ['create_time'])) + + if getv(from_object, ['expiration_time']) is not None: + setv(to_object, ['expirationTime'], getv(from_object, ['expiration_time'])) + + if getv(from_object, ['update_time']) is not None: + setv(to_object, ['updateTime'], getv(from_object, ['update_time'])) + + if getv(from_object, ['sha256_hash']) is not None: + setv(to_object, ['sha256Hash'], getv(from_object, ['sha256_hash'])) + + if getv(from_object, ['uri']) is not None: + setv(to_object, ['uri'], getv(from_object, ['uri'])) + + if getv(from_object, ['download_uri']) is not None: + setv(to_object, ['downloadUri'], getv(from_object, ['download_uri'])) + + if getv(from_object, ['state']) is not None: + setv(to_object, ['state'], getv(from_object, ['state'])) + + if getv(from_object, ['source']) is not None: + setv(to_object, ['source'], getv(from_object, ['source'])) + + if getv(from_object, ['video_metadata']) is not None: + setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata'])) + + if getv(from_object, ['error']) is not None: + setv( + to_object, + ['error'], + _FileStatus_to_mldev( + api_client, getv(from_object, ['error']), to_object + ), + ) + + return to_object + + +def _File_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + raise ValueError('name parameter is not supported in Vertex AI.') + + if getv(from_object, ['display_name']) is not None: + raise ValueError('display_name parameter is not supported in Vertex AI.') + + if getv(from_object, ['mime_type']) is not None: + raise ValueError('mime_type parameter is not supported in Vertex AI.') + + if getv(from_object, ['size_bytes']) is not None: + raise ValueError('size_bytes parameter is not supported in Vertex AI.') + + if getv(from_object, ['create_time']) is not None: + raise ValueError('create_time parameter is not supported in Vertex AI.') + + if getv(from_object, ['expiration_time']) is not None: + raise ValueError('expiration_time parameter is not supported in Vertex AI.') + + if getv(from_object, ['update_time']) is not None: + raise ValueError('update_time parameter is not supported in Vertex AI.') + + if getv(from_object, ['sha256_hash']) is not None: + raise ValueError('sha256_hash parameter is not supported in Vertex AI.') + + if getv(from_object, ['uri']) is not None: + raise ValueError('uri parameter is not supported in Vertex AI.') + + if getv(from_object, ['download_uri']) is not None: + raise ValueError('download_uri parameter is not supported in Vertex AI.') + + if getv(from_object, ['state']) is not None: + raise ValueError('state parameter is not supported in Vertex AI.') + + if getv(from_object, ['source']) is not None: + raise ValueError('source parameter is not supported in Vertex AI.') + + if getv(from_object, ['video_metadata']) is not None: + raise ValueError('video_metadata parameter is not supported in Vertex AI.') + + if getv(from_object, ['error']) is not None: + raise ValueError('error parameter is not supported in Vertex AI.') + + return to_object + + +def _CreateFileConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + 
setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _CreateFileConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _CreateFileParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['file']) is not None: + setv( + to_object, + ['file'], + _File_to_mldev(api_client, getv(from_object, ['file']), to_object), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CreateFileConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _CreateFileParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['file']) is not None: + raise ValueError('file parameter is not supported in Vertex AI.') + + if getv(from_object, ['config']) is not None: + raise ValueError('config parameter is not supported in Vertex AI.') + + return to_object + + +def _GetFileConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _GetFileConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _GetFileParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv( + to_object, + ['_url', 'file'], + t.t_file_name(api_client, getv(from_object, ['name'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _GetFileConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _GetFileParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + raise ValueError('name parameter is not supported in Vertex AI.') + + if getv(from_object, ['config']) is not None: + raise ValueError('config parameter is not supported in Vertex AI.') + + return to_object + + +def _DeleteFileConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _DeleteFileConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _DeleteFileParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, 
object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv( + to_object, + ['_url', 'file'], + t.t_file_name(api_client, getv(from_object, ['name'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _DeleteFileConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _DeleteFileParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + raise ValueError('name parameter is not supported in Vertex AI.') + + if getv(from_object, ['config']) is not None: + raise ValueError('config parameter is not supported in Vertex AI.') + + return to_object + + +def _FileStatus_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['details']) is not None: + setv(to_object, ['details'], getv(from_object, ['details'])) + + if getv(from_object, ['message']) is not None: + setv(to_object, ['message'], getv(from_object, ['message'])) + + if getv(from_object, ['code']) is not None: + setv(to_object, ['code'], getv(from_object, ['code'])) + + return to_object + + +def _FileStatus_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _File_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['displayName']) is not None: + setv(to_object, ['display_name'], getv(from_object, ['displayName'])) + + if getv(from_object, ['mimeType']) is not None: + setv(to_object, ['mime_type'], getv(from_object, ['mimeType'])) + + if getv(from_object, ['sizeBytes']) is not None: + setv(to_object, ['size_bytes'], getv(from_object, ['sizeBytes'])) + + if getv(from_object, ['createTime']) is not None: + setv(to_object, ['create_time'], getv(from_object, ['createTime'])) + + if getv(from_object, ['expirationTime']) is not None: + setv(to_object, ['expiration_time'], getv(from_object, ['expirationTime'])) + + if getv(from_object, ['updateTime']) is not None: + setv(to_object, ['update_time'], getv(from_object, ['updateTime'])) + + if getv(from_object, ['sha256Hash']) is not None: + setv(to_object, ['sha256_hash'], getv(from_object, ['sha256Hash'])) + + if getv(from_object, ['uri']) is not None: + setv(to_object, ['uri'], getv(from_object, ['uri'])) + + if getv(from_object, ['downloadUri']) is not None: + setv(to_object, ['download_uri'], getv(from_object, ['downloadUri'])) + + if getv(from_object, ['state']) is not None: + setv(to_object, ['state'], getv(from_object, ['state'])) + + if getv(from_object, ['source']) is not None: + setv(to_object, ['source'], getv(from_object, ['source'])) + + if getv(from_object, ['videoMetadata']) is not None: + setv(to_object, ['video_metadata'], getv(from_object, ['videoMetadata'])) + + if getv(from_object, ['error']) is not None: + setv( + to_object, + ['error'], + _FileStatus_from_mldev( + api_client, getv(from_object, ['error']), to_object + ), + ) + + return to_object + + +def _File_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object 
= {} + + return to_object + + +def _ListFilesResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['nextPageToken']) is not None: + setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken'])) + + if getv(from_object, ['files']) is not None: + setv( + to_object, + ['files'], + [ + _File_from_mldev(api_client, item, to_object) + for item in getv(from_object, ['files']) + ], + ) + + return to_object + + +def _ListFilesResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _CreateFileResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _CreateFileResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _DeleteFileResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _DeleteFileResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +class Files(_common.BaseModule): + + def _list( + self, *, config: Optional[types.ListFilesConfigOrDict] = None + ) -> types.ListFilesResponse: + """Lists all files from the service. + + Args: + config (ListFilesConfig): Optional, configuration for the list method. + + Returns: + ListFilesResponse: The response for the list method. + + Usage: + + .. code-block:: python + + pager = client.files.list(config={'page_size': 10}) + for file in pager.page: + print(file.name) + """ + + parameter_model = types._ListFilesParameters( + config=config, + ) + + if self._api_client.vertexai: + raise ValueError('This method is only supported in the default client.') + else: + request_dict = _ListFilesParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'files'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
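[Editor's aside] A small usage sketch of the `Pager` that the public `list()` further below layers over `_list()`; the client setup is assumed and the page size is arbitrary.

from google import genai

client = genai.Client(api_key='YOUR_API_KEY')  # placeholder key
pager = client.files.list(config={'page_size': 10})
for f in pager:  # the pager requests follow-up pages on demand
  print(f.name, f.mime_type)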
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _ListFilesResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _ListFilesResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.ListFilesResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def _create( + self, + *, + file: types.FileOrDict, + config: Optional[types.CreateFileConfigOrDict] = None, + ) -> types.CreateFileResponse: + parameter_model = types._CreateFileParameters( + file=file, + config=config, + ) + + if self._api_client.vertexai: + raise ValueError('This method is only supported in the default client.') + else: + request_dict = _CreateFileParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'upload/v1beta/files'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _CreateFileResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _CreateFileResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.CreateFileResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def get( + self, *, name: str, config: Optional[types.GetFileConfigOrDict] = None + ) -> types.File: + """Retrieves the file information from the service. + + Args: + name (str): The name identifier for the file to retrieve. + config (GetFileConfig): Optional, configuration for the get method. + + Returns: + File: The file information. + + Usage: + + .. code-block:: python + + file = client.files.get(name='files/...') + print(file.uri) + """ + + parameter_model = types._GetFileParameters( + name=name, + config=config, + ) + + if self._api_client.vertexai: + raise ValueError('This method is only supported in the default client.') + else: + request_dict = _GetFileParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'files/{file}'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
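[Editor's aside] Since `upload()` below carries no usage block of its own, here is a hedged sketch of a typical round trip through upload, get, and delete; the local path and display name are placeholders.

from google import genai

client = genai.Client(api_key='YOUR_API_KEY')  # placeholder key
uploaded = client.files.upload(
    path='/tmp/report.pdf',  # hypothetical local file
    config={'display_name': 'Q3 report'},
)
fetched = client.files.get(name=uploaded.name)
print(fetched.state, fetched.uri)
client.files.delete(name=uploaded.name)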
+    config = request_dict.pop('config', None)
+    http_options = config.pop('httpOptions', None) if config else None
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response_dict = self._api_client.request(
+        'get', path, request_dict, http_options
+    )
+
+    if self._api_client.vertexai:
+      response_dict = _File_from_vertex(self._api_client, response_dict)
+    else:
+      response_dict = _File_from_mldev(self._api_client, response_dict)
+
+    return_value = types.File._from_response(response_dict, parameter_model)
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  def delete(
+      self, *, name: str, config: Optional[types.DeleteFileConfigOrDict] = None
+  ) -> types.DeleteFileResponse:
+    """Deletes an existing file from the service.
+
+    Args:
+      name (str): The name identifier for the file to delete.
+      config (DeleteFileConfig): Optional, configuration for the delete method.
+
+    Returns:
+      DeleteFileResponse: The response for the delete method.
+
+    Usage:
+
+    .. code-block:: python
+
+      client.files.delete(name='files/...')
+    """
+
+    parameter_model = types._DeleteFileParameters(
+        name=name,
+        config=config,
+    )
+
+    if self._api_client.vertexai:
+      raise ValueError('This method is only supported in the default client.')
+    else:
+      request_dict = _DeleteFileParameters_to_mldev(
+          self._api_client, parameter_model
+      )
+      path = 'files/{file}'.format_map(request_dict.get('_url'))
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    config = request_dict.pop('config', None)
+    http_options = config.pop('httpOptions', None) if config else None
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response_dict = self._api_client.request(
+        'delete', path, request_dict, http_options
+    )
+
+    if self._api_client.vertexai:
+      response_dict = _DeleteFileResponse_from_vertex(
+          self._api_client, response_dict
+      )
+    else:
+      response_dict = _DeleteFileResponse_from_mldev(
+          self._api_client, response_dict
+      )
+
+    return_value = types.DeleteFileResponse._from_response(
+        response_dict, parameter_model
+    )
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  def upload(
+      self,
+      *,
+      path: Union[str, pathlib.Path, os.PathLike, io.IOBase],
+      config: Optional[types.UploadFileConfigOrDict] = None,
+  ) -> types.File:
+    """Calls the API to upload a file using a supported file service.
+
+    Args:
+      path: The path to the file or an `IOBase` object to be uploaded. If it's
+        an IOBase object, it must be opened in blocking mode and binary mode. In
+        other words, do not use non-blocking mode or text mode. The given stream
+        must be seekable, that is, it must be able to call seek() on 'path'.
+      config: Optional parameters to set `display_name`, `mime_type`, and `name`.
+    """
+    if self._api_client.vertexai:
+      raise ValueError(
+          'Vertex AI does not support creating files. You can upload files to'
+          ' GCS instead.'
+      )
+    config_model = None
+    if config:
+      if isinstance(config, dict):
+        config_model = types.UploadFileConfig(**config)
+      else:
+        config_model = config
+      file = types.File(
+          mime_type=config_model.mime_type,
+          name=config_model.name,
+          display_name=config_model.display_name,
+      )
+    else:  # if not config
+      file = types.File()
+    if file.name is not None and not file.name.startswith('files/'):
+      file.name = f'files/{file.name}'
+
+    if isinstance(path, io.IOBase):
+      if file.mime_type is None:
+        raise ValueError(
+            'Unknown mime type: Could not determine the mimetype for your'
+            ' file\n please set the `mime_type` argument'
+        )
+      if hasattr(path, 'mode'):
+        if 'b' not in path.mode:
+          raise ValueError('The file must be opened in binary mode.')
+      offset = path.tell()
+      path.seek(0, os.SEEK_END)
+      file.size_bytes = path.tell() - offset
+      path.seek(offset, os.SEEK_SET)
+    else:
+      fs_path = os.fspath(path)
+      if not fs_path or not os.path.isfile(fs_path):
+        raise FileNotFoundError(f'{path} is not a valid file path.')
+      file.size_bytes = os.path.getsize(fs_path)
+      if file.mime_type is None:
+        file.mime_type, _ = mimetypes.guess_type(fs_path)
+      if file.mime_type is None:
+        raise ValueError(
+            'Unknown mime type: Could not determine the mimetype for your'
+            ' file\n please set the `mime_type` argument'
+        )
+    response = {}
+    if config_model and config_model.http_options:
+      http_options = config_model.http_options
+    else:
+      http_options = {
+          'api_version': '',  # api-version is set in the path.
+          'headers': {
+              'Content-Type': 'application/json',
+              'X-Goog-Upload-Protocol': 'resumable',
+              'X-Goog-Upload-Command': 'start',
+              'X-Goog-Upload-Header-Content-Length': f'{file.size_bytes}',
+              'X-Goog-Upload-Header-Content-Type': f'{file.mime_type}',
+          },
+          'response_payload': response,
+      }
+    self._create(file=file, config={'http_options': http_options})
+
+    if (
+        'headers' not in response
+        or 'X-Goog-Upload-URL' not in response['headers']
+    ):
+      raise KeyError(
+          'Failed to create file. Upload URL was not returned by the create'
+          ' file request.'
+      )
+    upload_url = response['headers']['X-Goog-Upload-URL']
+
+    if isinstance(path, io.IOBase):
+      return_file = self._api_client.upload_file(
+          path, upload_url, file.size_bytes
+      )
+    else:
+      return_file = self._api_client.upload_file(
+          fs_path, upload_url, file.size_bytes
+      )
+
+    return types.File._from_response(
+        _File_from_mldev(self._api_client, return_file['file']), None
+    )
+
+  def list(
+      self, *, config: Optional[types.ListFilesConfigOrDict] = None
+  ) -> Pager[types.File]:
+    return Pager(
+        'files',
+        self._list,
+        self._list(config=config),
+        config,
+    )
+
+  def download(
+      self,
+      *,
+      file: Union[str, types.File],
+      config: Optional[types.DownloadFileConfigOrDict] = None,
+  ) -> bytes:
+    """Downloads a file's data from storage.
+
+    Files created by `upload` can't be downloaded. You can tell which files are
+    downloadable by checking the `source` or `download_uri` property.
+
+    Args:
+      file (str): A file name, uri, or file object identifying which file to
+        download.
+      config (DownloadFileConfigOrDict): Optional, configuration for the get
+        method.
+
+    Returns:
+      bytes: The file data.
+
+    Usage:
+
+    .. code-block:: python
+
+      for file in client.files.list():
+        if file.download_uri is not None:
+          break
+      else:
+        raise ValueError('No files found with a `download_uri`.')
+      data = client.files.download(file=file)
+      # data = client.files.download(file=file.name)
+      # data = client.files.download(file=file.download_uri)
+    """
+    if self._api_client.vertexai:
+      raise ValueError(
+          'Vertex AI does not support the Files API. Use GCS files instead.'
+      )
+
+    config_model = None
+    if config:
+      if isinstance(config, dict):
+        config_model = types.DownloadFileConfig(**config)
+      else:
+        config_model = config
+
+    if isinstance(file, types.File) and file.download_uri is None:
+      raise ValueError(
+          "Only generated files can be downloaded, uploaded files can't be "
+          'downloaded. You can tell which files are downloadable by checking '
+          'the `source` or `download_uri` property.'
+      )
+    name = t.t_file_name(self, file)
+
+    path = f'files/{name}:download'
+
+    query_params = {'alt': 'media'}
+    path = f'{path}?{urlencode(query_params)}'
+    http_options = None
+    if getv(config_model, ['http_options']) is not None:
+      http_options = getv(config_model, ['http_options'])
+
+    data = self._api_client.download_file(
+        path,
+        http_options,
+    )
+
+    return data
+
+
+class AsyncFiles(_common.BaseModule):
+
+  async def _list(
+      self, *, config: Optional[types.ListFilesConfigOrDict] = None
+  ) -> types.ListFilesResponse:
+    """Lists all files from the service.
+
+    Args:
+      config (ListFilesConfig): Optional, configuration for the list method.
+
+    Returns:
+      ListFilesResponse: The response for the list method.
+
+    Usage:
+
+    .. code-block:: python
+
+      pager = client.files.list(config={'page_size': 10})
+      for file in pager.page:
+        print(file.name)
+    """
+
+    parameter_model = types._ListFilesParameters(
+        config=config,
+    )
+
+    if self._api_client.vertexai:
+      raise ValueError('This method is only supported in the default client.')
+    else:
+      request_dict = _ListFilesParameters_to_mldev(
+          self._api_client, parameter_model
+      )
+      path = 'files'.format_map(request_dict.get('_url'))
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
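[Editor's aside] For readers unfamiliar with the `X-Goog-Upload-*` headers driven by `upload()` above, here is an illustrative reconstruction of the 'start' leg of the resumable protocol using plain `requests`. The endpoint mirrors the `upload/v1beta/files` path in this module, but treat the whole sketch as an assumption rather than a supported interface.

import requests


def start_resumable_upload(api_key: str, size_bytes: int, mime_type: str) -> str:
  # Step 1: announce the payload; the service answers with the upload
  # session URL in the X-Goog-Upload-URL response header.
  resp = requests.post(
      'https://generativelanguage.googleapis.com/upload/v1beta/files',
      params={'key': api_key},
      headers={
          'Content-Type': 'application/json',
          'X-Goog-Upload-Protocol': 'resumable',
          'X-Goog-Upload-Command': 'start',
          'X-Goog-Upload-Header-Content-Length': str(size_bytes),
          'X-Goog-Upload-Header-Content-Type': mime_type,
      },
      json={'file': {}},
  )
  resp.raise_for_status()
  # Step 2 (not shown): send the bytes to the returned URL.
  return resp.headers['X-Goog-Upload-URL']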
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _ListFilesResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _ListFilesResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.ListFilesResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def _create( + self, + *, + file: types.FileOrDict, + config: Optional[types.CreateFileConfigOrDict] = None, + ) -> types.CreateFileResponse: + parameter_model = types._CreateFileParameters( + file=file, + config=config, + ) + + if self._api_client.vertexai: + raise ValueError('This method is only supported in the default client.') + else: + request_dict = _CreateFileParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'upload/v1beta/files'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _CreateFileResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _CreateFileResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.CreateFileResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def get( + self, *, name: str, config: Optional[types.GetFileConfigOrDict] = None + ) -> types.File: + """Retrieves the file information from the service. + + Args: + name (str): The name identifier for the file to retrieve. + config (GetFileConfig): Optional, configuration for the get method. + + Returns: + File: The file information. + + Usage: + + .. code-block:: python + + file = client.files.get(name='files/...') + print(file.uri) + """ + + parameter_model = types._GetFileParameters( + name=name, + config=config, + ) + + if self._api_client.vertexai: + raise ValueError('This method is only supported in the default client.') + else: + request_dict = _GetFileParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'files/{file}'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
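[Editor's aside] The async file listing mirrors the sync pager; a minimal sketch, assuming an event loop and placeholder credentials.

import asyncio

from google import genai


async def main() -> None:
  client = genai.Client(api_key='YOUR_API_KEY')  # placeholder key
  # list() returns an AsyncPager once awaited; iteration pulls extra pages.
  async for f in await client.aio.files.list(config={'page_size': 10}):
    print(f.name)


asyncio.run(main())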
+    config = request_dict.pop('config', None)
+    http_options = config.pop('httpOptions', None) if config else None
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response_dict = await self._api_client.async_request(
+        'get', path, request_dict, http_options
+    )
+
+    if self._api_client.vertexai:
+      response_dict = _File_from_vertex(self._api_client, response_dict)
+    else:
+      response_dict = _File_from_mldev(self._api_client, response_dict)
+
+    return_value = types.File._from_response(response_dict, parameter_model)
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  async def delete(
+      self, *, name: str, config: Optional[types.DeleteFileConfigOrDict] = None
+  ) -> types.DeleteFileResponse:
+    """Deletes an existing file from the service.
+
+    Args:
+      name (str): The name identifier for the file to delete.
+      config (DeleteFileConfig): Optional, configuration for the delete method.
+
+    Returns:
+      DeleteFileResponse: The response for the delete method.
+
+    Usage:
+
+    .. code-block:: python
+
+      client.files.delete(name='files/...')
+    """
+
+    parameter_model = types._DeleteFileParameters(
+        name=name,
+        config=config,
+    )
+
+    if self._api_client.vertexai:
+      raise ValueError('This method is only supported in the default client.')
+    else:
+      request_dict = _DeleteFileParameters_to_mldev(
+          self._api_client, parameter_model
+      )
+      path = 'files/{file}'.format_map(request_dict.get('_url'))
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    config = request_dict.pop('config', None)
+    http_options = config.pop('httpOptions', None) if config else None
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response_dict = await self._api_client.async_request(
+        'delete', path, request_dict, http_options
+    )
+
+    if self._api_client.vertexai:
+      response_dict = _DeleteFileResponse_from_vertex(
+          self._api_client, response_dict
+      )
+    else:
+      response_dict = _DeleteFileResponse_from_mldev(
+          self._api_client, response_dict
+      )
+
+    return_value = types.DeleteFileResponse._from_response(
+        response_dict, parameter_model
+    )
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  async def upload(
+      self,
+      *,
+      path: Union[str, pathlib.Path, os.PathLike, io.IOBase],
+      config: Optional[types.UploadFileConfigOrDict] = None,
+  ) -> types.File:
+    """Calls the API to upload a file asynchronously using a supported file service.
+
+    Args:
+      path: The path to the file or an `IOBase` object to be uploaded. If it's
+        an IOBase object, it must be opened in blocking mode and binary mode. In
+        other words, do not use non-blocking mode or text mode. The given stream
+        must be seekable, that is, it must be able to call seek() on 'path'.
+      config: Optional parameters to set `display_name`, `mime_type`, and `name`.
+    """
+    if self._api_client.vertexai:
+      raise ValueError(
+          'Vertex AI does not support creating files. You can upload files to'
+          ' GCS instead.'
+      )
+    config_model = None
+    if config:
+      if isinstance(config, dict):
+        config_model = types.UploadFileConfig(**config)
+      else:
+        config_model = config
+      file = types.File(
+          mime_type=config_model.mime_type,
+          name=config_model.name,
+          display_name=config_model.display_name,
+      )
+    else:  # if not config
+      file = types.File()
+    if file.name is not None and not file.name.startswith('files/'):
+      file.name = f'files/{file.name}'
+
+    if isinstance(path, io.IOBase):
+      if file.mime_type is None:
+        raise ValueError(
+            'Unknown mime type: Could not determine the mimetype for your'
+            ' file\n please set the `mime_type` argument'
+        )
+      if hasattr(path, 'mode'):
+        if 'b' not in path.mode:
+          raise ValueError('The file must be opened in binary mode.')
+      offset = path.tell()
+      path.seek(0, os.SEEK_END)
+      file.size_bytes = path.tell() - offset
+      path.seek(offset, os.SEEK_SET)
+    else:
+      fs_path = os.fspath(path)
+      if not fs_path or not os.path.isfile(fs_path):
+        raise FileNotFoundError(f'{path} is not a valid file path.')
+      file.size_bytes = os.path.getsize(fs_path)
+      if file.mime_type is None:
+        file.mime_type, _ = mimetypes.guess_type(fs_path)
+      if file.mime_type is None:
+        raise ValueError(
+            'Unknown mime type: Could not determine the mimetype for your'
+            ' file\n please set the `mime_type` argument'
+        )
+
+    response = {}
+    if config_model and config_model.http_options:
+      http_options = config_model.http_options
+    else:
+      http_options = {
+          'api_version': '',  # api-version is set in the path.
+          'headers': {
+              'Content-Type': 'application/json',
+              'X-Goog-Upload-Protocol': 'resumable',
+              'X-Goog-Upload-Command': 'start',
+              'X-Goog-Upload-Header-Content-Length': f'{file.size_bytes}',
+              'X-Goog-Upload-Header-Content-Type': f'{file.mime_type}',
+          },
+          'response_payload': response,
+      }
+    await self._create(file=file, config={'http_options': http_options})
+    if (
+        'headers' not in response
+        or 'X-Goog-Upload-URL' not in response['headers']
+    ):
+      raise KeyError(
+          'Failed to create file. Upload URL was not returned by the create'
+          ' file request.'
+      )
+    upload_url = response['headers']['X-Goog-Upload-URL']
+
+    if isinstance(path, io.IOBase):
+      return_file = await self._api_client.async_upload_file(
+          path, upload_url, file.size_bytes
+      )
+    else:
+      return_file = await self._api_client.async_upload_file(
+          fs_path, upload_url, file.size_bytes
+      )
+
+    return types.File._from_response(
+        _File_from_mldev(self._api_client, return_file['file']), None
+    )
+
+  async def list(
+      self, *, config: Optional[types.ListFilesConfigOrDict] = None
+  ) -> AsyncPager[types.File]:
+    return AsyncPager(
+        'files',
+        self._list,
+        await self._list(config=config),
+        config,
+    )
+
+  async def download(
+      self,
+      *,
+      file: Union[str, types.File],
+      config: Optional[types.DownloadFileConfigOrDict] = None,
+  ) -> bytes:
+    """Downloads a file's data from the file service.
+
+    The Vertex AI implementation of the API does not include the file service.
+
+    Files created by `upload` can't be downloaded. You can tell which files are
+    downloadable by checking the `download_uri` property.
+
+    Args:
+      file (str): A file name, uri, or file object identifying which file to
+        download.
+      config (DownloadFileConfigOrDict): Optional, configuration for the get
+        method.
+
+    Returns:
+      bytes: The file data.
+
+    Usage:
+
+    """
+    if self._api_client.vertexai:
+      raise ValueError(
+          'Vertex AI does not support the Files API. Use GCS files instead.'
+      )
+
+    config_model = None
+    if config:
+      if isinstance(config, dict):
+        config_model = types.DownloadFileConfig(**config)
+      else:
+        config_model = config
+
+    name = t.t_file_name(self, file)
+
+    path = f'files/{name}:download'
+
+    http_options = None
+    if getv(config_model, ['http_options']) is not None:
+      http_options = getv(config_model, ['http_options'])
+
+    query_params = {'alt': 'media'}
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+
+    data = await self._api_client.async_download_file(
+        path,
+        http_options,
+    )
+
+    return data
diff --git a/.venv/lib/python3.12/site-packages/google/genai/live.py b/.venv/lib/python3.12/site-packages/google/genai/live.py
new file mode 100644
index 00000000..586ba0ce
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/genai/live.py
@@ -0,0 +1,696 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Live client."""
+
+import asyncio
+import base64
+import contextlib
+import json
+import logging
+from typing import AsyncIterator, Optional, Sequence, Union
+
+import google.auth
+# Editor's fix: explicit import so google.auth.transport.requests.Request()
+# in connect() below cannot fail on attribute lookup.
+import google.auth.transport.requests
+from websockets import ConnectionClosed
+
+from . import _common
+from . import _transformers as t
+from . import client
+from . import types
+from ._api_client import ApiClient
+from ._common import get_value_by_path as getv
+from ._common import set_value_by_path as setv
+from .models import _Content_from_mldev
+from .models import _Content_from_vertex
+from .models import _Content_to_mldev
+from .models import _Content_to_vertex
+from .models import _GenerateContentConfig_to_mldev
+from .models import _GenerateContentConfig_to_vertex
+from .models import _SafetySetting_to_mldev
+from .models import _SafetySetting_to_vertex
+from .models import _SpeechConfig_to_mldev
+from .models import _SpeechConfig_to_vertex
+from .models import _Tool_to_mldev
+from .models import _Tool_to_vertex
+
+try:
+  from websockets.asyncio.client import ClientConnection
+  from websockets.asyncio.client import connect
+except ModuleNotFoundError:
+  from websockets.client import ClientConnection
+  from websockets.client import connect
+
+
+_FUNCTION_RESPONSE_REQUIRES_ID = (
+    'FunctionResponse request must have an `id` field from the'
+    ' response of a ToolCall.FunctionCalls in Google AI.'
+)
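+
+
+# Editor's note, an illustrative sketch rather than official usage: on the
+# Google AI (mldev) backend a FunctionResponse answering a tool call must
+# echo the `id` of the corresponding FunctionCall, or `send` raises
+# ValueError with the message above. The payload below is an assumption
+# for illustration only.
+#
+#   call = message.tool_call.function_calls[0]
+#   await session.send(input=types.FunctionResponse(
+#       id=call.id,  # required on Google AI; not required on Vertex AI
+#       name=call.name,
+#       response={'result': 'ok'},
+#   ))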
+
+
+class AsyncSession:
+  """AsyncSession."""
+
+  def __init__(self, api_client: client.ApiClient, websocket: ClientConnection):
+    self._api_client = api_client
+    self._ws = websocket
+
+  async def send(
+      self,
+      *,
+      input: Union[
+          types.ContentListUnion,
+          types.ContentListUnionDict,
+          types.LiveClientContentOrDict,
+          types.LiveClientRealtimeInputOrDict,
+          types.LiveClientToolResponseOrDict,
+          types.FunctionResponseOrDict,
+          Sequence[types.FunctionResponseOrDict],
+      ],
+      end_of_turn: Optional[bool] = False,
+  ):
+    """Send input to the model.
+
+    The method will send the input request to the server.
+
+    Args:
+      input: The input request to the model.
+      end_of_turn: Whether the input is the last message in a turn.
+
+    Example usage:
+
+    .. code-block:: python
+
+      client = genai.Client(api_key=API_KEY)
+
+      async with client.aio.live.connect(model='...') as session:
+        await session.send(input='Hello world!', end_of_turn=True)
+        async for message in session.receive():
+          print(message)
+    """
+    client_message = self._parse_client_message(input, end_of_turn)
+    await self._ws.send(json.dumps(client_message))
+
+  async def receive(self) -> AsyncIterator[types.LiveServerMessage]:
+    """Receive model responses from the server.
+
+    The method will yield the model responses from the server. The returned
+    responses will represent a complete model turn. When the returned message
+    is a function call, the user must call `send` with the function response
+    to continue the turn.
+
+    Yields:
+      The model responses from the server.
+
+    Example usage:
+
+    .. code-block:: python
+
+      client = genai.Client(api_key=API_KEY)
+
+      async with client.aio.live.connect(model='...') as session:
+        await session.send(input='Hello world!', end_of_turn=True)
+        async for message in session.receive():
+          print(message)
+    """
+    # TODO(b/365983264) Handle intermittent issues for the user.
+    while result := await self._receive():
+      if result.server_content and result.server_content.turn_complete:
+        yield result
+        break
+      yield result
+
+  async def start_stream(
+      self, *, stream: AsyncIterator[bytes], mime_type: str
+  ) -> AsyncIterator[types.LiveServerMessage]:
+    """Starts a live session from a data stream.
+
+    The interaction terminates when the input stream is complete.
+    This method will start two async tasks. One task will be used to send the
+    input stream to the model and the other task will be used to receive the
+    responses from the model.
+
+    Args:
+      stream: An async iterator that yields the input data chunks to send to
+        the model.
+      mime_type: The MIME type of the data in the stream.
+
+    Yields:
+      The server response messages, including any audio bytes received from
+      the model.
+
+    Example usage:
+
+    .. code-block:: python
+
+      client = genai.Client(api_key=API_KEY)
+      config = {'response_modalities': ['AUDIO']}
+      async def audio_stream():
+        stream = read_audio()
+        for data in stream:
+          yield data
+      async with client.aio.live.connect(model='...', config=config) as session:
+        async for audio in session.start_stream(stream=audio_stream(),
+                                                mime_type='audio/pcm'):
+          play_audio_chunk(audio.data)
+    """
+    stop_event = asyncio.Event()
+    # Start the send loop. When the stream is complete, stop_event is set.
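+    # Editor's note: the send loop below runs as a background task while
+    # this coroutine polls _receive(); the sub-nanosecond sleeps act as
+    # cooperative yields so the event loop can interleave the two tasks.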
+ asyncio.create_task(self._send_loop(stream, mime_type, stop_event)) + recv_task = None + while not stop_event.is_set(): + try: + recv_task = asyncio.create_task(self._receive()) + await asyncio.wait( + [ + recv_task, + asyncio.create_task(stop_event.wait()), + ], + return_when=asyncio.FIRST_COMPLETED, + ) + if recv_task.done(): + yield recv_task.result() + # Give a chance for the send loop to process requests. + await asyncio.sleep(10**-12) + except ConnectionClosed: + break + if recv_task is not None and not recv_task.done(): + recv_task.cancel() + # Wait for the task to finish (cancelled or not) + try: + await recv_task + except asyncio.CancelledError: + pass + + async def _receive(self) -> types.LiveServerMessage: + parameter_model = types.LiveServerMessage() + raw_response = await self._ws.recv(decode=False) + if raw_response: + try: + response = json.loads(raw_response) + except json.decoder.JSONDecodeError: + raise ValueError(f'Failed to parse response: {raw_response}') + else: + response = {} + if self._api_client.vertexai: + response_dict = self._LiveServerMessage_from_vertex(response) + else: + response_dict = self._LiveServerMessage_from_mldev(response) + + return types.LiveServerMessage._from_response( + response_dict, parameter_model + ) + + async def _send_loop( + self, + data_stream: AsyncIterator[bytes], + mime_type: str, + stop_event: asyncio.Event, + ): + async for data in data_stream: + input = {'data': data, 'mimeType': mime_type} + await self.send(input=input) + # Give a chance for the receive loop to process responses. + await asyncio.sleep(10**-12) + # Give a chance for the receiver to process the last response. + stop_event.set() + + def _LiveServerContent_from_mldev( + self, + from_object: Union[dict, object], + ) -> dict: + to_object = {} + if getv(from_object, ['modelTurn']) is not None: + setv( + to_object, + ['model_turn'], + _Content_from_mldev( + self._api_client, + getv(from_object, ['modelTurn']), + ), + ) + if getv(from_object, ['turnComplete']) is not None: + setv(to_object, ['turn_complete'], getv(from_object, ['turnComplete'])) + if getv(from_object, ['interrupted']) is not None: + setv(to_object, ['interrupted'], getv(from_object, ['interrupted'])) + return to_object + + def _LiveToolCall_from_mldev( + self, + from_object: Union[dict, object], + ) -> dict: + to_object = {} + if getv(from_object, ['functionCalls']) is not None: + setv( + to_object, + ['function_calls'], + getv(from_object, ['functionCalls']), + ) + return to_object + + def _LiveToolCall_from_vertex( + self, + from_object: Union[dict, object], + ) -> dict: + to_object = {} + if getv(from_object, ['functionCalls']) is not None: + setv( + to_object, + ['function_calls'], + getv(from_object, ['functionCalls']), + ) + return to_object + + def _LiveServerMessage_from_mldev( + self, + from_object: Union[dict, object], + ) -> dict: + to_object = {} + if getv(from_object, ['serverContent']) is not None: + setv( + to_object, + ['server_content'], + self._LiveServerContent_from_mldev( + getv(from_object, ['serverContent']) + ), + ) + if getv(from_object, ['toolCall']) is not None: + setv( + to_object, + ['tool_call'], + self._LiveToolCall_from_mldev(getv(from_object, ['toolCall'])), + ) + if getv(from_object, ['toolCallCancellation']) is not None: + setv( + to_object, + ['tool_call_cancellation'], + getv(from_object, ['toolCallCancellation']), + ) + return to_object + + def _LiveServerContent_from_vertex( + self, + from_object: Union[dict, object], + ) -> dict: + to_object = {} + if 
getv(from_object, ['modelTurn']) is not None: + setv( + to_object, + ['model_turn'], + _Content_from_vertex( + self._api_client, + getv(from_object, ['modelTurn']), + ), + ) + if getv(from_object, ['turnComplete']) is not None: + setv(to_object, ['turn_complete'], getv(from_object, ['turnComplete'])) + if getv(from_object, ['interrupted']) is not None: + setv(to_object, ['interrupted'], getv(from_object, ['interrupted'])) + return to_object + + def _LiveServerMessage_from_vertex( + self, + from_object: Union[dict, object], + ) -> dict: + to_object = {} + if getv(from_object, ['serverContent']) is not None: + setv( + to_object, + ['server_content'], + self._LiveServerContent_from_vertex( + getv(from_object, ['serverContent']) + ), + ) + + if getv(from_object, ['toolCall']) is not None: + setv( + to_object, + ['tool_call'], + self._LiveToolCall_from_vertex(getv(from_object, ['toolCall'])), + ) + if getv(from_object, ['toolCallCancellation']) is not None: + setv( + to_object, + ['tool_call_cancellation'], + getv(from_object, ['toolCallCancellation']), + ) + return to_object + + def _parse_client_message( + self, + input: Union[ + types.ContentListUnion, + types.ContentListUnionDict, + types.LiveClientContentOrDict, + types.LiveClientRealtimeInputOrDict, + types.LiveClientRealtimeInputOrDict, + types.LiveClientToolResponseOrDict, + types.FunctionResponseOrDict, + Sequence[types.FunctionResponseOrDict], + ], + end_of_turn: Optional[bool] = False, + ) -> dict: + if isinstance(input, str): + input = [input] + elif isinstance(input, dict) and 'data' in input: + if isinstance(input['data'], bytes): + decoded_data = base64.b64encode(input['data']).decode('utf-8') + input['data'] = decoded_data + input = [input] + elif isinstance(input, types.Blob): + input.data = base64.b64encode(input.data).decode('utf-8') + input = [input] + elif isinstance(input, dict) and 'name' in input and 'response' in input: + # ToolResponse.FunctionResponse + if not (self._api_client.vertexai) and 'id' not in input: + raise ValueError(_FUNCTION_RESPONSE_REQUIRES_ID) + input = [input] + + if isinstance(input, Sequence) and any( + isinstance(c, dict) and 'name' in c and 'response' in c for c in input + ): + # ToolResponse.FunctionResponse + if not (self._api_client.vertexai): + for item in input: + if 'id' not in item: + raise ValueError(_FUNCTION_RESPONSE_REQUIRES_ID) + client_message = {'tool_response': {'function_responses': input}} + elif isinstance(input, Sequence) and any(isinstance(c, str) for c in input): + to_object = {} + if self._api_client.vertexai: + contents = [ + _Content_to_vertex(self._api_client, item, to_object) + for item in t.t_contents(self._api_client, input) + ] + else: + contents = [ + _Content_to_mldev(self._api_client, item, to_object) + for item in t.t_contents(self._api_client, input) + ] + + client_message = { + 'client_content': {'turns': contents, 'turn_complete': end_of_turn} + } + elif isinstance(input, Sequence): + if any((isinstance(b, dict) and 'data' in b) for b in input): + pass + elif any(isinstance(b, types.Blob) for b in input): + input = [b.model_dump(exclude_none=True) for b in input] + else: + raise ValueError( + f'Unsupported input type "{type(input)}" or input content "{input}"' + ) + + client_message = {'realtime_input': {'media_chunks': input}} + + elif isinstance(input, dict) and 'content' in input: + # TODO(b/365983264) Add validation checks for content_update input_dict. 
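+      # Editor's note, a summary of this dispatch: strings and string lists
+      # become `client_content`; dicts/Blobs carrying 'data' become
+      # `realtime_input`; dicts with 'name' and 'response' (function
+      # responses) become `tool_response`; the typed LiveClient* models
+      # below map directly to their corresponding message field.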
+ client_message = {'client_content': input} + elif isinstance(input, types.LiveClientRealtimeInput): + client_message = {'realtime_input': input.model_dump(exclude_none=True)} + if isinstance( + client_message['realtime_input']['media_chunks'][0]['data'], bytes + ): + client_message['realtime_input']['media_chunks'] = [ + { + 'data': base64.b64encode(item['data']).decode('utf-8'), + 'mime_type': item['mime_type'], + } + for item in client_message['realtime_input']['media_chunks'] + ] + + elif isinstance(input, types.LiveClientContent): + client_message = {'client_content': input.model_dump(exclude_none=True)} + elif isinstance(input, types.LiveClientToolResponse): + # ToolResponse.FunctionResponse + if not (self._api_client.vertexai) and not ( + input.function_responses[0].id + ): + raise ValueError(_FUNCTION_RESPONSE_REQUIRES_ID) + client_message = {'tool_response': input.model_dump(exclude_none=True)} + elif isinstance(input, types.FunctionResponse): + if not (self._api_client.vertexai) and not (input.id): + raise ValueError(_FUNCTION_RESPONSE_REQUIRES_ID) + client_message = { + 'tool_response': { + 'function_responses': [input.model_dump(exclude_none=True)] + } + } + elif isinstance(input, Sequence) and isinstance( + input[0], types.FunctionResponse + ): + if not (self._api_client.vertexai) and not (input[0].id): + raise ValueError(_FUNCTION_RESPONSE_REQUIRES_ID) + client_message = { + 'tool_response': { + 'function_responses': [ + c.model_dump(exclude_none=True) for c in input + ] + } + } + else: + raise ValueError( + f'Unsupported input type "{type(input)}" or input content "{input}"' + ) + + return client_message + + async def close(self): + # Close the websocket connection. + await self._ws.close() + + +class AsyncLive(_common.BaseModule): + """AsyncLive.""" + + def _LiveSetup_to_mldev( + self, model: str, config: Optional[types.LiveConnectConfigOrDict] = None + ): + if isinstance(config, types.LiveConnectConfig): + from_object = config.model_dump(exclude_none=True) + else: + from_object = config + + to_object = {} + if getv(from_object, ['generation_config']) is not None: + setv( + to_object, + ['generationConfig'], + _GenerateContentConfig_to_mldev( + self._api_client, + getv(from_object, ['generation_config']), + to_object, + ), + ) + if getv(from_object, ['response_modalities']) is not None: + if getv(to_object, ['generationConfig']) is not None: + to_object['generationConfig']['responseModalities'] = from_object[ + 'response_modalities' + ] + else: + to_object['generationConfig'] = { + 'responseModalities': from_object['response_modalities'] + } + if getv(from_object, ['speech_config']) is not None: + if getv(to_object, ['generationConfig']) is not None: + to_object['generationConfig']['speechConfig'] = _SpeechConfig_to_mldev( + self._api_client, + t.t_speech_config( + self._api_client, getv(from_object, ['speech_config']) + ), + to_object, + ) + else: + to_object['generationConfig'] = { + 'speechConfig': _SpeechConfig_to_mldev( + self._api_client, + t.t_speech_config( + self._api_client, getv(from_object, ['speech_config']) + ), + to_object, + ) + } + + if getv(from_object, ['system_instruction']) is not None: + setv( + to_object, + ['systemInstruction'], + _Content_to_mldev( + self._api_client, + t.t_content( + self._api_client, getv(from_object, ['system_instruction']) + ), + to_object, + ), + ) + if getv(from_object, ['tools']) is not None: + setv( + to_object, + ['tools'], + [ + _Tool_to_mldev(self._api_client, item, to_object) + for item in getv(from_object, ['tools']) + 
], + ) + + return_value = {'setup': {'model': model}} + return_value['setup'].update(to_object) + return return_value + + def _LiveSetup_to_vertex( + self, model: str, config: Optional[types.LiveConnectConfigOrDict] = None + ): + if isinstance(config, types.LiveConnectConfig): + from_object = config.model_dump(exclude_none=True) + else: + from_object = config + + to_object = {} + + if getv(from_object, ['generation_config']) is not None: + setv( + to_object, + ['generationConfig'], + _GenerateContentConfig_to_vertex( + self._api_client, + getv(from_object, ['generation_config']), + to_object, + ), + ) + if getv(from_object, ['response_modalities']) is not None: + if getv(to_object, ['generationConfig']) is not None: + to_object['generationConfig']['responseModalities'] = from_object[ + 'response_modalities' + ] + else: + to_object['generationConfig'] = { + 'responseModalities': from_object['response_modalities'] + } + else: + # Set default to AUDIO to align with MLDev API. + if getv(to_object, ['generationConfig']) is not None: + to_object['generationConfig'].update({'responseModalities': ['AUDIO']}) + else: + to_object.update( + {'generationConfig': {'responseModalities': ['AUDIO']}} + ) + if getv(from_object, ['speech_config']) is not None: + if getv(to_object, ['generationConfig']) is not None: + to_object['generationConfig']['speechConfig'] = _SpeechConfig_to_vertex( + self._api_client, + t.t_speech_config( + self._api_client, getv(from_object, ['speech_config']) + ), + to_object, + ) + else: + to_object['generationConfig'] = { + 'speechConfig': _SpeechConfig_to_vertex( + self._api_client, + t.t_speech_config( + self._api_client, getv(from_object, ['speech_config']) + ), + to_object, + ) + } + if getv(from_object, ['system_instruction']) is not None: + setv( + to_object, + ['systemInstruction'], + _Content_to_vertex( + self._api_client, + t.t_content( + self._api_client, getv(from_object, ['system_instruction']) + ), + to_object, + ), + ) + if getv(from_object, ['tools']) is not None: + setv( + to_object, + ['tools'], + [ + _Tool_to_vertex(self._api_client, item, to_object) + for item in getv(from_object, ['tools']) + ], + ) + + return_value = {'setup': {'model': model}} + return_value['setup'].update(to_object) + return return_value + + @contextlib.asynccontextmanager + async def connect( + self, + *, + model: str, + config: Optional[types.LiveConnectConfigOrDict] = None, + ) -> AsyncSession: + """Connect to the live server. + + Usage: + + .. code-block:: python + + client = genai.Client(api_key=API_KEY) + config = {} + async with client.aio.live.connect(model='...', config=config) as session: + await session.send(input='Hello world!', end_of_turn=True) + async for message in session.receive(): + print(message) + """ + base_url = self._api_client._websocket_base_url() + if self._api_client.api_key: + api_key = self._api_client.api_key + version = self._api_client._http_options['api_version'] + uri = f'{base_url}/ws/google.ai.generativelanguage.{version}.GenerativeService.BidiGenerateContent?key={api_key}' + headers = self._api_client._http_options['headers'] + + transformed_model = t.t_model(self._api_client, model) + request = json.dumps( + self._LiveSetup_to_mldev(model=transformed_model, config=config) + ) + else: + # Get bearer token through Application Default Credentials. 
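+      # Editor's note: this path requires Application Default Credentials to
+      # be configured in the environment, e.g. via
+      # `gcloud auth application-default login` or a
+      # GOOGLE_APPLICATION_CREDENTIALS service-account key file.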
+ creds, _ = google.auth.default( + scopes=['https://www.googleapis.com/auth/cloud-platform'] + ) + + # creds.valid is False, and creds.token is None + # Need to refresh credentials to populate those + auth_req = google.auth.transport.requests.Request() + creds.refresh(auth_req) + bearer_token = creds.token + headers = { + 'Content-Type': 'application/json', + 'Authorization': 'Bearer {}'.format(bearer_token), + } + version = self._api_client._http_options['api_version'] + uri = f'{base_url}/ws/google.cloud.aiplatform.{version}.LlmBidiService/BidiGenerateContent' + location = self._api_client.location + project = self._api_client.project + transformed_model = t.t_model(self._api_client, model) + if transformed_model.startswith('publishers/'): + transformed_model = ( + f'projects/{project}/locations/{location}/' + transformed_model + ) + + request = json.dumps( + self._LiveSetup_to_vertex(model=transformed_model, config=config) + ) + + async with connect(uri, additional_headers=headers) as ws: + await ws.send(request) + logging.info(await ws.recv(decode=False)) + + yield AsyncSession(api_client=self._api_client, websocket=ws) diff --git a/.venv/lib/python3.12/site-packages/google/genai/models.py b/.venv/lib/python3.12/site-packages/google/genai/models.py new file mode 100644 index 00000000..b23428b6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/genai/models.py @@ -0,0 +1,5567 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Code generated by the Google Gen AI SDK generator DO NOT EDIT. + +import logging +from typing import AsyncIterator, Iterator, Optional, Union +from urllib.parse import urlencode +from . import _common +from . import _extra_utils +from . import _transformers as t +from . 
import types +from ._api_client import ApiClient, HttpOptionsDict +from ._common import get_value_by_path as getv +from ._common import set_value_by_path as setv +from .pagers import AsyncPager, Pager + + +def _Part_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['video_metadata']) is not None: + raise ValueError('video_metadata parameter is not supported in Google AI.') + + if getv(from_object, ['thought']) is not None: + setv(to_object, ['thought'], getv(from_object, ['thought'])) + + if getv(from_object, ['code_execution_result']) is not None: + setv( + to_object, + ['codeExecutionResult'], + getv(from_object, ['code_execution_result']), + ) + + if getv(from_object, ['executable_code']) is not None: + setv(to_object, ['executableCode'], getv(from_object, ['executable_code'])) + + if getv(from_object, ['file_data']) is not None: + setv(to_object, ['fileData'], getv(from_object, ['file_data'])) + + if getv(from_object, ['function_call']) is not None: + setv(to_object, ['functionCall'], getv(from_object, ['function_call'])) + + if getv(from_object, ['function_response']) is not None: + setv( + to_object, + ['functionResponse'], + getv(from_object, ['function_response']), + ) + + if getv(from_object, ['inline_data']) is not None: + setv(to_object, ['inlineData'], getv(from_object, ['inline_data'])) + + if getv(from_object, ['text']) is not None: + setv(to_object, ['text'], getv(from_object, ['text'])) + + return to_object + + +def _Part_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['video_metadata']) is not None: + setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata'])) + + if getv(from_object, ['thought']) is not None: + setv(to_object, ['thought'], getv(from_object, ['thought'])) + + if getv(from_object, ['code_execution_result']) is not None: + setv( + to_object, + ['codeExecutionResult'], + getv(from_object, ['code_execution_result']), + ) + + if getv(from_object, ['executable_code']) is not None: + setv(to_object, ['executableCode'], getv(from_object, ['executable_code'])) + + if getv(from_object, ['file_data']) is not None: + setv(to_object, ['fileData'], getv(from_object, ['file_data'])) + + if getv(from_object, ['function_call']) is not None: + setv(to_object, ['functionCall'], getv(from_object, ['function_call'])) + + if getv(from_object, ['function_response']) is not None: + setv( + to_object, + ['functionResponse'], + getv(from_object, ['function_response']), + ) + + if getv(from_object, ['inline_data']) is not None: + setv(to_object, ['inlineData'], getv(from_object, ['inline_data'])) + + if getv(from_object, ['text']) is not None: + setv(to_object, ['text'], getv(from_object, ['text'])) + + return to_object + + +def _Content_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['parts']) is not None: + setv( + to_object, + ['parts'], + [ + _Part_to_mldev(api_client, item, to_object) + for item in getv(from_object, ['parts']) + ], + ) + + if getv(from_object, ['role']) is not None: + setv(to_object, ['role'], getv(from_object, ['role'])) + + return to_object + + +def _Content_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['parts']) is not None: + 
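+    # Editor's note: getv/setv are get_value_by_path/set_value_by_path from
+    # _common; every converter in this module uses them to copy one field at
+    # a time between the snake_case SDK dicts and the camelCase wire dicts.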
setv( + to_object, + ['parts'], + [ + _Part_to_vertex(api_client, item, to_object) + for item in getv(from_object, ['parts']) + ], + ) + + if getv(from_object, ['role']) is not None: + setv(to_object, ['role'], getv(from_object, ['role'])) + + return to_object + + +def _Schema_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['min_items']) is not None: + raise ValueError('min_items parameter is not supported in Google AI.') + + if getv(from_object, ['example']) is not None: + raise ValueError('example parameter is not supported in Google AI.') + + if getv(from_object, ['property_ordering']) is not None: + raise ValueError( + 'property_ordering parameter is not supported in Google AI.' + ) + + if getv(from_object, ['pattern']) is not None: + raise ValueError('pattern parameter is not supported in Google AI.') + + if getv(from_object, ['minimum']) is not None: + raise ValueError('minimum parameter is not supported in Google AI.') + + if getv(from_object, ['default']) is not None: + raise ValueError('default parameter is not supported in Google AI.') + + if getv(from_object, ['any_of']) is not None: + raise ValueError('any_of parameter is not supported in Google AI.') + + if getv(from_object, ['max_length']) is not None: + raise ValueError('max_length parameter is not supported in Google AI.') + + if getv(from_object, ['title']) is not None: + raise ValueError('title parameter is not supported in Google AI.') + + if getv(from_object, ['min_length']) is not None: + raise ValueError('min_length parameter is not supported in Google AI.') + + if getv(from_object, ['min_properties']) is not None: + raise ValueError('min_properties parameter is not supported in Google AI.') + + if getv(from_object, ['max_items']) is not None: + raise ValueError('max_items parameter is not supported in Google AI.') + + if getv(from_object, ['maximum']) is not None: + raise ValueError('maximum parameter is not supported in Google AI.') + + if getv(from_object, ['nullable']) is not None: + raise ValueError('nullable parameter is not supported in Google AI.') + + if getv(from_object, ['max_properties']) is not None: + raise ValueError('max_properties parameter is not supported in Google AI.') + + if getv(from_object, ['type']) is not None: + setv(to_object, ['type'], getv(from_object, ['type'])) + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['enum']) is not None: + setv(to_object, ['enum'], getv(from_object, ['enum'])) + + if getv(from_object, ['format']) is not None: + setv(to_object, ['format'], getv(from_object, ['format'])) + + if getv(from_object, ['items']) is not None: + setv(to_object, ['items'], getv(from_object, ['items'])) + + if getv(from_object, ['properties']) is not None: + setv(to_object, ['properties'], getv(from_object, ['properties'])) + + if getv(from_object, ['required']) is not None: + setv(to_object, ['required'], getv(from_object, ['required'])) + + return to_object + + +def _Schema_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['min_items']) is not None: + setv(to_object, ['minItems'], getv(from_object, ['min_items'])) + + if getv(from_object, ['example']) is not None: + setv(to_object, ['example'], getv(from_object, ['example'])) + + if getv(from_object, ['property_ordering']) is 
not None: + setv( + to_object, + ['propertyOrdering'], + getv(from_object, ['property_ordering']), + ) + + if getv(from_object, ['pattern']) is not None: + setv(to_object, ['pattern'], getv(from_object, ['pattern'])) + + if getv(from_object, ['minimum']) is not None: + setv(to_object, ['minimum'], getv(from_object, ['minimum'])) + + if getv(from_object, ['default']) is not None: + setv(to_object, ['default'], getv(from_object, ['default'])) + + if getv(from_object, ['any_of']) is not None: + setv(to_object, ['anyOf'], getv(from_object, ['any_of'])) + + if getv(from_object, ['max_length']) is not None: + setv(to_object, ['maxLength'], getv(from_object, ['max_length'])) + + if getv(from_object, ['title']) is not None: + setv(to_object, ['title'], getv(from_object, ['title'])) + + if getv(from_object, ['min_length']) is not None: + setv(to_object, ['minLength'], getv(from_object, ['min_length'])) + + if getv(from_object, ['min_properties']) is not None: + setv(to_object, ['minProperties'], getv(from_object, ['min_properties'])) + + if getv(from_object, ['max_items']) is not None: + setv(to_object, ['maxItems'], getv(from_object, ['max_items'])) + + if getv(from_object, ['maximum']) is not None: + setv(to_object, ['maximum'], getv(from_object, ['maximum'])) + + if getv(from_object, ['nullable']) is not None: + setv(to_object, ['nullable'], getv(from_object, ['nullable'])) + + if getv(from_object, ['max_properties']) is not None: + setv(to_object, ['maxProperties'], getv(from_object, ['max_properties'])) + + if getv(from_object, ['type']) is not None: + setv(to_object, ['type'], getv(from_object, ['type'])) + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['enum']) is not None: + setv(to_object, ['enum'], getv(from_object, ['enum'])) + + if getv(from_object, ['format']) is not None: + setv(to_object, ['format'], getv(from_object, ['format'])) + + if getv(from_object, ['items']) is not None: + setv(to_object, ['items'], getv(from_object, ['items'])) + + if getv(from_object, ['properties']) is not None: + setv(to_object, ['properties'], getv(from_object, ['properties'])) + + if getv(from_object, ['required']) is not None: + setv(to_object, ['required'], getv(from_object, ['required'])) + + return to_object + + +def _SafetySetting_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['method']) is not None: + raise ValueError('method parameter is not supported in Google AI.') + + if getv(from_object, ['category']) is not None: + setv(to_object, ['category'], getv(from_object, ['category'])) + + if getv(from_object, ['threshold']) is not None: + setv(to_object, ['threshold'], getv(from_object, ['threshold'])) + + return to_object + + +def _SafetySetting_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['method']) is not None: + setv(to_object, ['method'], getv(from_object, ['method'])) + + if getv(from_object, ['category']) is not None: + setv(to_object, ['category'], getv(from_object, ['category'])) + + if getv(from_object, ['threshold']) is not None: + setv(to_object, ['threshold'], getv(from_object, ['threshold'])) + + return to_object + + +def _FunctionDeclaration_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + 
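+  # Editor's note: the *_to_mldev converters reject fields the Google AI
+  # backend does not accept by raising ValueError (here: `response`),
+  # whereas the *_to_vertex twins convert and pass them through.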
if getv(from_object, ['response']) is not None: + raise ValueError('response parameter is not supported in Google AI.') + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['parameters']) is not None: + setv(to_object, ['parameters'], getv(from_object, ['parameters'])) + + return to_object + + +def _FunctionDeclaration_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['response']) is not None: + setv( + to_object, + ['response'], + _Schema_to_vertex( + api_client, getv(from_object, ['response']), to_object + ), + ) + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['parameters']) is not None: + setv(to_object, ['parameters'], getv(from_object, ['parameters'])) + + return to_object + + +def _GoogleSearch_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _GoogleSearch_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _DynamicRetrievalConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['mode']) is not None: + setv(to_object, ['mode'], getv(from_object, ['mode'])) + + if getv(from_object, ['dynamic_threshold']) is not None: + setv( + to_object, + ['dynamicThreshold'], + getv(from_object, ['dynamic_threshold']), + ) + + return to_object + + +def _DynamicRetrievalConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['mode']) is not None: + setv(to_object, ['mode'], getv(from_object, ['mode'])) + + if getv(from_object, ['dynamic_threshold']) is not None: + setv( + to_object, + ['dynamicThreshold'], + getv(from_object, ['dynamic_threshold']), + ) + + return to_object + + +def _GoogleSearchRetrieval_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['dynamic_retrieval_config']) is not None: + setv( + to_object, + ['dynamicRetrievalConfig'], + _DynamicRetrievalConfig_to_mldev( + api_client, + getv(from_object, ['dynamic_retrieval_config']), + to_object, + ), + ) + + return to_object + + +def _GoogleSearchRetrieval_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['dynamic_retrieval_config']) is not None: + setv( + to_object, + ['dynamicRetrievalConfig'], + _DynamicRetrievalConfig_to_vertex( + api_client, + getv(from_object, ['dynamic_retrieval_config']), + to_object, + ), + ) + + return to_object + + +def _Tool_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['function_declarations']) is not None: + setv( + to_object, + ['functionDeclarations'], + [ + 
_FunctionDeclaration_to_mldev(api_client, item, to_object) + for item in getv(from_object, ['function_declarations']) + ], + ) + + if getv(from_object, ['retrieval']) is not None: + raise ValueError('retrieval parameter is not supported in Google AI.') + + if getv(from_object, ['google_search']) is not None: + setv( + to_object, + ['googleSearch'], + _GoogleSearch_to_mldev( + api_client, getv(from_object, ['google_search']), to_object + ), + ) + + if getv(from_object, ['google_search_retrieval']) is not None: + setv( + to_object, + ['googleSearchRetrieval'], + _GoogleSearchRetrieval_to_mldev( + api_client, + getv(from_object, ['google_search_retrieval']), + to_object, + ), + ) + + if getv(from_object, ['code_execution']) is not None: + setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) + + return to_object + + +def _Tool_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['function_declarations']) is not None: + setv( + to_object, + ['functionDeclarations'], + [ + _FunctionDeclaration_to_vertex(api_client, item, to_object) + for item in getv(from_object, ['function_declarations']) + ], + ) + + if getv(from_object, ['retrieval']) is not None: + setv(to_object, ['retrieval'], getv(from_object, ['retrieval'])) + + if getv(from_object, ['google_search']) is not None: + setv( + to_object, + ['googleSearch'], + _GoogleSearch_to_vertex( + api_client, getv(from_object, ['google_search']), to_object + ), + ) + + if getv(from_object, ['google_search_retrieval']) is not None: + setv( + to_object, + ['googleSearchRetrieval'], + _GoogleSearchRetrieval_to_vertex( + api_client, + getv(from_object, ['google_search_retrieval']), + to_object, + ), + ) + + if getv(from_object, ['code_execution']) is not None: + setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) + + return to_object + + +def _FunctionCallingConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['mode']) is not None: + setv(to_object, ['mode'], getv(from_object, ['mode'])) + + if getv(from_object, ['allowed_function_names']) is not None: + setv( + to_object, + ['allowedFunctionNames'], + getv(from_object, ['allowed_function_names']), + ) + + return to_object + + +def _FunctionCallingConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['mode']) is not None: + setv(to_object, ['mode'], getv(from_object, ['mode'])) + + if getv(from_object, ['allowed_function_names']) is not None: + setv( + to_object, + ['allowedFunctionNames'], + getv(from_object, ['allowed_function_names']), + ) + + return to_object + + +def _ToolConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['function_calling_config']) is not None: + setv( + to_object, + ['functionCallingConfig'], + _FunctionCallingConfig_to_mldev( + api_client, + getv(from_object, ['function_calling_config']), + to_object, + ), + ) + + return to_object + + +def _ToolConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['function_calling_config']) is not None: + setv( + to_object, + ['functionCallingConfig'], + 
_FunctionCallingConfig_to_vertex( + api_client, + getv(from_object, ['function_calling_config']), + to_object, + ), + ) + + return to_object + + +def _PrebuiltVoiceConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['voice_name']) is not None: + setv(to_object, ['voiceName'], getv(from_object, ['voice_name'])) + + return to_object + + +def _PrebuiltVoiceConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['voice_name']) is not None: + setv(to_object, ['voiceName'], getv(from_object, ['voice_name'])) + + return to_object + + +def _VoiceConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['prebuilt_voice_config']) is not None: + setv( + to_object, + ['prebuiltVoiceConfig'], + _PrebuiltVoiceConfig_to_mldev( + api_client, getv(from_object, ['prebuilt_voice_config']), to_object + ), + ) + + return to_object + + +def _VoiceConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['prebuilt_voice_config']) is not None: + setv( + to_object, + ['prebuiltVoiceConfig'], + _PrebuiltVoiceConfig_to_vertex( + api_client, getv(from_object, ['prebuilt_voice_config']), to_object + ), + ) + + return to_object + + +def _SpeechConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['voice_config']) is not None: + setv( + to_object, + ['voiceConfig'], + _VoiceConfig_to_mldev( + api_client, getv(from_object, ['voice_config']), to_object + ), + ) + + return to_object + + +def _SpeechConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['voice_config']) is not None: + setv( + to_object, + ['voiceConfig'], + _VoiceConfig_to_vertex( + api_client, getv(from_object, ['voice_config']), to_object + ), + ) + + return to_object + + +def _ThinkingConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['include_thoughts']) is not None: + setv( + to_object, ['includeThoughts'], getv(from_object, ['include_thoughts']) + ) + + return to_object + + +def _ThinkingConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['include_thoughts']) is not None: + setv( + to_object, ['includeThoughts'], getv(from_object, ['include_thoughts']) + ) + + return to_object + + +def _GenerateContentConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['system_instruction']) is not None: + setv( + parent_object, + ['systemInstruction'], + _Content_to_mldev( + api_client, + t.t_content(api_client, getv(from_object, ['system_instruction'])), + to_object, + ), + ) + + if getv(from_object, ['temperature']) is not None: + setv(to_object, ['temperature'], getv(from_object, ['temperature'])) + + if getv(from_object, ['top_p']) is not None: + setv(to_object, ['topP'], getv(from_object, ['top_p'])) + + if 
getv(from_object, ['top_k']) is not None: + setv(to_object, ['topK'], getv(from_object, ['top_k'])) + + if getv(from_object, ['candidate_count']) is not None: + setv(to_object, ['candidateCount'], getv(from_object, ['candidate_count'])) + + if getv(from_object, ['max_output_tokens']) is not None: + setv( + to_object, ['maxOutputTokens'], getv(from_object, ['max_output_tokens']) + ) + + if getv(from_object, ['stop_sequences']) is not None: + setv(to_object, ['stopSequences'], getv(from_object, ['stop_sequences'])) + + if getv(from_object, ['response_logprobs']) is not None: + setv( + to_object, + ['responseLogprobs'], + getv(from_object, ['response_logprobs']), + ) + + if getv(from_object, ['logprobs']) is not None: + setv(to_object, ['logprobs'], getv(from_object, ['logprobs'])) + + if getv(from_object, ['presence_penalty']) is not None: + setv( + to_object, ['presencePenalty'], getv(from_object, ['presence_penalty']) + ) + + if getv(from_object, ['frequency_penalty']) is not None: + setv( + to_object, + ['frequencyPenalty'], + getv(from_object, ['frequency_penalty']), + ) + + if getv(from_object, ['seed']) is not None: + setv(to_object, ['seed'], getv(from_object, ['seed'])) + + if getv(from_object, ['response_mime_type']) is not None: + setv( + to_object, + ['responseMimeType'], + getv(from_object, ['response_mime_type']), + ) + + if getv(from_object, ['response_schema']) is not None: + setv( + to_object, + ['responseSchema'], + _Schema_to_mldev( + api_client, + t.t_schema(api_client, getv(from_object, ['response_schema'])), + to_object, + ), + ) + + if getv(from_object, ['routing_config']) is not None: + raise ValueError('routing_config parameter is not supported in Google AI.') + + if getv(from_object, ['safety_settings']) is not None: + setv( + parent_object, + ['safetySettings'], + [ + _SafetySetting_to_mldev(api_client, item, to_object) + for item in getv(from_object, ['safety_settings']) + ], + ) + + if getv(from_object, ['tools']) is not None: + setv( + parent_object, + ['tools'], + [ + _Tool_to_mldev(api_client, t.t_tool(api_client, item), to_object) + for item in t.t_tools(api_client, getv(from_object, ['tools'])) + ], + ) + + if getv(from_object, ['tool_config']) is not None: + setv( + parent_object, + ['toolConfig'], + _ToolConfig_to_mldev( + api_client, getv(from_object, ['tool_config']), to_object + ), + ) + + if getv(from_object, ['cached_content']) is not None: + setv( + parent_object, + ['cachedContent'], + t.t_cached_content_name( + api_client, getv(from_object, ['cached_content']) + ), + ) + + if getv(from_object, ['response_modalities']) is not None: + setv( + to_object, + ['responseModalities'], + getv(from_object, ['response_modalities']), + ) + + if getv(from_object, ['media_resolution']) is not None: + raise ValueError( + 'media_resolution parameter is not supported in Google AI.' 
+ ) + + if getv(from_object, ['speech_config']) is not None: + setv( + to_object, + ['speechConfig'], + _SpeechConfig_to_mldev( + api_client, + t.t_speech_config(api_client, getv(from_object, ['speech_config'])), + to_object, + ), + ) + + if getv(from_object, ['audio_timestamp']) is not None: + raise ValueError('audio_timestamp parameter is not supported in Google AI.') + + if getv(from_object, ['thinking_config']) is not None: + setv( + to_object, + ['thinkingConfig'], + _ThinkingConfig_to_mldev( + api_client, getv(from_object, ['thinking_config']), to_object + ), + ) + + return to_object + + +def _GenerateContentConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['system_instruction']) is not None: + setv( + parent_object, + ['systemInstruction'], + _Content_to_vertex( + api_client, + t.t_content(api_client, getv(from_object, ['system_instruction'])), + to_object, + ), + ) + + if getv(from_object, ['temperature']) is not None: + setv(to_object, ['temperature'], getv(from_object, ['temperature'])) + + if getv(from_object, ['top_p']) is not None: + setv(to_object, ['topP'], getv(from_object, ['top_p'])) + + if getv(from_object, ['top_k']) is not None: + setv(to_object, ['topK'], getv(from_object, ['top_k'])) + + if getv(from_object, ['candidate_count']) is not None: + setv(to_object, ['candidateCount'], getv(from_object, ['candidate_count'])) + + if getv(from_object, ['max_output_tokens']) is not None: + setv( + to_object, ['maxOutputTokens'], getv(from_object, ['max_output_tokens']) + ) + + if getv(from_object, ['stop_sequences']) is not None: + setv(to_object, ['stopSequences'], getv(from_object, ['stop_sequences'])) + + if getv(from_object, ['response_logprobs']) is not None: + setv( + to_object, + ['responseLogprobs'], + getv(from_object, ['response_logprobs']), + ) + + if getv(from_object, ['logprobs']) is not None: + setv(to_object, ['logprobs'], getv(from_object, ['logprobs'])) + + if getv(from_object, ['presence_penalty']) is not None: + setv( + to_object, ['presencePenalty'], getv(from_object, ['presence_penalty']) + ) + + if getv(from_object, ['frequency_penalty']) is not None: + setv( + to_object, + ['frequencyPenalty'], + getv(from_object, ['frequency_penalty']), + ) + + if getv(from_object, ['seed']) is not None: + setv(to_object, ['seed'], getv(from_object, ['seed'])) + + if getv(from_object, ['response_mime_type']) is not None: + setv( + to_object, + ['responseMimeType'], + getv(from_object, ['response_mime_type']), + ) + + if getv(from_object, ['response_schema']) is not None: + setv( + to_object, + ['responseSchema'], + _Schema_to_vertex( + api_client, + t.t_schema(api_client, getv(from_object, ['response_schema'])), + to_object, + ), + ) + + if getv(from_object, ['routing_config']) is not None: + setv(to_object, ['routingConfig'], getv(from_object, ['routing_config'])) + + if getv(from_object, ['safety_settings']) is not None: + setv( + parent_object, + ['safetySettings'], + [ + _SafetySetting_to_vertex(api_client, item, to_object) + for item in getv(from_object, ['safety_settings']) + ], + ) + + if getv(from_object, ['tools']) is not None: + setv( + parent_object, + ['tools'], + [ + _Tool_to_vertex(api_client, t.t_tool(api_client, item), to_object) + for item in t.t_tools(api_client, getv(from_object, ['tools'])) + ], + ) + + if getv(from_object, ['tool_config']) is not None: + setv( + parent_object, + ['toolConfig'], + _ToolConfig_to_vertex( + api_client, 
getv(from_object, ['tool_config']), to_object + ), + ) + + if getv(from_object, ['cached_content']) is not None: + setv( + parent_object, + ['cachedContent'], + t.t_cached_content_name( + api_client, getv(from_object, ['cached_content']) + ), + ) + + if getv(from_object, ['response_modalities']) is not None: + setv( + to_object, + ['responseModalities'], + getv(from_object, ['response_modalities']), + ) + + if getv(from_object, ['media_resolution']) is not None: + setv( + to_object, ['mediaResolution'], getv(from_object, ['media_resolution']) + ) + + if getv(from_object, ['speech_config']) is not None: + setv( + to_object, + ['speechConfig'], + _SpeechConfig_to_vertex( + api_client, + t.t_speech_config(api_client, getv(from_object, ['speech_config'])), + to_object, + ), + ) + + if getv(from_object, ['audio_timestamp']) is not None: + setv(to_object, ['audioTimestamp'], getv(from_object, ['audio_timestamp'])) + + if getv(from_object, ['thinking_config']) is not None: + setv( + to_object, + ['thinkingConfig'], + _ThinkingConfig_to_vertex( + api_client, getv(from_object, ['thinking_config']), to_object + ), + ) + + return to_object + + +def _GenerateContentParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['contents']) is not None: + setv( + to_object, + ['contents'], + [ + _Content_to_mldev(api_client, item, to_object) + for item in t.t_contents( + api_client, getv(from_object, ['contents']) + ) + ], + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['generationConfig'], + _GenerateContentConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _GenerateContentParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['contents']) is not None: + setv( + to_object, + ['contents'], + [ + _Content_to_vertex(api_client, item, to_object) + for item in t.t_contents( + api_client, getv(from_object, ['contents']) + ) + ], + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['generationConfig'], + _GenerateContentConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _EmbedContentConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['task_type']) is not None: + setv( + parent_object, + ['requests[]', 'taskType'], + getv(from_object, ['task_type']), + ) + + if getv(from_object, ['title']) is not None: + setv(parent_object, ['requests[]', 'title'], getv(from_object, ['title'])) + + if getv(from_object, ['output_dimensionality']) is not None: + setv( + parent_object, + ['requests[]', 'outputDimensionality'], + getv(from_object, ['output_dimensionality']), + ) + + if getv(from_object, ['mime_type']) is not None: + raise ValueError('mime_type parameter is not supported in Google AI.') + + if 
getv(from_object, ['auto_truncate']) is not None: + raise ValueError('auto_truncate parameter is not supported in Google AI.') + + return to_object + + +def _EmbedContentConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['task_type']) is not None: + setv( + parent_object, + ['instances[]', 'task_type'], + getv(from_object, ['task_type']), + ) + + if getv(from_object, ['title']) is not None: + setv(parent_object, ['instances[]', 'title'], getv(from_object, ['title'])) + + if getv(from_object, ['output_dimensionality']) is not None: + setv( + parent_object, + ['parameters', 'outputDimensionality'], + getv(from_object, ['output_dimensionality']), + ) + + if getv(from_object, ['mime_type']) is not None: + setv( + parent_object, + ['instances[]', 'mimeType'], + getv(from_object, ['mime_type']), + ) + + if getv(from_object, ['auto_truncate']) is not None: + setv( + parent_object, + ['parameters', 'autoTruncate'], + getv(from_object, ['auto_truncate']), + ) + + return to_object + + +def _EmbedContentParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['contents']) is not None: + setv( + to_object, + ['requests[]', 'content'], + t.t_contents_for_embed(api_client, getv(from_object, ['contents'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _EmbedContentConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + setv( + to_object, + ['requests[]', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + return to_object + + +def _EmbedContentParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['contents']) is not None: + setv( + to_object, + ['instances[]', 'content'], + t.t_contents_for_embed(api_client, getv(from_object, ['contents'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _EmbedContentConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _GenerateImageConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['output_gcs_uri']) is not None: + raise ValueError('output_gcs_uri parameter is not supported in Google AI.') + + if getv(from_object, ['negative_prompt']) is not None: + setv( + parent_object, + ['parameters', 'negativePrompt'], + getv(from_object, ['negative_prompt']), + ) + + if getv(from_object, ['number_of_images']) is not None: + setv( + parent_object, + ['parameters', 'sampleCount'], + getv(from_object, ['number_of_images']), + ) + + if getv(from_object, ['guidance_scale']) is not None: + setv( + parent_object, + 
['parameters', 'guidanceScale'], + getv(from_object, ['guidance_scale']), + ) + + if getv(from_object, ['seed']) is not None: + raise ValueError('seed parameter is not supported in Google AI.') + + if getv(from_object, ['safety_filter_level']) is not None: + setv( + parent_object, + ['parameters', 'safetySetting'], + getv(from_object, ['safety_filter_level']), + ) + + if getv(from_object, ['person_generation']) is not None: + setv( + parent_object, + ['parameters', 'personGeneration'], + getv(from_object, ['person_generation']), + ) + + if getv(from_object, ['include_safety_attributes']) is not None: + setv( + parent_object, + ['parameters', 'includeSafetyAttributes'], + getv(from_object, ['include_safety_attributes']), + ) + + if getv(from_object, ['include_rai_reason']) is not None: + setv( + parent_object, + ['parameters', 'includeRaiReason'], + getv(from_object, ['include_rai_reason']), + ) + + if getv(from_object, ['language']) is not None: + setv( + parent_object, + ['parameters', 'language'], + getv(from_object, ['language']), + ) + + if getv(from_object, ['output_mime_type']) is not None: + setv( + parent_object, + ['parameters', 'outputOptions', 'mimeType'], + getv(from_object, ['output_mime_type']), + ) + + if getv(from_object, ['output_compression_quality']) is not None: + setv( + parent_object, + ['parameters', 'outputOptions', 'compressionQuality'], + getv(from_object, ['output_compression_quality']), + ) + + if getv(from_object, ['add_watermark']) is not None: + raise ValueError('add_watermark parameter is not supported in Google AI.') + + if getv(from_object, ['aspect_ratio']) is not None: + setv( + parent_object, + ['parameters', 'aspectRatio'], + getv(from_object, ['aspect_ratio']), + ) + + return to_object + + +def _GenerateImageConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['output_gcs_uri']) is not None: + setv( + parent_object, + ['parameters', 'storageUri'], + getv(from_object, ['output_gcs_uri']), + ) + + if getv(from_object, ['negative_prompt']) is not None: + setv( + parent_object, + ['parameters', 'negativePrompt'], + getv(from_object, ['negative_prompt']), + ) + + if getv(from_object, ['number_of_images']) is not None: + setv( + parent_object, + ['parameters', 'sampleCount'], + getv(from_object, ['number_of_images']), + ) + + if getv(from_object, ['guidance_scale']) is not None: + setv( + parent_object, + ['parameters', 'guidanceScale'], + getv(from_object, ['guidance_scale']), + ) + + if getv(from_object, ['seed']) is not None: + setv(parent_object, ['parameters', 'seed'], getv(from_object, ['seed'])) + + if getv(from_object, ['safety_filter_level']) is not None: + setv( + parent_object, + ['parameters', 'safetySetting'], + getv(from_object, ['safety_filter_level']), + ) + + if getv(from_object, ['person_generation']) is not None: + setv( + parent_object, + ['parameters', 'personGeneration'], + getv(from_object, ['person_generation']), + ) + + if getv(from_object, ['include_safety_attributes']) is not None: + setv( + parent_object, + ['parameters', 'includeSafetyAttributes'], + getv(from_object, ['include_safety_attributes']), + ) + + if getv(from_object, ['include_rai_reason']) is not None: + setv( + parent_object, + ['parameters', 'includeRaiReason'], + getv(from_object, ['include_rai_reason']), + ) + + if 
getv(from_object, ['language']) is not None: + setv( + parent_object, + ['parameters', 'language'], + getv(from_object, ['language']), + ) + + if getv(from_object, ['output_mime_type']) is not None: + setv( + parent_object, + ['parameters', 'outputOptions', 'mimeType'], + getv(from_object, ['output_mime_type']), + ) + + if getv(from_object, ['output_compression_quality']) is not None: + setv( + parent_object, + ['parameters', 'outputOptions', 'compressionQuality'], + getv(from_object, ['output_compression_quality']), + ) + + if getv(from_object, ['add_watermark']) is not None: + setv( + parent_object, + ['parameters', 'addWatermark'], + getv(from_object, ['add_watermark']), + ) + + if getv(from_object, ['aspect_ratio']) is not None: + setv( + parent_object, + ['parameters', 'aspectRatio'], + getv(from_object, ['aspect_ratio']), + ) + + return to_object + + +def _GenerateImageParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['prompt']) is not None: + setv(to_object, ['instances', 'prompt'], getv(from_object, ['prompt'])) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _GenerateImageConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _GenerateImageParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['prompt']) is not None: + setv(to_object, ['instances', 'prompt'], getv(from_object, ['prompt'])) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _GenerateImageConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _Image_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['gcs_uri']) is not None: + raise ValueError('gcs_uri parameter is not supported in Google AI.') + + if getv(from_object, ['image_bytes']) is not None: + setv( + to_object, + ['bytesBase64Encoded'], + t.t_bytes(api_client, getv(from_object, ['image_bytes'])), + ) + + return to_object + + +def _Image_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['gcs_uri']) is not None: + setv(to_object, ['gcsUri'], getv(from_object, ['gcs_uri'])) + + if getv(from_object, ['image_bytes']) is not None: + setv( + to_object, + ['bytesBase64Encoded'], + t.t_bytes(api_client, getv(from_object, ['image_bytes'])), + ) + + return to_object + + +def _MaskReferenceConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['mask_mode']) is not None: + raise ValueError('mask_mode parameter is not supported in Google AI.') + + if getv(from_object, ['segmentation_classes']) is not None: + raise ValueError( + 'segmentation_classes parameter is not supported in Google AI.' 
+ ) + + if getv(from_object, ['mask_dilation']) is not None: + raise ValueError('mask_dilation parameter is not supported in Google AI.') + + return to_object + + +def _MaskReferenceConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['mask_mode']) is not None: + setv(to_object, ['maskMode'], getv(from_object, ['mask_mode'])) + + if getv(from_object, ['segmentation_classes']) is not None: + setv( + to_object, ['maskClasses'], getv(from_object, ['segmentation_classes']) + ) + + if getv(from_object, ['mask_dilation']) is not None: + setv(to_object, ['dilation'], getv(from_object, ['mask_dilation'])) + + return to_object + + +def _ControlReferenceConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['control_type']) is not None: + raise ValueError('control_type parameter is not supported in Google AI.') + + if getv(from_object, ['enable_control_image_computation']) is not None: + raise ValueError( + 'enable_control_image_computation parameter is not supported in' + ' Google AI.' + ) + + return to_object + + +def _ControlReferenceConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['control_type']) is not None: + setv(to_object, ['controlType'], getv(from_object, ['control_type'])) + + if getv(from_object, ['enable_control_image_computation']) is not None: + setv( + to_object, + ['computeControl'], + getv(from_object, ['enable_control_image_computation']), + ) + + return to_object + + +def _StyleReferenceConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['style_description']) is not None: + raise ValueError( + 'style_description parameter is not supported in Google AI.' + ) + + return to_object + + +def _StyleReferenceConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['style_description']) is not None: + setv( + to_object, + ['styleDescription'], + getv(from_object, ['style_description']), + ) + + return to_object + + +def _SubjectReferenceConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['subject_type']) is not None: + raise ValueError('subject_type parameter is not supported in Google AI.') + + if getv(from_object, ['subject_description']) is not None: + raise ValueError( + 'subject_description parameter is not supported in Google AI.' 
+ ) + + return to_object + + +def _SubjectReferenceConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['subject_type']) is not None: + setv(to_object, ['subjectType'], getv(from_object, ['subject_type'])) + + if getv(from_object, ['subject_description']) is not None: + setv( + to_object, + ['subjectDescription'], + getv(from_object, ['subject_description']), + ) + + return to_object + + +def _ReferenceImageAPI_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['reference_image']) is not None: + raise ValueError('reference_image parameter is not supported in Google AI.') + + if getv(from_object, ['reference_id']) is not None: + raise ValueError('reference_id parameter is not supported in Google AI.') + + if getv(from_object, ['reference_type']) is not None: + raise ValueError('reference_type parameter is not supported in Google AI.') + + if getv(from_object, ['mask_image_config']) is not None: + raise ValueError( + 'mask_image_config parameter is not supported in Google AI.' + ) + + if getv(from_object, ['control_image_config']) is not None: + raise ValueError( + 'control_image_config parameter is not supported in Google AI.' + ) + + if getv(from_object, ['style_image_config']) is not None: + raise ValueError( + 'style_image_config parameter is not supported in Google AI.' + ) + + if getv(from_object, ['subject_image_config']) is not None: + raise ValueError( + 'subject_image_config parameter is not supported in Google AI.' + ) + + return to_object + + +def _ReferenceImageAPI_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['reference_image']) is not None: + setv( + to_object, + ['referenceImage'], + _Image_to_vertex( + api_client, getv(from_object, ['reference_image']), to_object + ), + ) + + if getv(from_object, ['reference_id']) is not None: + setv(to_object, ['referenceId'], getv(from_object, ['reference_id'])) + + if getv(from_object, ['reference_type']) is not None: + setv(to_object, ['referenceType'], getv(from_object, ['reference_type'])) + + if getv(from_object, ['mask_image_config']) is not None: + setv( + to_object, + ['maskImageConfig'], + _MaskReferenceConfig_to_vertex( + api_client, getv(from_object, ['mask_image_config']), to_object + ), + ) + + if getv(from_object, ['control_image_config']) is not None: + setv( + to_object, + ['controlImageConfig'], + _ControlReferenceConfig_to_vertex( + api_client, getv(from_object, ['control_image_config']), to_object + ), + ) + + if getv(from_object, ['style_image_config']) is not None: + setv( + to_object, + ['styleImageConfig'], + _StyleReferenceConfig_to_vertex( + api_client, getv(from_object, ['style_image_config']), to_object + ), + ) + + if getv(from_object, ['subject_image_config']) is not None: + setv( + to_object, + ['subjectImageConfig'], + _SubjectReferenceConfig_to_vertex( + api_client, getv(from_object, ['subject_image_config']), to_object + ), + ) + + return to_object + + +def _EditImageConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['output_gcs_uri']) is not None: + raise 
ValueError('output_gcs_uri parameter is not supported in Google AI.') + + if getv(from_object, ['negative_prompt']) is not None: + setv( + parent_object, + ['parameters', 'negativePrompt'], + getv(from_object, ['negative_prompt']), + ) + + if getv(from_object, ['number_of_images']) is not None: + setv( + parent_object, + ['parameters', 'sampleCount'], + getv(from_object, ['number_of_images']), + ) + + if getv(from_object, ['guidance_scale']) is not None: + setv( + parent_object, + ['parameters', 'guidanceScale'], + getv(from_object, ['guidance_scale']), + ) + + if getv(from_object, ['seed']) is not None: + raise ValueError('seed parameter is not supported in Google AI.') + + if getv(from_object, ['safety_filter_level']) is not None: + setv( + parent_object, + ['parameters', 'safetySetting'], + getv(from_object, ['safety_filter_level']), + ) + + if getv(from_object, ['person_generation']) is not None: + setv( + parent_object, + ['parameters', 'personGeneration'], + getv(from_object, ['person_generation']), + ) + + if getv(from_object, ['include_safety_attributes']) is not None: + setv( + parent_object, + ['parameters', 'includeSafetyAttributes'], + getv(from_object, ['include_safety_attributes']), + ) + + if getv(from_object, ['include_rai_reason']) is not None: + setv( + parent_object, + ['parameters', 'includeRaiReason'], + getv(from_object, ['include_rai_reason']), + ) + + if getv(from_object, ['language']) is not None: + setv( + parent_object, + ['parameters', 'language'], + getv(from_object, ['language']), + ) + + if getv(from_object, ['output_mime_type']) is not None: + setv( + parent_object, + ['parameters', 'outputOptions', 'mimeType'], + getv(from_object, ['output_mime_type']), + ) + + if getv(from_object, ['output_compression_quality']) is not None: + setv( + parent_object, + ['parameters', 'outputOptions', 'compressionQuality'], + getv(from_object, ['output_compression_quality']), + ) + + if getv(from_object, ['edit_mode']) is not None: + setv( + parent_object, + ['parameters', 'editMode'], + getv(from_object, ['edit_mode']), + ) + + return to_object + + +def _EditImageConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['output_gcs_uri']) is not None: + setv( + parent_object, + ['parameters', 'storageUri'], + getv(from_object, ['output_gcs_uri']), + ) + + if getv(from_object, ['negative_prompt']) is not None: + setv( + parent_object, + ['parameters', 'negativePrompt'], + getv(from_object, ['negative_prompt']), + ) + + if getv(from_object, ['number_of_images']) is not None: + setv( + parent_object, + ['parameters', 'sampleCount'], + getv(from_object, ['number_of_images']), + ) + + if getv(from_object, ['guidance_scale']) is not None: + setv( + parent_object, + ['parameters', 'guidanceScale'], + getv(from_object, ['guidance_scale']), + ) + + if getv(from_object, ['seed']) is not None: + setv(parent_object, ['parameters', 'seed'], getv(from_object, ['seed'])) + + if getv(from_object, ['safety_filter_level']) is not None: + setv( + parent_object, + ['parameters', 'safetySetting'], + getv(from_object, ['safety_filter_level']), + ) + + if getv(from_object, ['person_generation']) is not None: + setv( + parent_object, + ['parameters', 'personGeneration'], + getv(from_object, ['person_generation']), + ) + + if getv(from_object, 
['include_safety_attributes']) is not None: + setv( + parent_object, + ['parameters', 'includeSafetyAttributes'], + getv(from_object, ['include_safety_attributes']), + ) + + if getv(from_object, ['include_rai_reason']) is not None: + setv( + parent_object, + ['parameters', 'includeRaiReason'], + getv(from_object, ['include_rai_reason']), + ) + + if getv(from_object, ['language']) is not None: + setv( + parent_object, + ['parameters', 'language'], + getv(from_object, ['language']), + ) + + if getv(from_object, ['output_mime_type']) is not None: + setv( + parent_object, + ['parameters', 'outputOptions', 'mimeType'], + getv(from_object, ['output_mime_type']), + ) + + if getv(from_object, ['output_compression_quality']) is not None: + setv( + parent_object, + ['parameters', 'outputOptions', 'compressionQuality'], + getv(from_object, ['output_compression_quality']), + ) + + if getv(from_object, ['edit_mode']) is not None: + setv( + parent_object, + ['parameters', 'editMode'], + getv(from_object, ['edit_mode']), + ) + + return to_object + + +def _EditImageParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['prompt']) is not None: + setv(to_object, ['instances', 'prompt'], getv(from_object, ['prompt'])) + + if getv(from_object, ['reference_images']) is not None: + setv( + to_object, + ['instances', 'referenceImages'], + [ + _ReferenceImageAPI_to_mldev(api_client, item, to_object) + for item in getv(from_object, ['reference_images']) + ], + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _EditImageConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _EditImageParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['prompt']) is not None: + setv(to_object, ['instances', 'prompt'], getv(from_object, ['prompt'])) + + if getv(from_object, ['reference_images']) is not None: + setv( + to_object, + ['instances', 'referenceImages'], + [ + _ReferenceImageAPI_to_vertex(api_client, item, to_object) + for item in getv(from_object, ['reference_images']) + ], + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _EditImageConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _UpscaleImageAPIConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['include_rai_reason']) is not None: + setv( + parent_object, + ['parameters', 'includeRaiReason'], + getv(from_object, ['include_rai_reason']), + ) + + if getv(from_object, ['output_mime_type']) is not None: + setv( + parent_object, + ['parameters', 'outputOptions', 'mimeType'], + getv(from_object, ['output_mime_type']), + ) + + if getv(from_object, ['output_compression_quality']) is not None: + setv( + parent_object, + ['parameters', 
'outputOptions', 'compressionQuality'], + getv(from_object, ['output_compression_quality']), + ) + + if getv(from_object, ['number_of_images']) is not None: + setv( + parent_object, + ['parameters', 'sampleCount'], + getv(from_object, ['number_of_images']), + ) + + if getv(from_object, ['mode']) is not None: + setv(parent_object, ['parameters', 'mode'], getv(from_object, ['mode'])) + + return to_object + + +def _UpscaleImageAPIConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['include_rai_reason']) is not None: + setv( + parent_object, + ['parameters', 'includeRaiReason'], + getv(from_object, ['include_rai_reason']), + ) + + if getv(from_object, ['output_mime_type']) is not None: + setv( + parent_object, + ['parameters', 'outputOptions', 'mimeType'], + getv(from_object, ['output_mime_type']), + ) + + if getv(from_object, ['output_compression_quality']) is not None: + setv( + parent_object, + ['parameters', 'outputOptions', 'compressionQuality'], + getv(from_object, ['output_compression_quality']), + ) + + if getv(from_object, ['number_of_images']) is not None: + setv( + parent_object, + ['parameters', 'sampleCount'], + getv(from_object, ['number_of_images']), + ) + + if getv(from_object, ['mode']) is not None: + setv(parent_object, ['parameters', 'mode'], getv(from_object, ['mode'])) + + return to_object + + +def _UpscaleImageAPIParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['image']) is not None: + setv( + to_object, + ['instances', 'image'], + _Image_to_mldev(api_client, getv(from_object, ['image']), to_object), + ) + + if getv(from_object, ['upscale_factor']) is not None: + setv( + to_object, + ['parameters', 'upscaleConfig', 'upscaleFactor'], + getv(from_object, ['upscale_factor']), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _UpscaleImageAPIConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _UpscaleImageAPIParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['image']) is not None: + setv( + to_object, + ['instances', 'image'], + _Image_to_vertex(api_client, getv(from_object, ['image']), to_object), + ) + + if getv(from_object, ['upscale_factor']) is not None: + setv( + to_object, + ['parameters', 'upscaleConfig', 'upscaleFactor'], + getv(from_object, ['upscale_factor']), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _UpscaleImageAPIConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _GetModelParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'name'], + 
t.t_model(api_client, getv(from_object, ['model'])), + ) + + return to_object + + +def _GetModelParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + return to_object + + +def _ListModelsConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['page_size']) is not None: + setv( + parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size']) + ) + + if getv(from_object, ['page_token']) is not None: + setv( + parent_object, + ['_query', 'pageToken'], + getv(from_object, ['page_token']), + ) + + if getv(from_object, ['filter']) is not None: + setv(parent_object, ['_query', 'filter'], getv(from_object, ['filter'])) + + if getv(from_object, ['query_base']) is not None: + setv( + parent_object, + ['_url', 'models_url'], + t.t_models_url(api_client, getv(from_object, ['query_base'])), + ) + + return to_object + + +def _ListModelsConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['page_size']) is not None: + setv( + parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size']) + ) + + if getv(from_object, ['page_token']) is not None: + setv( + parent_object, + ['_query', 'pageToken'], + getv(from_object, ['page_token']), + ) + + if getv(from_object, ['filter']) is not None: + setv(parent_object, ['_query', 'filter'], getv(from_object, ['filter'])) + + if getv(from_object, ['query_base']) is not None: + setv( + parent_object, + ['_url', 'models_url'], + t.t_models_url(api_client, getv(from_object, ['query_base'])), + ) + + return to_object + + +def _ListModelsParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _ListModelsConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _ListModelsParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _ListModelsConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _UpdateModelConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['display_name']) is not None: + setv(parent_object, ['displayName'], getv(from_object, ['display_name'])) + + if getv(from_object, ['description']) is not None: + setv(parent_object, ['description'], getv(from_object, ['description'])) + + return to_object + + +def _UpdateModelConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['display_name']) is not 
None: + setv(parent_object, ['displayName'], getv(from_object, ['display_name'])) + + if getv(from_object, ['description']) is not None: + setv(parent_object, ['description'], getv(from_object, ['description'])) + + return to_object + + +def _UpdateModelParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _UpdateModelConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _UpdateModelParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _UpdateModelConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _DeleteModelParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + return to_object + + +def _DeleteModelParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'name'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + return to_object + + +def _CountTokensConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['system_instruction']) is not None: + setv( + parent_object, + ['generateContentRequest', 'systemInstruction'], + _Content_to_mldev( + api_client, + t.t_content(api_client, getv(from_object, ['system_instruction'])), + to_object, + ), + ) + + if getv(from_object, ['tools']) is not None: + setv( + parent_object, + ['generateContentRequest', 'tools'], + [ + _Tool_to_mldev(api_client, item, to_object) + for item in getv(from_object, ['tools']) + ], + ) + + if getv(from_object, ['generation_config']) is not None: + raise ValueError( + 'generation_config parameter is not supported in Google AI.' 
+ ) + + return to_object + + +def _CountTokensConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['system_instruction']) is not None: + setv( + parent_object, + ['systemInstruction'], + _Content_to_vertex( + api_client, + t.t_content(api_client, getv(from_object, ['system_instruction'])), + to_object, + ), + ) + + if getv(from_object, ['tools']) is not None: + setv( + parent_object, + ['tools'], + [ + _Tool_to_vertex(api_client, item, to_object) + for item in getv(from_object, ['tools']) + ], + ) + + if getv(from_object, ['generation_config']) is not None: + setv( + parent_object, + ['generationConfig'], + getv(from_object, ['generation_config']), + ) + + return to_object + + +def _CountTokensParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['contents']) is not None: + setv( + to_object, + ['contents'], + [ + _Content_to_mldev(api_client, item, to_object) + for item in t.t_contents( + api_client, getv(from_object, ['contents']) + ) + ], + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CountTokensConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _CountTokensParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['contents']) is not None: + setv( + to_object, + ['contents'], + [ + _Content_to_vertex(api_client, item, to_object) + for item in t.t_contents( + api_client, getv(from_object, ['contents']) + ) + ], + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CountTokensConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _ComputeTokensConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _ComputeTokensConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _ComputeTokensParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['contents']) is not None: + raise ValueError('contents parameter is not supported in Google AI.') + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + 
_ComputeTokensConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _ComputeTokensParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv( + to_object, + ['_url', 'model'], + t.t_model(api_client, getv(from_object, ['model'])), + ) + + if getv(from_object, ['contents']) is not None: + setv( + to_object, + ['contents'], + [ + _Content_to_vertex(api_client, item, to_object) + for item in t.t_contents( + api_client, getv(from_object, ['contents']) + ) + ], + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _ComputeTokensConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _Part_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + if getv(from_object, ['thought']) is not None: + setv(to_object, ['thought'], getv(from_object, ['thought'])) + + if getv(from_object, ['codeExecutionResult']) is not None: + setv( + to_object, + ['code_execution_result'], + getv(from_object, ['codeExecutionResult']), + ) + + if getv(from_object, ['executableCode']) is not None: + setv(to_object, ['executable_code'], getv(from_object, ['executableCode'])) + + if getv(from_object, ['fileData']) is not None: + setv(to_object, ['file_data'], getv(from_object, ['fileData'])) + + if getv(from_object, ['functionCall']) is not None: + setv(to_object, ['function_call'], getv(from_object, ['functionCall'])) + + if getv(from_object, ['functionResponse']) is not None: + setv( + to_object, + ['function_response'], + getv(from_object, ['functionResponse']), + ) + + if getv(from_object, ['inlineData']) is not None: + setv(to_object, ['inline_data'], getv(from_object, ['inlineData'])) + + if getv(from_object, ['text']) is not None: + setv(to_object, ['text'], getv(from_object, ['text'])) + + return to_object + + +def _Part_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['videoMetadata']) is not None: + setv(to_object, ['video_metadata'], getv(from_object, ['videoMetadata'])) + + if getv(from_object, ['thought']) is not None: + setv(to_object, ['thought'], getv(from_object, ['thought'])) + + if getv(from_object, ['codeExecutionResult']) is not None: + setv( + to_object, + ['code_execution_result'], + getv(from_object, ['codeExecutionResult']), + ) + + if getv(from_object, ['executableCode']) is not None: + setv(to_object, ['executable_code'], getv(from_object, ['executableCode'])) + + if getv(from_object, ['fileData']) is not None: + setv(to_object, ['file_data'], getv(from_object, ['fileData'])) + + if getv(from_object, ['functionCall']) is not None: + setv(to_object, ['function_call'], getv(from_object, ['functionCall'])) + + if getv(from_object, ['functionResponse']) is not None: + setv( + to_object, + ['function_response'], + getv(from_object, ['functionResponse']), + ) + + if getv(from_object, ['inlineData']) is not None: + setv(to_object, ['inline_data'], getv(from_object, ['inlineData'])) + + if getv(from_object, ['text']) is not None: + setv(to_object, ['text'], getv(from_object, ['text'])) + + return to_object + + +def _Content_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + 
to_object = {} + if getv(from_object, ['parts']) is not None: + setv( + to_object, + ['parts'], + [ + _Part_from_mldev(api_client, item, to_object) + for item in getv(from_object, ['parts']) + ], + ) + + if getv(from_object, ['role']) is not None: + setv(to_object, ['role'], getv(from_object, ['role'])) + + return to_object + + +def _Content_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['parts']) is not None: + setv( + to_object, + ['parts'], + [ + _Part_from_vertex(api_client, item, to_object) + for item in getv(from_object, ['parts']) + ], + ) + + if getv(from_object, ['role']) is not None: + setv(to_object, ['role'], getv(from_object, ['role'])) + + return to_object + + +def _CitationMetadata_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['citationSources']) is not None: + setv(to_object, ['citations'], getv(from_object, ['citationSources'])) + + return to_object + + +def _CitationMetadata_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['citations']) is not None: + setv(to_object, ['citations'], getv(from_object, ['citations'])) + + return to_object + + +def _Candidate_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['content']) is not None: + setv( + to_object, + ['content'], + _Content_from_mldev( + api_client, getv(from_object, ['content']), to_object + ), + ) + + if getv(from_object, ['citationMetadata']) is not None: + setv( + to_object, + ['citation_metadata'], + _CitationMetadata_from_mldev( + api_client, getv(from_object, ['citationMetadata']), to_object + ), + ) + + if getv(from_object, ['tokenCount']) is not None: + setv(to_object, ['token_count'], getv(from_object, ['tokenCount'])) + + if getv(from_object, ['avgLogprobs']) is not None: + setv(to_object, ['avg_logprobs'], getv(from_object, ['avgLogprobs'])) + + if getv(from_object, ['finishReason']) is not None: + setv(to_object, ['finish_reason'], getv(from_object, ['finishReason'])) + + if getv(from_object, ['groundingMetadata']) is not None: + setv( + to_object, + ['grounding_metadata'], + getv(from_object, ['groundingMetadata']), + ) + + if getv(from_object, ['index']) is not None: + setv(to_object, ['index'], getv(from_object, ['index'])) + + if getv(from_object, ['logprobsResult']) is not None: + setv(to_object, ['logprobs_result'], getv(from_object, ['logprobsResult'])) + + if getv(from_object, ['safetyRatings']) is not None: + setv(to_object, ['safety_ratings'], getv(from_object, ['safetyRatings'])) + + return to_object + + +def _Candidate_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['content']) is not None: + setv( + to_object, + ['content'], + _Content_from_vertex( + api_client, getv(from_object, ['content']), to_object + ), + ) + + if getv(from_object, ['citationMetadata']) is not None: + setv( + to_object, + ['citation_metadata'], + _CitationMetadata_from_vertex( + api_client, getv(from_object, ['citationMetadata']), to_object + ), + ) + + if getv(from_object, ['finishMessage']) is not None: + setv(to_object, ['finish_message'], getv(from_object, ['finishMessage'])) + + if 
getv(from_object, ['avgLogprobs']) is not None: + setv(to_object, ['avg_logprobs'], getv(from_object, ['avgLogprobs'])) + + if getv(from_object, ['finishReason']) is not None: + setv(to_object, ['finish_reason'], getv(from_object, ['finishReason'])) + + if getv(from_object, ['groundingMetadata']) is not None: + setv( + to_object, + ['grounding_metadata'], + getv(from_object, ['groundingMetadata']), + ) + + if getv(from_object, ['index']) is not None: + setv(to_object, ['index'], getv(from_object, ['index'])) + + if getv(from_object, ['logprobsResult']) is not None: + setv(to_object, ['logprobs_result'], getv(from_object, ['logprobsResult'])) + + if getv(from_object, ['safetyRatings']) is not None: + setv(to_object, ['safety_ratings'], getv(from_object, ['safetyRatings'])) + + return to_object + + +def _GenerateContentResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['candidates']) is not None: + setv( + to_object, + ['candidates'], + [ + _Candidate_from_mldev(api_client, item, to_object) + for item in getv(from_object, ['candidates']) + ], + ) + + if getv(from_object, ['modelVersion']) is not None: + setv(to_object, ['model_version'], getv(from_object, ['modelVersion'])) + + if getv(from_object, ['promptFeedback']) is not None: + setv(to_object, ['prompt_feedback'], getv(from_object, ['promptFeedback'])) + + if getv(from_object, ['usageMetadata']) is not None: + setv(to_object, ['usage_metadata'], getv(from_object, ['usageMetadata'])) + + return to_object + + +def _GenerateContentResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['candidates']) is not None: + setv( + to_object, + ['candidates'], + [ + _Candidate_from_vertex(api_client, item, to_object) + for item in getv(from_object, ['candidates']) + ], + ) + + if getv(from_object, ['modelVersion']) is not None: + setv(to_object, ['model_version'], getv(from_object, ['modelVersion'])) + + if getv(from_object, ['promptFeedback']) is not None: + setv(to_object, ['prompt_feedback'], getv(from_object, ['promptFeedback'])) + + if getv(from_object, ['usageMetadata']) is not None: + setv(to_object, ['usage_metadata'], getv(from_object, ['usageMetadata'])) + + return to_object + + +def _ContentEmbeddingStatistics_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _ContentEmbeddingStatistics_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['truncated']) is not None: + setv(to_object, ['truncated'], getv(from_object, ['truncated'])) + + if getv(from_object, ['token_count']) is not None: + setv(to_object, ['token_count'], getv(from_object, ['token_count'])) + + return to_object + + +def _ContentEmbedding_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['values']) is not None: + setv(to_object, ['values'], getv(from_object, ['values'])) + + return to_object + + +def _ContentEmbedding_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['values']) is not None: + setv(to_object, ['values'], 
getv(from_object, ['values'])) + + if getv(from_object, ['statistics']) is not None: + setv( + to_object, + ['statistics'], + _ContentEmbeddingStatistics_from_vertex( + api_client, getv(from_object, ['statistics']), to_object + ), + ) + + return to_object + + +def _EmbedContentMetadata_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _EmbedContentMetadata_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['billableCharacterCount']) is not None: + setv( + to_object, + ['billable_character_count'], + getv(from_object, ['billableCharacterCount']), + ) + + return to_object + + +def _EmbedContentResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['embeddings']) is not None: + setv( + to_object, + ['embeddings'], + [ + _ContentEmbedding_from_mldev(api_client, item, to_object) + for item in getv(from_object, ['embeddings']) + ], + ) + + if getv(from_object, ['metadata']) is not None: + setv( + to_object, + ['metadata'], + _EmbedContentMetadata_from_mldev( + api_client, getv(from_object, ['metadata']), to_object + ), + ) + + return to_object + + +def _EmbedContentResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['predictions[]', 'embeddings']) is not None: + setv( + to_object, + ['embeddings'], + [ + _ContentEmbedding_from_vertex(api_client, item, to_object) + for item in getv(from_object, ['predictions[]', 'embeddings']) + ], + ) + + if getv(from_object, ['metadata']) is not None: + setv( + to_object, + ['metadata'], + _EmbedContentMetadata_from_vertex( + api_client, getv(from_object, ['metadata']), to_object + ), + ) + + return to_object + + +def _Image_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + if getv(from_object, ['bytesBase64Encoded']) is not None: + setv( + to_object, + ['image_bytes'], + t.t_bytes(api_client, getv(from_object, ['bytesBase64Encoded'])), + ) + + return to_object + + +def _Image_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['gcsUri']) is not None: + setv(to_object, ['gcs_uri'], getv(from_object, ['gcsUri'])) + + if getv(from_object, ['bytesBase64Encoded']) is not None: + setv( + to_object, + ['image_bytes'], + t.t_bytes(api_client, getv(from_object, ['bytesBase64Encoded'])), + ) + + return to_object + + +def _GeneratedImage_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['_self']) is not None: + setv( + to_object, + ['image'], + _Image_from_mldev(api_client, getv(from_object, ['_self']), to_object), + ) + + if getv(from_object, ['raiFilteredReason']) is not None: + setv( + to_object, + ['rai_filtered_reason'], + getv(from_object, ['raiFilteredReason']), + ) + + return to_object + + +def _GeneratedImage_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['_self']) is not None: + setv( + to_object, + ['image'], + 
_Image_from_vertex(api_client, getv(from_object, ['_self']), to_object), + ) + + if getv(from_object, ['raiFilteredReason']) is not None: + setv( + to_object, + ['rai_filtered_reason'], + getv(from_object, ['raiFilteredReason']), + ) + + return to_object + + +def _GenerateImageResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['predictions']) is not None: + setv( + to_object, + ['generated_images'], + [ + _GeneratedImage_from_mldev(api_client, item, to_object) + for item in getv(from_object, ['predictions']) + ], + ) + + return to_object + + +def _GenerateImageResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['predictions']) is not None: + setv( + to_object, + ['generated_images'], + [ + _GeneratedImage_from_vertex(api_client, item, to_object) + for item in getv(from_object, ['predictions']) + ], + ) + + return to_object + + +def _EditImageResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['predictions']) is not None: + setv( + to_object, + ['generated_images'], + [ + _GeneratedImage_from_mldev(api_client, item, to_object) + for item in getv(from_object, ['predictions']) + ], + ) + + return to_object + + +def _EditImageResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['predictions']) is not None: + setv( + to_object, + ['generated_images'], + [ + _GeneratedImage_from_vertex(api_client, item, to_object) + for item in getv(from_object, ['predictions']) + ], + ) + + return to_object + + +def _UpscaleImageResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['predictions']) is not None: + setv( + to_object, + ['generated_images'], + [ + _GeneratedImage_from_mldev(api_client, item, to_object) + for item in getv(from_object, ['predictions']) + ], + ) + + return to_object + + +def _UpscaleImageResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['predictions']) is not None: + setv( + to_object, + ['generated_images'], + [ + _GeneratedImage_from_vertex(api_client, item, to_object) + for item in getv(from_object, ['predictions']) + ], + ) + + return to_object + + +def _Endpoint_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _Endpoint_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['endpoint']) is not None: + setv(to_object, ['name'], getv(from_object, ['endpoint'])) + + if getv(from_object, ['deployedModelId']) is not None: + setv( + to_object, ['deployed_model_id'], getv(from_object, ['deployedModelId']) + ) + + return to_object + + +def _TunedModelInfo_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['baseModel']) is not None: + setv(to_object, ['base_model'], getv(from_object, ['baseModel'])) + + if 
getv(from_object, ['createTime']) is not None: + setv(to_object, ['create_time'], getv(from_object, ['createTime'])) + + if getv(from_object, ['updateTime']) is not None: + setv(to_object, ['update_time'], getv(from_object, ['updateTime'])) + + return to_object + + +def _TunedModelInfo_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if ( + getv(from_object, ['labels', 'google-vertex-llm-tuning-base-model-id']) + is not None + ): + setv( + to_object, + ['base_model'], + getv(from_object, ['labels', 'google-vertex-llm-tuning-base-model-id']), + ) + + if getv(from_object, ['createTime']) is not None: + setv(to_object, ['create_time'], getv(from_object, ['createTime'])) + + if getv(from_object, ['updateTime']) is not None: + setv(to_object, ['update_time'], getv(from_object, ['updateTime'])) + + return to_object + + +def _Model_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['displayName']) is not None: + setv(to_object, ['display_name'], getv(from_object, ['displayName'])) + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['version']) is not None: + setv(to_object, ['version'], getv(from_object, ['version'])) + + if getv(from_object, ['_self']) is not None: + setv( + to_object, + ['tuned_model_info'], + _TunedModelInfo_from_mldev( + api_client, getv(from_object, ['_self']), to_object + ), + ) + + if getv(from_object, ['inputTokenLimit']) is not None: + setv( + to_object, ['input_token_limit'], getv(from_object, ['inputTokenLimit']) + ) + + if getv(from_object, ['outputTokenLimit']) is not None: + setv( + to_object, + ['output_token_limit'], + getv(from_object, ['outputTokenLimit']), + ) + + if getv(from_object, ['supportedGenerationMethods']) is not None: + setv( + to_object, + ['supported_actions'], + getv(from_object, ['supportedGenerationMethods']), + ) + + return to_object + + +def _Model_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['displayName']) is not None: + setv(to_object, ['display_name'], getv(from_object, ['displayName'])) + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['versionId']) is not None: + setv(to_object, ['version'], getv(from_object, ['versionId'])) + + if getv(from_object, ['deployedModels']) is not None: + setv( + to_object, + ['endpoints'], + [ + _Endpoint_from_vertex(api_client, item, to_object) + for item in getv(from_object, ['deployedModels']) + ], + ) + + if getv(from_object, ['labels']) is not None: + setv(to_object, ['labels'], getv(from_object, ['labels'])) + + if getv(from_object, ['_self']) is not None: + setv( + to_object, + ['tuned_model_info'], + _TunedModelInfo_from_vertex( + api_client, getv(from_object, ['_self']), to_object + ), + ) + + return to_object + + +def _ListModelsResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, 
['nextPageToken']) is not None: + setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken'])) + + if getv(from_object, ['_self']) is not None: + setv( + to_object, + ['models'], + [ + _Model_from_mldev(api_client, item, to_object) + for item in t.t_extract_models( + api_client, getv(from_object, ['_self']) + ) + ], + ) + + return to_object + + +def _ListModelsResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['nextPageToken']) is not None: + setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken'])) + + if getv(from_object, ['_self']) is not None: + setv( + to_object, + ['models'], + [ + _Model_from_vertex(api_client, item, to_object) + for item in t.t_extract_models( + api_client, getv(from_object, ['_self']) + ) + ], + ) + + return to_object + + +def _DeleteModelResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _DeleteModelResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + + return to_object + + +def _CountTokensResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['totalTokens']) is not None: + setv(to_object, ['total_tokens'], getv(from_object, ['totalTokens'])) + + if getv(from_object, ['cachedContentTokenCount']) is not None: + setv( + to_object, + ['cached_content_token_count'], + getv(from_object, ['cachedContentTokenCount']), + ) + + return to_object + + +def _CountTokensResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['totalTokens']) is not None: + setv(to_object, ['total_tokens'], getv(from_object, ['totalTokens'])) + + return to_object + + +def _ComputeTokensResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['tokensInfo']) is not None: + setv(to_object, ['tokens_info'], getv(from_object, ['tokensInfo'])) + + return to_object + + +def _ComputeTokensResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['tokensInfo']) is not None: + setv(to_object, ['tokens_info'], getv(from_object, ['tokensInfo'])) + + return to_object + + +class Models(_common.BaseModule): + + def _generate_content( + self, + *, + model: str, + contents: Union[types.ContentListUnion, types.ContentListUnionDict], + config: Optional[types.GenerateContentConfigOrDict] = None, + ) -> types.GenerateContentResponse: + parameter_model = types._GenerateContentParameters( + model=model, + contents=contents, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _GenerateContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:generateContent'.format_map(request_dict.get('_url')) + else: + request_dict = _GenerateContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{model}:generateContent'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the 
hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _GenerateContentResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _GenerateContentResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.GenerateContentResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def generate_content_stream( + self, + *, + model: str, + contents: Union[types.ContentListUnion, types.ContentListUnionDict], + config: Optional[types.GenerateContentConfigOrDict] = None, + ) -> Iterator[types.GenerateContentResponse]: + parameter_model = types._GenerateContentParameters( + model=model, + contents=contents, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _GenerateContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:streamGenerateContent?alt=sse'.format_map( + request_dict.get('_url') + ) + else: + request_dict = _GenerateContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{model}:streamGenerateContent?alt=sse'.format_map( + request_dict.get('_url') + ) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + for response_dict in self._api_client.request_streamed( + 'post', path, request_dict, http_options + ): + + if self._api_client.vertexai: + response_dict = _GenerateContentResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _GenerateContentResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.GenerateContentResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + yield return_value + + def embed_content( + self, + *, + model: str, + contents: Union[types.ContentListUnion, types.ContentListUnionDict], + config: Optional[types.EmbedContentConfigOrDict] = None, + ) -> types.EmbedContentResponse: + """Calculates embeddings for the given contents(only text is supported). + + Args: + model (str): The model to use. + contents (list[Content]): The contents to embed. + config (EmbedContentConfig): Optional configuration for embeddings. + + Usage: + + .. 
code-block:: python + + embeddings = client.models.embed_content( + model= 'text-embedding-004', + contents=[ + 'What is your name?', + 'What is your favorite color?', + ], + config={ + 'output_dimensionality': 64 + }, + ) + """ + + parameter_model = types._EmbedContentParameters( + model=model, + contents=contents, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _EmbedContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:predict'.format_map(request_dict.get('_url')) + else: + request_dict = _EmbedContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{model}:batchEmbedContents'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _EmbedContentResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _EmbedContentResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.EmbedContentResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def generate_image( + self, + *, + model: str, + prompt: str, + config: Optional[types.GenerateImageConfigOrDict] = None, + ) -> types.GenerateImageResponse: + """Generates an image based on a text description and configuration. + + Args: + model (str): The model to use. + prompt (str): A text description of the image to generate. + config (GenerateImageConfig): Configuration for generation. + + Usage: + + .. code-block:: python + + response = client.models.generate_image( + model='imagen-3.0-generate-001', + prompt='Man with a dog', + config=types.GenerateImageConfig( + number_of_images= 1, + include_rai_reason= True, + ) + ) + response.generated_images[0].image.show() + # Shows a man with a dog. + """ + + parameter_model = types._GenerateImageParameters( + model=model, + prompt=prompt, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _GenerateImageParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:predict'.format_map(request_dict.get('_url')) + else: + request_dict = _GenerateImageParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{model}:predict'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
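+    # The popped 'config' is not part of the wire payload: only its
+    # 'httpOptions', when present, is forwarded so the HTTP layer can apply
+    # per-request overrides.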
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _GenerateImageResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _GenerateImageResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.GenerateImageResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def edit_image( + self, + *, + model: str, + prompt: str, + reference_images: list[types._ReferenceImageAPIOrDict], + config: Optional[types.EditImageConfigOrDict] = None, + ) -> types.EditImageResponse: + """Edits an image based on a text description and configuration. + + Args: + model (str): The model to use. + prompt (str): A text description of the edit to apply to the image. + reference_images (list[Union[RawReferenceImage, MaskReferenceImage, + ControlReferenceImage, StyleReferenceImage, SubjectReferenceImage]): The + reference images for editing. + config (EditImageConfig): Configuration for editing. + + Usage: + + .. code-block:: python + + from google.genai.types import RawReferenceImage, MaskReferenceImage + + raw_ref_image = RawReferenceImage( + reference_id=1, + reference_image=types.Image.from_file(IMAGE_FILE_PATH), + ) + + mask_ref_image = MaskReferenceImage( + reference_id=2, + config=types.MaskReferenceConfig( + mask_mode='MASK_MODE_FOREGROUND', + mask_dilation=0.06, + ), + ) + response = client.models.edit_image( + model='imagen-3.0-capability-preview-0930', + prompt='man with dog', + reference_images=[raw_ref_image, mask_ref_image], + config=types.EditImageConfig( + edit_mode= "EDIT_MODE_INPAINT_INSERTION", + number_of_images= 1, + include_rai_reason= True, + ) + ) + response.generated_images[0].image.show() + # Shows a man with a dog instead of a cat. + """ + + parameter_model = types._EditImageParameters( + model=model, + prompt=prompt, + reference_images=reference_images, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _EditImageParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:predict'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
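+    # Only the Vertex branch below can run: edit_image raises for non-Vertex
+    # clients earlier in this method, so the mldev response converter is
+    # unreachable generated code.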
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _EditImageResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _EditImageResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.EditImageResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def _upscale_image( + self, + *, + model: str, + image: types.ImageOrDict, + upscale_factor: str, + config: Optional[types._UpscaleImageAPIConfigOrDict] = None, + ) -> types.UpscaleImageResponse: + """Upscales an image. + + Args: + model (str): The model to use. + image (Image): The input image for upscaling. + upscale_factor (str): The factor to upscale the image (x2 or x4). + config (_UpscaleImageAPIConfig): Configuration for upscaling. + """ + + parameter_model = types._UpscaleImageAPIParameters( + model=model, + image=image, + upscale_factor=upscale_factor, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _UpscaleImageAPIParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:predict'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _UpscaleImageResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _UpscaleImageResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.UpscaleImageResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def get(self, *, model: str) -> types.Model: + parameter_model = types._GetModelParameters( + model=model, + ) + + if self._api_client.vertexai: + request_dict = _GetModelParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _GetModelParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
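+    # 'get' follows the same request-preparation flow as the write methods,
+    # but issues an HTTP GET and converts the response directly into a Model
+    # resource.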
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _Model_from_vertex(self._api_client, response_dict) + else: + response_dict = _Model_from_mldev(self._api_client, response_dict) + + return_value = types.Model._from_response(response_dict, parameter_model) + self._api_client._verify_response(return_value) + return return_value + + def _list( + self, *, config: Optional[types.ListModelsConfigOrDict] = None + ) -> types.ListModelsResponse: + parameter_model = types._ListModelsParameters( + config=config, + ) + + if self._api_client.vertexai: + request_dict = _ListModelsParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{models_url}'.format_map(request_dict.get('_url')) + else: + request_dict = _ListModelsParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{models_url}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _ListModelsResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _ListModelsResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.ListModelsResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def update( + self, + *, + model: str, + config: Optional[types.UpdateModelConfigOrDict] = None, + ) -> types.Model: + parameter_model = types._UpdateModelParameters( + model=model, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _UpdateModelParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}'.format_map(request_dict.get('_url')) + else: + request_dict = _UpdateModelParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
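+    # update sends an HTTP PATCH; note that the Vertex path above is built
+    # from the '{model}' placeholder while the mldev path uses '{name}'.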
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'patch', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _Model_from_vertex(self._api_client, response_dict) + else: + response_dict = _Model_from_mldev(self._api_client, response_dict) + + return_value = types.Model._from_response(response_dict, parameter_model) + self._api_client._verify_response(return_value) + return return_value + + def delete(self, *, model: str) -> types.DeleteModelResponse: + parameter_model = types._DeleteModelParameters( + model=model, + ) + + if self._api_client.vertexai: + request_dict = _DeleteModelParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _DeleteModelParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'delete', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _DeleteModelResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _DeleteModelResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.DeleteModelResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def count_tokens( + self, + *, + model: str, + contents: Union[types.ContentListUnion, types.ContentListUnionDict], + config: Optional[types.CountTokensConfigOrDict] = None, + ) -> types.CountTokensResponse: + """Counts the number of tokens in the given content. + + Args: + model (str): The model to use for counting tokens. + contents (list[types.Content]): The content to count tokens for. + Multimodal input is supported for Gemini models. + config (CountTokensConfig): The configuration for counting tokens. + + Usage: + + .. code-block:: python + + response = client.models.count_tokens( + model='gemini-1.5-flash', + contents='What is your name?', + ) + print(response) + # total_tokens=5 cached_content_token_count=None + """ + + parameter_model = types._CountTokensParameters( + model=model, + contents=contents, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _CountTokensParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:countTokens'.format_map(request_dict.get('_url')) + else: + request_dict = _CountTokensParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{model}:countTokens'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
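+    # Per the response converters above, an mldev response may also carry
+    # cached_content_token_count, while the Vertex response maps only
+    # total_tokens.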
+    config = request_dict.pop('config', None)
+    http_options = config.pop('httpOptions', None) if config else None
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response_dict = self._api_client.request(
+        'post', path, request_dict, http_options
+    )
+
+    if self._api_client.vertexai:
+      response_dict = _CountTokensResponse_from_vertex(
+          self._api_client, response_dict
+      )
+    else:
+      response_dict = _CountTokensResponse_from_mldev(
+          self._api_client, response_dict
+      )
+
+    return_value = types.CountTokensResponse._from_response(
+        response_dict, parameter_model
+    )
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  def compute_tokens(
+      self,
+      *,
+      model: str,
+      contents: Union[types.ContentListUnion, types.ContentListUnionDict],
+      config: Optional[types.ComputeTokensConfigOrDict] = None,
+  ) -> types.ComputeTokensResponse:
+    """Returns a list of tokens based on the input text.
+
+    This method is not supported by the Gemini Developer API.
+
+    Args:
+      model (str): The model to use.
+      contents (list[shared.Content]): The content to compute tokens for. Only
+        text is supported.
+      config (ComputeTokensConfig): Optional configuration for the request.
+
+    Usage:
+
+    .. code-block:: python
+
+      response = client.models.compute_tokens(
+          model='gemini-1.5-flash',
+          contents='What is your name?',
+      )
+      print(response)
+      # tokens_info=[TokensInfo(role='user', token_ids=['1841', ...],
+      # tokens=[b'What', b' is', b' your', b' name', b'?'])]
+    """
+
+    parameter_model = types._ComputeTokensParameters(
+        model=model,
+        contents=contents,
+        config=config,
+    )
+
+    if not self._api_client.vertexai:
+      raise ValueError('This method is only supported in the Vertex AI client.')
+    else:
+      request_dict = _ComputeTokensParameters_to_vertex(
+          self._api_client, parameter_model
+      )
+      path = '{model}:computeTokens'.format_map(request_dict.get('_url'))
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    config = request_dict.pop('config', None)
+    http_options = config.pop('httpOptions', None) if config else None
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response_dict = self._api_client.request(
+        'post', path, request_dict, http_options
+    )
+
+    if self._api_client.vertexai:
+      response_dict = _ComputeTokensResponse_from_vertex(
+          self._api_client, response_dict
+      )
+    else:
+      response_dict = _ComputeTokensResponse_from_mldev(
+          self._api_client, response_dict
+      )
+
+    return_value = types.ComputeTokensResponse._from_response(
+        response_dict, parameter_model
+    )
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  def generate_content(
+      self,
+      *,
+      model: str,
+      contents: Union[types.ContentListUnion, types.ContentListUnionDict],
+      config: Optional[types.GenerateContentConfigOrDict] = None,
+  ) -> types.GenerateContentResponse:
+    """Makes an API request to generate content using a model.
+
+    Some models support multimodal input and output.
+
+    Usage:
+
+    ..
code-block:: python + + from google.genai import types + from google import genai + + client = genai.Client( + vertexai=True, project='my-project-id', location='us-central1' + ) + + response = client.models.generate_content( + model='gemini-1.5-flash-002', + contents='''What is a good name for a flower shop that specializes in + selling bouquets of dried flowers?''' + ) + print(response.text) + # **Elegant & Classic:** + # * The Dried Bloom + # * Everlasting Florals + # * Timeless Petals + + response = client.models.generate_content( + model='gemini-1.5-flash-002', + contents=[ + types.Part.from_text('What is shown in this image?'), + types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg', + 'image/jpeg') + ] + ) + print(response.text) + # The image shows a flat lay arrangement of freshly baked blueberry + # scones. + """ + + if _extra_utils.should_disable_afc(config): + return self._generate_content( + model=model, contents=contents, config=config + ) + remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config) + logging.info( + f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.' + ) + automatic_function_calling_history = [] + while remaining_remote_calls_afc > 0: + response = self._generate_content( + model=model, contents=contents, config=config + ) + remaining_remote_calls_afc -= 1 + if remaining_remote_calls_afc == 0: + logging.info('Reached max remote calls for automatic function calling.') + + function_map = _extra_utils.get_function_map(config) + if not function_map: + break + if ( + not response.candidates + or not response.candidates[0].content + or not response.candidates[0].content.parts + ): + break + func_response_parts = _extra_utils.get_function_response_parts( + response, function_map + ) + if not func_response_parts: + break + contents = t.t_contents(self._api_client, contents) + contents.append(response.candidates[0].content) + contents.append( + types.Content( + role='user', + parts=func_response_parts, + ) + ) + automatic_function_calling_history.extend(contents) + if _extra_utils.should_append_afc_history(config): + response.automatic_function_calling_history = ( + automatic_function_calling_history + ) + return response + + def upscale_image( + self, + *, + model: str, + image: types.ImageOrDict, + upscale_factor: str, + config: Optional[types.UpscaleImageConfigOrDict] = None, + ) -> types.UpscaleImageResponse: + """Makes an API request to upscale a provided image. + + Args: + model (str): The model to use. + image (Image): The input image for upscaling. + upscale_factor (str): The factor to upscale the image (x2 or x4). + config (UpscaleImageConfig): Configuration for upscaling. + + Usage: + + .. code-block:: python + + from google.genai.types import Image + + IMAGE_FILE_PATH="my-image.png" + response=client.models.upscale_image( + model='imagen-3.0-generate-001', + image=types.Image.from_file(IMAGE_FILE_PATH), + upscale_factor='x2', + ) + response.generated_images[0].image.show() + # Opens my-image.png which is upscaled by a factor of 2. + """ + + # Validate config. + types.UpscaleImageParameters( + model=model, + image=image, + upscale_factor=upscale_factor, + config=config, + ) + + # Convert to API config. + config = config or {} + config_dct = config if isinstance(config, dict) else config.dict() + api_config = types._UpscaleImageAPIConfigDict(**config_dct) # pylint: disable=protected-access + + # Provide default values through API config. 
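+    # These two fields are fixed by the SDK rather than exposed to callers:
+    # 'mode' appears to select upscaling on the shared ':predict' endpoint,
+    # and exactly one image is requested; the scale itself is controlled via
+    # upscale_factor.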
+ api_config['mode'] = 'upscale' + api_config['number_of_images'] = 1 + + return self._upscale_image( + model=model, + image=image, + upscale_factor=upscale_factor, + config=api_config, + ) + + def list( + self, + *, + config: Optional[types.ListModelsConfigOrDict] = None, + ) -> Pager[types.Model]: + """Makes an API request to list the available models. + + If `query_base` is set to True in the config, the API will return all + available base models. If set to False or not set (default), it will return + all tuned models. + + Args: + config (ListModelsConfigOrDict): Configuration for retrieving models. + + Usage: + + .. code-block:: python + + response=client.models.list(config={'page_size': 5}) + print(response.page) + # [Model(name='projects/./locations/./models/123', display_name='my_model' + + response=client.models.list(config={'page_size': 5, 'query_base': True}) + print(response.page) + # [Model(name='publishers/google/models/gemini-2.0-flash-exp' ... + """ + + config = ( + types._ListModelsParameters(config=config).config + or types.ListModelsConfig() + ) + if self._api_client.vertexai: + config = config.copy() + if config.query_base: + http_options = ( + config.http_options if config.http_options else HttpOptionsDict() + ) + http_options['skip_project_and_location_in_path'] = True + config.http_options = http_options + else: + # Filter for tuning jobs artifacts by labels. + filter_value = config.filter + config.filter = ( + filter_value + '&filter=labels.tune-type:*' + if filter_value + else 'labels.tune-type:*' + ) + if not config.query_base: + config.query_base = False + return Pager( + 'models', + self._list, + self._list(config=config), + config, + ) + + +class AsyncModels(_common.BaseModule): + + async def _generate_content( + self, + *, + model: str, + contents: Union[types.ContentListUnion, types.ContentListUnionDict], + config: Optional[types.GenerateContentConfigOrDict] = None, + ) -> types.GenerateContentResponse: + parameter_model = types._GenerateContentParameters( + model=model, + contents=contents, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _GenerateContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:generateContent'.format_map(request_dict.get('_url')) + else: + request_dict = _GenerateContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{model}:generateContent'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
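+    # From here on, AsyncModels mirrors the synchronous Models module
+    # method-for-method, awaiting ApiClient.async_request (and
+    # async_request_streamed) in place of the blocking request calls.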
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _GenerateContentResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _GenerateContentResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.GenerateContentResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def generate_content_stream( + self, + *, + model: str, + contents: Union[types.ContentListUnion, types.ContentListUnionDict], + config: Optional[types.GenerateContentConfigOrDict] = None, + ) -> AsyncIterator[types.GenerateContentResponse]: + parameter_model = types._GenerateContentParameters( + model=model, + contents=contents, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _GenerateContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:streamGenerateContent?alt=sse'.format_map( + request_dict.get('_url') + ) + else: + request_dict = _GenerateContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{model}:streamGenerateContent?alt=sse'.format_map( + request_dict.get('_url') + ) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + async for response_dict in self._api_client.async_request_streamed( + 'post', path, request_dict, http_options + ): + + if self._api_client.vertexai: + response_dict = _GenerateContentResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _GenerateContentResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.GenerateContentResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + yield return_value + + async def embed_content( + self, + *, + model: str, + contents: Union[types.ContentListUnion, types.ContentListUnionDict], + config: Optional[types.EmbedContentConfigOrDict] = None, + ) -> types.EmbedContentResponse: + """Calculates embeddings for the given contents(only text is supported). + + Args: + model (str): The model to use. + contents (list[Content]): The contents to embed. + config (EmbedContentConfig): Optional configuration for embeddings. + + Usage: + + .. 
code-block:: python + + embeddings = client.models.embed_content( + model= 'text-embedding-004', + contents=[ + 'What is your name?', + 'What is your favorite color?', + ], + config={ + 'output_dimensionality': 64 + }, + ) + """ + + parameter_model = types._EmbedContentParameters( + model=model, + contents=contents, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _EmbedContentParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:predict'.format_map(request_dict.get('_url')) + else: + request_dict = _EmbedContentParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{model}:batchEmbedContents'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _EmbedContentResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _EmbedContentResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.EmbedContentResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def generate_image( + self, + *, + model: str, + prompt: str, + config: Optional[types.GenerateImageConfigOrDict] = None, + ) -> types.GenerateImageResponse: + """Generates an image based on a text description and configuration. + + Args: + model (str): The model to use. + prompt (str): A text description of the image to generate. + config (GenerateImageConfig): Configuration for generation. + + Usage: + + .. code-block:: python + + response = client.models.generate_image( + model='imagen-3.0-generate-001', + prompt='Man with a dog', + config=types.GenerateImageConfig( + number_of_images= 1, + include_rai_reason= True, + ) + ) + response.generated_images[0].image.show() + # Shows a man with a dog. + """ + + parameter_model = types._GenerateImageParameters( + model=model, + prompt=prompt, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _GenerateImageParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:predict'.format_map(request_dict.get('_url')) + else: + request_dict = _GenerateImageParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{model}:predict'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
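+    # Unlike embed_content above (Vertex ':predict' vs mldev
+    # ':batchEmbedContents'), image generation uses the same ':predict' verb
+    # on both backends.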
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _GenerateImageResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _GenerateImageResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.GenerateImageResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def edit_image( + self, + *, + model: str, + prompt: str, + reference_images: list[types._ReferenceImageAPIOrDict], + config: Optional[types.EditImageConfigOrDict] = None, + ) -> types.EditImageResponse: + """Edits an image based on a text description and configuration. + + Args: + model (str): The model to use. + prompt (str): A text description of the edit to apply to the image. + reference_images (list[Union[RawReferenceImage, MaskReferenceImage, + ControlReferenceImage, StyleReferenceImage, SubjectReferenceImage]): The + reference images for editing. + config (EditImageConfig): Configuration for editing. + + Usage: + + .. code-block:: python + + from google.genai.types import RawReferenceImage, MaskReferenceImage + + raw_ref_image = RawReferenceImage( + reference_id=1, + reference_image=types.Image.from_file(IMAGE_FILE_PATH), + ) + + mask_ref_image = MaskReferenceImage( + reference_id=2, + config=types.MaskReferenceConfig( + mask_mode='MASK_MODE_FOREGROUND', + mask_dilation=0.06, + ), + ) + response = client.models.edit_image( + model='imagen-3.0-capability-preview-0930', + prompt='man with dog', + reference_images=[raw_ref_image, mask_ref_image], + config=types.EditImageConfig( + edit_mode= "EDIT_MODE_INPAINT_INSERTION", + number_of_images= 1, + include_rai_reason= True, + ) + ) + response.generated_images[0].image.show() + # Shows a man with a dog instead of a cat. + """ + + parameter_model = types._EditImageParameters( + model=model, + prompt=prompt, + reference_images=reference_images, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _EditImageParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:predict'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _EditImageResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _EditImageResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.EditImageResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def _upscale_image( + self, + *, + model: str, + image: types.ImageOrDict, + upscale_factor: str, + config: Optional[types._UpscaleImageAPIConfigOrDict] = None, + ) -> types.UpscaleImageResponse: + """Upscales an image. + + Args: + model (str): The model to use. + image (Image): The input image for upscaling. + upscale_factor (str): The factor to upscale the image (x2 or x4). + config (_UpscaleImageAPIConfig): Configuration for upscaling. + """ + + parameter_model = types._UpscaleImageAPIParameters( + model=model, + image=image, + upscale_factor=upscale_factor, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _UpscaleImageAPIParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:predict'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _UpscaleImageResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _UpscaleImageResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.UpscaleImageResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def get(self, *, model: str) -> types.Model: + parameter_model = types._GetModelParameters( + model=model, + ) + + if self._api_client.vertexai: + request_dict = _GetModelParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _GetModelParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _Model_from_vertex(self._api_client, response_dict) + else: + response_dict = _Model_from_mldev(self._api_client, response_dict) + + return_value = types.Model._from_response(response_dict, parameter_model) + self._api_client._verify_response(return_value) + return return_value + + async def _list( + self, *, config: Optional[types.ListModelsConfigOrDict] = None + ) -> types.ListModelsResponse: + parameter_model = types._ListModelsParameters( + config=config, + ) + + if self._api_client.vertexai: + request_dict = _ListModelsParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{models_url}'.format_map(request_dict.get('_url')) + else: + request_dict = _ListModelsParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{models_url}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _ListModelsResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _ListModelsResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.ListModelsResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def update( + self, + *, + model: str, + config: Optional[types.UpdateModelConfigOrDict] = None, + ) -> types.Model: + parameter_model = types._UpdateModelParameters( + model=model, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _UpdateModelParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}'.format_map(request_dict.get('_url')) + else: + request_dict = _UpdateModelParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'patch', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _Model_from_vertex(self._api_client, response_dict) + else: + response_dict = _Model_from_mldev(self._api_client, response_dict) + + return_value = types.Model._from_response(response_dict, parameter_model) + self._api_client._verify_response(return_value) + return return_value + + async def delete(self, *, model: str) -> types.DeleteModelResponse: + parameter_model = types._DeleteModelParameters( + model=model, + ) + + if self._api_client.vertexai: + request_dict = _DeleteModelParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _DeleteModelParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'delete', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _DeleteModelResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _DeleteModelResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.DeleteModelResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def count_tokens( + self, + *, + model: str, + contents: Union[types.ContentListUnion, types.ContentListUnionDict], + config: Optional[types.CountTokensConfigOrDict] = None, + ) -> types.CountTokensResponse: + """Counts the number of tokens in the given content. + + Args: + model (str): The model to use for counting tokens. + contents (list[types.Content]): The content to count tokens for. + Multimodal input is supported for Gemini models. + config (CountTokensConfig): The configuration for counting tokens. + + Usage: + + .. code-block:: python + + response = client.models.count_tokens( + model='gemini-1.5-flash', + contents='What is your name?', + ) + print(response) + # total_tokens=5 cached_content_token_count=None + """ + + parameter_model = types._CountTokensParameters( + model=model, + contents=contents, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _CountTokensParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:countTokens'.format_map(request_dict.get('_url')) + else: + request_dict = _CountTokensParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{model}:countTokens'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _CountTokensResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _CountTokensResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.CountTokensResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def compute_tokens( + self, + *, + model: str, + contents: Union[types.ContentListUnion, types.ContentListUnionDict], + config: Optional[types.ComputeTokensConfigOrDict] = None, + ) -> types.ComputeTokensResponse: + """Return a list of tokens based on the input text. + + This method is not supported by the Gemini Developer API. + + Args: + model (str): The model to use. + contents (list[shared.Content]): The content to compute tokens for. Only + text is supported. + + Usage: + + .. code-block:: python + + response = client.models.compute_tokens( + model='gemini-1.5-flash', + contents='What is your name?', + ) + print(response) + # tokens_info=[TokensInfo(role='user', token_ids=['1841', ...], + # tokens=[b'What', b' is', b' your', b' name', b'?'])] + """ + + parameter_model = types._ComputeTokensParameters( + model=model, + contents=contents, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _ComputeTokensParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{model}:computeTokens'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _ComputeTokensResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _ComputeTokensResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.ComputeTokensResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def generate_content( + self, + *, + model: str, + contents: Union[types.ContentListUnion, types.ContentListUnionDict], + config: Optional[types.GenerateContentConfigOrDict] = None, + ) -> types.GenerateContentResponse: + """Makes an API request to generate content using a model. + + Some models support multimodal input and output. + + Usage: + + .. code-block:: python + + from google.genai import types + from google import genai + + client = genai.Client( + vertexai=True, project='my-project-id', location='us-central1' + ) + + response = await client.aio.models.generate_content( + model='gemini-1.5-flash-002', + contents='User input: I like bagels. 
Answer:', + config=types.GenerateContentConfig( + system_instruction= + [ + 'You are a helpful language translator.', + 'Your mission is to translate text in English to French.' + ] + ), + ) + print(response.text) + # J'aime les bagels. + """ + if _extra_utils.should_disable_afc(config): + return await self._generate_content( + model=model, contents=contents, config=config + ) + remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config) + logging.info( + f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.' + ) + automatic_function_calling_history = [] + while remaining_remote_calls_afc > 0: + response = await self._generate_content( + model=model, contents=contents, config=config + ) + remaining_remote_calls_afc -= 1 + if remaining_remote_calls_afc == 0: + logging.info('Reached max remote calls for automatic function calling.') + + function_map = _extra_utils.get_function_map(config) + if not function_map: + break + if ( + not response.candidates + or not response.candidates[0].content + or not response.candidates[0].content.parts + ): + break + func_response_parts = _extra_utils.get_function_response_parts( + response, function_map + ) + if not func_response_parts: + break + contents = t.t_contents(self._api_client, contents) + contents.append(response.candidates[0].content) + contents.append( + types.Content( + role='user', + parts=func_response_parts, + ) + ) + automatic_function_calling_history.extend(contents) + + if _extra_utils.should_append_afc_history(config): + response.automatic_function_calling_history = ( + automatic_function_calling_history + ) + return response + + async def list( + self, + *, + config: Optional[types.ListModelsConfigOrDict] = None, + ) -> AsyncPager[types.Model]: + """Makes an API request to list the available models. + + If `query_base` is set to True in the config, the API will return all + available base models. If set to False or not set (default), it will return + all tuned models. + + Args: + config (ListModelsConfigOrDict): Configuration for retrieving models. + + Usage: + + .. code-block:: python + + response = await client.aio.models.list(config={'page_size': 5}) + print(response.page) + # [Model(name='projects/./locations/./models/123', display_name='my_model' + + response = await client.aio.models.list( + config={'page_size': 5, 'query_base': True} + ) + print(response.page) + # [Model(name='publishers/google/models/gemini-2.0-flash-exp' ... + """ + + config = ( + types._ListModelsParameters(config=config).config + or types.ListModelsConfig() + ) + if self._api_client.vertexai: + config = config.copy() + if config.query_base: + http_options = ( + config.http_options if config.http_options else HttpOptionsDict() + ) + http_options['skip_project_and_location_in_path'] = True + config.http_options = http_options + else: + # Filter for tuning jobs artifacts by labels. + filter_value = config.filter + config.filter = ( + filter_value + '&filter=labels.tune-type:*' + if filter_value + else 'labels.tune-type:*' + ) + if not config.query_base: + config.query_base = False + return AsyncPager( + 'models', + self._list, + await self._list(config=config), + config, + ) + + async def upscale_image( + self, + *, + model: str, + image: types.ImageOrDict, + upscale_factor: str, + config: Optional[types.UpscaleImageConfigOrDict] = None, + ) -> types.UpscaleImageResponse: + """Makes an API request to upscale a provided image. + + Args: + model (str): The model to use. + image (Image): The input image for upscaling. 
+      upscale_factor (str): The factor to upscale the image (x2 or x4).
+      config (UpscaleImageConfig): Configuration for upscaling.
+
+    Usage:
+
+    .. code-block:: python
+
+      from google.genai.types import Image
+
+      IMAGE_FILE_PATH = "my-image.png"
+      response = await client.aio.models.upscale_image(
+          model='imagen-3.0-generate-001',
+          image=types.Image.from_file(IMAGE_FILE_PATH),
+          upscale_factor='x2',
+      )
+      response.generated_images[0].image.show()
+      # Opens my-image.png which is upscaled by a factor of 2.
+    """
+
+    # Validate config.
+    types.UpscaleImageParameters(
+        model=model,
+        image=image,
+        upscale_factor=upscale_factor,
+        config=config,
+    )
+
+    # Convert to API config.
+    config = config or {}
+    config_dct = config if isinstance(config, dict) else config.dict()
+    api_config = types._UpscaleImageAPIConfigDict(**config_dct)  # pylint: disable=protected-access
+
+    # Provide default values through API config.
+    api_config['mode'] = 'upscale'
+    api_config['number_of_images'] = 1
+
+    return await self._upscale_image(
+        model=model,
+        image=image,
+        upscale_factor=upscale_factor,
+        config=api_config,
+    )
diff --git a/.venv/lib/python3.12/site-packages/google/genai/pagers.py b/.venv/lib/python3.12/site-packages/google/genai/pagers.py
new file mode 100644
index 00000000..a90465c3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/genai/pagers.py
@@ -0,0 +1,245 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Pagers for the GenAI List APIs."""
+
+# pylint: disable=protected-access
+
+import copy
+from typing import Any, AsyncIterator, Awaitable, Callable, Generic, Iterator, Literal, TypeVar
+
+T = TypeVar('T')
+
+PagedItem = Literal[
+    'batch_jobs', 'models', 'tuning_jobs', 'files', 'cached_contents'
+]
+
+
+class _BasePager(Generic[T]):
+  """Base pager class for iterating through paginated results."""
+
+  def __init__(
+      self,
+      name: PagedItem,
+      request: Callable[..., Any],
+      response: Any,
+      config: Any,
+  ):
+    self._name = name
+    self._request = request
+
+    self._page = getattr(response, self._name) or []
+    self._idx = 0
+
+    if not config:
+      request_config = {}
+    elif isinstance(config, dict):
+      request_config = copy.deepcopy(config)
+    else:
+      request_config = dict(config)
+    request_config['page_token'] = getattr(response, 'next_page_token')
+    self._config = request_config
+
+    self._page_size = request_config.get('page_size', len(self._page))
+
+  @property
+  def page(self) -> list[T]:
+    """Returns the current page, which is a list of items.
+
+    The returned list of items is a subset of the entire list.
+
+    Usage:
+
+    .. code-block:: python
+
+      batch_jobs_pager = client.batches.list(config={'page_size': 5})
+      print(f"first page: {batch_jobs_pager.page}")
+      # first page: [BatchJob(name='projects/./locations/./batchPredictionJobs/1
+    """
+
+    return self._page
+
+  @property
+  def name(self) -> str:
+    """Returns the type of paged item (for example, ``batch_jobs``).
+
+    Usage:
+
+    ..
code-block:: python + + batch_jobs_pager = client.batches.list(config={'page_size': 5}) + print(f"name: {batch_jobs_pager.name}") + # name: batch_jobs + """ + + return self._name + + @property + def page_size(self) -> int: + """Returns the length of the page fetched each time by this pager. + + The number of items in the page is less than or equal to the page length. + + Usage: + + .. code-block:: python + + batch_jobs_pager = client.batches.list(config={'page_size': 5}) + print(f"page_size: {batch_jobs_pager.page_size}") + # page_size: 5 + """ + + return self._page_size + + @property + def config(self) -> dict[str, Any]: + """Returns the configuration when making the API request for the next page. + + A configuration is a set of optional parameters and arguments that can be + used to customize the API request. For example, the ``page_token`` parameter + contains the token to request the next page. + + Usage: + + .. code-block:: python + + batch_jobs_pager = client.batches.list(config={'page_size': 5}) + print(f"config: {batch_jobs_pager.config}") + # config: {'page_size': 5, 'page_token': 'AMEw9yO5jnsGnZJLHSKDFHJJu'} + """ + + return self._config + + def __len__(self) -> int: + """Returns the total number of items in the current page.""" + return len(self.page) + + def __getitem__(self, index: int) -> T: + """Returns the item at the given index.""" + return self.page[index] + + def _init_next_page(self, response: Any) -> None: + """Initializes the next page from the response. + + This is an internal method that should be called by subclasses after + fetching the next page. + + Args: + response: The response object from the API request. + """ + self.__init__(self.name, self._request, response, self.config) + + +class Pager(_BasePager[T]): + """Pager class for iterating through paginated results.""" + + def __next__(self) -> T: + """Returns the next item.""" + if self._idx >= len(self): + try: + self.next_page() + except IndexError: + raise StopIteration + + item = self.page[self._idx] + self._idx += 1 + return item + + def __iter__(self) -> Iterator[T]: + """Returns an iterator over the items.""" + self._idx = 0 + return self + + def next_page(self) -> list[T]: + """Fetches the next page of items. This makes a new API request. + + Usage: + + .. 
code-block:: python + + batch_jobs_pager = client.batches.list(config={'page_size': 5}) + print(f"current page: {batch_jobs_pager.page}") + batch_jobs_pager.next_page() + print(f"next page: {batch_jobs_pager.page}") + # current page: [BatchJob(name='projects/.../batchPredictionJobs/1 + # next page: [BatchJob(name='projects/.../batchPredictionJobs/6 + """ + + if not self.config.get('page_token'): + raise IndexError('No more pages to fetch.') + + response = self._request(config=self.config) + self._init_next_page(response) + return self.page + + +class AsyncPager(_BasePager[T]): + """AsyncPager class for iterating through paginated results.""" + + def __init__( + self, + name: PagedItem, + request: Callable[Any, Awaitable[Any]], + response: Any, + config: Any, + ): + super().__init__(name, request, response, config) + + def __aiter__(self) -> AsyncIterator[T]: + """Returns an async iterator over the items.""" + self._idx = 0 + return self + + async def __anext__(self) -> Awaitable[T]: + """Returns the next item asynchronously.""" + if self._idx >= len(self): + try: + await self.next_page() + except IndexError: + raise StopAsyncIteration + + item = self.page[self._idx] + self._idx += 1 + return item + + async def next_page(self) -> list[T]: + """Fetches the next page of items asynchronously. + + This makes a new API request. + + Returns: + The next page of items. + + Raises: + IndexError: No more pages to fetch. + + Usage: + + .. code-block:: python + + batch_jobs_pager = await client.aio.batches.list(config={'page_size': 5}) + print(f"current page: {batch_jobs_pager.page}") + await batch_jobs_pager.next_page() + print(f"next page: {batch_jobs_pager.page}") + # current page: [BatchJob(name='projects/.../batchPredictionJobs/1 + # next page: [BatchJob(name='projects/.../batchPredictionJobs/6 + """ + + if not self.config.get('page_token'): + raise IndexError('No more pages to fetch.') + + response = await self._request(config=self.config) + self._init_next_page(response) + return self.page diff --git a/.venv/lib/python3.12/site-packages/google/genai/tunings.py b/.venv/lib/python3.12/site-packages/google/genai/tunings.py new file mode 100644 index 00000000..a215a195 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/genai/tunings.py @@ -0,0 +1,1681 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Code generated by the Google Gen AI SDK generator DO NOT EDIT. + +from typing import Optional, Union +from urllib.parse import urlencode +from . import _common +from . import _transformers as t +from . 
import types +from ._api_client import ApiClient +from ._common import get_value_by_path as getv +from ._common import set_value_by_path as setv +from .pagers import AsyncPager, Pager + + +def _GetTuningJobConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _GetTuningJobConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + return to_object + + +def _GetTuningJobParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['_url', 'name'], getv(from_object, ['name'])) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _GetTuningJobConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _GetTuningJobParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['_url', 'name'], getv(from_object, ['name'])) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _GetTuningJobConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _ListTuningJobsConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['page_size']) is not None: + setv( + parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size']) + ) + + if getv(from_object, ['page_token']) is not None: + setv( + parent_object, + ['_query', 'pageToken'], + getv(from_object, ['page_token']), + ) + + if getv(from_object, ['filter']) is not None: + setv(parent_object, ['_query', 'filter'], getv(from_object, ['filter'])) + + return to_object + + +def _ListTuningJobsConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['page_size']) is not None: + setv( + parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size']) + ) + + if getv(from_object, ['page_token']) is not None: + setv( + parent_object, + ['_query', 'pageToken'], + getv(from_object, ['page_token']), + ) + + if getv(from_object, ['filter']) is not None: + setv(parent_object, ['_query', 'filter'], getv(from_object, ['filter'])) + + return to_object + + +def _ListTuningJobsParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _ListTuningJobsConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _ListTuningJobsParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + 
_ListTuningJobsConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _TuningExample_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['text_input']) is not None: + setv(to_object, ['textInput'], getv(from_object, ['text_input'])) + + if getv(from_object, ['output']) is not None: + setv(to_object, ['output'], getv(from_object, ['output'])) + + return to_object + + +def _TuningExample_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['text_input']) is not None: + raise ValueError('text_input parameter is not supported in Vertex AI.') + + if getv(from_object, ['output']) is not None: + raise ValueError('output parameter is not supported in Vertex AI.') + + return to_object + + +def _TuningDataset_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['gcs_uri']) is not None: + raise ValueError('gcs_uri parameter is not supported in Google AI.') + + if getv(from_object, ['examples']) is not None: + setv( + to_object, + ['examples', 'examples'], + [ + _TuningExample_to_mldev(api_client, item, to_object) + for item in getv(from_object, ['examples']) + ], + ) + + return to_object + + +def _TuningDataset_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['gcs_uri']) is not None: + setv( + parent_object, + ['supervisedTuningSpec', 'trainingDatasetUri'], + getv(from_object, ['gcs_uri']), + ) + + if getv(from_object, ['examples']) is not None: + raise ValueError('examples parameter is not supported in Vertex AI.') + + return to_object + + +def _TuningValidationDataset_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['gcs_uri']) is not None: + raise ValueError('gcs_uri parameter is not supported in Google AI.') + + return to_object + + +def _TuningValidationDataset_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['gcs_uri']) is not None: + setv(to_object, ['validationDatasetUri'], getv(from_object, ['gcs_uri'])) + + return to_object + + +def _CreateTuningJobConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['validation_dataset']) is not None: + raise ValueError( + 'validation_dataset parameter is not supported in Google AI.' 
+ ) + + if getv(from_object, ['tuned_model_display_name']) is not None: + setv( + parent_object, + ['displayName'], + getv(from_object, ['tuned_model_display_name']), + ) + + if getv(from_object, ['description']) is not None: + raise ValueError('description parameter is not supported in Google AI.') + + if getv(from_object, ['epoch_count']) is not None: + setv( + parent_object, + ['tuningTask', 'hyperparameters', 'epochCount'], + getv(from_object, ['epoch_count']), + ) + + if getv(from_object, ['learning_rate_multiplier']) is not None: + setv( + to_object, + ['tuningTask', 'hyperparameters', 'learningRateMultiplier'], + getv(from_object, ['learning_rate_multiplier']), + ) + + if getv(from_object, ['adapter_size']) is not None: + raise ValueError('adapter_size parameter is not supported in Google AI.') + + if getv(from_object, ['batch_size']) is not None: + setv( + parent_object, + ['tuningTask', 'hyperparameters', 'batchSize'], + getv(from_object, ['batch_size']), + ) + + if getv(from_object, ['learning_rate']) is not None: + setv( + parent_object, + ['tuningTask', 'hyperparameters', 'learningRate'], + getv(from_object, ['learning_rate']), + ) + + return to_object + + +def _CreateTuningJobConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['validation_dataset']) is not None: + setv( + parent_object, + ['supervisedTuningSpec'], + _TuningValidationDataset_to_vertex( + api_client, getv(from_object, ['validation_dataset']), to_object + ), + ) + + if getv(from_object, ['tuned_model_display_name']) is not None: + setv( + parent_object, + ['tunedModelDisplayName'], + getv(from_object, ['tuned_model_display_name']), + ) + + if getv(from_object, ['description']) is not None: + setv(parent_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['epoch_count']) is not None: + setv( + parent_object, + ['supervisedTuningSpec', 'hyperParameters', 'epochCount'], + getv(from_object, ['epoch_count']), + ) + + if getv(from_object, ['learning_rate_multiplier']) is not None: + setv( + to_object, + ['supervisedTuningSpec', 'hyperParameters', 'learningRateMultiplier'], + getv(from_object, ['learning_rate_multiplier']), + ) + + if getv(from_object, ['adapter_size']) is not None: + setv( + parent_object, + ['supervisedTuningSpec', 'hyperParameters', 'adapterSize'], + getv(from_object, ['adapter_size']), + ) + + if getv(from_object, ['batch_size']) is not None: + raise ValueError('batch_size parameter is not supported in Vertex AI.') + + if getv(from_object, ['learning_rate']) is not None: + raise ValueError('learning_rate parameter is not supported in Vertex AI.') + + return to_object + + +def _CreateTuningJobParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['base_model']) is not None: + setv(to_object, ['baseModel'], getv(from_object, ['base_model'])) + + if getv(from_object, ['training_dataset']) is not None: + setv( + to_object, + ['tuningTask', 'trainingData'], + _TuningDataset_to_mldev( + api_client, getv(from_object, ['training_dataset']), to_object + ), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CreateTuningJobConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object 
+ ), + ) + + return to_object + + +def _CreateTuningJobParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['base_model']) is not None: + setv(to_object, ['baseModel'], getv(from_object, ['base_model'])) + + if getv(from_object, ['training_dataset']) is not None: + setv( + to_object, + ['supervisedTuningSpec', 'trainingDatasetUri'], + _TuningDataset_to_vertex( + api_client, getv(from_object, ['training_dataset']), to_object + ), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CreateTuningJobConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _DistillationDataset_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['gcs_uri']) is not None: + raise ValueError('gcs_uri parameter is not supported in Google AI.') + + return to_object + + +def _DistillationDataset_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['gcs_uri']) is not None: + setv( + parent_object, + ['distillationSpec', 'trainingDatasetUri'], + getv(from_object, ['gcs_uri']), + ) + + return to_object + + +def _DistillationValidationDataset_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['gcs_uri']) is not None: + raise ValueError('gcs_uri parameter is not supported in Google AI.') + + return to_object + + +def _DistillationValidationDataset_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['gcs_uri']) is not None: + setv(to_object, ['validationDatasetUri'], getv(from_object, ['gcs_uri'])) + + return to_object + + +def _CreateDistillationJobConfig_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['validation_dataset']) is not None: + raise ValueError( + 'validation_dataset parameter is not supported in Google AI.' + ) + + if getv(from_object, ['tuned_model_display_name']) is not None: + setv( + parent_object, + ['displayName'], + getv(from_object, ['tuned_model_display_name']), + ) + + if getv(from_object, ['epoch_count']) is not None: + setv( + parent_object, + ['tuningTask', 'hyperparameters', 'epochCount'], + getv(from_object, ['epoch_count']), + ) + + if getv(from_object, ['learning_rate_multiplier']) is not None: + setv( + parent_object, + ['tuningTask', 'hyperparameters', 'learningRateMultiplier'], + getv(from_object, ['learning_rate_multiplier']), + ) + + if getv(from_object, ['adapter_size']) is not None: + raise ValueError('adapter_size parameter is not supported in Google AI.') + + if getv(from_object, ['pipeline_root_directory']) is not None: + raise ValueError( + 'pipeline_root_directory parameter is not supported in Google AI.' 
+ ) + + return to_object + + +def _CreateDistillationJobConfig_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['http_options']) is not None: + setv(to_object, ['httpOptions'], getv(from_object, ['http_options'])) + + if getv(from_object, ['validation_dataset']) is not None: + setv( + parent_object, + ['distillationSpec'], + _DistillationValidationDataset_to_vertex( + api_client, getv(from_object, ['validation_dataset']), to_object + ), + ) + + if getv(from_object, ['tuned_model_display_name']) is not None: + setv( + parent_object, + ['tunedModelDisplayName'], + getv(from_object, ['tuned_model_display_name']), + ) + + if getv(from_object, ['epoch_count']) is not None: + setv( + parent_object, + ['distillationSpec', 'hyperParameters', 'epochCount'], + getv(from_object, ['epoch_count']), + ) + + if getv(from_object, ['learning_rate_multiplier']) is not None: + setv( + parent_object, + ['distillationSpec', 'hyperParameters', 'learningRateMultiplier'], + getv(from_object, ['learning_rate_multiplier']), + ) + + if getv(from_object, ['adapter_size']) is not None: + setv( + parent_object, + ['distillationSpec', 'hyperParameters', 'adapterSize'], + getv(from_object, ['adapter_size']), + ) + + if getv(from_object, ['pipeline_root_directory']) is not None: + setv( + parent_object, + ['distillationSpec', 'pipelineRootDirectory'], + getv(from_object, ['pipeline_root_directory']), + ) + + return to_object + + +def _CreateDistillationJobParameters_to_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['student_model']) is not None: + raise ValueError('student_model parameter is not supported in Google AI.') + + if getv(from_object, ['teacher_model']) is not None: + raise ValueError('teacher_model parameter is not supported in Google AI.') + + if getv(from_object, ['training_dataset']) is not None: + setv( + to_object, + ['tuningTask', 'trainingData'], + _DistillationDataset_to_mldev( + api_client, getv(from_object, ['training_dataset']), to_object + ), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CreateDistillationJobConfig_to_mldev( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _CreateDistillationJobParameters_to_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['student_model']) is not None: + setv( + to_object, + ['distillationSpec', 'studentModel'], + getv(from_object, ['student_model']), + ) + + if getv(from_object, ['teacher_model']) is not None: + setv( + to_object, + ['distillationSpec', 'baseTeacherModel'], + getv(from_object, ['teacher_model']), + ) + + if getv(from_object, ['training_dataset']) is not None: + setv( + to_object, + ['distillationSpec', 'trainingDatasetUri'], + _DistillationDataset_to_vertex( + api_client, getv(from_object, ['training_dataset']), to_object + ), + ) + + if getv(from_object, ['config']) is not None: + setv( + to_object, + ['config'], + _CreateDistillationJobConfig_to_vertex( + api_client, getv(from_object, ['config']), to_object + ), + ) + + return to_object + + +def _TunedModel_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + 
setv(to_object, ['model'], getv(from_object, ['name'])) + + if getv(from_object, ['name']) is not None: + setv(to_object, ['endpoint'], getv(from_object, ['name'])) + + return to_object + + +def _TunedModel_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['model']) is not None: + setv(to_object, ['model'], getv(from_object, ['model'])) + + if getv(from_object, ['endpoint']) is not None: + setv(to_object, ['endpoint'], getv(from_object, ['endpoint'])) + + return to_object + + +def _TuningJob_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['state']) is not None: + setv( + to_object, + ['state'], + t.t_tuning_job_status(api_client, getv(from_object, ['state'])), + ) + + if getv(from_object, ['createTime']) is not None: + setv(to_object, ['create_time'], getv(from_object, ['createTime'])) + + if getv(from_object, ['tuningTask', 'startTime']) is not None: + setv( + to_object, + ['start_time'], + getv(from_object, ['tuningTask', 'startTime']), + ) + + if getv(from_object, ['tuningTask', 'completeTime']) is not None: + setv( + to_object, + ['end_time'], + getv(from_object, ['tuningTask', 'completeTime']), + ) + + if getv(from_object, ['updateTime']) is not None: + setv(to_object, ['update_time'], getv(from_object, ['updateTime'])) + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if getv(from_object, ['baseModel']) is not None: + setv(to_object, ['base_model'], getv(from_object, ['baseModel'])) + + if getv(from_object, ['_self']) is not None: + setv( + to_object, + ['tuned_model'], + _TunedModel_from_mldev( + api_client, getv(from_object, ['_self']), to_object + ), + ) + + if getv(from_object, ['experiment']) is not None: + setv(to_object, ['experiment'], getv(from_object, ['experiment'])) + + if getv(from_object, ['labels']) is not None: + setv(to_object, ['labels'], getv(from_object, ['labels'])) + + if getv(from_object, ['tunedModelDisplayName']) is not None: + setv( + to_object, + ['tuned_model_display_name'], + getv(from_object, ['tunedModelDisplayName']), + ) + + return to_object + + +def _TuningJob_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['name']) is not None: + setv(to_object, ['name'], getv(from_object, ['name'])) + + if getv(from_object, ['state']) is not None: + setv( + to_object, + ['state'], + t.t_tuning_job_status(api_client, getv(from_object, ['state'])), + ) + + if getv(from_object, ['createTime']) is not None: + setv(to_object, ['create_time'], getv(from_object, ['createTime'])) + + if getv(from_object, ['startTime']) is not None: + setv(to_object, ['start_time'], getv(from_object, ['startTime'])) + + if getv(from_object, ['endTime']) is not None: + setv(to_object, ['end_time'], getv(from_object, ['endTime'])) + + if getv(from_object, ['updateTime']) is not None: + setv(to_object, ['update_time'], getv(from_object, ['updateTime'])) + + if getv(from_object, ['error']) is not None: + setv(to_object, ['error'], getv(from_object, ['error'])) + + if getv(from_object, ['description']) is not None: + setv(to_object, ['description'], getv(from_object, ['description'])) + + if 
getv(from_object, ['baseModel']) is not None: + setv(to_object, ['base_model'], getv(from_object, ['baseModel'])) + + if getv(from_object, ['tunedModel']) is not None: + setv( + to_object, + ['tuned_model'], + _TunedModel_from_vertex( + api_client, getv(from_object, ['tunedModel']), to_object + ), + ) + + if getv(from_object, ['supervisedTuningSpec']) is not None: + setv( + to_object, + ['supervised_tuning_spec'], + getv(from_object, ['supervisedTuningSpec']), + ) + + if getv(from_object, ['tuningDataStats']) is not None: + setv( + to_object, ['tuning_data_stats'], getv(from_object, ['tuningDataStats']) + ) + + if getv(from_object, ['encryptionSpec']) is not None: + setv(to_object, ['encryption_spec'], getv(from_object, ['encryptionSpec'])) + + if getv(from_object, ['distillationSpec']) is not None: + setv( + to_object, + ['distillation_spec'], + getv(from_object, ['distillationSpec']), + ) + + if getv(from_object, ['partnerModelTuningSpec']) is not None: + setv( + to_object, + ['partner_model_tuning_spec'], + getv(from_object, ['partnerModelTuningSpec']), + ) + + if getv(from_object, ['pipelineJob']) is not None: + setv(to_object, ['pipeline_job'], getv(from_object, ['pipelineJob'])) + + if getv(from_object, ['experiment']) is not None: + setv(to_object, ['experiment'], getv(from_object, ['experiment'])) + + if getv(from_object, ['labels']) is not None: + setv(to_object, ['labels'], getv(from_object, ['labels'])) + + if getv(from_object, ['tunedModelDisplayName']) is not None: + setv( + to_object, + ['tuned_model_display_name'], + getv(from_object, ['tunedModelDisplayName']), + ) + + return to_object + + +def _ListTuningJobsResponse_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['nextPageToken']) is not None: + setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken'])) + + if getv(from_object, ['tunedModels']) is not None: + setv( + to_object, + ['tuning_jobs'], + [ + _TuningJob_from_mldev(api_client, item, to_object) + for item in getv(from_object, ['tunedModels']) + ], + ) + + return to_object + + +def _ListTuningJobsResponse_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['nextPageToken']) is not None: + setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken'])) + + if getv(from_object, ['tuningJobs']) is not None: + setv( + to_object, + ['tuning_jobs'], + [ + _TuningJob_from_vertex(api_client, item, to_object) + for item in getv(from_object, ['tuningJobs']) + ], + ) + + return to_object + + +def _TuningJobOrOperation_from_mldev( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['_self']) is not None: + setv( + to_object, + ['tuning_job'], + _TuningJob_from_mldev( + api_client, + t.t_resolve_operation(api_client, getv(from_object, ['_self'])), + to_object, + ), + ) + + return to_object + + +def _TuningJobOrOperation_from_vertex( + api_client: ApiClient, + from_object: Union[dict, object], + parent_object: dict = None, +) -> dict: + to_object = {} + if getv(from_object, ['_self']) is not None: + setv( + to_object, + ['tuning_job'], + _TuningJob_from_vertex( + api_client, + t.t_resolve_operation(api_client, getv(from_object, ['_self'])), + to_object, + ), + ) + + return to_object + + +class Tunings(_common.BaseModule): + + def _get( + self, + *, + 
name: str, + config: Optional[types.GetTuningJobConfigOrDict] = None, + ) -> types.TuningJob: + """Gets a TuningJob. + + Args: + name: The resource name of the tuning job. + + Returns: + A TuningJob object. + """ + + parameter_model = types._GetTuningJobParameters( + name=name, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _GetTuningJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _GetTuningJobParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _TuningJob_from_vertex(self._api_client, response_dict) + else: + response_dict = _TuningJob_from_mldev(self._api_client, response_dict) + + return_value = types.TuningJob._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def _list( + self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None + ) -> types.ListTuningJobsResponse: + """Lists tuning jobs. + + Args: + config: The configuration for the list request. + + Returns: + A list of tuning jobs. + """ + + parameter_model = types._ListTuningJobsParameters( + config=config, + ) + + if self._api_client.vertexai: + request_dict = _ListTuningJobsParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'tuningJobs'.format_map(request_dict.get('_url')) + else: + request_dict = _ListTuningJobsParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'tunedModels'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _ListTuningJobsResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _ListTuningJobsResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.ListTuningJobsResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def _tune( + self, + *, + base_model: str, + training_dataset: types.TuningDatasetOrDict, + config: Optional[types.CreateTuningJobConfigOrDict] = None, + ) -> types.TuningJobOrOperation: + """Creates a supervised fine-tuning job. + + Args: + base_model: The name of the model to tune. + training_dataset: The training dataset to use. + config: The configuration to use for the tuning job. + + Returns: + A TuningJob object. 
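+
+    Usage (an illustrative sketch via the public ``tune`` wrapper, not
+    taken from the generated code: the base model name and GCS URI are
+    placeholder values, and a Vertex AI client is assumed because the
+    training dataset is given as a GCS URI):
+
+    .. code-block:: python
+
+      job = client.tunings.tune(
+          base_model='gemini-1.0-pro-002',  # placeholder model name
+          training_dataset=types.TuningDataset(
+              gcs_uri='gs://my-bucket/training-data.jsonl',  # placeholder URI
+          ),
+          config=types.CreateTuningJobConfig(
+              tuned_model_display_name='my-tuned-model',
+          ),
+      )
+      print(job.state)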
+ """ + + parameter_model = types._CreateTuningJobParameters( + base_model=base_model, + training_dataset=training_dataset, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _CreateTuningJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'tuningJobs'.format_map(request_dict.get('_url')) + else: + request_dict = _CreateTuningJobParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'tunedModels'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _TuningJobOrOperation_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _TuningJobOrOperation_from_mldev( + self._api_client, response_dict + ) + + return_value = types.TuningJobOrOperation._from_response( + response_dict, parameter_model + ).tuning_job + self._api_client._verify_response(return_value) + return return_value + + def distill( + self, + *, + student_model: str, + teacher_model: str, + training_dataset: types.DistillationDatasetOrDict, + config: Optional[types.CreateDistillationJobConfigOrDict] = None, + ) -> types.TuningJob: + """Creates a distillation job. + + Args: + student_model: The name of the model to tune. + teacher_model: The name of the model to distill from. + training_dataset: The training dataset to use. + config: The configuration to use for the distillation job. + + Returns: + A TuningJob object. + """ + + parameter_model = types._CreateDistillationJobParameters( + student_model=student_model, + teacher_model=teacher_model, + training_dataset=training_dataset, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _CreateDistillationJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'tuningJobs'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. 
+ config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = self._api_client.request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _TuningJob_from_vertex(self._api_client, response_dict) + else: + response_dict = _TuningJob_from_mldev(self._api_client, response_dict) + + return_value = types.TuningJob._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + def list( + self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None + ) -> Pager[types.TuningJob]: + return Pager( + 'tuning_jobs', + self._list, + self._list(config=config), + config, + ) + + def get( + self, + *, + name: str, + config: Optional[types.GetTuningJobConfigOrDict] = None, + ) -> types.TuningJob: + job = self._get(name=name, config=config) + if job.experiment and self._api_client.vertexai: + _IpythonUtils.display_experiment_button( + experiment=job.experiment, + project=self._api_client.project, + ) + return job + + def tune( + self, + *, + base_model: str, + training_dataset: types.TuningDatasetOrDict, + config: Optional[types.CreateTuningJobConfigOrDict] = None, + ) -> types.TuningJobOrOperation: + result = self._tune( + base_model=base_model, + training_dataset=training_dataset, + config=config, + ) + if result.name and self._api_client.vertexai: + _IpythonUtils.display_model_tuning_button(tuning_job_resource=result.name) + return result + + +class AsyncTunings(_common.BaseModule): + + async def _get( + self, + *, + name: str, + config: Optional[types.GetTuningJobConfigOrDict] = None, + ) -> types.TuningJob: + """Gets a TuningJob. + + Args: + name: The resource name of the tuning job. + + Returns: + A TuningJob object. + """ + + parameter_model = types._GetTuningJobParameters( + name=name, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _GetTuningJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + else: + request_dict = _GetTuningJobParameters_to_mldev( + self._api_client, parameter_model + ) + path = '{name}'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _TuningJob_from_vertex(self._api_client, response_dict) + else: + response_dict = _TuningJob_from_mldev(self._api_client, response_dict) + + return_value = types.TuningJob._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def _list( + self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None + ) -> types.ListTuningJobsResponse: + """Lists tuning jobs. + + Args: + config: The configuration for the list request. + + Returns: + A list of tuning jobs. 
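+
+    Usage (an illustrative sketch; in practice the public ``list`` method
+    wraps this one in an AsyncPager, and the page size shown is an
+    arbitrary example value):
+
+    .. code-block:: python
+
+      async for job in await client.aio.tunings.list(config={'page_size': 10}):
+        print(job.name)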
+ """ + + parameter_model = types._ListTuningJobsParameters( + config=config, + ) + + if self._api_client.vertexai: + request_dict = _ListTuningJobsParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'tuningJobs'.format_map(request_dict.get('_url')) + else: + request_dict = _ListTuningJobsParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'tunedModels'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'get', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _ListTuningJobsResponse_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _ListTuningJobsResponse_from_mldev( + self._api_client, response_dict + ) + + return_value = types.ListTuningJobsResponse._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def _tune( + self, + *, + base_model: str, + training_dataset: types.TuningDatasetOrDict, + config: Optional[types.CreateTuningJobConfigOrDict] = None, + ) -> types.TuningJobOrOperation: + """Creates a supervised fine-tuning job. + + Args: + base_model: The name of the model to tune. + training_dataset: The training dataset to use. + config: The configuration to use for the tuning job. + + Returns: + A TuningJob object. + """ + + parameter_model = types._CreateTuningJobParameters( + base_model=base_model, + training_dataset=training_dataset, + config=config, + ) + + if self._api_client.vertexai: + request_dict = _CreateTuningJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'tuningJobs'.format_map(request_dict.get('_url')) + else: + request_dict = _CreateTuningJobParameters_to_mldev( + self._api_client, parameter_model + ) + path = 'tunedModels'.format_map(request_dict.get('_url')) + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _TuningJobOrOperation_from_vertex( + self._api_client, response_dict + ) + else: + response_dict = _TuningJobOrOperation_from_mldev( + self._api_client, response_dict + ) + + return_value = types.TuningJobOrOperation._from_response( + response_dict, parameter_model + ).tuning_job + self._api_client._verify_response(return_value) + return return_value + + async def distill( + self, + *, + student_model: str, + teacher_model: str, + training_dataset: types.DistillationDatasetOrDict, + config: Optional[types.CreateDistillationJobConfigOrDict] = None, + ) -> types.TuningJob: + """Creates a distillation job. + + Args: + student_model: The name of the model to tune. + teacher_model: The name of the model to distill from. 
+ training_dataset: The training dataset to use. + config: The configuration to use for the distillation job. + + Returns: + A TuningJob object. + """ + + parameter_model = types._CreateDistillationJobParameters( + student_model=student_model, + teacher_model=teacher_model, + training_dataset=training_dataset, + config=config, + ) + + if not self._api_client.vertexai: + raise ValueError('This method is only supported in the Vertex AI client.') + else: + request_dict = _CreateDistillationJobParameters_to_vertex( + self._api_client, parameter_model + ) + path = 'tuningJobs'.format_map(request_dict.get('_url')) + + query_params = request_dict.get('_query') + if query_params: + path = f'{path}?{urlencode(query_params)}' + # TODO: remove the hack that pops config. + config = request_dict.pop('config', None) + http_options = config.pop('httpOptions', None) if config else None + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response_dict = await self._api_client.async_request( + 'post', path, request_dict, http_options + ) + + if self._api_client.vertexai: + response_dict = _TuningJob_from_vertex(self._api_client, response_dict) + else: + response_dict = _TuningJob_from_mldev(self._api_client, response_dict) + + return_value = types.TuningJob._from_response( + response_dict, parameter_model + ) + self._api_client._verify_response(return_value) + return return_value + + async def list( + self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None + ) -> AsyncPager[types.TuningJob]: + return AsyncPager( + 'tuning_jobs', + self._list, + await self._list(config=config), + config, + ) + + async def get( + self, + *, + name: str, + config: Optional[types.GetTuningJobConfigOrDict] = None, + ) -> types.TuningJob: + job = await self._get(name=name, config=config) + if job.experiment and self._api_client.vertexai: + _IpythonUtils.display_experiment_button( + experiment=job.experiment, + project=self._api_client.project, + ) + return job + + async def tune( + self, + *, + base_model: str, + training_dataset: types.TuningDatasetOrDict, + config: Optional[types.CreateTuningJobConfigOrDict] = None, + ) -> types.TuningJobOrOperation: + result = await self._tune( + base_model=base_model, + training_dataset=training_dataset, + config=config, + ) + if result.name and self._api_client.vertexai: + _IpythonUtils.display_model_tuning_button(tuning_job_resource=result.name) + return result + + +class _IpythonUtils: + """Temporary class to hold the IPython related functions.""" + + displayed_experiments = set() + + @staticmethod + def _get_ipython_shell_name() -> str: + import sys + + if 'IPython' in sys.modules: + from IPython import get_ipython + + return get_ipython().__class__.__name__ + return '' + + @staticmethod + def is_ipython_available() -> bool: + return bool(_IpythonUtils._get_ipython_shell_name()) + + @staticmethod + def _get_styles() -> None: + """Returns the HTML style markup to support custom buttons.""" + return """ + <link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons"> + <style> + .view-vertex-resource, + .view-vertex-resource:hover, + .view-vertex-resource:visited { + position: relative; + display: inline-flex; + flex-direction: row; + height: 32px; + padding: 0 12px; + margin: 4px 18px; + gap: 4px; + border-radius: 4px; + + align-items: center; + justify-content: center; + background-color: rgb(255, 255, 255); + color: rgb(51, 103, 214); + + font-family: Roboto,"Helvetica 
Neue",sans-serif; + font-size: 13px; + font-weight: 500; + text-transform: uppercase; + text-decoration: none !important; + + transition: box-shadow 280ms cubic-bezier(0.4, 0, 0.2, 1) 0s; + box-shadow: 0px 3px 1px -2px rgba(0,0,0,0.2), 0px 2px 2px 0px rgba(0,0,0,0.14), 0px 1px 5px 0px rgba(0,0,0,0.12); + } + .view-vertex-resource:active { + box-shadow: 0px 5px 5px -3px rgba(0,0,0,0.2),0px 8px 10px 1px rgba(0,0,0,0.14),0px 3px 14px 2px rgba(0,0,0,0.12); + } + .view-vertex-resource:active .view-vertex-ripple::before { + position: absolute; + top: 0; + bottom: 0; + left: 0; + right: 0; + border-radius: 4px; + pointer-events: none; + + content: ''; + background-color: rgb(51, 103, 214); + opacity: 0.12; + } + .view-vertex-icon { + font-size: 18px; + } + </style> + """ + + @staticmethod + def _parse_resource_name(marker: str, resource_parts: list[str]) -> str: + """Returns the part after the marker text part.""" + for i in range(len(resource_parts)): + if resource_parts[i] == marker and i + 1 < len(resource_parts): + return resource_parts[i + 1] + return '' + + @staticmethod + def _display_link( + text: str, url: str, icon: Optional[str] = 'open_in_new' + ) -> None: + """Creates and displays the link to open the Vertex resource. + + Args: + text: The text displayed on the clickable button. + url: The url that the button will lead to. Only cloud console URIs are + allowed. + icon: The icon name on the button (from material-icons library) + """ + CLOUD_UI_URL = 'https://console.cloud.google.com' # pylint: disable=invalid-name + if not url.startswith(CLOUD_UI_URL): + raise ValueError(f'Only urls starting with {CLOUD_UI_URL} are allowed.') + + import uuid + + button_id = f'view-vertex-resource-{str(uuid.uuid4())}' + + # Add the markup for the CSS and link component + html = f""" + {_IpythonUtils._get_styles()} + <a class="view-vertex-resource" id="{button_id}" href="#view-{button_id}"> + <span class="material-icons view-vertex-icon">{icon}</span> + <span>{text}</span> + </a> + """ + + # Add the click handler for the link + html += f""" + <script> + (function () {{ + const link = document.getElementById('{button_id}'); + link.addEventListener('click', (e) => {{ + if (window.google?.colab?.openUrl) {{ + window.google.colab.openUrl('{url}'); + }} else {{ + window.open('{url}', '_blank'); + }} + e.stopPropagation(); + e.preventDefault(); + }}); + }})(); + </script> + """ + + from IPython.core.display import display + from IPython.display import HTML + + display(HTML(html)) + + @staticmethod + def display_experiment_button(experiment: str, project: str) -> None: + """Function to generate a link bound to the Vertex experiment. + + Args: + experiment: The Vertex experiment name. Example format: + projects/{project_id}/locations/{location}/metadataStores/default/contexts/{experiment_name} + project: The project (alphanumeric) name. + """ + if ( + not _IpythonUtils.is_ipython_available() + or experiment in _IpythonUtils.displayed_experiments + ): + return + # Experiment gives the numeric id, but we need the alphanumeric project + # name. So we get the project from the api client object as an argument. 
+ resource_parts = experiment.split('/') + location = resource_parts[3] + experiment_name = resource_parts[-1] + + uri = ( + 'https://console.cloud.google.com/vertex-ai/experiments/locations/' + + f'{location}/experiments/{experiment_name}/' + + f'runs?project={project}' + ) + _IpythonUtils._display_link('View Experiment', uri, 'science') + + # Avoid repeatedly showing the button + _IpythonUtils.displayed_experiments.add(experiment) + + @staticmethod + def display_model_tuning_button(tuning_job_resource: str) -> None: + """Function to generate a link bound to the Vertex model tuning job. + + Args: + tuning_job_resource: The Vertex tuning job name. Example format: + projects/{project_id}/locations/{location}/tuningJobs/{tuning_job_id} + """ + if not _IpythonUtils.is_ipython_available(): + return + + resource_parts = tuning_job_resource.split('/') + project = resource_parts[1] + location = resource_parts[3] + tuning_job_id = resource_parts[-1] + + uri = ( + 'https://console.cloud.google.com/vertex-ai/generative/language/' + + f'locations/{location}/tuning/tuningJob/{tuning_job_id}' + + f'?project={project}' + ) + _IpythonUtils._display_link('View Tuning Job', uri, 'tune') diff --git a/.venv/lib/python3.12/site-packages/google/genai/types.py b/.venv/lib/python3.12/site-packages/google/genai/types.py new file mode 100644 index 00000000..e8cb4620 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/genai/types.py @@ -0,0 +1,8332 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Code generated by the Google Gen AI SDK generator DO NOT EDIT. + +import datetime +import inspect +import json +import logging +from typing import Any, Callable, GenericAlias, Literal, Optional, TypedDict, Union +import PIL.Image +import pydantic +from pydantic import Field +from . import _common + + +class Outcome(_common.CaseInSensitiveEnum): + """Required. Outcome of the code execution.""" + + OUTCOME_UNSPECIFIED = 'OUTCOME_UNSPECIFIED' + OUTCOME_OK = 'OUTCOME_OK' + OUTCOME_FAILED = 'OUTCOME_FAILED' + OUTCOME_DEADLINE_EXCEEDED = 'OUTCOME_DEADLINE_EXCEEDED' + + +class Language(_common.CaseInSensitiveEnum): + """Required. Programming language of the `code`.""" + + LANGUAGE_UNSPECIFIED = 'LANGUAGE_UNSPECIFIED' + PYTHON = 'PYTHON' + + +class Type(_common.CaseInSensitiveEnum): + """A basic data type.""" + + TYPE_UNSPECIFIED = 'TYPE_UNSPECIFIED' + STRING = 'STRING' + NUMBER = 'NUMBER' + INTEGER = 'INTEGER' + BOOLEAN = 'BOOLEAN' + ARRAY = 'ARRAY' + OBJECT = 'OBJECT' + + +class HarmCategory(_common.CaseInSensitiveEnum): + """Required. 
Harm category.""" + + HARM_CATEGORY_UNSPECIFIED = 'HARM_CATEGORY_UNSPECIFIED' + HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH' + HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT' + HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT' + HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT' + HARM_CATEGORY_CIVIC_INTEGRITY = 'HARM_CATEGORY_CIVIC_INTEGRITY' + + +class HarmBlockMethod(_common.CaseInSensitiveEnum): + """Optional. + + Specify if the threshold is used for probability or severity score. If not + specified, the threshold is used for probability score. + """ + + HARM_BLOCK_METHOD_UNSPECIFIED = 'HARM_BLOCK_METHOD_UNSPECIFIED' + SEVERITY = 'SEVERITY' + PROBABILITY = 'PROBABILITY' + + +class HarmBlockThreshold(_common.CaseInSensitiveEnum): + """Required. The harm block threshold.""" + + HARM_BLOCK_THRESHOLD_UNSPECIFIED = 'HARM_BLOCK_THRESHOLD_UNSPECIFIED' + BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE' + BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE' + BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH' + BLOCK_NONE = 'BLOCK_NONE' + OFF = 'OFF' + + +class Mode(_common.CaseInSensitiveEnum): + """The mode of the predictor to be used in dynamic retrieval.""" + + MODE_UNSPECIFIED = 'MODE_UNSPECIFIED' + MODE_DYNAMIC = 'MODE_DYNAMIC' + + +class State(_common.CaseInSensitiveEnum): + """Output only. RagFile state.""" + + STATE_UNSPECIFIED = 'STATE_UNSPECIFIED' + ACTIVE = 'ACTIVE' + ERROR = 'ERROR' + + +class FinishReason(_common.CaseInSensitiveEnum): + """Output only. + + The reason why the model stopped generating tokens. If empty, the model has + not stopped generating the tokens. + """ + + FINISH_REASON_UNSPECIFIED = 'FINISH_REASON_UNSPECIFIED' + STOP = 'STOP' + MAX_TOKENS = 'MAX_TOKENS' + SAFETY = 'SAFETY' + RECITATION = 'RECITATION' + OTHER = 'OTHER' + BLOCKLIST = 'BLOCKLIST' + PROHIBITED_CONTENT = 'PROHIBITED_CONTENT' + SPII = 'SPII' + MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL' + + +class HarmProbability(_common.CaseInSensitiveEnum): + """Output only. Harm probability levels in the content.""" + + HARM_PROBABILITY_UNSPECIFIED = 'HARM_PROBABILITY_UNSPECIFIED' + NEGLIGIBLE = 'NEGLIGIBLE' + LOW = 'LOW' + MEDIUM = 'MEDIUM' + HIGH = 'HIGH' + + +class HarmSeverity(_common.CaseInSensitiveEnum): + """Output only. Harm severity levels in the content.""" + + HARM_SEVERITY_UNSPECIFIED = 'HARM_SEVERITY_UNSPECIFIED' + HARM_SEVERITY_NEGLIGIBLE = 'HARM_SEVERITY_NEGLIGIBLE' + HARM_SEVERITY_LOW = 'HARM_SEVERITY_LOW' + HARM_SEVERITY_MEDIUM = 'HARM_SEVERITY_MEDIUM' + HARM_SEVERITY_HIGH = 'HARM_SEVERITY_HIGH' + + +class BlockedReason(_common.CaseInSensitiveEnum): + """Output only. 
Blocked reason.""" + + BLOCKED_REASON_UNSPECIFIED = 'BLOCKED_REASON_UNSPECIFIED' + SAFETY = 'SAFETY' + OTHER = 'OTHER' + BLOCKLIST = 'BLOCKLIST' + PROHIBITED_CONTENT = 'PROHIBITED_CONTENT' + + +class DeploymentResourcesType(_common.CaseInSensitiveEnum): + """""" + + DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = ( + 'DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED' + ) + DEDICATED_RESOURCES = 'DEDICATED_RESOURCES' + AUTOMATIC_RESOURCES = 'AUTOMATIC_RESOURCES' + SHARED_RESOURCES = 'SHARED_RESOURCES' + + +class JobState(_common.CaseInSensitiveEnum): + """Config class for the job state.""" + + JOB_STATE_UNSPECIFIED = 'JOB_STATE_UNSPECIFIED' + JOB_STATE_QUEUED = 'JOB_STATE_QUEUED' + JOB_STATE_PENDING = 'JOB_STATE_PENDING' + JOB_STATE_RUNNING = 'JOB_STATE_RUNNING' + JOB_STATE_SUCCEEDED = 'JOB_STATE_SUCCEEDED' + JOB_STATE_FAILED = 'JOB_STATE_FAILED' + JOB_STATE_CANCELLING = 'JOB_STATE_CANCELLING' + JOB_STATE_CANCELLED = 'JOB_STATE_CANCELLED' + JOB_STATE_PAUSED = 'JOB_STATE_PAUSED' + JOB_STATE_EXPIRED = 'JOB_STATE_EXPIRED' + JOB_STATE_UPDATING = 'JOB_STATE_UPDATING' + JOB_STATE_PARTIALLY_SUCCEEDED = 'JOB_STATE_PARTIALLY_SUCCEEDED' + + +class AdapterSize(_common.CaseInSensitiveEnum): + """Optional. Adapter size for tuning.""" + + ADAPTER_SIZE_UNSPECIFIED = 'ADAPTER_SIZE_UNSPECIFIED' + ADAPTER_SIZE_ONE = 'ADAPTER_SIZE_ONE' + ADAPTER_SIZE_FOUR = 'ADAPTER_SIZE_FOUR' + ADAPTER_SIZE_EIGHT = 'ADAPTER_SIZE_EIGHT' + ADAPTER_SIZE_SIXTEEN = 'ADAPTER_SIZE_SIXTEEN' + ADAPTER_SIZE_THIRTY_TWO = 'ADAPTER_SIZE_THIRTY_TWO' + + +class DynamicRetrievalConfigMode(_common.CaseInSensitiveEnum): + """Config class for the dynamic retrieval config mode.""" + + MODE_UNSPECIFIED = 'MODE_UNSPECIFIED' + MODE_DYNAMIC = 'MODE_DYNAMIC' + + +class FunctionCallingConfigMode(_common.CaseInSensitiveEnum): + """Config class for the function calling config mode.""" + + MODE_UNSPECIFIED = 'MODE_UNSPECIFIED' + AUTO = 'AUTO' + ANY = 'ANY' + NONE = 'NONE' + + +class MediaResolution(_common.CaseInSensitiveEnum): + """The media resolution to use.""" + + MEDIA_RESOLUTION_UNSPECIFIED = 'MEDIA_RESOLUTION_UNSPECIFIED' + MEDIA_RESOLUTION_LOW = 'MEDIA_RESOLUTION_LOW' + MEDIA_RESOLUTION_MEDIUM = 'MEDIA_RESOLUTION_MEDIUM' + MEDIA_RESOLUTION_HIGH = 'MEDIA_RESOLUTION_HIGH' + + +class SafetyFilterLevel(_common.CaseInSensitiveEnum): + """Enum that controls the safety filter level for objectionable content.""" + + BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE' + BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE' + BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH' + BLOCK_NONE = 'BLOCK_NONE' + + +class PersonGeneration(_common.CaseInSensitiveEnum): + """Enum that controls the generation of people.""" + + DONT_ALLOW = 'DONT_ALLOW' + ALLOW_ADULT = 'ALLOW_ADULT' + ALLOW_ALL = 'ALLOW_ALL' + + +class ImagePromptLanguage(_common.CaseInSensitiveEnum): + """Enum that specifies the language of the text in the prompt.""" + + auto = 'auto' + en = 'en' + ja = 'ja' + ko = 'ko' + hi = 'hi' + + +class MaskReferenceMode(_common.CaseInSensitiveEnum): + """Enum representing the mask mode of a mask reference image.""" + + MASK_MODE_DEFAULT = 'MASK_MODE_DEFAULT' + MASK_MODE_USER_PROVIDED = 'MASK_MODE_USER_PROVIDED' + MASK_MODE_BACKGROUND = 'MASK_MODE_BACKGROUND' + MASK_MODE_FOREGROUND = 'MASK_MODE_FOREGROUND' + MASK_MODE_SEMANTIC = 'MASK_MODE_SEMANTIC' + + +class ControlReferenceType(_common.CaseInSensitiveEnum): + """Enum representing the control type of a control reference image.""" + + CONTROL_TYPE_DEFAULT = 'CONTROL_TYPE_DEFAULT' + CONTROL_TYPE_CANNY = 'CONTROL_TYPE_CANNY' + CONTROL_TYPE_SCRIBBLE = 
'CONTROL_TYPE_SCRIBBLE' + CONTROL_TYPE_FACE_MESH = 'CONTROL_TYPE_FACE_MESH' + + +class SubjectReferenceType(_common.CaseInSensitiveEnum): + """Enum representing the subject type of a subject reference image.""" + + SUBJECT_TYPE_DEFAULT = 'SUBJECT_TYPE_DEFAULT' + SUBJECT_TYPE_PERSON = 'SUBJECT_TYPE_PERSON' + SUBJECT_TYPE_ANIMAL = 'SUBJECT_TYPE_ANIMAL' + SUBJECT_TYPE_PRODUCT = 'SUBJECT_TYPE_PRODUCT' + + +class EditMode(_common.CaseInSensitiveEnum): + """Enum representing the Imagen 3 Edit mode.""" + + EDIT_MODE_DEFAULT = 'EDIT_MODE_DEFAULT' + EDIT_MODE_INPAINT_REMOVAL = 'EDIT_MODE_INPAINT_REMOVAL' + EDIT_MODE_INPAINT_INSERTION = 'EDIT_MODE_INPAINT_INSERTION' + EDIT_MODE_OUTPAINT = 'EDIT_MODE_OUTPAINT' + EDIT_MODE_CONTROLLED_EDITING = 'EDIT_MODE_CONTROLLED_EDITING' + EDIT_MODE_STYLE = 'EDIT_MODE_STYLE' + EDIT_MODE_BGSWAP = 'EDIT_MODE_BGSWAP' + EDIT_MODE_PRODUCT_IMAGE = 'EDIT_MODE_PRODUCT_IMAGE' + + +class FileState(_common.CaseInSensitiveEnum): + """State for the lifecycle of a File.""" + + STATE_UNSPECIFIED = 'STATE_UNSPECIFIED' + PROCESSING = 'PROCESSING' + ACTIVE = 'ACTIVE' + FAILED = 'FAILED' + + +class FileSource(_common.CaseInSensitiveEnum): + """Source of the File.""" + + SOURCE_UNSPECIFIED = 'SOURCE_UNSPECIFIED' + UPLOADED = 'UPLOADED' + GENERATED = 'GENERATED' + + +class Modality(_common.CaseInSensitiveEnum): + """Config class for the server content modalities.""" + + MODALITY_UNSPECIFIED = 'MODALITY_UNSPECIFIED' + TEXT = 'TEXT' + IMAGE = 'IMAGE' + AUDIO = 'AUDIO' + + +class VideoMetadata(_common.BaseModel): + """Metadata describes the input video content.""" + + end_offset: Optional[str] = Field( + default=None, description="""Optional. The end offset of the video.""" + ) + start_offset: Optional[str] = Field( + default=None, description="""Optional. The start offset of the video.""" + ) + + +class VideoMetadataDict(TypedDict, total=False): + """Metadata describes the input video content.""" + + end_offset: Optional[str] + """Optional. The end offset of the video.""" + + start_offset: Optional[str] + """Optional. The start offset of the video.""" + + +VideoMetadataOrDict = Union[VideoMetadata, VideoMetadataDict] + + +class CodeExecutionResult(_common.BaseModel): + """Result of executing the [ExecutableCode]. + + Always follows a `part` containing the [ExecutableCode]. + """ + + outcome: Optional[Outcome] = Field( + default=None, description="""Required. Outcome of the code execution.""" + ) + output: Optional[str] = Field( + default=None, + description="""Optional. Contains stdout when code execution is successful, stderr or other description otherwise.""", + ) + + +class CodeExecutionResultDict(TypedDict, total=False): + """Result of executing the [ExecutableCode]. + + Always follows a `part` containing the [ExecutableCode]. + """ + + outcome: Optional[Outcome] + """Required. Outcome of the code execution.""" + + output: Optional[str] + """Optional. Contains stdout when code execution is successful, stderr or other description otherwise.""" + + +CodeExecutionResultOrDict = Union[CodeExecutionResult, CodeExecutionResultDict] + + +class ExecutableCode(_common.BaseModel): + """Code generated by the model that is meant to be executed, and the result returned to the model. + + Generated when using the [FunctionDeclaration] tool and + [FunctionCallingConfig] mode is set to [Mode.CODE]. + """ + + code: Optional[str] = Field( + default=None, description="""Required. The code to be executed.""" + ) + language: Optional[Language] = Field( + default=None, + description="""Required. 
Programming language of the `code`.""",
+  )
+
+
+class ExecutableCodeDict(TypedDict, total=False):
+  """Code generated by the model that is meant to be executed, and the result returned to the model.
+
+  Generated when using the [FunctionDeclaration] tool and
+  [FunctionCallingConfig] mode is set to [Mode.CODE].
+  """
+
+  code: Optional[str]
+  """Required. The code to be executed."""
+
+  language: Optional[Language]
+  """Required. Programming language of the `code`."""
+
+
+ExecutableCodeOrDict = Union[ExecutableCode, ExecutableCodeDict]
+
+
+class FileData(_common.BaseModel):
+  """URI based data."""
+
+  file_uri: Optional[str] = Field(
+      default=None, description="""Required. URI."""
+  )
+  mime_type: Optional[str] = Field(
+      default=None,
+      description="""Required. The IANA standard MIME type of the source data.""",
+  )
+
+
+class FileDataDict(TypedDict, total=False):
+  """URI based data."""
+
+  file_uri: Optional[str]
+  """Required. URI."""
+
+  mime_type: Optional[str]
+  """Required. The IANA standard MIME type of the source data."""
+
+
+FileDataOrDict = Union[FileData, FileDataDict]
+
+
+class FunctionCall(_common.BaseModel):
+  """A function call."""
+
+  id: Optional[str] = Field(
+      default=None,
+      description="""The unique id of the function call. If populated, the client is expected to execute the
+      `function_call` and return the response with the matching `id`.""",
+  )
+  args: Optional[dict[str, Any]] = Field(
+      default=None,
+      description="""Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.""",
+  )
+  name: Optional[str] = Field(
+      default=None,
+      description="""Required. The name of the function to call. Matches [FunctionDeclaration.name].""",
+  )
+
+
+class FunctionCallDict(TypedDict, total=False):
+  """A function call."""
+
+  id: Optional[str]
+  """The unique id of the function call. If populated, the client is expected to execute the
+  `function_call` and return the response with the matching `id`."""
+
+  args: Optional[dict[str, Any]]
+  """Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details."""
+
+  name: Optional[str]
+  """Required. The name of the function to call. Matches [FunctionDeclaration.name]."""
+
+
+FunctionCallOrDict = Union[FunctionCall, FunctionCallDict]
+
+
+class FunctionResponse(_common.BaseModel):
+  """A function response."""
+
+  id: Optional[str] = Field(
+      default=None,
+      description="""The id of the function call this response is for. Populated by the client
+      to match the corresponding function call `id`.""",
+  )
+  name: Optional[str] = Field(
+      default=None,
+      description="""Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name].""",
+  )
+  response: Optional[dict[str, Any]] = Field(
+      default=None,
+      description="""Required. The function response in JSON object format. Use "output" key to specify function output and "error" key to specify error details (if any). If "output" and "error" keys are not specified, then whole "response" is treated as function output.""",
+  )
+
+
+class FunctionResponseDict(TypedDict, total=False):
+  """A function response."""
+
+  id: Optional[str]
+  """The id of the function call this response is for. Populated by the client
+  to match the corresponding function call `id`."""
+
+  name: Optional[str]
+  """Required. The name of the function to call.
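
# Editorial usage sketch: pairing a FunctionResponse with the FunctionCall it
# answers, per the `id` and `response` docstrings above. The weather payload is
# invented for illustration.

from google.genai import types

call = types.FunctionCall(id='call-1', name='get_weather', args={'city': 'Paris'})
reply = types.FunctionResponse(
    id=call.id,      # echo the call id so the model can match call and response
    name=call.name,  # must match FunctionDeclaration.name
    response={'output': {'temp_c': 21, 'sky': 'clear'}},
)
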
Matches [FunctionDeclaration.name] and [FunctionCall.name].""" + + response: Optional[dict[str, Any]] + """Required. The function response in JSON object format. Use "output" key to specify function output and "error" key to specify error details (if any). If "output" and "error" keys are not specified, then whole "response" is treated as function output.""" + + +FunctionResponseOrDict = Union[FunctionResponse, FunctionResponseDict] + + +class Blob(_common.BaseModel): + """Content blob. + + It's preferred to send as text directly rather than raw bytes. + """ + + data: Optional[bytes] = Field( + default=None, description="""Required. Raw bytes.""" + ) + mime_type: Optional[str] = Field( + default=None, + description="""Required. The IANA standard MIME type of the source data.""", + ) + + +class BlobDict(TypedDict, total=False): + """Content blob. + + It's preferred to send as text directly rather than raw bytes. + """ + + data: Optional[bytes] + """Required. Raw bytes.""" + + mime_type: Optional[str] + """Required. The IANA standard MIME type of the source data.""" + + +BlobOrDict = Union[Blob, BlobDict] + + +class Part(_common.BaseModel): + """A datatype containing media content. + + Exactly one field within a Part should be set, representing the specific type + of content being conveyed. Using multiple fields within the same `Part` + instance is considered invalid. + """ + + video_metadata: Optional[VideoMetadata] = Field( + default=None, description="""Metadata for a given video.""" + ) + thought: Optional[bool] = Field( + default=None, + description="""Indicates if the part is thought from the model.""", + ) + code_execution_result: Optional[CodeExecutionResult] = Field( + default=None, + description="""Optional. Result of executing the [ExecutableCode].""", + ) + executable_code: Optional[ExecutableCode] = Field( + default=None, + description="""Optional. Code generated by the model that is meant to be executed.""", + ) + file_data: Optional[FileData] = Field( + default=None, description="""Optional. URI based data.""" + ) + function_call: Optional[FunctionCall] = Field( + default=None, + description="""Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.""", + ) + function_response: Optional[FunctionResponse] = Field( + default=None, + description="""Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model.""", + ) + inline_data: Optional[Blob] = Field( + default=None, description="""Optional. Inlined bytes data.""" + ) + text: Optional[str] = Field( + default=None, description="""Optional. 
Text part (can be code).""" + ) + + @classmethod + def from_uri(cls, file_uri: str, mime_type: str) -> 'Part': + file_data = FileData(file_uri=file_uri, mime_type=mime_type) + return cls(file_data=file_data) + + @classmethod + def from_text(cls, text: str) -> 'Part': + return cls(text=text) + + @classmethod + def from_bytes(cls, data: bytes, mime_type: str) -> 'Part': + inline_data = Blob( + data=data, + mime_type=mime_type, + ) + return cls(inline_data=inline_data) + + @classmethod + def from_function_call(cls, name: str, args: dict[str, Any]) -> 'Part': + function_call = FunctionCall(name=name, args=args) + return cls(function_call=function_call) + + @classmethod + def from_function_response( + cls, name: str, response: dict[str, Any] + ) -> 'Part': + function_response = FunctionResponse(name=name, response=response) + return cls(function_response=function_response) + + @classmethod + def from_video_metadata(cls, end_offset: str, start_offset: str) -> 'Part': + video_metadata = VideoMetadata( + end_offset=end_offset, start_offset=start_offset + ) + return cls(video_metadata=video_metadata) + + @classmethod + def from_executable_code(cls, code: str, language: Language) -> 'Part': + executable_code = ExecutableCode(code=code, language=language) + return cls(executable_code=executable_code) + + @classmethod + def from_code_execution_result(cls, outcome: Outcome, output: str) -> 'Part': + code_execution_result = CodeExecutionResult(outcome=outcome, output=output) + return cls(code_execution_result=code_execution_result) + + +class PartDict(TypedDict, total=False): + """A datatype containing media content. + + Exactly one field within a Part should be set, representing the specific type + of content being conveyed. Using multiple fields within the same `Part` + instance is considered invalid. + """ + + video_metadata: Optional[VideoMetadataDict] + """Metadata for a given video.""" + + thought: Optional[bool] + """Indicates if the part is thought from the model.""" + + code_execution_result: Optional[CodeExecutionResultDict] + """Optional. Result of executing the [ExecutableCode].""" + + executable_code: Optional[ExecutableCodeDict] + """Optional. Code generated by the model that is meant to be executed.""" + + file_data: Optional[FileDataDict] + """Optional. URI based data.""" + + function_call: Optional[FunctionCallDict] + """Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.""" + + function_response: Optional[FunctionResponseDict] + """Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model.""" + + inline_data: Optional[BlobDict] + """Optional. Inlined bytes data.""" + + text: Optional[str] + """Optional. Text part (can be code).""" + + +PartOrDict = Union[Part, PartDict] + + +class Content(_common.BaseModel): + """Contains the multi-part content of a message.""" + + parts: Optional[list[Part]] = Field( + default=None, + description="""List of parts that constitute a single message. Each part may have + a different IANA MIME type.""", + ) + role: Optional[str] = Field( + default=None, + description="""Optional. The producer of the content. Must be either 'user' or + 'model'. Useful to set for multi-turn conversations, otherwise can be + left blank or unset. 
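
# Editorial usage sketch: composing a user turn from the Part factory methods
# defined above; the bucket URI and MIME type are placeholders.

from google.genai import types

content = types.Content(
    role='user',
    parts=[
        types.Part.from_text('Describe this clip in one sentence.'),
        types.Part.from_uri('gs://my-bucket/clip.mp4', 'video/mp4'),
    ],
)
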
If role is not specified, SDK will determine the role.""", + ) + + +class ContentDict(TypedDict, total=False): + """Contains the multi-part content of a message.""" + + parts: Optional[list[PartDict]] + """List of parts that constitute a single message. Each part may have + a different IANA MIME type.""" + + role: Optional[str] + """Optional. The producer of the content. Must be either 'user' or + 'model'. Useful to set for multi-turn conversations, otherwise can be + left blank or unset. If role is not specified, SDK will determine the role.""" + + +ContentOrDict = Union[Content, ContentDict] + + +class Schema(_common.BaseModel): + """Schema that defines the format of input and output data. + + Represents a select subset of an OpenAPI 3.0 schema object. + """ + + min_items: Optional[int] = Field( + default=None, + description="""Optional. Minimum number of the elements for Type.ARRAY.""", + ) + example: Optional[Any] = Field( + default=None, + description="""Optional. Example of the object. Will only populated when the object is the root.""", + ) + property_ordering: Optional[list[str]] = Field( + default=None, + description="""Optional. The order of the properties. Not a standard field in open api spec. Only used to support the order of the properties.""", + ) + pattern: Optional[str] = Field( + default=None, + description="""Optional. Pattern of the Type.STRING to restrict a string to a regular expression.""", + ) + minimum: Optional[float] = Field( + default=None, + description="""Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER""", + ) + default: Optional[Any] = Field( + default=None, description="""Optional. Default value of the data.""" + ) + any_of: list['Schema'] = Field( + default=None, + description="""Optional. The value should be validated against any (one or more) of the subschemas in the list.""", + ) + max_length: Optional[int] = Field( + default=None, + description="""Optional. Maximum length of the Type.STRING""", + ) + title: Optional[str] = Field( + default=None, description="""Optional. The title of the Schema.""" + ) + min_length: Optional[int] = Field( + default=None, + description="""Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING""", + ) + min_properties: Optional[int] = Field( + default=None, + description="""Optional. Minimum number of the properties for Type.OBJECT.""", + ) + max_items: Optional[int] = Field( + default=None, + description="""Optional. Maximum number of the elements for Type.ARRAY.""", + ) + maximum: Optional[float] = Field( + default=None, + description="""Optional. Maximum value of the Type.INTEGER and Type.NUMBER""", + ) + nullable: Optional[bool] = Field( + default=None, + description="""Optional. Indicates if the value may be null.""", + ) + max_properties: Optional[int] = Field( + default=None, + description="""Optional. Maximum number of the properties for Type.OBJECT.""", + ) + type: Optional[Type] = Field( + default=None, description="""Optional. The type of the data.""" + ) + description: Optional[str] = Field( + default=None, description="""Optional. The description of the data.""" + ) + enum: Optional[list[str]] = Field( + default=None, + description="""Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. 
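
# Editorial usage sketch: a small response schema built from the Schema fields
# declared above. The 'OBJECT'/'STRING'/'INTEGER' values follow the Type enum
# this module references; the field names are illustrative.

from google.genai import types

recipe_schema = types.Schema(
    type='OBJECT',
    properties={
        'name': types.Schema(type='STRING'),
        'rating': types.Schema(type='INTEGER', minimum=1, maximum=5),
    },
    required=['name'],
)
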
We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]}""", + ) + format: Optional[str] = Field( + default=None, + description="""Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc""", + ) + items: 'Schema' = Field( + default=None, + description="""Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY.""", + ) + properties: dict[str, 'Schema'] = Field( + default=None, + description="""Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT.""", + ) + required: Optional[list[str]] = Field( + default=None, + description="""Optional. Required properties of Type.OBJECT.""", + ) + + +class SchemaDict(TypedDict, total=False): + """Schema that defines the format of input and output data. + + Represents a select subset of an OpenAPI 3.0 schema object. + """ + + min_items: Optional[int] + """Optional. Minimum number of the elements for Type.ARRAY.""" + + example: Optional[Any] + """Optional. Example of the object. Will only populated when the object is the root.""" + + property_ordering: Optional[list[str]] + """Optional. The order of the properties. Not a standard field in open api spec. Only used to support the order of the properties.""" + + pattern: Optional[str] + """Optional. Pattern of the Type.STRING to restrict a string to a regular expression.""" + + minimum: Optional[float] + """Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER""" + + default: Optional[Any] + """Optional. Default value of the data.""" + + any_of: list['SchemaDict'] + """Optional. The value should be validated against any (one or more) of the subschemas in the list.""" + + max_length: Optional[int] + """Optional. Maximum length of the Type.STRING""" + + title: Optional[str] + """Optional. The title of the Schema.""" + + min_length: Optional[int] + """Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING""" + + min_properties: Optional[int] + """Optional. Minimum number of the properties for Type.OBJECT.""" + + max_items: Optional[int] + """Optional. Maximum number of the elements for Type.ARRAY.""" + + maximum: Optional[float] + """Optional. Maximum value of the Type.INTEGER and Type.NUMBER""" + + nullable: Optional[bool] + """Optional. Indicates if the value may be null.""" + + max_properties: Optional[int] + """Optional. Maximum number of the properties for Type.OBJECT.""" + + type: Optional[Type] + """Optional. The type of the data.""" + + description: Optional[str] + """Optional. The description of the data.""" + + enum: Optional[list[str]] + """Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]}""" + + format: Optional[str] + """Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc""" + + items: 'SchemaDict' + """Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY.""" + + properties: dict[str, 'SchemaDict'] + """Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT.""" + + required: Optional[list[str]] + """Optional. 
Required properties of Type.OBJECT.""" + + +SchemaOrDict = Union[Schema, SchemaDict] + + +class SafetySetting(_common.BaseModel): + """Safety settings.""" + + method: Optional[HarmBlockMethod] = Field( + default=None, + description="""Determines if the harm block method uses probability or probability + and severity scores.""", + ) + category: Optional[HarmCategory] = Field( + default=None, description="""Required. Harm category.""" + ) + threshold: Optional[HarmBlockThreshold] = Field( + default=None, description="""Required. The harm block threshold.""" + ) + + +class SafetySettingDict(TypedDict, total=False): + """Safety settings.""" + + method: Optional[HarmBlockMethod] + """Determines if the harm block method uses probability or probability + and severity scores.""" + + category: Optional[HarmCategory] + """Required. Harm category.""" + + threshold: Optional[HarmBlockThreshold] + """Required. The harm block threshold.""" + + +SafetySettingOrDict = Union[SafetySetting, SafetySettingDict] + + +class FunctionDeclaration(_common.BaseModel): + """Defines a function that the model can generate JSON inputs for. + + The inputs are based on `OpenAPI 3.0 specifications + <https://spec.openapis.org/oas/v3.0.3>`_. + """ + + response: Optional[Schema] = Field( + default=None, + description="""Describes the output from the function in the OpenAPI JSON Schema + Object format.""", + ) + description: Optional[str] = Field( + default=None, + description="""Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function.""", + ) + name: Optional[str] = Field( + default=None, + description="""Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length of 64.""", + ) + parameters: Optional[Schema] = Field( + default=None, + description="""Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1""", + ) + + @classmethod + def _get_variant(cls, client) -> str: + """Returns the function variant based on the provided client object.""" + if client.vertexai: + return 'VERTEX_AI' + else: + return 'GOOGLE_AI' + + @classmethod + def from_function_with_options( + cls, + func: Callable, + variant: Literal['GOOGLE_AI', 'VERTEX_AI', 'DEFAULT'] = 'GOOGLE_AI', + ) -> 'FunctionDeclaration': + """Converts a function to a FunctionDeclaration based on an API endpoint. + + Supported endpoints are: 'GOOGLE_AI', 'VERTEX_AI', or 'DEFAULT'. + """ + from . import _automatic_function_calling_util + + supported_variants = ['GOOGLE_AI', 'VERTEX_AI', 'DEFAULT'] + if variant not in supported_variants: + raise ValueError( + f'Unsupported variant: {variant}. Supported variants are:' + f' {", ".join(supported_variants)}' + ) + + # TODO: b/382524014 - Add support for DEFAULT API endpoint. 
+ + parameters_properties = {} + for name, param in inspect.signature(func).parameters.items(): + if param.kind in ( + inspect.Parameter.POSITIONAL_OR_KEYWORD, + inspect.Parameter.KEYWORD_ONLY, + inspect.Parameter.POSITIONAL_ONLY, + ): + schema = _automatic_function_calling_util._parse_schema_from_parameter( + variant, param, func.__name__ + ) + parameters_properties[name] = schema + declaration = FunctionDeclaration( + name=func.__name__, + description=func.__doc__, + ) + if parameters_properties: + declaration.parameters = Schema( + type='OBJECT', + properties=parameters_properties, + ) + if variant == 'VERTEX_AI': + declaration.parameters.required = ( + _automatic_function_calling_util._get_required_fields( + declaration.parameters + ) + ) + if not variant == 'VERTEX_AI': + return declaration + + return_annotation = inspect.signature(func).return_annotation + if return_annotation is inspect._empty: + return declaration + + declaration.response = ( + _automatic_function_calling_util._parse_schema_from_parameter( + variant, + inspect.Parameter( + 'return_value', + inspect.Parameter.POSITIONAL_OR_KEYWORD, + annotation=return_annotation, + ), + func.__name__, + ) + ) + return declaration + + @classmethod + def from_callable(cls, client, func: Callable) -> 'FunctionDeclaration': + """Converts a function to a FunctionDeclaration.""" + return cls.from_function_with_options( + variant=cls._get_variant(client), + func=func, + ) + + +class FunctionDeclarationDict(TypedDict, total=False): + """Defines a function that the model can generate JSON inputs for. + + The inputs are based on `OpenAPI 3.0 specifications + <https://spec.openapis.org/oas/v3.0.3>`_. + """ + + response: Optional[SchemaDict] + """Describes the output from the function in the OpenAPI JSON Schema + Object format.""" + + description: Optional[str] + """Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function.""" + + name: Optional[str] + """Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length of 64.""" + + parameters: Optional[SchemaDict] + """Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1""" + + +FunctionDeclarationOrDict = Union[FunctionDeclaration, FunctionDeclarationDict] + + +class GoogleSearch(_common.BaseModel): + """Tool to support Google Search in Model. Powered by Google.""" + + pass + + +class GoogleSearchDict(TypedDict, total=False): + """Tool to support Google Search in Model. 
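
# Editorial usage sketch: deriving a declaration from a plain function with the
# classmethod above; the docstring becomes the description and the typed
# signature becomes the parameters schema.

from google.genai import types

def get_weather(city: str) -> str:
  """Returns a short weather summary for a city."""
  return f'Sunny in {city}'

decl = types.FunctionDeclaration.from_function_with_options(
    func=get_weather, variant='VERTEX_AI'
)
# With the 'VERTEX_AI' variant, required parameters and the return-value schema
# are also populated, per the branches in the method above.
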
Powered by Google.""" + + pass + + +GoogleSearchOrDict = Union[GoogleSearch, GoogleSearchDict] + + +class DynamicRetrievalConfig(_common.BaseModel): + """Describes the options to customize dynamic retrieval.""" + + mode: Optional[DynamicRetrievalConfigMode] = Field( + default=None, + description="""The mode of the predictor to be used in dynamic retrieval.""", + ) + dynamic_threshold: Optional[float] = Field( + default=None, + description="""Optional. The threshold to be used in dynamic retrieval. If not set, a system default value is used.""", + ) + + +class DynamicRetrievalConfigDict(TypedDict, total=False): + """Describes the options to customize dynamic retrieval.""" + + mode: Optional[DynamicRetrievalConfigMode] + """The mode of the predictor to be used in dynamic retrieval.""" + + dynamic_threshold: Optional[float] + """Optional. The threshold to be used in dynamic retrieval. If not set, a system default value is used.""" + + +DynamicRetrievalConfigOrDict = Union[ + DynamicRetrievalConfig, DynamicRetrievalConfigDict +] + + +class GoogleSearchRetrieval(_common.BaseModel): + """Tool to retrieve public web data for grounding, powered by Google.""" + + dynamic_retrieval_config: Optional[DynamicRetrievalConfig] = Field( + default=None, + description="""Specifies the dynamic retrieval configuration for the given source.""", + ) + + +class GoogleSearchRetrievalDict(TypedDict, total=False): + """Tool to retrieve public web data for grounding, powered by Google.""" + + dynamic_retrieval_config: Optional[DynamicRetrievalConfigDict] + """Specifies the dynamic retrieval configuration for the given source.""" + + +GoogleSearchRetrievalOrDict = Union[ + GoogleSearchRetrieval, GoogleSearchRetrievalDict +] + + +class VertexAISearch(_common.BaseModel): + """Retrieve from Vertex AI Search datastore for grounding. + + See https://cloud.google.com/products/agent-builder + """ + + datastore: Optional[str] = Field( + default=None, + description="""Required. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`""", + ) + + +class VertexAISearchDict(TypedDict, total=False): + """Retrieve from Vertex AI Search datastore for grounding. + + See https://cloud.google.com/products/agent-builder + """ + + datastore: Optional[str] + """Required. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`""" + + +VertexAISearchOrDict = Union[VertexAISearch, VertexAISearchDict] + + +class VertexRagStoreRagResource(_common.BaseModel): + """The definition of the Rag resource.""" + + rag_corpus: Optional[str] = Field( + default=None, + description="""Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`""", + ) + rag_file_ids: Optional[list[str]] = Field( + default=None, + description="""Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field.""", + ) + + +class VertexRagStoreRagResourceDict(TypedDict, total=False): + """The definition of the Rag resource.""" + + rag_corpus: Optional[str] + """Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`""" + + rag_file_ids: Optional[list[str]] + """Optional. rag_file_id. 
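
# Editorial usage sketch: Google Search grounding with the optional
# dynamic-retrieval threshold; 0.7 is an illustrative value, and Tool is
# defined later in this module.

from google.genai import types

search_tool = types.Tool(
    google_search_retrieval=types.GoogleSearchRetrieval(
        dynamic_retrieval_config=types.DynamicRetrievalConfig(
            mode='MODE_DYNAMIC', dynamic_threshold=0.7
        )
    )
)
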
The files should be in the same rag_corpus set in rag_corpus field.""" + + +VertexRagStoreRagResourceOrDict = Union[ + VertexRagStoreRagResource, VertexRagStoreRagResourceDict +] + + +class VertexRagStore(_common.BaseModel): + """Retrieve from Vertex RAG Store for grounding.""" + + rag_corpora: Optional[list[str]] = Field( + default=None, + description="""Optional. Deprecated. Please use rag_resources instead.""", + ) + rag_resources: Optional[list[VertexRagStoreRagResource]] = Field( + default=None, + description="""Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support.""", + ) + similarity_top_k: Optional[int] = Field( + default=None, + description="""Optional. Number of top k results to return from the selected corpora.""", + ) + vector_distance_threshold: Optional[float] = Field( + default=None, + description="""Optional. Only return results with vector distance smaller than the threshold.""", + ) + + +class VertexRagStoreDict(TypedDict, total=False): + """Retrieve from Vertex RAG Store for grounding.""" + + rag_corpora: Optional[list[str]] + """Optional. Deprecated. Please use rag_resources instead.""" + + rag_resources: Optional[list[VertexRagStoreRagResourceDict]] + """Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support.""" + + similarity_top_k: Optional[int] + """Optional. Number of top k results to return from the selected corpora.""" + + vector_distance_threshold: Optional[float] + """Optional. Only return results with vector distance smaller than the threshold.""" + + +VertexRagStoreOrDict = Union[VertexRagStore, VertexRagStoreDict] + + +class Retrieval(_common.BaseModel): + """Defines a retrieval tool that model can call to access external knowledge.""" + + disable_attribution: Optional[bool] = Field( + default=None, + description="""Optional. Deprecated. This option is no longer supported.""", + ) + vertex_ai_search: Optional[VertexAISearch] = Field( + default=None, + description="""Set to use data source powered by Vertex AI Search.""", + ) + vertex_rag_store: Optional[VertexRagStore] = Field( + default=None, + description="""Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService.""", + ) + + +class RetrievalDict(TypedDict, total=False): + """Defines a retrieval tool that model can call to access external knowledge.""" + + disable_attribution: Optional[bool] + """Optional. Deprecated. This option is no longer supported.""" + + vertex_ai_search: Optional[VertexAISearchDict] + """Set to use data source powered by Vertex AI Search.""" + + vertex_rag_store: Optional[VertexRagStoreDict] + """Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService.""" + + +RetrievalOrDict = Union[Retrieval, RetrievalDict] + + +class ToolCodeExecution(_common.BaseModel): + """Tool that executes code generated by the model, and automatically returns the result to the model. + + See also [ExecutableCode]and [CodeExecutionResult] which are input and output + to this tool. + """ + + pass + + +class ToolCodeExecutionDict(TypedDict, total=False): + """Tool that executes code generated by the model, and automatically returns the result to the model. 
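
# Editorial usage sketch: a Retrieval tool backed by a Vertex AI Search data
# store; the resource name is a placeholder following the format in the
# datastore docstring above.

from google.genai import types

retrieval = types.Retrieval(
    vertex_ai_search=types.VertexAISearch(
        datastore=(
            'projects/my-project/locations/global/'
            'collections/default_collection/dataStores/my-store'
        )
    )
)
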
+ + See also [ExecutableCode]and [CodeExecutionResult] which are input and output + to this tool. + """ + + pass + + +ToolCodeExecutionOrDict = Union[ToolCodeExecution, ToolCodeExecutionDict] + + +class Tool(_common.BaseModel): + """Tool details of a tool that the model may use to generate a response.""" + + function_declarations: Optional[list[FunctionDeclaration]] = Field( + default=None, + description="""List of function declarations that the tool supports.""", + ) + retrieval: Optional[Retrieval] = Field( + default=None, + description="""Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation.""", + ) + google_search: Optional[GoogleSearch] = Field( + default=None, + description="""Optional. Google Search tool type. Specialized retrieval tool + that is powered by Google Search.""", + ) + google_search_retrieval: Optional[GoogleSearchRetrieval] = Field( + default=None, + description="""Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search.""", + ) + code_execution: Optional[ToolCodeExecution] = Field( + default=None, + description="""Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services.""", + ) + + +class ToolDict(TypedDict, total=False): + """Tool details of a tool that the model may use to generate a response.""" + + function_declarations: Optional[list[FunctionDeclarationDict]] + """List of function declarations that the tool supports.""" + + retrieval: Optional[RetrievalDict] + """Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation.""" + + google_search: Optional[GoogleSearchDict] + """Optional. Google Search tool type. Specialized retrieval tool + that is powered by Google Search.""" + + google_search_retrieval: Optional[GoogleSearchRetrievalDict] + """Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search.""" + + code_execution: Optional[ToolCodeExecutionDict] + """Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services.""" + + +ToolOrDict = Union[Tool, ToolDict] +ToolListUnion = list[Union[Tool, Callable]] +ToolListUnionDict = list[Union[ToolDict, Callable]] + + +class FunctionCallingConfig(_common.BaseModel): + """Function calling config.""" + + mode: Optional[FunctionCallingConfigMode] = Field( + default=None, description="""Optional. Function calling mode.""" + ) + allowed_function_names: Optional[list[str]] = Field( + default=None, + description="""Optional. Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided.""", + ) + + +class FunctionCallingConfigDict(TypedDict, total=False): + """Function calling config.""" + + mode: Optional[FunctionCallingConfigMode] + """Optional. Function calling mode.""" + + allowed_function_names: Optional[list[str]] + """Optional. Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. 
With mode set to ANY, model will predict a function call from the set of function names provided.""" + + +FunctionCallingConfigOrDict = Union[ + FunctionCallingConfig, FunctionCallingConfigDict +] + + +class ToolConfig(_common.BaseModel): + """Tool config. + + This config is shared for all tools provided in the request. + """ + + function_calling_config: Optional[FunctionCallingConfig] = Field( + default=None, description="""Optional. Function calling config.""" + ) + + +class ToolConfigDict(TypedDict, total=False): + """Tool config. + + This config is shared for all tools provided in the request. + """ + + function_calling_config: Optional[FunctionCallingConfigDict] + """Optional. Function calling config.""" + + +ToolConfigOrDict = Union[ToolConfig, ToolConfigDict] + + +class PrebuiltVoiceConfig(_common.BaseModel): + """The configuration for the prebuilt speaker to use.""" + + voice_name: Optional[str] = Field( + default=None, + description="""The name of the prebuilt voice to use. + """, + ) + + +class PrebuiltVoiceConfigDict(TypedDict, total=False): + """The configuration for the prebuilt speaker to use.""" + + voice_name: Optional[str] + """The name of the prebuilt voice to use. + """ + + +PrebuiltVoiceConfigOrDict = Union[PrebuiltVoiceConfig, PrebuiltVoiceConfigDict] + + +class VoiceConfig(_common.BaseModel): + """The configuration for the voice to use.""" + + prebuilt_voice_config: Optional[PrebuiltVoiceConfig] = Field( + default=None, + description="""The configuration for the speaker to use. + """, + ) + + +class VoiceConfigDict(TypedDict, total=False): + """The configuration for the voice to use.""" + + prebuilt_voice_config: Optional[PrebuiltVoiceConfigDict] + """The configuration for the speaker to use. + """ + + +VoiceConfigOrDict = Union[VoiceConfig, VoiceConfigDict] + + +class SpeechConfig(_common.BaseModel): + """The speech generation configuration.""" + + voice_config: Optional[VoiceConfig] = Field( + default=None, + description="""The configuration for the speaker to use. + """, + ) + + +class SpeechConfigDict(TypedDict, total=False): + """The speech generation configuration.""" + + voice_config: Optional[VoiceConfigDict] + """The configuration for the speaker to use. + """ + + +SpeechConfigOrDict = Union[SpeechConfig, SpeechConfigDict] + + +class AutomaticFunctionCallingConfig(_common.BaseModel): + """The configuration for automatic function calling.""" + + disable: Optional[bool] = Field( + default=None, + description="""Whether to disable automatic function calling. + If not set or set to False, will enable automatic function calling. + If set to True, will disable automatic function calling. + """, + ) + maximum_remote_calls: Optional[int] = Field( + default=10, + description="""If automatic function calling is enabled, + maximum number of remote calls for automatic function calling. + This number should be a positive integer. + If not set, SDK will set maximum number of remote calls to 10. + """, + ) + ignore_call_history: Optional[bool] = Field( + default=None, + description="""If automatic function calling is enabled, + whether to ignore call history to the response. + If not set, SDK will set ignore_call_history to false, + and will append the call history to + GenerateContentResponse.automatic_function_calling_history. + """, + ) + + +class AutomaticFunctionCallingConfigDict(TypedDict, total=False): + """The configuration for automatic function calling.""" + + disable: Optional[bool] + """Whether to disable automatic function calling. 
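
# Editorial usage sketch: restricting the model to a fixed set of callable
# functions, per the FunctionCallingConfig docstrings above; the function name
# is illustrative.

from google.genai import types

tool_config = types.ToolConfig(
    function_calling_config=types.FunctionCallingConfig(
        mode='ANY', allowed_function_names=['get_weather']
    )
)
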
+ If not set or set to False, will enable automatic function calling. + If set to True, will disable automatic function calling. + """ + + maximum_remote_calls: Optional[int] + """If automatic function calling is enabled, + maximum number of remote calls for automatic function calling. + This number should be a positive integer. + If not set, SDK will set maximum number of remote calls to 10. + """ + + ignore_call_history: Optional[bool] + """If automatic function calling is enabled, + whether to ignore call history to the response. + If not set, SDK will set ignore_call_history to false, + and will append the call history to + GenerateContentResponse.automatic_function_calling_history. + """ + + +AutomaticFunctionCallingConfigOrDict = Union[ + AutomaticFunctionCallingConfig, AutomaticFunctionCallingConfigDict +] + + +class ThinkingConfig(_common.BaseModel): + """The thinking features configuration.""" + + include_thoughts: Optional[bool] = Field( + default=None, + description="""Indicates whether to include thoughts in the response. If true, thoughts are returned only if the model supports thought and thoughts are available. + """, + ) + + +class ThinkingConfigDict(TypedDict, total=False): + """The thinking features configuration.""" + + include_thoughts: Optional[bool] + """Indicates whether to include thoughts in the response. If true, thoughts are returned only if the model supports thought and thoughts are available. + """ + + +ThinkingConfigOrDict = Union[ThinkingConfig, ThinkingConfigDict] + + +class FileStatus(_common.BaseModel): + """Status of a File that uses a common error model.""" + + details: Optional[list[dict[str, Any]]] = Field( + default=None, + description="""A list of messages that carry the error details. There is a common set of message types for APIs to use.""", + ) + message: Optional[str] = Field( + default=None, + description="""A list of messages that carry the error details. There is a common set of message types for APIs to use.""", + ) + code: Optional[int] = Field( + default=None, description="""The status code. 0 for OK, 1 for CANCELLED""" + ) + + +class FileStatusDict(TypedDict, total=False): + """Status of a File that uses a common error model.""" + + details: Optional[list[dict[str, Any]]] + """A list of messages that carry the error details. There is a common set of message types for APIs to use.""" + + message: Optional[str] + """A list of messages that carry the error details. There is a common set of message types for APIs to use.""" + + code: Optional[int] + """The status code. 0 for OK, 1 for CANCELLED""" + + +FileStatusOrDict = Union[FileStatus, FileStatusDict] + + +class File(_common.BaseModel): + """A file uploaded to the API.""" + + name: Optional[str] = Field( + default=None, + description="""The `File` resource name. The ID (name excluding the "files/" prefix) can contain up to 40 characters that are lowercase alphanumeric or dashes (-). The ID cannot start or end with a dash. If the name is empty on create, a unique name will be generated. Example: `files/123-456`""", + ) + display_name: Optional[str] = Field( + default=None, + description="""Optional. The human-readable display name for the `File`. The display name must be no more than 512 characters in length, including spaces. Example: 'Welcome Image'""", + ) + mime_type: Optional[str] = Field( + default=None, description="""Output only. MIME type of the file.""" + ) + size_bytes: Optional[int] = Field( + default=None, description="""Output only. 
Size of the file in bytes."""
+  )
+  create_time: Optional[datetime.datetime] = Field(
+      default=None,
+      description="""Output only. The timestamp of when the `File` was created.""",
+  )
+  expiration_time: Optional[datetime.datetime] = Field(
+      default=None,
+      description="""Output only. The timestamp of when the `File` will be deleted. Only set if the `File` is scheduled to expire.""",
+  )
+  update_time: Optional[datetime.datetime] = Field(
+      default=None,
+      description="""Output only. The timestamp of when the `File` was last updated.""",
+  )
+  sha256_hash: Optional[str] = Field(
+      default=None,
+      description="""Output only. SHA-256 hash of the uploaded bytes.""",
+  )
+  uri: Optional[str] = Field(
+      default=None, description="""Output only. The URI of the `File`."""
+  )
+  download_uri: Optional[str] = Field(
+      default=None,
+      description="""Output only. The URI of the `File`, only set for downloadable (generated) files.""",
+  )
+  state: Optional[FileState] = Field(
+      default=None, description="""Output only. Processing state of the File."""
+  )
+  source: Optional[FileSource] = Field(
+      default=None, description="""Output only. The source of the `File`."""
+  )
+  video_metadata: Optional[dict[str, Any]] = Field(
+      default=None, description="""Output only. Metadata for a video."""
+  )
+  error: Optional[FileStatus] = Field(
+      default=None,
+      description="""Output only. Error status if File processing failed.""",
+  )
+
+
+class FileDict(TypedDict, total=False):
+  """A file uploaded to the API."""
+
+  name: Optional[str]
+  """The `File` resource name. The ID (name excluding the "files/" prefix) can contain up to 40 characters that are lowercase alphanumeric or dashes (-). The ID cannot start or end with a dash. If the name is empty on create, a unique name will be generated. Example: `files/123-456`"""
+
+  display_name: Optional[str]
+  """Optional. The human-readable display name for the `File`. The display name must be no more than 512 characters in length, including spaces. Example: 'Welcome Image'"""
+
+  mime_type: Optional[str]
+  """Output only. MIME type of the file."""
+
+  size_bytes: Optional[int]
+  """Output only. Size of the file in bytes."""
+
+  create_time: Optional[datetime.datetime]
+  """Output only. The timestamp of when the `File` was created."""
+
+  expiration_time: Optional[datetime.datetime]
+  """Output only. The timestamp of when the `File` will be deleted. Only set if the `File` is scheduled to expire."""
+
+  update_time: Optional[datetime.datetime]
+  """Output only. The timestamp of when the `File` was last updated."""
+
+  sha256_hash: Optional[str]
+  """Output only. SHA-256 hash of the uploaded bytes."""
+
+  uri: Optional[str]
+  """Output only. The URI of the `File`."""
+
+  download_uri: Optional[str]
+  """Output only. The URI of the `File`, only set for downloadable (generated) files."""
+
+  state: Optional[FileState]
+  """Output only. Processing state of the File."""
+
+  source: Optional[FileSource]
+  """Output only. The source of the `File`."""
+
+  video_metadata: Optional[dict[str, Any]]
+  """Output only. Metadata for a video."""
+
+  error: Optional[FileStatusDict]
+  """Output only.
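
# Editorial usage sketch: polling an uploaded File through the FileState
# lifecycle defined earlier. The upload/get calls and the `file=` keyword are
# assumptions about the client surface; the path is a placeholder.

import time

from google import genai
from google.genai import types

client = genai.Client()
f = client.files.upload(file='clip.mp4')  # assumed keyword
while f.state == types.FileState.PROCESSING:
  time.sleep(2)
  f = client.files.get(name=f.name)
print(f.state)  # ACTIVE on success, FAILED otherwise (see f.error)
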
Error status if File processing failed.""" + + +FileOrDict = Union[File, FileDict] + + +PartUnion = Union[File, Part, PIL.Image.Image, str] + + +PartUnionDict = Union[PartUnion, PartDict] + + +ContentUnion = Union[Content, list[PartUnion], PartUnion] + + +ContentUnionDict = Union[ContentUnion, ContentDict] + + +SchemaUnion = Union[dict, type, Schema, GenericAlias] + + +SchemaUnionDict = Union[SchemaUnion, SchemaDict] + + +class GenerationConfigRoutingConfigAutoRoutingMode(_common.BaseModel): + """When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference.""" + + model_routing_preference: Optional[ + Literal['UNKNOWN', 'PRIORITIZE_QUALITY', 'BALANCED', 'PRIORITIZE_COST'] + ] = Field(default=None, description="""The model routing preference.""") + + +class GenerationConfigRoutingConfigAutoRoutingModeDict(TypedDict, total=False): + """When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference.""" + + model_routing_preference: Optional[ + Literal['UNKNOWN', 'PRIORITIZE_QUALITY', 'BALANCED', 'PRIORITIZE_COST'] + ] + """The model routing preference.""" + + +GenerationConfigRoutingConfigAutoRoutingModeOrDict = Union[ + GenerationConfigRoutingConfigAutoRoutingMode, + GenerationConfigRoutingConfigAutoRoutingModeDict, +] + + +class GenerationConfigRoutingConfigManualRoutingMode(_common.BaseModel): + """When manual routing is set, the specified model will be used directly.""" + + model_name: Optional[str] = Field( + default=None, + description="""The model name to use. Only the public LLM models are accepted. e.g. 'gemini-1.5-pro-001'.""", + ) + + +class GenerationConfigRoutingConfigManualRoutingModeDict( + TypedDict, total=False +): + """When manual routing is set, the specified model will be used directly.""" + + model_name: Optional[str] + """The model name to use. Only the public LLM models are accepted. e.g. 'gemini-1.5-pro-001'.""" + + +GenerationConfigRoutingConfigManualRoutingModeOrDict = Union[ + GenerationConfigRoutingConfigManualRoutingMode, + GenerationConfigRoutingConfigManualRoutingModeDict, +] + + +class GenerationConfigRoutingConfig(_common.BaseModel): + """The configuration for routing the request to a specific model.""" + + auto_mode: Optional[GenerationConfigRoutingConfigAutoRoutingMode] = Field( + default=None, description="""Automated routing.""" + ) + manual_mode: Optional[GenerationConfigRoutingConfigManualRoutingMode] = Field( + default=None, description="""Manual routing.""" + ) + + +class GenerationConfigRoutingConfigDict(TypedDict, total=False): + """The configuration for routing the request to a specific model.""" + + auto_mode: Optional[GenerationConfigRoutingConfigAutoRoutingModeDict] + """Automated routing.""" + + manual_mode: Optional[GenerationConfigRoutingConfigManualRoutingModeDict] + """Manual routing.""" + + +GenerationConfigRoutingConfigOrDict = Union[ + GenerationConfigRoutingConfig, GenerationConfigRoutingConfigDict +] + + +SpeechConfigUnion = Union[SpeechConfig, str] + + +SpeechConfigUnionDict = Union[SpeechConfigUnion, SpeechConfigDict] + + +class GenerateContentConfig(_common.BaseModel): + """Class for configuring optional model parameters. + + For more information, see `Content generation parameters + <https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/content-generation-parameters>`_. 
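
# Editorial usage sketch: the ContentUnion aliases above let request contents be
# given in progressively simpler shapes; the three forms below are intended to
# be equivalent (the 'user' role default is an assumption).

from google.genai import types

as_content = types.Content(
    role='user', parts=[types.Part.from_text('Why is the sky blue?')]
)
as_part = types.Part.from_text('Why is the sky blue?')
as_str = 'Why is the sky blue?'
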
+ """ + + system_instruction: Optional[ContentUnion] = Field( + default=None, + description="""Instructions for the model to steer it toward better performance. + For example, "Answer as concisely as possible" or "Don't use technical + terms in your response". + """, + ) + temperature: Optional[float] = Field( + default=None, + description="""Value that controls the degree of randomness in token selection. + Lower temperatures are good for prompts that require a less open-ended or + creative response, while higher temperatures can lead to more diverse or + creative results. + """, + ) + top_p: Optional[float] = Field( + default=None, + description="""Tokens are selected from the most to least probable until the sum + of their probabilities equals this value. Use a lower value for less + random responses and a higher value for more random responses. + """, + ) + top_k: Optional[float] = Field( + default=None, + description="""For each token selection step, the ``top_k`` tokens with the + highest probabilities are sampled. Then tokens are further filtered based + on ``top_p`` with the final token selected using temperature sampling. Use + a lower number for less random responses and a higher number for more + random responses. + """, + ) + candidate_count: Optional[int] = Field( + default=None, + description="""Number of response variations to return. + """, + ) + max_output_tokens: Optional[int] = Field( + default=None, + description="""Maximum number of tokens that can be generated in the response. + """, + ) + stop_sequences: Optional[list[str]] = Field( + default=None, + description="""List of strings that tells the model to stop generating text if one + of the strings is encountered in the response. + """, + ) + response_logprobs: Optional[bool] = Field( + default=None, + description="""Whether to return the log probabilities of the tokens that were + chosen by the model at each step. + """, + ) + logprobs: Optional[int] = Field( + default=None, + description="""Number of top candidate tokens to return the log probabilities for + at each generation step. + """, + ) + presence_penalty: Optional[float] = Field( + default=None, + description="""Positive values penalize tokens that already appear in the + generated text, increasing the probability of generating more diverse + content. + """, + ) + frequency_penalty: Optional[float] = Field( + default=None, + description="""Positive values penalize tokens that repeatedly appear in the + generated text, increasing the probability of generating more diverse + content. + """, + ) + seed: Optional[int] = Field( + default=None, + description="""When ``seed`` is fixed to a specific number, the model makes a best + effort to provide the same response for repeated requests. By default, a + random number is used. + """, + ) + response_mime_type: Optional[str] = Field( + default=None, + description="""Output response media type of the generated candidate text. + """, + ) + response_schema: Optional[SchemaUnion] = Field( + default=None, + description="""Schema that the generated candidate text must adhere to. + """, + ) + routing_config: Optional[GenerationConfigRoutingConfig] = Field( + default=None, + description="""Configuration for model router requests. + """, + ) + safety_settings: Optional[list[SafetySetting]] = Field( + default=None, + description="""Safety settings in the request to block unsafe content in the + response. 
+ """, + ) + tools: Optional[ToolListUnion] = Field( + default=None, + description="""Code that enables the system to interact with external systems to + perform an action outside of the knowledge and scope of the model. + """, + ) + tool_config: Optional[ToolConfig] = Field( + default=None, + description="""Associates model output to a specific function call. + """, + ) + cached_content: Optional[str] = Field( + default=None, + description="""Resource name of a context cache that can be used in subsequent + requests. + """, + ) + response_modalities: Optional[list[str]] = Field( + default=None, + description="""The requested modalities of the response. Represents the set of + modalities that the model can return. + """, + ) + media_resolution: Optional[MediaResolution] = Field( + default=None, + description="""If specified, the media resolution specified will be used. + """, + ) + speech_config: Optional[SpeechConfigUnion] = Field( + default=None, + description="""The speech generation configuration. + """, + ) + audio_timestamp: Optional[bool] = Field( + default=None, + description="""If enabled, audio timestamp will be included in the request to the + model. + """, + ) + automatic_function_calling: Optional[AutomaticFunctionCallingConfig] = Field( + default=None, + description="""The configuration for automatic function calling. + """, + ) + thinking_config: Optional[ThinkingConfig] = Field( + default=None, + description="""The thinking features configuration. + """, + ) + + +class GenerateContentConfigDict(TypedDict, total=False): + """Class for configuring optional model parameters. + + For more information, see `Content generation parameters + <https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/content-generation-parameters>`_. + """ + + system_instruction: Optional[ContentUnionDict] + """Instructions for the model to steer it toward better performance. + For example, "Answer as concisely as possible" or "Don't use technical + terms in your response". + """ + + temperature: Optional[float] + """Value that controls the degree of randomness in token selection. + Lower temperatures are good for prompts that require a less open-ended or + creative response, while higher temperatures can lead to more diverse or + creative results. + """ + + top_p: Optional[float] + """Tokens are selected from the most to least probable until the sum + of their probabilities equals this value. Use a lower value for less + random responses and a higher value for more random responses. + """ + + top_k: Optional[float] + """For each token selection step, the ``top_k`` tokens with the + highest probabilities are sampled. Then tokens are further filtered based + on ``top_p`` with the final token selected using temperature sampling. Use + a lower number for less random responses and a higher number for more + random responses. + """ + + candidate_count: Optional[int] + """Number of response variations to return. + """ + + max_output_tokens: Optional[int] + """Maximum number of tokens that can be generated in the response. + """ + + stop_sequences: Optional[list[str]] + """List of strings that tells the model to stop generating text if one + of the strings is encountered in the response. + """ + + response_logprobs: Optional[bool] + """Whether to return the log probabilities of the tokens that were + chosen by the model at each step. + """ + + logprobs: Optional[int] + """Number of top candidate tokens to return the log probabilities for + at each generation step. 
+ """ + + presence_penalty: Optional[float] + """Positive values penalize tokens that already appear in the + generated text, increasing the probability of generating more diverse + content. + """ + + frequency_penalty: Optional[float] + """Positive values penalize tokens that repeatedly appear in the + generated text, increasing the probability of generating more diverse + content. + """ + + seed: Optional[int] + """When ``seed`` is fixed to a specific number, the model makes a best + effort to provide the same response for repeated requests. By default, a + random number is used. + """ + + response_mime_type: Optional[str] + """Output response media type of the generated candidate text. + """ + + response_schema: Optional[SchemaUnionDict] + """Schema that the generated candidate text must adhere to. + """ + + routing_config: Optional[GenerationConfigRoutingConfigDict] + """Configuration for model router requests. + """ + + safety_settings: Optional[list[SafetySettingDict]] + """Safety settings in the request to block unsafe content in the + response. + """ + + tools: Optional[ToolListUnionDict] + """Code that enables the system to interact with external systems to + perform an action outside of the knowledge and scope of the model. + """ + + tool_config: Optional[ToolConfigDict] + """Associates model output to a specific function call. + """ + + cached_content: Optional[str] + """Resource name of a context cache that can be used in subsequent + requests. + """ + + response_modalities: Optional[list[str]] + """The requested modalities of the response. Represents the set of + modalities that the model can return. + """ + + media_resolution: Optional[MediaResolution] + """If specified, the media resolution specified will be used. + """ + + speech_config: Optional[SpeechConfigUnionDict] + """The speech generation configuration. + """ + + audio_timestamp: Optional[bool] + """If enabled, audio timestamp will be included in the request to the + model. + """ + + automatic_function_calling: Optional[AutomaticFunctionCallingConfigDict] + """The configuration for automatic function calling. + """ + + thinking_config: Optional[ThinkingConfigDict] + """The thinking features configuration. + """ + + +GenerateContentConfigOrDict = Union[ + GenerateContentConfig, GenerateContentConfigDict +] + + +ContentListUnion = Union[list[ContentUnion], ContentUnion] + + +ContentListUnionDict = Union[list[ContentUnionDict], ContentUnionDict] + + +class _GenerateContentParameters(_common.BaseModel): + """Class for configuring the content of the request to the model.""" + + model: Optional[str] = Field( + default=None, + description="""ID of the model to use. For a list of models, see `Google models + <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""", + ) + contents: Optional[ContentListUnion] = Field( + default=None, + description="""Content of the request. + """, + ) + config: Optional[GenerateContentConfig] = Field( + default=None, + description="""Configuration that contains optional model parameters. + """, + ) + + +class _GenerateContentParametersDict(TypedDict, total=False): + """Class for configuring the content of the request to the model.""" + + model: Optional[str] + """ID of the model to use. For a list of models, see `Google models + <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""" + + contents: Optional[ContentListUnionDict] + """Content of the request. 
+ """ + + config: Optional[GenerateContentConfigDict] + """Configuration that contains optional model parameters. + """ + + +_GenerateContentParametersOrDict = Union[ + _GenerateContentParameters, _GenerateContentParametersDict +] + + +class GoogleTypeDate(_common.BaseModel): + """Represents a whole or partial calendar date, such as a birthday. + + The time of day and time zone are either specified elsewhere or are + insignificant. The date is relative to the Gregorian Calendar. This can + represent one of the following: * A full date, with non-zero year, month, and + day values. * A month and day, with a zero year (for example, an anniversary). + * A year on its own, with a zero month and a zero day. * A year and month, + with a zero day (for example, a credit card expiration date). Related types: * + google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp + """ + + day: Optional[int] = Field( + default=None, + description="""Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.""", + ) + month: Optional[int] = Field( + default=None, + description="""Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.""", + ) + year: Optional[int] = Field( + default=None, + description="""Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.""", + ) + + +class GoogleTypeDateDict(TypedDict, total=False): + """Represents a whole or partial calendar date, such as a birthday. + + The time of day and time zone are either specified elsewhere or are + insignificant. The date is relative to the Gregorian Calendar. This can + represent one of the following: * A full date, with non-zero year, month, and + day values. * A month and day, with a zero year (for example, an anniversary). + * A year on its own, with a zero month and a zero day. * A year and month, + with a zero day (for example, a credit card expiration date). Related types: * + google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp + """ + + day: Optional[int] + """Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.""" + + month: Optional[int] + """Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.""" + + year: Optional[int] + """Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.""" + + +GoogleTypeDateOrDict = Union[GoogleTypeDate, GoogleTypeDateDict] + + +class Citation(_common.BaseModel): + """Source attributions for content.""" + + end_index: Optional[int] = Field( + default=None, description="""Output only. End index into the content.""" + ) + license: Optional[str] = Field( + default=None, description="""Output only. License of the attribution.""" + ) + publication_date: Optional[GoogleTypeDate] = Field( + default=None, + description="""Output only. Publication date of the attribution.""", + ) + start_index: Optional[int] = Field( + default=None, description="""Output only. Start index into the content.""" + ) + title: Optional[str] = Field( + default=None, description="""Output only. Title of the attribution.""" + ) + uri: Optional[str] = Field( + default=None, + description="""Output only. Url reference of the attribution.""", + ) + + +class CitationDict(TypedDict, total=False): + """Source attributions for content.""" + + end_index: Optional[int] + """Output only. 
End index into the content.""" + + license: Optional[str] + """Output only. License of the attribution.""" + + publication_date: Optional[GoogleTypeDateDict] + """Output only. Publication date of the attribution.""" + + start_index: Optional[int] + """Output only. Start index into the content.""" + + title: Optional[str] + """Output only. Title of the attribution.""" + + uri: Optional[str] + """Output only. Url reference of the attribution.""" + + +CitationOrDict = Union[Citation, CitationDict] + + +class CitationMetadata(_common.BaseModel): + """Class for citation information when the model quotes another source.""" + + citations: Optional[list[Citation]] = Field( + default=None, + description="""Contains citation information when the model directly quotes, at + length, from another source. Can include traditional websites and code + repositories. + """, + ) + + +class CitationMetadataDict(TypedDict, total=False): + """Class for citation information when the model quotes another source.""" + + citations: Optional[list[CitationDict]] + """Contains citation information when the model directly quotes, at + length, from another source. Can include traditional websites and code + repositories. + """ + + +CitationMetadataOrDict = Union[CitationMetadata, CitationMetadataDict] + + +class GroundingChunkRetrievedContext(_common.BaseModel): + """Chunk from context retrieved by the retrieval tools.""" + + text: Optional[str] = Field( + default=None, description="""Text of the attribution.""" + ) + title: Optional[str] = Field( + default=None, description="""Title of the attribution.""" + ) + uri: Optional[str] = Field( + default=None, description="""URI reference of the attribution.""" + ) + + +class GroundingChunkRetrievedContextDict(TypedDict, total=False): + """Chunk from context retrieved by the retrieval tools.""" + + text: Optional[str] + """Text of the attribution.""" + + title: Optional[str] + """Title of the attribution.""" + + uri: Optional[str] + """URI reference of the attribution.""" + + +GroundingChunkRetrievedContextOrDict = Union[ + GroundingChunkRetrievedContext, GroundingChunkRetrievedContextDict +] + + +class GroundingChunkWeb(_common.BaseModel): + """Chunk from the web.""" + + title: Optional[str] = Field( + default=None, description="""Title of the chunk.""" + ) + uri: Optional[str] = Field( + default=None, description="""URI reference of the chunk.""" + ) + + +class GroundingChunkWebDict(TypedDict, total=False): + """Chunk from the web.""" + + title: Optional[str] + """Title of the chunk.""" + + uri: Optional[str] + """URI reference of the chunk.""" + + +GroundingChunkWebOrDict = Union[GroundingChunkWeb, GroundingChunkWebDict] + + +class GroundingChunk(_common.BaseModel): + """Grounding chunk.""" + + retrieved_context: Optional[GroundingChunkRetrievedContext] = Field( + default=None, + description="""Grounding chunk from context retrieved by the retrieval tools.""", + ) + web: Optional[GroundingChunkWeb] = Field( + default=None, description="""Grounding chunk from the web.""" + ) + + +class GroundingChunkDict(TypedDict, total=False): + """Grounding chunk.""" + + retrieved_context: Optional[GroundingChunkRetrievedContextDict] + """Grounding chunk from context retrieved by the retrieval tools.""" + + web: Optional[GroundingChunkWebDict] + """Grounding chunk from the web.""" + + +GroundingChunkOrDict = Union[GroundingChunk, GroundingChunkDict] + + +class Segment(_common.BaseModel): + """Segment of the content.""" + + end_index: Optional[int] = Field( + default=None, + 
description="""Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero.""", + ) + part_index: Optional[int] = Field( + default=None, + description="""Output only. The index of a Part object within its parent Content object.""", + ) + start_index: Optional[int] = Field( + default=None, + description="""Output only. Start index in the given Part, measured in bytes. Offset from the start of the Part, inclusive, starting at zero.""", + ) + text: Optional[str] = Field( + default=None, + description="""Output only. The text corresponding to the segment from the response.""", + ) + + +class SegmentDict(TypedDict, total=False): + """Segment of the content.""" + + end_index: Optional[int] + """Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero.""" + + part_index: Optional[int] + """Output only. The index of a Part object within its parent Content object.""" + + start_index: Optional[int] + """Output only. Start index in the given Part, measured in bytes. Offset from the start of the Part, inclusive, starting at zero.""" + + text: Optional[str] + """Output only. The text corresponding to the segment from the response.""" + + +SegmentOrDict = Union[Segment, SegmentDict] + + +class GroundingSupport(_common.BaseModel): + """Grounding support.""" + + confidence_scores: Optional[list[float]] = Field( + default=None, + description="""Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. This list must have the same size as the grounding_chunk_indices.""", + ) + grounding_chunk_indices: Optional[list[int]] = Field( + default=None, + description="""A list of indices (into 'grounding_chunk') specifying the citations associated with the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim.""", + ) + segment: Optional[Segment] = Field( + default=None, + description="""Segment of the content this support belongs to.""", + ) + + +class GroundingSupportDict(TypedDict, total=False): + """Grounding support.""" + + confidence_scores: Optional[list[float]] + """Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. This list must have the same size as the grounding_chunk_indices.""" + + grounding_chunk_indices: Optional[list[int]] + """A list of indices (into 'grounding_chunk') specifying the citations associated with the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim.""" + + segment: Optional[SegmentDict] + """Segment of the content this support belongs to.""" + + +GroundingSupportOrDict = Union[GroundingSupport, GroundingSupportDict] + + +class RetrievalMetadata(_common.BaseModel): + """Metadata related to retrieval in the grounding flow.""" + + google_search_dynamic_retrieval_score: Optional[float] = Field( + default=None, + description="""Optional. Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. 
It will be compared to the threshold to determine whether to trigger Google Search.""", + ) + + +class RetrievalMetadataDict(TypedDict, total=False): + """Metadata related to retrieval in the grounding flow.""" + + google_search_dynamic_retrieval_score: Optional[float] + """Optional. Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger Google Search.""" + + +RetrievalMetadataOrDict = Union[RetrievalMetadata, RetrievalMetadataDict] + + +class SearchEntryPoint(_common.BaseModel): + """Google search entry point.""" + + rendered_content: Optional[str] = Field( + default=None, + description="""Optional. Web content snippet that can be embedded in a web page or an app webview.""", + ) + sdk_blob: Optional[bytes] = Field( + default=None, + description="""Optional. Base64 encoded JSON representing array of tuple.""", + ) + + +class SearchEntryPointDict(TypedDict, total=False): + """Google search entry point.""" + + rendered_content: Optional[str] + """Optional. Web content snippet that can be embedded in a web page or an app webview.""" + + sdk_blob: Optional[bytes] + """Optional. Base64 encoded JSON representing array of tuple.""" + + +SearchEntryPointOrDict = Union[SearchEntryPoint, SearchEntryPointDict] + + +class GroundingMetadata(_common.BaseModel): + """Metadata returned to client when grounding is enabled.""" + + grounding_chunks: Optional[list[GroundingChunk]] = Field( + default=None, + description="""List of supporting references retrieved from specified grounding source.""", + ) + grounding_supports: Optional[list[GroundingSupport]] = Field( + default=None, description="""Optional. List of grounding support.""" + ) + retrieval_metadata: Optional[RetrievalMetadata] = Field( + default=None, description="""Optional. Output only. Retrieval metadata.""" + ) + retrieval_queries: Optional[list[str]] = Field( + default=None, + description="""Optional. Queries executed by the retrieval tools.""", + ) + search_entry_point: Optional[SearchEntryPoint] = Field( + default=None, + description="""Optional. Google search entry for the following-up web searches.""", + ) + web_search_queries: Optional[list[str]] = Field( + default=None, + description="""Optional. Web search queries for the following-up web search.""", + ) + + +class GroundingMetadataDict(TypedDict, total=False): + """Metadata returned to client when grounding is enabled.""" + + grounding_chunks: Optional[list[GroundingChunkDict]] + """List of supporting references retrieved from specified grounding source.""" + + grounding_supports: Optional[list[GroundingSupportDict]] + """Optional. List of grounding support.""" + + retrieval_metadata: Optional[RetrievalMetadataDict] + """Optional. Output only. Retrieval metadata.""" + + retrieval_queries: Optional[list[str]] + """Optional. Queries executed by the retrieval tools.""" + + search_entry_point: Optional[SearchEntryPointDict] + """Optional. Google search entry for the following-up web searches.""" + + web_search_queries: Optional[list[str]] + """Optional. 
Web search queries for the following-up web search.""" + + +GroundingMetadataOrDict = Union[GroundingMetadata, GroundingMetadataDict] + + +class LogprobsResultCandidate(_common.BaseModel): + """Candidate for the logprobs token and score.""" + + log_probability: Optional[float] = Field( + default=None, description="""The candidate's log probability.""" + ) + token: Optional[str] = Field( + default=None, description="""The candidate's token string value.""" + ) + token_id: Optional[int] = Field( + default=None, description="""The candidate's token id value.""" + ) + + +class LogprobsResultCandidateDict(TypedDict, total=False): + """Candidate for the logprobs token and score.""" + + log_probability: Optional[float] + """The candidate's log probability.""" + + token: Optional[str] + """The candidate's token string value.""" + + token_id: Optional[int] + """The candidate's token id value.""" + + +LogprobsResultCandidateOrDict = Union[ + LogprobsResultCandidate, LogprobsResultCandidateDict +] + + +class LogprobsResultTopCandidates(_common.BaseModel): + """Candidates with top log probabilities at each decoding step.""" + + candidates: Optional[list[LogprobsResultCandidate]] = Field( + default=None, + description="""Sorted by log probability in descending order.""", + ) + + +class LogprobsResultTopCandidatesDict(TypedDict, total=False): + """Candidates with top log probabilities at each decoding step.""" + + candidates: Optional[list[LogprobsResultCandidateDict]] + """Sorted by log probability in descending order.""" + + +LogprobsResultTopCandidatesOrDict = Union[ + LogprobsResultTopCandidates, LogprobsResultTopCandidatesDict +] + + +class LogprobsResult(_common.BaseModel): + """Logprobs Result""" + + chosen_candidates: Optional[list[LogprobsResultCandidate]] = Field( + default=None, + description="""Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates.""", + ) + top_candidates: Optional[list[LogprobsResultTopCandidates]] = Field( + default=None, description="""Length = total number of decoding steps.""" + ) + + +class LogprobsResultDict(TypedDict, total=False): + """Logprobs Result""" + + chosen_candidates: Optional[list[LogprobsResultCandidateDict]] + """Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates.""" + + top_candidates: Optional[list[LogprobsResultTopCandidatesDict]] + """Length = total number of decoding steps.""" + + +LogprobsResultOrDict = Union[LogprobsResult, LogprobsResultDict] + + +class SafetyRating(_common.BaseModel): + """Safety rating corresponding to the generated content.""" + + blocked: Optional[bool] = Field( + default=None, + description="""Output only. Indicates whether the content was filtered out because of this rating.""", + ) + category: Optional[HarmCategory] = Field( + default=None, description="""Output only. Harm category.""" + ) + probability: Optional[HarmProbability] = Field( + default=None, + description="""Output only. Harm probability levels in the content.""", + ) + probability_score: Optional[float] = Field( + default=None, description="""Output only. Harm probability score.""" + ) + severity: Optional[HarmSeverity] = Field( + default=None, + description="""Output only. Harm severity levels in the content.""", + ) + severity_score: Optional[float] = Field( + default=None, description="""Output only. 
Harm severity score.""" + ) + + +class SafetyRatingDict(TypedDict, total=False): + """Safety rating corresponding to the generated content.""" + + blocked: Optional[bool] + """Output only. Indicates whether the content was filtered out because of this rating.""" + + category: Optional[HarmCategory] + """Output only. Harm category.""" + + probability: Optional[HarmProbability] + """Output only. Harm probability levels in the content.""" + + probability_score: Optional[float] + """Output only. Harm probability score.""" + + severity: Optional[HarmSeverity] + """Output only. Harm severity levels in the content.""" + + severity_score: Optional[float] + """Output only. Harm severity score.""" + + +SafetyRatingOrDict = Union[SafetyRating, SafetyRatingDict] + + +class Candidate(_common.BaseModel): + """Class containing a response candidate generated from the model.""" + + content: Optional[Content] = Field( + default=None, + description="""Contains the multi-part content of the response. + """, + ) + citation_metadata: Optional[CitationMetadata] = Field( + default=None, + description="""Source attribution of the generated content. + """, + ) + finish_message: Optional[str] = Field( + default=None, + description="""Describes the reason the model stopped generating tokens. + """, + ) + token_count: Optional[int] = Field( + default=None, + description="""Number of tokens for this candidate. + """, + ) + avg_logprobs: Optional[float] = Field( + default=None, + description="""Output only. Average log probability score of the candidate.""", + ) + finish_reason: Optional[FinishReason] = Field( + default=None, + description="""Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens.""", + ) + grounding_metadata: Optional[GroundingMetadata] = Field( + default=None, + description="""Output only. Metadata specifies sources used to ground generated content.""", + ) + index: Optional[int] = Field( + default=None, description="""Output only. Index of the candidate.""" + ) + logprobs_result: Optional[LogprobsResult] = Field( + default=None, + description="""Output only. Log-likelihood scores for the response tokens and top tokens""", + ) + safety_ratings: Optional[list[SafetyRating]] = Field( + default=None, + description="""Output only. List of ratings for the safety of a response candidate. There is at most one rating per category.""", + ) + + +class CandidateDict(TypedDict, total=False): + """Class containing a response candidate generated from the model.""" + + content: Optional[ContentDict] + """Contains the multi-part content of the response. + """ + + citation_metadata: Optional[CitationMetadataDict] + """Source attribution of the generated content. + """ + + finish_message: Optional[str] + """Describes the reason the model stopped generating tokens. + """ + + token_count: Optional[int] + """Number of tokens for this candidate. + """ + + avg_logprobs: Optional[float] + """Output only. Average log probability score of the candidate.""" + + finish_reason: Optional[FinishReason] + """Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens.""" + + grounding_metadata: Optional[GroundingMetadataDict] + """Output only. Metadata specifies sources used to ground generated content.""" + + index: Optional[int] + """Output only. Index of the candidate.""" + + logprobs_result: Optional[LogprobsResultDict] + """Output only. 
Log-likelihood scores for the response tokens and top tokens""" + + safety_ratings: Optional[list[SafetyRatingDict]] + """Output only. List of ratings for the safety of a response candidate. There is at most one rating per category.""" + + +CandidateOrDict = Union[Candidate, CandidateDict] + + +class GenerateContentResponsePromptFeedback(_common.BaseModel): + """Content filter results for a prompt sent in the request.""" + + block_reason: Optional[BlockedReason] = Field( + default=None, description="""Output only. Blocked reason.""" + ) + block_reason_message: Optional[str] = Field( + default=None, + description="""Output only. A readable block reason message.""", + ) + safety_ratings: Optional[list[SafetyRating]] = Field( + default=None, description="""Output only. Safety ratings.""" + ) + + +class GenerateContentResponsePromptFeedbackDict(TypedDict, total=False): + """Content filter results for a prompt sent in the request.""" + + block_reason: Optional[BlockedReason] + """Output only. Blocked reason.""" + + block_reason_message: Optional[str] + """Output only. A readable block reason message.""" + + safety_ratings: Optional[list[SafetyRatingDict]] + """Output only. Safety ratings.""" + + +GenerateContentResponsePromptFeedbackOrDict = Union[ + GenerateContentResponsePromptFeedback, + GenerateContentResponsePromptFeedbackDict, +] + + +class GenerateContentResponseUsageMetadata(_common.BaseModel): + """Usage metadata about response(s).""" + + cached_content_token_count: Optional[int] = Field( + default=None, + description="""Output only. Number of tokens in the cached part in the input (the cached content).""", + ) + candidates_token_count: Optional[int] = Field( + default=None, description="""Number of tokens in the response(s).""" + ) + prompt_token_count: Optional[int] = Field( + default=None, + description="""Number of tokens in the request. When `cached_content` is set, this is still the total effective prompt size meaning this includes the number of tokens in the cached content.""", + ) + total_token_count: Optional[int] = Field( + default=None, + description="""Total token count for prompt and response candidates.""", + ) + + +class GenerateContentResponseUsageMetadataDict(TypedDict, total=False): + """Usage metadata about response(s).""" + + cached_content_token_count: Optional[int] + """Output only. Number of tokens in the cached part in the input (the cached content).""" + + candidates_token_count: Optional[int] + """Number of tokens in the response(s).""" + + prompt_token_count: Optional[int] + """Number of tokens in the request. When `cached_content` is set, this is still the total effective prompt size meaning this includes the number of tokens in the cached content.""" + + total_token_count: Optional[int] + """Total token count for prompt and response candidates.""" + + +GenerateContentResponseUsageMetadataOrDict = Union[ + GenerateContentResponseUsageMetadata, + GenerateContentResponseUsageMetadataDict, +] + + +class GenerateContentResponse(_common.BaseModel): + """Response message for PredictionService.GenerateContent.""" + + candidates: Optional[list[Candidate]] = Field( + default=None, + description="""Response variations returned by the model. + """, + ) + model_version: Optional[str] = Field( + default=None, + description="""Output only. The model version used to generate the response.""", + ) + prompt_feedback: Optional[GenerateContentResponsePromptFeedback] = Field( + default=None, + description="""Output only. Content filter results for a prompt sent in the request. 
Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations.""",
+  )
+  usage_metadata: Optional[GenerateContentResponseUsageMetadata] = Field(
+      default=None, description="""Usage metadata about the response(s)."""
+  )
+  automatic_function_calling_history: Optional[list[Content]] = None
+  parsed: Optional[Union[pydantic.BaseModel, dict]] = Field(
+      default=None,
+      description="""Parsed response if response_schema is provided. Not available for streaming.""",
+  )
+
+  @property
+  def text(self) -> Optional[str]:
+    """Returns the concatenation of all text parts in the response."""
+    if (
+        not self.candidates
+        or not self.candidates[0].content
+        or not self.candidates[0].content.parts
+    ):
+      return None
+    if len(self.candidates) > 1:
+      logging.warning(
+          f'There are {len(self.candidates)} candidates; returning text from'
+          ' the first candidate. Access response.candidates directly to get'
+          ' text from other candidates.'
+      )
+    text = ''
+    any_text_part_text = False
+    for part in self.candidates[0].content.parts:
+      for field_name, field_value in part.dict(
+          exclude={'text', 'thought'}
+      ).items():
+        if field_value is not None:
+          raise ValueError(
+              'GenerateContentResponse.text only supports text parts, but got'
+              f' {field_name} part: {part}'
+          )
+      if isinstance(part.text, str):
+        if isinstance(part.thought, bool) and part.thought:
+          continue
+        any_text_part_text = True
+        text += part.text
+    # part.text == '' is different from part.text is None
+    return text if any_text_part_text else None
+
+  @property
+  def function_calls(self) -> Optional[list[FunctionCall]]:
+    """Returns the list of function calls in the response."""
+    if (
+        not self.candidates
+        or not self.candidates[0].content
+        or not self.candidates[0].content.parts
+    ):
+      return None
+    if len(self.candidates) > 1:
+      logging.warning(
+          'Warning: there are multiple candidates in the response; returning'
+          ' function calls from the first one.'
+      )
+    function_calls = [
+        part.function_call
+        for part in self.candidates[0].content.parts
+        if part.function_call is not None
+    ]
+
+    return function_calls if function_calls else None
+
+  @classmethod
+  def _from_response(
+      cls, response: dict[str, object], kwargs: dict[str, object]
+  ):
+    result = super()._from_response(response, kwargs)
+
+    # Handles response schema.
+    response_schema = _common.get_value_by_path(
+        kwargs, ['config', 'response_schema']
+    )
+    if inspect.isclass(response_schema) and issubclass(
+        response_schema, pydantic.BaseModel
+    ):
+      # Pydantic schema.
+      try:
+        result.parsed = response_schema.model_validate_json(result.text)
+      # may not be valid JSON in a given stream response
+      except pydantic.ValidationError:
+        pass
+
+    elif isinstance(response_schema, GenericAlias) and issubclass(
+        response_schema.__args__[0], pydantic.BaseModel
+    ):
+      # Handles cases where `list[pydantic.BaseModel]` was provided.
+      result.parsed = []
+      pydantic_model_class = response_schema.__args__[0]
+      response_list_json = json.loads(result.text)
+      for json_instance in response_list_json:
+        try:
+          pydantic_model_instance = pydantic_model_class.model_validate_json(
+              json.dumps(json_instance)
+          )
+          result.parsed.append(pydantic_model_instance)
+        # may not be valid JSON in a given stream response
+        except pydantic.ValidationError:
+          pass
+
+    elif isinstance(response_schema, dict) or isinstance(
+        response_schema, pydantic.BaseModel
+    ):
+      # JSON schema.
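+      # A plain JSON schema (or a pydantic instance) carries no model class
+      # to validate into, so the parsed result is left as the raw
+      # json.loads output; a streaming chunk may not yet form valid JSON,
+      # hence the silent fallthrough below.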
+ try: + result.parsed = json.loads(result.text) + # may not be a valid json per stream response + except json.decoder.JSONDecodeError: + pass + + return result + + +class GenerateContentResponseDict(TypedDict, total=False): + """Response message for PredictionService.GenerateContent.""" + + candidates: Optional[list[CandidateDict]] + """Response variations returned by the model. + """ + + model_version: Optional[str] + """Output only. The model version used to generate the response.""" + + prompt_feedback: Optional[GenerateContentResponsePromptFeedbackDict] + """Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations.""" + + usage_metadata: Optional[GenerateContentResponseUsageMetadataDict] + """Usage metadata about the response(s).""" + + +GenerateContentResponseOrDict = Union[ + GenerateContentResponse, GenerateContentResponseDict +] + + +class EmbedContentConfig(_common.BaseModel): + """Class for configuring optional parameters for the embed_content method.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + task_type: Optional[str] = Field( + default=None, + description="""Type of task for which the embedding will be used. + """, + ) + title: Optional[str] = Field( + default=None, + description="""Title for the text. Only applicable when TaskType is + `RETRIEVAL_DOCUMENT`. + """, + ) + output_dimensionality: Optional[int] = Field( + default=None, + description="""Reduced dimension for the output embedding. If set, + excessive values in the output embedding are truncated from the end. + Supported by newer models since 2024 only. You cannot set this value if + using the earlier model (`models/embedding-001`). + """, + ) + mime_type: Optional[str] = Field( + default=None, + description="""Vertex API only. The MIME type of the input. + """, + ) + auto_truncate: Optional[bool] = Field( + default=None, + description="""Vertex API only. Whether to silently truncate inputs longer than + the max sequence length. If this option is set to false, oversized inputs + will lead to an INVALID_ARGUMENT error, similar to other text APIs. + """, + ) + + +class EmbedContentConfigDict(TypedDict, total=False): + """Class for configuring optional parameters for the embed_content method.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + task_type: Optional[str] + """Type of task for which the embedding will be used. + """ + + title: Optional[str] + """Title for the text. Only applicable when TaskType is + `RETRIEVAL_DOCUMENT`. + """ + + output_dimensionality: Optional[int] + """Reduced dimension for the output embedding. If set, + excessive values in the output embedding are truncated from the end. + Supported by newer models since 2024 only. You cannot set this value if + using the earlier model (`models/embedding-001`). + """ + + mime_type: Optional[str] + """Vertex API only. The MIME type of the input. + """ + + auto_truncate: Optional[bool] + """Vertex API only. Whether to silently truncate inputs longer than + the max sequence length. If this option is set to false, oversized inputs + will lead to an INVALID_ARGUMENT error, similar to other text APIs. 
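+
+  A typical config dict, with illustrative values, might look like::
+
+    config = EmbedContentConfigDict(
+        task_type='RETRIEVAL_DOCUMENT',
+        title='Company handbook',
+        output_dimensionality=256,
+        auto_truncate=True,
+    )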
+ """ + + +EmbedContentConfigOrDict = Union[EmbedContentConfig, EmbedContentConfigDict] + + +class _EmbedContentParameters(_common.BaseModel): + """Parameters for the embed_content method.""" + + model: Optional[str] = Field( + default=None, + description="""ID of the model to use. For a list of models, see `Google models + <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""", + ) + contents: Optional[ContentListUnion] = Field( + default=None, + description="""The content to embed. Only the `parts.text` fields will be counted. + """, + ) + config: Optional[EmbedContentConfig] = Field( + default=None, + description="""Configuration that contains optional parameters. + """, + ) + + +class _EmbedContentParametersDict(TypedDict, total=False): + """Parameters for the embed_content method.""" + + model: Optional[str] + """ID of the model to use. For a list of models, see `Google models + <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""" + + contents: Optional[ContentListUnionDict] + """The content to embed. Only the `parts.text` fields will be counted. + """ + + config: Optional[EmbedContentConfigDict] + """Configuration that contains optional parameters. + """ + + +_EmbedContentParametersOrDict = Union[ + _EmbedContentParameters, _EmbedContentParametersDict +] + + +class ContentEmbeddingStatistics(_common.BaseModel): + """Statistics of the input text associated with the result of content embedding.""" + + truncated: Optional[bool] = Field( + default=None, + description="""Vertex API only. If the input text was truncated due to having + a length longer than the allowed maximum input. + """, + ) + token_count: Optional[float] = Field( + default=None, + description="""Vertex API only. Number of tokens of the input text. + """, + ) + + +class ContentEmbeddingStatisticsDict(TypedDict, total=False): + """Statistics of the input text associated with the result of content embedding.""" + + truncated: Optional[bool] + """Vertex API only. If the input text was truncated due to having + a length longer than the allowed maximum input. + """ + + token_count: Optional[float] + """Vertex API only. Number of tokens of the input text. + """ + + +ContentEmbeddingStatisticsOrDict = Union[ + ContentEmbeddingStatistics, ContentEmbeddingStatisticsDict +] + + +class ContentEmbedding(_common.BaseModel): + """The embedding generated from an input content.""" + + values: Optional[list[float]] = Field( + default=None, + description="""A list of floats representing an embedding. + """, + ) + statistics: Optional[ContentEmbeddingStatistics] = Field( + default=None, + description="""Vertex API only. Statistics of the input text associated with this + embedding. + """, + ) + + +class ContentEmbeddingDict(TypedDict, total=False): + """The embedding generated from an input content.""" + + values: Optional[list[float]] + """A list of floats representing an embedding. + """ + + statistics: Optional[ContentEmbeddingStatisticsDict] + """Vertex API only. Statistics of the input text associated with this + embedding. + """ + + +ContentEmbeddingOrDict = Union[ContentEmbedding, ContentEmbeddingDict] + + +class EmbedContentMetadata(_common.BaseModel): + """Request-level metadata for the Vertex Embed Content API.""" + + billable_character_count: Optional[int] = Field( + default=None, + description="""Vertex API only. The total number of billable characters included + in the request. 
+ """, + ) + + +class EmbedContentMetadataDict(TypedDict, total=False): + """Request-level metadata for the Vertex Embed Content API.""" + + billable_character_count: Optional[int] + """Vertex API only. The total number of billable characters included + in the request. + """ + + +EmbedContentMetadataOrDict = Union[ + EmbedContentMetadata, EmbedContentMetadataDict +] + + +class EmbedContentResponse(_common.BaseModel): + """Response for the embed_content method.""" + + embeddings: Optional[list[ContentEmbedding]] = Field( + default=None, + description="""The embeddings for each request, in the same order as provided in + the batch request. + """, + ) + metadata: Optional[EmbedContentMetadata] = Field( + default=None, + description="""Vertex API only. Metadata about the request. + """, + ) + + +class EmbedContentResponseDict(TypedDict, total=False): + """Response for the embed_content method.""" + + embeddings: Optional[list[ContentEmbeddingDict]] + """The embeddings for each request, in the same order as provided in + the batch request. + """ + + metadata: Optional[EmbedContentMetadataDict] + """Vertex API only. Metadata about the request. + """ + + +EmbedContentResponseOrDict = Union[ + EmbedContentResponse, EmbedContentResponseDict +] + + +class GenerateImageConfig(_common.BaseModel): + """Class that represents the config for generating an image.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + output_gcs_uri: Optional[str] = Field( + default=None, + description="""Cloud Storage URI used to store the generated images. + """, + ) + negative_prompt: Optional[str] = Field( + default=None, + description="""Description of what to discourage in the generated images. + """, + ) + number_of_images: Optional[int] = Field( + default=None, + description="""Number of images to generate. + """, + ) + guidance_scale: Optional[float] = Field( + default=None, + description="""Controls how much the model adheres to the text prompt. Large + values increase output and prompt alignment, but may compromise image + quality. + """, + ) + seed: Optional[int] = Field( + default=None, + description="""Random seed for image generation. This is not available when + ``add_watermark`` is set to true. + """, + ) + safety_filter_level: Optional[SafetyFilterLevel] = Field( + default=None, + description="""Filter level for safety filtering. + """, + ) + person_generation: Optional[PersonGeneration] = Field( + default=None, + description="""Allows generation of people by the model. + """, + ) + include_safety_attributes: Optional[bool] = Field( + default=None, + description="""Whether to report the safety scores of each image in the response. + """, + ) + include_rai_reason: Optional[bool] = Field( + default=None, + description="""Whether to include the Responsible AI filter reason if the image + is filtered out of the response. + """, + ) + language: Optional[ImagePromptLanguage] = Field( + default=None, + description="""Language of the text in the prompt. + """, + ) + output_mime_type: Optional[str] = Field( + default=None, + description="""MIME type of the generated image. + """, + ) + output_compression_quality: Optional[int] = Field( + default=None, + description="""Compression quality of the generated image (for ``image/jpeg`` + only). + """, + ) + add_watermark: Optional[bool] = Field( + default=None, + description="""Whether to add a watermark to the generated image. 
+ """, + ) + aspect_ratio: Optional[str] = Field( + default=None, + description="""Aspect ratio of the generated image. + """, + ) + + +class GenerateImageConfigDict(TypedDict, total=False): + """Class that represents the config for generating an image.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + output_gcs_uri: Optional[str] + """Cloud Storage URI used to store the generated images. + """ + + negative_prompt: Optional[str] + """Description of what to discourage in the generated images. + """ + + number_of_images: Optional[int] + """Number of images to generate. + """ + + guidance_scale: Optional[float] + """Controls how much the model adheres to the text prompt. Large + values increase output and prompt alignment, but may compromise image + quality. + """ + + seed: Optional[int] + """Random seed for image generation. This is not available when + ``add_watermark`` is set to true. + """ + + safety_filter_level: Optional[SafetyFilterLevel] + """Filter level for safety filtering. + """ + + person_generation: Optional[PersonGeneration] + """Allows generation of people by the model. + """ + + include_safety_attributes: Optional[bool] + """Whether to report the safety scores of each image in the response. + """ + + include_rai_reason: Optional[bool] + """Whether to include the Responsible AI filter reason if the image + is filtered out of the response. + """ + + language: Optional[ImagePromptLanguage] + """Language of the text in the prompt. + """ + + output_mime_type: Optional[str] + """MIME type of the generated image. + """ + + output_compression_quality: Optional[int] + """Compression quality of the generated image (for ``image/jpeg`` + only). + """ + + add_watermark: Optional[bool] + """Whether to add a watermark to the generated image. + """ + + aspect_ratio: Optional[str] + """Aspect ratio of the generated image. + """ + + +GenerateImageConfigOrDict = Union[GenerateImageConfig, GenerateImageConfigDict] + + +class _GenerateImageParameters(_common.BaseModel): + """Class that represents the parameters for generating an image.""" + + model: Optional[str] = Field( + default=None, + description="""ID of the model to use. For a list of models, see `Google models + <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""", + ) + prompt: Optional[str] = Field( + default=None, + description="""Text prompt that typically describes the image to output. + """, + ) + config: Optional[GenerateImageConfig] = Field( + default=None, + description="""Configuration for generating an image. + """, + ) + + +class _GenerateImageParametersDict(TypedDict, total=False): + """Class that represents the parameters for generating an image.""" + + model: Optional[str] + """ID of the model to use. For a list of models, see `Google models + <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""" + + prompt: Optional[str] + """Text prompt that typically describes the image to output. + """ + + config: Optional[GenerateImageConfigDict] + """Configuration for generating an image. + """ + + +_GenerateImageParametersOrDict = Union[ + _GenerateImageParameters, _GenerateImageParametersDict +] + + +class Image(_common.BaseModel): + """Class that represents an image.""" + + gcs_uri: Optional[str] = Field( + default=None, + description="""The Cloud Storage URI of the image. ``Image`` can contain a value + for this field or the ``image_bytes`` field but not both. 
+ """, + ) + image_bytes: Optional[bytes] = Field( + default=None, + description="""The image bytes data. ``Image`` can contain a value for this field + or the ``gcs_uri`` field but not both. + """, + ) + + _loaded_image = None + + """Image.""" + + @staticmethod + def from_file(location: str) -> 'Image': + """Lazy-loads an image from a local file or Google Cloud Storage. + + Args: + location: The local path or Google Cloud Storage URI from which to load + the image. + + Returns: + A loaded image as an `Image` object. + """ + import urllib + import pathlib + + parsed_url = urllib.parse.urlparse(location) + if ( + parsed_url.scheme == 'https' + and parsed_url.netloc == 'storage.googleapis.com' + ): + parsed_url = parsed_url._replace( + scheme='gs', + netloc='', + path=f'/{urllib.parse.unquote(parsed_url.path)}', + ) + location = urllib.parse.urlunparse(parsed_url) + + if parsed_url.scheme == 'gs': + return Image(gcs_uri=location) + + # Load image from local path + image_bytes = pathlib.Path(location).read_bytes() + image = Image(image_bytes=image_bytes) + return image + + def show(self): + """Shows the image. + + This method only works in a notebook environment. + """ + try: + from IPython import display as IPython_display + except ImportError: + IPython_display = None + + try: + from PIL import Image as PIL_Image + except ImportError: + PIL_Image = None + if PIL_Image and IPython_display: + IPython_display.display(self._pil_image) + + @property + def _pil_image(self) -> 'PIL_Image.Image': + try: + from PIL import Image as PIL_Image + except ImportError: + PIL_Image = None + import io + + if self._loaded_image is None: + if not PIL_Image: + raise RuntimeError( + 'The PIL module is not available. Please install the Pillow' + ' package.' + ) + self._loaded_image = PIL_Image.open(io.BytesIO(self.image_bytes)) + return self._loaded_image + + def save(self, location: str): + """Saves the image to a file. + + Args: + location: Local path where to save the image. + """ + import pathlib + + pathlib.Path(location).write_bytes(self.image_bytes) + + +JOB_STATES_SUCCEEDED_VERTEX = [ + 'JOB_STATE_SUCCEEDED', +] + +JOB_STATES_SUCCEEDED_MLDEV = [ + 'ACTIVE', +] + +JOB_STATES_SUCCEEDED = JOB_STATES_SUCCEEDED_VERTEX + JOB_STATES_SUCCEEDED_MLDEV + + +JOB_STATES_ENDED_VERTEX = [ + 'JOB_STATE_SUCCEEDED', + 'JOB_STATE_FAILED', + 'JOB_STATE_CANCELLED', + 'JOB_STATE_EXPIRED', +] + +JOB_STATES_ENDED_MLDEV = [ + 'ACTIVE', + 'FAILED', +] + +JOB_STATES_ENDED = JOB_STATES_ENDED_VERTEX + JOB_STATES_ENDED_MLDEV + + +class ImageDict(TypedDict, total=False): + """Class that represents an image.""" + + gcs_uri: Optional[str] + """The Cloud Storage URI of the image. ``Image`` can contain a value + for this field or the ``image_bytes`` field but not both. + """ + + image_bytes: Optional[bytes] + """The image bytes data. ``Image`` can contain a value for this field + or the ``gcs_uri`` field but not both. + """ + + +ImageOrDict = Union[Image, ImageDict] + + +class GeneratedImage(_common.BaseModel): + """Class that represents an output image.""" + + image: Optional[Image] = Field( + default=None, + description="""The output image data. + """, + ) + rai_filtered_reason: Optional[str] = Field( + default=None, + description="""Responsible AI filter reason if the image is filtered out of the + response. + """, + ) + + +class GeneratedImageDict(TypedDict, total=False): + """Class that represents an output image.""" + + image: Optional[ImageDict] + """The output image data. 
+ """ + + rai_filtered_reason: Optional[str] + """Responsible AI filter reason if the image is filtered out of the + response. + """ + + +GeneratedImageOrDict = Union[GeneratedImage, GeneratedImageDict] + + +class GenerateImageResponse(_common.BaseModel): + """Class that represents the output image response.""" + + generated_images: Optional[list[GeneratedImage]] = Field( + default=None, + description="""List of generated images. + """, + ) + + +class GenerateImageResponseDict(TypedDict, total=False): + """Class that represents the output image response.""" + + generated_images: Optional[list[GeneratedImageDict]] + """List of generated images. + """ + + +GenerateImageResponseOrDict = Union[ + GenerateImageResponse, GenerateImageResponseDict +] + + +class MaskReferenceConfig(_common.BaseModel): + """Configuration for a Mask reference image.""" + + mask_mode: Optional[MaskReferenceMode] = Field( + default=None, + description="""Prompts the model to generate a mask instead of you needing to + provide one (unless MASK_MODE_USER_PROVIDED is used).""", + ) + segmentation_classes: Optional[list[int]] = Field( + default=None, + description="""A list of up to 5 class ids to use for semantic segmentation. + Automatically creates an image mask based on specific objects.""", + ) + mask_dilation: Optional[float] = Field( + default=None, + description="""Dilation percentage of the mask provided. + Float between 0 and 1.""", + ) + + +class MaskReferenceConfigDict(TypedDict, total=False): + """Configuration for a Mask reference image.""" + + mask_mode: Optional[MaskReferenceMode] + """Prompts the model to generate a mask instead of you needing to + provide one (unless MASK_MODE_USER_PROVIDED is used).""" + + segmentation_classes: Optional[list[int]] + """A list of up to 5 class ids to use for semantic segmentation. + Automatically creates an image mask based on specific objects.""" + + mask_dilation: Optional[float] + """Dilation percentage of the mask provided. + Float between 0 and 1.""" + + +MaskReferenceConfigOrDict = Union[MaskReferenceConfig, MaskReferenceConfigDict] + + +class ControlReferenceConfig(_common.BaseModel): + """Configuration for a Control reference image.""" + + control_type: Optional[ControlReferenceType] = Field( + default=None, + description="""The type of control reference image to use.""", + ) + enable_control_image_computation: Optional[bool] = Field( + default=None, + description="""Defaults to False. When set to True, the control image will be + computed by the model based on the control type. When set to False, + the control image must be provided by the user.""", + ) + + +class ControlReferenceConfigDict(TypedDict, total=False): + """Configuration for a Control reference image.""" + + control_type: Optional[ControlReferenceType] + """The type of control reference image to use.""" + + enable_control_image_computation: Optional[bool] + """Defaults to False. When set to True, the control image will be + computed by the model based on the control type. 
When set to False,
+  the control image must be provided by the user."""
+
+
+ControlReferenceConfigOrDict = Union[
+    ControlReferenceConfig, ControlReferenceConfigDict
+]
+
+
+class StyleReferenceConfig(_common.BaseModel):
+  """Configuration for a Style reference image."""
+
+  style_description: Optional[str] = Field(
+      default=None,
+      description="""A text description of the style to use for the generated image.""",
+  )
+
+
+class StyleReferenceConfigDict(TypedDict, total=False):
+  """Configuration for a Style reference image."""
+
+  style_description: Optional[str]
+  """A text description of the style to use for the generated image."""
+
+
+StyleReferenceConfigOrDict = Union[
+    StyleReferenceConfig, StyleReferenceConfigDict
+]
+
+
+class SubjectReferenceConfig(_common.BaseModel):
+  """Configuration for a Subject reference image."""
+
+  subject_type: Optional[SubjectReferenceType] = Field(
+      default=None,
+      description="""The subject type of a subject reference image.""",
+  )
+  subject_description: Optional[str] = Field(
+      default=None, description="""Subject description for the image."""
+  )
+
+
+class SubjectReferenceConfigDict(TypedDict, total=False):
+  """Configuration for a Subject reference image."""
+
+  subject_type: Optional[SubjectReferenceType]
+  """The subject type of a subject reference image."""
+
+  subject_description: Optional[str]
+  """Subject description for the image."""
+
+
+SubjectReferenceConfigOrDict = Union[
+    SubjectReferenceConfig, SubjectReferenceConfigDict
+]
+
+
+class _ReferenceImageAPI(_common.BaseModel):
+  """Private class that represents a Reference image that is sent to API."""
+
+  reference_image: Optional[Image] = Field(
+      default=None,
+      description="""The reference image for the editing operation.""",
+  )
+  reference_id: Optional[int] = Field(
+      default=None, description="""The id of the reference image."""
+  )
+  reference_type: Optional[str] = Field(
+      default=None, description="""The type of the reference image."""
+  )
+  mask_image_config: Optional[MaskReferenceConfig] = Field(
+      default=None,
+      description="""Configuration for the mask reference image.""",
+  )
+  control_image_config: Optional[ControlReferenceConfig] = Field(
+      default=None,
+      description="""Configuration for the control reference image.""",
+  )
+  style_image_config: Optional[StyleReferenceConfig] = Field(
+      default=None,
+      description="""Configuration for the style reference image.""",
+  )
+  subject_image_config: Optional[SubjectReferenceConfig] = Field(
+      default=None,
+      description="""Configuration for the subject reference image.""",
+  )
+
+
+class _ReferenceImageAPIDict(TypedDict, total=False):
+  """Private class that represents a Reference image that is sent to API."""
+
+  reference_image: Optional[ImageDict]
+  """The reference image for the editing operation."""
+
+  reference_id: Optional[int]
+  """The id of the reference image."""
+
+  reference_type: Optional[str]
+  """The type of the reference image."""
+
+  mask_image_config: Optional[MaskReferenceConfigDict]
+  """Configuration for the mask reference image."""
+
+  control_image_config: Optional[ControlReferenceConfigDict]
+  """Configuration for the control reference image."""
+
+  style_image_config: Optional[StyleReferenceConfigDict]
+  """Configuration for the style reference image."""
+
+  subject_image_config: Optional[SubjectReferenceConfigDict]
+  """Configuration for the subject reference image."""
+
+
+_ReferenceImageAPIOrDict = Union[_ReferenceImageAPI, _ReferenceImageAPIDict]
+
+
+class EditImageConfig(_common.BaseModel):
+  """Configuration for editing an image."""
+
+  http_options: Optional[dict[str, Any]] = Field(
+      default=None, description="""Used to override HTTP request options."""
+  )
+  output_gcs_uri: Optional[str] = Field(
+      default=None,
+      description="""Cloud Storage URI used to store the generated images.
+      """,
+  )
+  negative_prompt: Optional[str] = Field(
+      default=None,
+      description="""Description of what to discourage in the generated images.
+      """,
+  )
+  number_of_images: Optional[int] = Field(
+      default=None,
+      description="""Number of images to generate.
+      """,
+  )
+  guidance_scale: Optional[float] = Field(
+      default=None,
+      description="""Controls how much the model adheres to the text prompt. Large
+      values increase output and prompt alignment, but may compromise image
+      quality.
+      """,
+  )
+  seed: Optional[int] = Field(
+      default=None,
+      description="""Random seed for image generation. This is not available when
+      ``add_watermark`` is set to true.
+      """,
+  )
+  safety_filter_level: Optional[SafetyFilterLevel] = Field(
+      default=None,
+      description="""Filter level for safety filtering.
+      """,
+  )
+  person_generation: Optional[PersonGeneration] = Field(
+      default=None,
+      description="""Allows generation of people by the model.
+      """,
+  )
+  include_safety_attributes: Optional[bool] = Field(
+      default=None,
+      description="""Whether to report the safety scores of each image in the response.
+      """,
+  )
+  include_rai_reason: Optional[bool] = Field(
+      default=None,
+      description="""Whether to include the Responsible AI filter reason if the image
+      is filtered out of the response.
+      """,
+  )
+  language: Optional[ImagePromptLanguage] = Field(
+      default=None,
+      description="""Language of the text in the prompt.
+      """,
+  )
+  output_mime_type: Optional[str] = Field(
+      default=None,
+      description="""MIME type of the generated image.
+      """,
+  )
+  output_compression_quality: Optional[int] = Field(
+      default=None,
+      description="""Compression quality of the generated image (for ``image/jpeg``
+      only).
+      """,
+  )
+  edit_mode: Optional[EditMode] = Field(
+      default=None,
+      description="""Describes the editing mode for the request.""",
+  )
+
+
+class EditImageConfigDict(TypedDict, total=False):
+  """Configuration for editing an image."""
+
+  http_options: Optional[dict[str, Any]]
+  """Used to override HTTP request options."""
+
+  output_gcs_uri: Optional[str]
+  """Cloud Storage URI used to store the generated images.
+  """
+
+  negative_prompt: Optional[str]
+  """Description of what to discourage in the generated images.
+  """
+
+  number_of_images: Optional[int]
+  """Number of images to generate.
+  """
+
+  guidance_scale: Optional[float]
+  """Controls how much the model adheres to the text prompt. Large
+  values increase output and prompt alignment, but may compromise image
+  quality.
+  """
+
+  seed: Optional[int]
+  """Random seed for image generation. This is not available when
+  ``add_watermark`` is set to true.
+  """
+
+  safety_filter_level: Optional[SafetyFilterLevel]
+  """Filter level for safety filtering.
+  """
+
+  person_generation: Optional[PersonGeneration]
+  """Allows generation of people by the model.
+  """
+
+  include_safety_attributes: Optional[bool]
+  """Whether to report the safety scores of each image in the response.
+  """
+
+  include_rai_reason: Optional[bool]
+  """Whether to include the Responsible AI filter reason if the image
+  is filtered out of the response.
+  """
+
+  language: Optional[ImagePromptLanguage]
+  """Language of the text in the prompt.
+ """ + + output_mime_type: Optional[str] + """MIME type of the generated image. + """ + + output_compression_quality: Optional[int] + """Compression quality of the generated image (for ``image/jpeg`` + only). + """ + + edit_mode: Optional[EditMode] + """Describes the editing mode for the request.""" + + +EditImageConfigOrDict = Union[EditImageConfig, EditImageConfigDict] + + +class _EditImageParameters(_common.BaseModel): + """Parameters for the request to edit an image.""" + + model: Optional[str] = Field( + default=None, description="""The model to use.""" + ) + prompt: Optional[str] = Field( + default=None, + description="""A text description of the edit to apply to the image.""", + ) + reference_images: Optional[list[_ReferenceImageAPI]] = Field( + default=None, description="""The reference images for Imagen 3 editing.""" + ) + config: Optional[EditImageConfig] = Field( + default=None, description="""Configuration for editing.""" + ) + + +class _EditImageParametersDict(TypedDict, total=False): + """Parameters for the request to edit an image.""" + + model: Optional[str] + """The model to use.""" + + prompt: Optional[str] + """A text description of the edit to apply to the image.""" + + reference_images: Optional[list[_ReferenceImageAPIDict]] + """The reference images for Imagen 3 editing.""" + + config: Optional[EditImageConfigDict] + """Configuration for editing.""" + + +_EditImageParametersOrDict = Union[ + _EditImageParameters, _EditImageParametersDict +] + + +class EditImageResponse(_common.BaseModel): + """Response for the request to edit an image.""" + + generated_images: Optional[list[GeneratedImage]] = Field( + default=None, description="""Generated images.""" + ) + + +class EditImageResponseDict(TypedDict, total=False): + """Response for the request to edit an image.""" + + generated_images: Optional[list[GeneratedImageDict]] + """Generated images.""" + + +EditImageResponseOrDict = Union[EditImageResponse, EditImageResponseDict] + + +class _UpscaleImageAPIConfig(_common.BaseModel): + """API config for UpscaleImage with fields not exposed to users. + + These fields require default values sent to the API which are not intended + to be modifiable or exposed to users in the SDK method. + """ + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + include_rai_reason: Optional[bool] = Field( + default=None, + description="""Whether to include a reason for filtered-out images in the + response.""", + ) + output_mime_type: Optional[str] = Field( + default=None, + description="""The image format that the output should be saved as.""", + ) + output_compression_quality: Optional[int] = Field( + default=None, + description="""The level of compression if the ``output_mime_type`` is + ``image/jpeg``.""", + ) + number_of_images: Optional[int] = Field(default=None, description="""""") + mode: Optional[str] = Field(default=None, description="""""") + + +class _UpscaleImageAPIConfigDict(TypedDict, total=False): + """API config for UpscaleImage with fields not exposed to users. + + These fields require default values sent to the API which are not intended + to be modifiable or exposed to users in the SDK method. 
+ """ + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + include_rai_reason: Optional[bool] + """Whether to include a reason for filtered-out images in the + response.""" + + output_mime_type: Optional[str] + """The image format that the output should be saved as.""" + + output_compression_quality: Optional[int] + """The level of compression if the ``output_mime_type`` is + ``image/jpeg``.""" + + number_of_images: Optional[int] + """""" + + mode: Optional[str] + """""" + + +_UpscaleImageAPIConfigOrDict = Union[ + _UpscaleImageAPIConfig, _UpscaleImageAPIConfigDict +] + + +class _UpscaleImageAPIParameters(_common.BaseModel): + """API parameters for UpscaleImage.""" + + model: Optional[str] = Field( + default=None, description="""The model to use.""" + ) + image: Optional[Image] = Field( + default=None, description="""The input image to upscale.""" + ) + upscale_factor: Optional[str] = Field( + default=None, + description="""The factor to upscale the image (x2 or x4).""", + ) + config: Optional[_UpscaleImageAPIConfig] = Field( + default=None, description="""Configuration for upscaling.""" + ) + + +class _UpscaleImageAPIParametersDict(TypedDict, total=False): + """API parameters for UpscaleImage.""" + + model: Optional[str] + """The model to use.""" + + image: Optional[ImageDict] + """The input image to upscale.""" + + upscale_factor: Optional[str] + """The factor to upscale the image (x2 or x4).""" + + config: Optional[_UpscaleImageAPIConfigDict] + """Configuration for upscaling.""" + + +_UpscaleImageAPIParametersOrDict = Union[ + _UpscaleImageAPIParameters, _UpscaleImageAPIParametersDict +] + + +class UpscaleImageResponse(_common.BaseModel): + + generated_images: Optional[list[GeneratedImage]] = Field( + default=None, description="""Generated images.""" + ) + + +class UpscaleImageResponseDict(TypedDict, total=False): + + generated_images: Optional[list[GeneratedImageDict]] + """Generated images.""" + + +UpscaleImageResponseOrDict = Union[ + UpscaleImageResponse, UpscaleImageResponseDict +] + + +class _GetModelParameters(_common.BaseModel): + + model: Optional[str] = Field(default=None, description="""""") + + +class _GetModelParametersDict(TypedDict, total=False): + + model: Optional[str] + """""" + + +_GetModelParametersOrDict = Union[_GetModelParameters, _GetModelParametersDict] + + +class Endpoint(_common.BaseModel): + """An endpoint where you deploy models.""" + + name: Optional[str] = Field( + default=None, description="""Resource name of the endpoint.""" + ) + deployed_model_id: Optional[str] = Field( + default=None, + description="""ID of the model that's deployed to the endpoint.""", + ) + + +class EndpointDict(TypedDict, total=False): + """An endpoint where you deploy models.""" + + name: Optional[str] + """Resource name of the endpoint.""" + + deployed_model_id: Optional[str] + """ID of the model that's deployed to the endpoint.""" + + +EndpointOrDict = Union[Endpoint, EndpointDict] + + +class TunedModelInfo(_common.BaseModel): + """A tuned machine learning model.""" + + base_model: Optional[str] = Field( + default=None, + description="""ID of the base model that you want to tune.""", + ) + create_time: Optional[datetime.datetime] = Field( + default=None, + description="""Date and time when the base model was created.""", + ) + update_time: Optional[datetime.datetime] = Field( + default=None, + description="""Date and time when the base model was last updated.""", + ) + + +class TunedModelInfoDict(TypedDict, total=False): + """A tuned 
machine learning model.""" + + base_model: Optional[str] + """ID of the base model that you want to tune.""" + + create_time: Optional[datetime.datetime] + """Date and time when the base model was created.""" + + update_time: Optional[datetime.datetime] + """Date and time when the base model was last updated.""" + + +TunedModelInfoOrDict = Union[TunedModelInfo, TunedModelInfoDict] + + +class Model(_common.BaseModel): + """A trained machine learning model.""" + + name: Optional[str] = Field( + default=None, description="""Resource name of the model.""" + ) + display_name: Optional[str] = Field( + default=None, description="""Display name of the model.""" + ) + description: Optional[str] = Field( + default=None, description="""Description of the model.""" + ) + version: Optional[str] = Field( + default=None, + description="""Version ID of the model. A new version is committed when a new + model version is uploaded or trained under an existing model ID. The + version ID is an auto-incrementing decimal number in string + representation.""", + ) + endpoints: Optional[list[Endpoint]] = Field( + default=None, + description="""List of deployed models created from this base model. Note that a + model could have been deployed to endpoints in different locations.""", + ) + labels: Optional[dict[str, str]] = Field( + default=None, + description="""Labels with user-defined metadata to organize your models.""", + ) + tuned_model_info: Optional[TunedModelInfo] = Field( + default=None, + description="""Information about the tuned model from the base model.""", + ) + input_token_limit: Optional[int] = Field( + default=None, + description="""The maximum number of input tokens that the model can handle.""", + ) + output_token_limit: Optional[int] = Field( + default=None, + description="""The maximum number of output tokens that the model can generate.""", + ) + supported_actions: Optional[list[str]] = Field( + default=None, + description="""List of actions that are supported by the model.""", + ) + + +class ModelDict(TypedDict, total=False): + """A trained machine learning model.""" + + name: Optional[str] + """Resource name of the model.""" + + display_name: Optional[str] + """Display name of the model.""" + + description: Optional[str] + """Description of the model.""" + + version: Optional[str] + """Version ID of the model. A new version is committed when a new + model version is uploaded or trained under an existing model ID. The + version ID is an auto-incrementing decimal number in string + representation.""" + + endpoints: Optional[list[EndpointDict]] + """List of deployed models created from this base model. 
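# A short sketch of the Model/ModelDict pairing defined above: every model in
# this module has a TypedDict twin joined by an `...OrDict` union, so callers
# can pass validated objects or plain dicts interchangeably. Field values here
# are hypothetical.
from google.genai import types

m = types.Model(
    name="models/gemini-1.0-pro-002",  # hypothetical resource name
    input_token_limit=32760,
    output_token_limit=8192,
)
if m.tuned_model_info is not None:
    print("tuned from", m.tuned_model_info.base_model)
print(m.display_name or m.name)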
Note that a + model could have been deployed to endpoints in different locations.""" + + labels: Optional[dict[str, str]] + """Labels with user-defined metadata to organize your models.""" + + tuned_model_info: Optional[TunedModelInfoDict] + """Information about the tuned model from the base model.""" + + input_token_limit: Optional[int] + """The maximum number of input tokens that the model can handle.""" + + output_token_limit: Optional[int] + """The maximum number of output tokens that the model can generate.""" + + supported_actions: Optional[list[str]] + """List of actions that are supported by the model.""" + + +ModelOrDict = Union[Model, ModelDict] + + +class ListModelsConfig(_common.BaseModel): + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + page_size: Optional[int] = Field(default=None, description="""""") + page_token: Optional[str] = Field(default=None, description="""""") + filter: Optional[str] = Field(default=None, description="""""") + query_base: Optional[bool] = Field( + default=None, + description="""Set true to list base models, false to list tuned models.""", + ) + + +class ListModelsConfigDict(TypedDict, total=False): + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + page_size: Optional[int] + """""" + + page_token: Optional[str] + """""" + + filter: Optional[str] + """""" + + query_base: Optional[bool] + """Set true to list base models, false to list tuned models.""" + + +ListModelsConfigOrDict = Union[ListModelsConfig, ListModelsConfigDict] + + +class _ListModelsParameters(_common.BaseModel): + + config: Optional[ListModelsConfig] = Field(default=None, description="""""") + + +class _ListModelsParametersDict(TypedDict, total=False): + + config: Optional[ListModelsConfigDict] + """""" + + +_ListModelsParametersOrDict = Union[ + _ListModelsParameters, _ListModelsParametersDict +] + + +class ListModelsResponse(_common.BaseModel): + + next_page_token: Optional[str] = Field(default=None, description="""""") + models: Optional[list[Model]] = Field(default=None, description="""""") + + +class ListModelsResponseDict(TypedDict, total=False): + + next_page_token: Optional[str] + """""" + + models: Optional[list[ModelDict]] + """""" + + +ListModelsResponseOrDict = Union[ListModelsResponse, ListModelsResponseDict] + + +class UpdateModelConfig(_common.BaseModel): + + display_name: Optional[str] = Field(default=None, description="""""") + description: Optional[str] = Field(default=None, description="""""") + + +class UpdateModelConfigDict(TypedDict, total=False): + + display_name: Optional[str] + """""" + + description: Optional[str] + """""" + + +UpdateModelConfigOrDict = Union[UpdateModelConfig, UpdateModelConfigDict] + + +class _UpdateModelParameters(_common.BaseModel): + + model: Optional[str] = Field(default=None, description="""""") + config: Optional[UpdateModelConfig] = Field(default=None, description="""""") + + +class _UpdateModelParametersDict(TypedDict, total=False): + + model: Optional[str] + """""" + + config: Optional[UpdateModelConfigDict] + """""" + + +_UpdateModelParametersOrDict = Union[ + _UpdateModelParameters, _UpdateModelParametersDict +] + + +class _DeleteModelParameters(_common.BaseModel): + + model: Optional[str] = Field(default=None, description="""""") + + +class _DeleteModelParametersDict(TypedDict, total=False): + + model: Optional[str] + """""" + + +_DeleteModelParametersOrDict = Union[ + _DeleteModelParameters, 
_DeleteModelParametersDict +] + + +class DeleteModelResponse(_common.BaseModel): + + pass + + +class DeleteModelResponseDict(TypedDict, total=False): + + pass + + +DeleteModelResponseOrDict = Union[DeleteModelResponse, DeleteModelResponseDict] + + +class GenerationConfig(_common.BaseModel): + """Generation config.""" + + audio_timestamp: Optional[bool] = Field( + default=None, + description="""Optional. If enabled, audio timestamp will be included in the request to the model.""", + ) + candidate_count: Optional[int] = Field( + default=None, + description="""Optional. Number of candidates to generate.""", + ) + frequency_penalty: Optional[float] = Field( + default=None, description="""Optional. Frequency penalties.""" + ) + logprobs: Optional[int] = Field( + default=None, description="""Optional. Logit probabilities.""" + ) + max_output_tokens: Optional[int] = Field( + default=None, + description="""Optional. The maximum number of output tokens to generate per message.""", + ) + presence_penalty: Optional[float] = Field( + default=None, description="""Optional. Positive penalties.""" + ) + response_logprobs: Optional[bool] = Field( + default=None, + description="""Optional. If true, export the logprobs results in response.""", + ) + response_mime_type: Optional[str] = Field( + default=None, + description="""Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature.""", + ) + response_schema: Optional[Schema] = Field( + default=None, + description="""Optional. The `Schema` object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). If set, a compatible response_mime_type must also be set. Compatible mimetypes: `application/json`: Schema for JSON response.""", + ) + routing_config: Optional[GenerationConfigRoutingConfig] = Field( + default=None, description="""Optional. Routing configuration.""" + ) + seed: Optional[int] = Field(default=None, description="""Optional. Seed.""") + stop_sequences: Optional[list[str]] = Field( + default=None, description="""Optional. Stop sequences.""" + ) + temperature: Optional[float] = Field( + default=None, + description="""Optional. Controls the randomness of predictions.""", + ) + top_k: Optional[float] = Field( + default=None, + description="""Optional. If specified, top-k sampling will be used.""", + ) + top_p: Optional[float] = Field( + default=None, + description="""Optional. If specified, nucleus sampling will be used.""", + ) + + +class GenerationConfigDict(TypedDict, total=False): + """Generation config.""" + + audio_timestamp: Optional[bool] + """Optional. If enabled, audio timestamp will be included in the request to the model.""" + + candidate_count: Optional[int] + """Optional. Number of candidates to generate.""" + + frequency_penalty: Optional[float] + """Optional. Frequency penalties.""" + + logprobs: Optional[int] + """Optional. Logit probabilities.""" + + max_output_tokens: Optional[int] + """Optional. The maximum number of output tokens to generate per message.""" + + presence_penalty: Optional[float] + """Optional. Positive penalties.""" + + response_logprobs: Optional[bool] + """Optional. 
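# Sketch of the response_schema/response_mime_type pairing that the field docs
# above require: a response_schema is only valid alongside a compatible mime
# type (`application/json`). The minimal Schema shape below is an assumption.
from google.genai import types

gen_config = types.GenerationConfig(
    temperature=0.0,
    max_output_tokens=256,
    response_mime_type="application/json",
    response_schema=types.Schema(type="OBJECT"),  # assumed minimal schema form
)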
If true, export the logprobs results in response.""" + + response_mime_type: Optional[str] + """Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature.""" + + response_schema: Optional[SchemaDict] + """Optional. The `Schema` object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). If set, a compatible response_mime_type must also be set. Compatible mimetypes: `application/json`: Schema for JSON response.""" + + routing_config: Optional[GenerationConfigRoutingConfigDict] + """Optional. Routing configuration.""" + + seed: Optional[int] + """Optional. Seed.""" + + stop_sequences: Optional[list[str]] + """Optional. Stop sequences.""" + + temperature: Optional[float] + """Optional. Controls the randomness of predictions.""" + + top_k: Optional[float] + """Optional. If specified, top-k sampling will be used.""" + + top_p: Optional[float] + """Optional. If specified, nucleus sampling will be used.""" + + +GenerationConfigOrDict = Union[GenerationConfig, GenerationConfigDict] + + +class CountTokensConfig(_common.BaseModel): + """Config for the count_tokens method.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + system_instruction: Optional[ContentUnion] = Field( + default=None, + description="""Instructions for the model to steer it toward better performance. + """, + ) + tools: Optional[list[Tool]] = Field( + default=None, + description="""Code that enables the system to interact with external systems to + perform an action outside of the knowledge and scope of the model. + """, + ) + generation_config: Optional[GenerationConfig] = Field( + default=None, + description="""Configuration that the model uses to generate the response. Not + supported by the Gemini Developer API. + """, + ) + + +class CountTokensConfigDict(TypedDict, total=False): + """Config for the count_tokens method.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + system_instruction: Optional[ContentUnionDict] + """Instructions for the model to steer it toward better performance. + """ + + tools: Optional[list[ToolDict]] + """Code that enables the system to interact with external systems to + perform an action outside of the knowledge and scope of the model. + """ + + generation_config: Optional[GenerationConfigDict] + """Configuration that the model uses to generate the response. Not + supported by the Gemini Developer API. + """ + + +CountTokensConfigOrDict = Union[CountTokensConfig, CountTokensConfigDict] + + +class _CountTokensParameters(_common.BaseModel): + """Parameters for counting tokens.""" + + model: Optional[str] = Field( + default=None, + description="""ID of the model to use. 
For a list of models, see `Google models + <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""", + ) + contents: Optional[ContentListUnion] = Field( + default=None, description="""Input content.""" + ) + config: Optional[CountTokensConfig] = Field( + default=None, description="""Configuration for counting tokens.""" + ) + + +class _CountTokensParametersDict(TypedDict, total=False): + """Parameters for counting tokens.""" + + model: Optional[str] + """ID of the model to use. For a list of models, see `Google models + <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""" + + contents: Optional[ContentListUnionDict] + """Input content.""" + + config: Optional[CountTokensConfigDict] + """Configuration for counting tokens.""" + + +_CountTokensParametersOrDict = Union[ + _CountTokensParameters, _CountTokensParametersDict +] + + +class CountTokensResponse(_common.BaseModel): + """Response for counting tokens.""" + + total_tokens: Optional[int] = Field( + default=None, description="""Total number of tokens.""" + ) + cached_content_token_count: Optional[int] = Field( + default=None, + description="""Number of tokens in the cached part of the prompt (the cached content).""", + ) + + +class CountTokensResponseDict(TypedDict, total=False): + """Response for counting tokens.""" + + total_tokens: Optional[int] + """Total number of tokens.""" + + cached_content_token_count: Optional[int] + """Number of tokens in the cached part of the prompt (the cached content).""" + + +CountTokensResponseOrDict = Union[CountTokensResponse, CountTokensResponseDict] + + +class ComputeTokensConfig(_common.BaseModel): + """Optional parameters for computing tokens.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class ComputeTokensConfigDict(TypedDict, total=False): + """Optional parameters for computing tokens.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + +ComputeTokensConfigOrDict = Union[ComputeTokensConfig, ComputeTokensConfigDict] + + +class _ComputeTokensParameters(_common.BaseModel): + """Parameters for computing tokens.""" + + model: Optional[str] = Field( + default=None, + description="""ID of the model to use. For a list of models, see `Google models + <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""", + ) + contents: Optional[ContentListUnion] = Field( + default=None, description="""Input content.""" + ) + config: Optional[ComputeTokensConfig] = Field( + default=None, + description="""Optional parameters for the request. + """, + ) + + +class _ComputeTokensParametersDict(TypedDict, total=False): + """Parameters for computing tokens.""" + + model: Optional[str] + """ID of the model to use. For a list of models, see `Google models + <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""" + + contents: Optional[ContentListUnionDict] + """Input content.""" + + config: Optional[ComputeTokensConfigDict] + """Optional parameters for the request. + """ + + +_ComputeTokensParametersOrDict = Union[ + _ComputeTokensParameters, _ComputeTokensParametersDict +] + + +class TokensInfo(_common.BaseModel): + """Tokens info with a list of tokens and the corresponding list of token ids.""" + + role: Optional[str] = Field( + default=None, + description="""Optional. 
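# Sketch of the token-accounting call these parameter/response types model.
# `client.models.count_tokens` is assumed to be the public entry point that
# wraps _CountTokensParameters; credentials are assumed to be configured in
# the environment.
from google import genai
from google.genai import types

client = genai.Client()
resp = client.models.count_tokens(
    model="gemini-1.0-pro-002",
    contents="How many tokens is this?",
    config=types.CountTokensConfig(),
)
print(resp.total_tokens, resp.cached_content_token_count)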
The role from the corresponding Content.""", + ) + token_ids: Optional[list[int]] = Field( + default=None, description="""A list of token ids from the input.""" + ) + tokens: Optional[list[bytes]] = Field( + default=None, description="""A list of tokens from the input.""" + ) + + +class TokensInfoDict(TypedDict, total=False): + """Tokens info with a list of tokens and the corresponding list of token ids.""" + + role: Optional[str] + """Optional. The role from the corresponding Content.""" + + token_ids: Optional[list[int]] + """A list of token ids from the input.""" + + tokens: Optional[list[bytes]] + """A list of tokens from the input.""" + + +TokensInfoOrDict = Union[TokensInfo, TokensInfoDict] + + +class ComputeTokensResponse(_common.BaseModel): + """Response for computing tokens.""" + + tokens_info: Optional[list[TokensInfo]] = Field( + default=None, + description="""Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances with a prompt in each instance, so lists of tokens info are returned for each instance.""", + ) + + +class ComputeTokensResponseDict(TypedDict, total=False): + """Response for computing tokens.""" + + tokens_info: Optional[list[TokensInfoDict]] + """Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances with a prompt in each instance, so lists of tokens info are returned for each instance.""" + + +ComputeTokensResponseOrDict = Union[ + ComputeTokensResponse, ComputeTokensResponseDict +] + + +class GetTuningJobConfig(_common.BaseModel): + """Optional parameters for tunings.get method.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class GetTuningJobConfigDict(TypedDict, total=False): + """Optional parameters for tunings.get method.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + +GetTuningJobConfigOrDict = Union[GetTuningJobConfig, GetTuningJobConfigDict] + + +class _GetTuningJobParameters(_common.BaseModel): + """Parameters for the get method.""" + + name: Optional[str] = Field(default=None, description="""""") + config: Optional[GetTuningJobConfig] = Field( + default=None, description="""Optional parameters for the request.""" + ) + + +class _GetTuningJobParametersDict(TypedDict, total=False): + """Parameters for the get method.""" + + name: Optional[str] + """""" + + config: Optional[GetTuningJobConfigDict] + """Optional parameters for the request.""" + + +_GetTuningJobParametersOrDict = Union[ + _GetTuningJobParameters, _GetTuningJobParametersDict +] + + +class TunedModel(_common.BaseModel): + + model: Optional[str] = Field( + default=None, + description="""Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}`.""", + ) + endpoint: Optional[str] = Field( + default=None, + description="""Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.""", + ) + + +class TunedModelDict(TypedDict, total=False): + + model: Optional[str] + """Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}`.""" + + endpoint: Optional[str] + """Output only. A resource name of an Endpoint.
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.""" + + +TunedModelOrDict = Union[TunedModel, TunedModelDict] + + +class GoogleRpcStatus(_common.BaseModel): + """The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. + + It is used by [gRPC](https://github.com/grpc). Each `Status` message contains + three pieces of data: error code, error message, and error details. You can + find out more about this error model and how to work with it in the [API + Design Guide](https://cloud.google.com/apis/design/errors). + """ + + code: Optional[int] = Field( + default=None, + description="""The status code, which should be an enum value of google.rpc.Code.""", + ) + details: Optional[list[dict[str, Any]]] = Field( + default=None, + description="""A list of messages that carry the error details. There is a common set of message types for APIs to use.""", + ) + message: Optional[str] = Field( + default=None, + description="""A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.""", + ) + + +class GoogleRpcStatusDict(TypedDict, total=False): + """The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. + + It is used by [gRPC](https://github.com/grpc). Each `Status` message contains + three pieces of data: error code, error message, and error details. You can + find out more about this error model and how to work with it in the [API + Design Guide](https://cloud.google.com/apis/design/errors). + """ + + code: Optional[int] + """The status code, which should be an enum value of google.rpc.Code.""" + + details: Optional[list[dict[str, Any]]] + """A list of messages that carry the error details. There is a common set of message types for APIs to use.""" + + message: Optional[str] + """A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.""" + + +GoogleRpcStatusOrDict = Union[GoogleRpcStatus, GoogleRpcStatusDict] + + +class SupervisedHyperParameters(_common.BaseModel): + """Hyperparameters for SFT.""" + + adapter_size: Optional[AdapterSize] = Field( + default=None, description="""Optional. Adapter size for tuning.""" + ) + epoch_count: Optional[int] = Field( + default=None, + description="""Optional. Number of complete passes the model makes over the entire training dataset during training.""", + ) + learning_rate_multiplier: Optional[float] = Field( + default=None, + description="""Optional. Multiplier for adjusting the default learning rate.""", + ) + + +class SupervisedHyperParametersDict(TypedDict, total=False): + """Hyperparameters for SFT.""" + + adapter_size: Optional[AdapterSize] + """Optional. Adapter size for tuning.""" + + epoch_count: Optional[int] + """Optional. Number of complete passes the model makes over the entire training dataset during training.""" + + learning_rate_multiplier: Optional[float] + """Optional. 
Multiplier for adjusting the default learning rate.""" + + +SupervisedHyperParametersOrDict = Union[ + SupervisedHyperParameters, SupervisedHyperParametersDict +] + + +class SupervisedTuningSpec(_common.BaseModel): + """Tuning Spec for Supervised Tuning for first party models.""" + + hyper_parameters: Optional[SupervisedHyperParameters] = Field( + default=None, description="""Optional. Hyperparameters for SFT.""" + ) + training_dataset_uri: Optional[str] = Field( + default=None, + description="""Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + validation_dataset_uri: Optional[str] = Field( + default=None, + description="""Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + + +class SupervisedTuningSpecDict(TypedDict, total=False): + """Tuning Spec for Supervised Tuning for first party models.""" + + hyper_parameters: Optional[SupervisedHyperParametersDict] + """Optional. Hyperparameters for SFT.""" + + training_dataset_uri: Optional[str] + """Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""" + + validation_dataset_uri: Optional[str] + """Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""" + + +SupervisedTuningSpecOrDict = Union[ + SupervisedTuningSpec, SupervisedTuningSpecDict +] + + +class DatasetDistributionDistributionBucket(_common.BaseModel): + """Dataset bucket used to create a histogram for the distribution given a population of values.""" + + count: Optional[int] = Field( + default=None, + description="""Output only. Number of values in the bucket.""", + ) + left: Optional[float] = Field( + default=None, description="""Output only. Left bound of the bucket.""" + ) + right: Optional[float] = Field( + default=None, description="""Output only. Right bound of the bucket.""" + ) + + +class DatasetDistributionDistributionBucketDict(TypedDict, total=False): + """Dataset bucket used to create a histogram for the distribution given a population of values.""" + + count: Optional[int] + """Output only. Number of values in the bucket.""" + + left: Optional[float] + """Output only. Left bound of the bucket.""" + + right: Optional[float] + """Output only. Right bound of the bucket.""" + + +DatasetDistributionDistributionBucketOrDict = Union[ + DatasetDistributionDistributionBucket, + DatasetDistributionDistributionBucketDict, +] + + +class DatasetDistribution(_common.BaseModel): + """Distribution computed over a tuning dataset.""" + + buckets: Optional[list[DatasetDistributionDistributionBucket]] = Field( + default=None, description="""Output only. Defines the histogram bucket.""" + ) + max: Optional[float] = Field( + default=None, + description="""Output only. The maximum of the population values.""", + ) + mean: Optional[float] = Field( + default=None, + description="""Output only. The arithmetic mean of the values in the population.""", + ) + median: Optional[float] = Field( + default=None, + description="""Output only. The median of the values in the population.""", + ) + min: Optional[float] = Field( + default=None, + description="""Output only. The minimum of the population values.""", + ) + p5: Optional[float] = Field( + default=None, + description="""Output only. 
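# Minimal sketch of a supervised tuning spec built from the fields above; the
# Cloud Storage paths are placeholders and, per the field docs, must point to
# JSONL files.
from google.genai import types

sft_spec = types.SupervisedTuningSpec(
    training_dataset_uri="gs://my-bucket/train.jsonl",       # placeholder
    validation_dataset_uri="gs://my-bucket/validate.jsonl",  # optional
    hyper_parameters=types.SupervisedHyperParameters(
        epoch_count=3,
        learning_rate_multiplier=1.0,
    ),
)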
The 5th percentile of the values in the population.""", + ) + p95: Optional[float] = Field( + default=None, + description="""Output only. The 95th percentile of the values in the population.""", + ) + sum: Optional[float] = Field( + default=None, + description="""Output only. Sum of a given population of values.""", + ) + + +class DatasetDistributionDict(TypedDict, total=False): + """Distribution computed over a tuning dataset.""" + + buckets: Optional[list[DatasetDistributionDistributionBucketDict]] + """Output only. Defines the histogram bucket.""" + + max: Optional[float] + """Output only. The maximum of the population values.""" + + mean: Optional[float] + """Output only. The arithmetic mean of the values in the population.""" + + median: Optional[float] + """Output only. The median of the values in the population.""" + + min: Optional[float] + """Output only. The minimum of the population values.""" + + p5: Optional[float] + """Output only. The 5th percentile of the values in the population.""" + + p95: Optional[float] + """Output only. The 95th percentile of the values in the population.""" + + sum: Optional[float] + """Output only. Sum of a given population of values.""" + + +DatasetDistributionOrDict = Union[DatasetDistribution, DatasetDistributionDict] + + +class DatasetStats(_common.BaseModel): + """Statistics computed over a tuning dataset.""" + + total_billable_character_count: Optional[int] = Field( + default=None, + description="""Output only. Number of billable characters in the tuning dataset.""", + ) + total_tuning_character_count: Optional[int] = Field( + default=None, + description="""Output only. Number of tuning characters in the tuning dataset.""", + ) + tuning_dataset_example_count: Optional[int] = Field( + default=None, + description="""Output only. Number of examples in the tuning dataset.""", + ) + tuning_step_count: Optional[int] = Field( + default=None, + description="""Output only. Number of tuning steps for this Tuning Job.""", + ) + user_dataset_examples: Optional[list[Content]] = Field( + default=None, + description="""Output only. Sample user messages in the training dataset uri.""", + ) + user_input_token_distribution: Optional[DatasetDistribution] = Field( + default=None, + description="""Output only. Dataset distributions for the user input tokens.""", + ) + user_message_per_example_distribution: Optional[DatasetDistribution] = Field( + default=None, + description="""Output only. Dataset distributions for the messages per example.""", + ) + user_output_token_distribution: Optional[DatasetDistribution] = Field( + default=None, + description="""Output only. Dataset distributions for the user output tokens.""", + ) + + +class DatasetStatsDict(TypedDict, total=False): + """Statistics computed over a tuning dataset.""" + + total_billable_character_count: Optional[int] + """Output only. Number of billable characters in the tuning dataset.""" + + total_tuning_character_count: Optional[int] + """Output only. Number of tuning characters in the tuning dataset.""" + + tuning_dataset_example_count: Optional[int] + """Output only. Number of examples in the tuning dataset.""" + + tuning_step_count: Optional[int] + """Output only. Number of tuning steps for this Tuning Job.""" + + user_dataset_examples: Optional[list[ContentDict]] + """Output only. Sample user messages in the training dataset uri.""" + + user_input_token_distribution: Optional[DatasetDistributionDict] + """Output only. 
Dataset distributions for the user input tokens.""" + + user_message_per_example_distribution: Optional[DatasetDistributionDict] + """Output only. Dataset distributions for the messages per example.""" + + user_output_token_distribution: Optional[DatasetDistributionDict] + """Output only. Dataset distributions for the user output tokens.""" + + +DatasetStatsOrDict = Union[DatasetStats, DatasetStatsDict] + + +class DistillationDataStats(_common.BaseModel): + """Statistics computed for datasets used for distillation.""" + + training_dataset_stats: Optional[DatasetStats] = Field( + default=None, + description="""Output only. Statistics computed for the training dataset.""", + ) + + +class DistillationDataStatsDict(TypedDict, total=False): + """Statistics computed for datasets used for distillation.""" + + training_dataset_stats: Optional[DatasetStatsDict] + """Output only. Statistics computed for the training dataset.""" + + +DistillationDataStatsOrDict = Union[ + DistillationDataStats, DistillationDataStatsDict +] + + +class SupervisedTuningDatasetDistributionDatasetBucket(_common.BaseModel): + """Dataset bucket used to create a histogram for the distribution given a population of values.""" + + count: Optional[float] = Field( + default=None, + description="""Output only. Number of values in the bucket.""", + ) + left: Optional[float] = Field( + default=None, description="""Output only. Left bound of the bucket.""" + ) + right: Optional[float] = Field( + default=None, description="""Output only. Right bound of the bucket.""" + ) + + +class SupervisedTuningDatasetDistributionDatasetBucketDict( + TypedDict, total=False +): + """Dataset bucket used to create a histogram for the distribution given a population of values.""" + + count: Optional[float] + """Output only. Number of values in the bucket.""" + + left: Optional[float] + """Output only. Left bound of the bucket.""" + + right: Optional[float] + """Output only. Right bound of the bucket.""" + + +SupervisedTuningDatasetDistributionDatasetBucketOrDict = Union[ + SupervisedTuningDatasetDistributionDatasetBucket, + SupervisedTuningDatasetDistributionDatasetBucketDict, +] + + +class SupervisedTuningDatasetDistribution(_common.BaseModel): + """Dataset distribution for Supervised Tuning.""" + + billable_sum: Optional[int] = Field( + default=None, + description="""Output only. Sum of a given population of values that are billable.""", + ) + buckets: Optional[list[SupervisedTuningDatasetDistributionDatasetBucket]] = ( + Field( + default=None, + description="""Output only. Defines the histogram bucket.""", + ) + ) + max: Optional[float] = Field( + default=None, + description="""Output only. The maximum of the population values.""", + ) + mean: Optional[float] = Field( + default=None, + description="""Output only. The arithmetic mean of the values in the population.""", + ) + median: Optional[float] = Field( + default=None, + description="""Output only. The median of the values in the population.""", + ) + min: Optional[float] = Field( + default=None, + description="""Output only. The minimum of the population values.""", + ) + p5: Optional[float] = Field( + default=None, + description="""Output only. The 5th percentile of the values in the population.""", + ) + p95: Optional[float] = Field( + default=None, + description="""Output only. The 95th percentile of the values in the population.""", + ) + sum: Optional[int] = Field( + default=None, + description="""Output only. 
Sum of a given population of values.""", + ) + + +class SupervisedTuningDatasetDistributionDict(TypedDict, total=False): + """Dataset distribution for Supervised Tuning.""" + + billable_sum: Optional[int] + """Output only. Sum of a given population of values that are billable.""" + + buckets: Optional[list[SupervisedTuningDatasetDistributionDatasetBucketDict]] + """Output only. Defines the histogram bucket.""" + + max: Optional[float] + """Output only. The maximum of the population values.""" + + mean: Optional[float] + """Output only. The arithmetic mean of the values in the population.""" + + median: Optional[float] + """Output only. The median of the values in the population.""" + + min: Optional[float] + """Output only. The minimum of the population values.""" + + p5: Optional[float] + """Output only. The 5th percentile of the values in the population.""" + + p95: Optional[float] + """Output only. The 95th percentile of the values in the population.""" + + sum: Optional[int] + """Output only. Sum of a given population of values.""" + + +SupervisedTuningDatasetDistributionOrDict = Union[ + SupervisedTuningDatasetDistribution, SupervisedTuningDatasetDistributionDict +] + + +class SupervisedTuningDataStats(_common.BaseModel): + """Tuning data statistics for Supervised Tuning.""" + + total_billable_character_count: Optional[int] = Field( + default=None, + description="""Output only. Number of billable characters in the tuning dataset.""", + ) + total_billable_token_count: Optional[int] = Field( + default=None, + description="""Output only. Number of billable tokens in the tuning dataset.""", + ) + total_truncated_example_count: Optional[int] = Field( + default=None, + description="""The number of examples in the dataset that have been truncated by any amount.""", + ) + total_tuning_character_count: Optional[int] = Field( + default=None, + description="""Output only. Number of tuning characters in the tuning dataset.""", + ) + truncated_example_indices: Optional[list[int]] = Field( + default=None, + description="""A partial sample of the indices (starting from 1) of the truncated examples.""", + ) + tuning_dataset_example_count: Optional[int] = Field( + default=None, + description="""Output only. Number of examples in the tuning dataset.""", + ) + tuning_step_count: Optional[int] = Field( + default=None, + description="""Output only. Number of tuning steps for this Tuning Job.""", + ) + user_dataset_examples: Optional[list[Content]] = Field( + default=None, + description="""Output only. Sample user messages in the training dataset uri.""", + ) + user_input_token_distribution: Optional[ + SupervisedTuningDatasetDistribution + ] = Field( + default=None, + description="""Output only. Dataset distributions for the user input tokens.""", + ) + user_message_per_example_distribution: Optional[ + SupervisedTuningDatasetDistribution + ] = Field( + default=None, + description="""Output only. Dataset distributions for the messages per example.""", + ) + user_output_token_distribution: Optional[ + SupervisedTuningDatasetDistribution + ] = Field( + default=None, + description="""Output only. Dataset distributions for the user output tokens.""", + ) + + +class SupervisedTuningDataStatsDict(TypedDict, total=False): + """Tuning data statistics for Supervised Tuning.""" + + total_billable_character_count: Optional[int] + """Output only. Number of billable characters in the tuning dataset.""" + + total_billable_token_count: Optional[int] + """Output only. 
Number of billable tokens in the tuning dataset.""" + + total_truncated_example_count: Optional[int] + """The number of examples in the dataset that have been truncated by any amount.""" + + total_tuning_character_count: Optional[int] + """Output only. Number of tuning characters in the tuning dataset.""" + + truncated_example_indices: Optional[list[int]] + """A partial sample of the indices (starting from 1) of the truncated examples.""" + + tuning_dataset_example_count: Optional[int] + """Output only. Number of examples in the tuning dataset.""" + + tuning_step_count: Optional[int] + """Output only. Number of tuning steps for this Tuning Job.""" + + user_dataset_examples: Optional[list[ContentDict]] + """Output only. Sample user messages in the training dataset uri.""" + + user_input_token_distribution: Optional[ + SupervisedTuningDatasetDistributionDict + ] + """Output only. Dataset distributions for the user input tokens.""" + + user_message_per_example_distribution: Optional[ + SupervisedTuningDatasetDistributionDict + ] + """Output only. Dataset distributions for the messages per example.""" + + user_output_token_distribution: Optional[ + SupervisedTuningDatasetDistributionDict + ] + """Output only. Dataset distributions for the user output tokens.""" + + +SupervisedTuningDataStatsOrDict = Union[ + SupervisedTuningDataStats, SupervisedTuningDataStatsDict +] + + +class TuningDataStats(_common.BaseModel): + """The tuning data statistic values for TuningJob.""" + + distillation_data_stats: Optional[DistillationDataStats] = Field( + default=None, description="""Output only. Statistics for distillation.""" + ) + supervised_tuning_data_stats: Optional[SupervisedTuningDataStats] = Field( + default=None, description="""The SFT Tuning data stats.""" + ) + + +class TuningDataStatsDict(TypedDict, total=False): + """The tuning data statistic values for TuningJob.""" + + distillation_data_stats: Optional[DistillationDataStatsDict] + """Output only. Statistics for distillation.""" + + supervised_tuning_data_stats: Optional[SupervisedTuningDataStatsDict] + """The SFT Tuning data stats.""" + + +TuningDataStatsOrDict = Union[TuningDataStats, TuningDataStatsDict] + + +class EncryptionSpec(_common.BaseModel): + """Represents a customer-managed encryption key spec that can be applied to a top-level resource.""" + + kms_key_name: Optional[str] = Field( + default=None, + description="""Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.""", + ) + + +class EncryptionSpecDict(TypedDict, total=False): + """Represents a customer-managed encryption key spec that can be applied to a top-level resource.""" + + kms_key_name: Optional[str] + """Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.""" + + +EncryptionSpecOrDict = Union[EncryptionSpec, EncryptionSpecDict] + + +class DistillationHyperParameters(_common.BaseModel): + """Hyperparameters for Distillation.""" + + adapter_size: Optional[AdapterSize] = Field( + default=None, description="""Optional. 
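# Sketch of a customer-managed encryption key spec. The key name follows the
# format given in the kms_key_name docs above, with placeholder project,
# region, key ring, and key names.
from google.genai import types

cmek = types.EncryptionSpec(
    kms_key_name=(
        "projects/my-project/locations/us-central1/"
        "keyRings/my-kr/cryptoKeys/my-key"  # must be in the resource's region
    )
)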
Adapter size for distillation.""" + ) + epoch_count: Optional[int] = Field( + default=None, + description="""Optional. Number of complete passes the model makes over the entire training dataset during training.""", + ) + learning_rate_multiplier: Optional[float] = Field( + default=None, + description="""Optional. Multiplier for adjusting the default learning rate.""", + ) + + +class DistillationHyperParametersDict(TypedDict, total=False): + """Hyperparameters for Distillation.""" + + adapter_size: Optional[AdapterSize] + """Optional. Adapter size for distillation.""" + + epoch_count: Optional[int] + """Optional. Number of complete passes the model makes over the entire training dataset during training.""" + + learning_rate_multiplier: Optional[float] + """Optional. Multiplier for adjusting the default learning rate.""" + + +DistillationHyperParametersOrDict = Union[ + DistillationHyperParameters, DistillationHyperParametersDict +] + + +class DistillationSpec(_common.BaseModel): + """Tuning Spec for Distillation.""" + + base_teacher_model: Optional[str] = Field( + default=None, + description="""The base teacher model that is being distilled, e.g., "gemini-1.0-pro-002".""", + ) + hyper_parameters: Optional[DistillationHyperParameters] = Field( + default=None, + description="""Optional. Hyperparameters for Distillation.""", + ) + pipeline_root_directory: Optional[str] = Field( + default=None, + description="""Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the distillation pipeline. It is used by the system to generate the paths of output artifacts.""", + ) + student_model: Optional[str] = Field( + default=None, + description="""The student model that is being tuned, e.g., "google/gemma-2b-1.1-it".""", + ) + training_dataset_uri: Optional[str] = Field( + default=None, + description="""Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + tuned_teacher_model_source: Optional[str] = Field( + default=None, + description="""The resource name of the Tuned teacher model. Format: `projects/{project}/locations/{location}/models/{model}`.""", + ) + validation_dataset_uri: Optional[str] = Field( + default=None, + description="""Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + + +class DistillationSpecDict(TypedDict, total=False): + """Tuning Spec for Distillation.""" + + base_teacher_model: Optional[str] + """The base teacher model that is being distilled, e.g., "gemini-1.0-pro-002".""" + + hyper_parameters: Optional[DistillationHyperParametersDict] + """Optional. Hyperparameters for Distillation.""" + + pipeline_root_directory: Optional[str] + """Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the distillation pipeline. It is used by the system to generate the paths of output artifacts.""" + + student_model: Optional[str] + """The student model that is being tuned, e.g., "google/gemma-2b-1.1-it".""" + + training_dataset_uri: Optional[str] + """Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""" + + tuned_teacher_model_source: Optional[str] + """The resource name of the Tuned teacher model. Format: `projects/{project}/locations/{location}/models/{model}`.""" + + validation_dataset_uri: Optional[str] + """Optional. 
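# Minimal distillation spec sketch drawn from the required fields above; the
# model names echo the docstring examples and the bucket paths are placeholders.
from google.genai import types

distill = types.DistillationSpec(
    base_teacher_model="gemini-1.0-pro-002",
    student_model="google/gemma-2b-1.1-it",
    training_dataset_uri="gs://my-bucket/train.jsonl",
    pipeline_root_directory="gs://my-bucket/distill-output/",
)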
Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""" + + +DistillationSpecOrDict = Union[DistillationSpec, DistillationSpecDict] + + +class PartnerModelTuningSpec(_common.BaseModel): + """Tuning spec for Partner models.""" + + hyper_parameters: Optional[dict[str, Any]] = Field( + default=None, + description="""Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model.""", + ) + training_dataset_uri: Optional[str] = Field( + default=None, + description="""Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + validation_dataset_uri: Optional[str] = Field( + default=None, + description="""Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + + +class PartnerModelTuningSpecDict(TypedDict, total=False): + """Tuning spec for Partner models.""" + + hyper_parameters: Optional[dict[str, Any]] + """Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model.""" + + training_dataset_uri: Optional[str] + """Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""" + + validation_dataset_uri: Optional[str] + """Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""" + + +PartnerModelTuningSpecOrDict = Union[ + PartnerModelTuningSpec, PartnerModelTuningSpecDict +] + + +class TuningJob(_common.BaseModel): + """A tuning job.""" + + name: Optional[str] = Field( + default=None, + description="""Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}`""", + ) + state: Optional[JobState] = Field( + default=None, + description="""Output only. The detailed state of the job.""", + ) + create_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. Time when the TuningJob was created.""", + ) + start_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. Time when the TuningJob first entered the `JOB_STATE_RUNNING` state.""", + ) + end_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`.""", + ) + update_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. Time when the TuningJob was most recently updated.""", + ) + error: Optional[GoogleRpcStatus] = Field( + default=None, + description="""Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.""", + ) + description: Optional[str] = Field( + default=None, + description="""Optional. The description of the TuningJob.""", + ) + base_model: Optional[str] = Field( + default=None, + description="""The base model that is being tuned, e.g., "gemini-1.0-pro-002".""", + ) + tuned_model: Optional[TunedModel] = Field( + default=None, + description="""Output only.
The tuned model resources associated with this TuningJob.""", + ) + supervised_tuning_spec: Optional[SupervisedTuningSpec] = Field( + default=None, description="""Tuning Spec for Supervised Fine Tuning.""" + ) + tuning_data_stats: Optional[TuningDataStats] = Field( + default=None, + description="""Output only. The tuning data statistics associated with this TuningJob.""", + ) + encryption_spec: Optional[EncryptionSpec] = Field( + default=None, + description="""Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key.""", + ) + distillation_spec: Optional[DistillationSpec] = Field( + default=None, description="""Tuning Spec for Distillation.""" + ) + partner_model_tuning_spec: Optional[PartnerModelTuningSpec] = Field( + default=None, + description="""Tuning Spec for open-source and third-party Partner models.""", + ) + pipeline_job: Optional[str] = Field( + default=None, + description="""Output only. The resource name of the PipelineJob associated with the TuningJob. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`.""", + ) + experiment: Optional[str] = Field( + default=None, + description="""Output only. The Experiment associated with this TuningJob.""", + ) + labels: Optional[dict[str, str]] = Field( + default=None, + description="""Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.""", + ) + tuned_model_display_name: Optional[str] = Field( + default=None, + description="""Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters.""", + ) + + @property + def has_ended(self) -> bool: + """Whether the tuning job has ended.""" + return self.state in JOB_STATES_ENDED + + @property + def has_succeeded(self) -> bool: + """Whether the tuning job has succeeded.""" + return self.state in JOB_STATES_SUCCEEDED + + +class TuningJobDict(TypedDict, total=False): + """A tuning job.""" + + name: Optional[str] + """Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}`""" + + state: Optional[JobState] + """Output only. The detailed state of the job.""" + + create_time: Optional[datetime.datetime] + """Output only. Time when the TuningJob was created.""" + + start_time: Optional[datetime.datetime] + """Output only. Time when the TuningJob first entered the `JOB_STATE_RUNNING` state.""" + + end_time: Optional[datetime.datetime] + """Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`.""" + + update_time: Optional[datetime.datetime] + """Output only. Time when the TuningJob was most recently updated.""" + + error: Optional[GoogleRpcStatusDict] + """Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.""" + + description: Optional[str] + """Optional. The description of the TuningJob.""" + + base_model: Optional[str] + """The base model that is being tuned, e.g., "gemini-1.0-pro-002".
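# Sketch of polling a job with the has_ended/has_succeeded properties defined
# above. `client.tunings.get` is assumed to be the retrieval method, and the
# job resource name is a placeholder.
import time

from google import genai

client = genai.Client()
job = client.tunings.get(name="projects/p/locations/l/tuningJobs/123")  # placeholder
while not job.has_ended:
    time.sleep(60)
    job = client.tunings.get(name=job.name)
print("succeeded" if job.has_succeeded else f"ended in state {job.state}")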
.""" + + tuned_model: Optional[TunedModelDict] + """Output only. The tuned model resources assiociated with this TuningJob.""" + + supervised_tuning_spec: Optional[SupervisedTuningSpecDict] + """Tuning Spec for Supervised Fine Tuning.""" + + tuning_data_stats: Optional[TuningDataStatsDict] + """Output only. The tuning data statistics associated with this TuningJob.""" + + encryption_spec: Optional[EncryptionSpecDict] + """Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key.""" + + distillation_spec: Optional[DistillationSpecDict] + """Tuning Spec for Distillation.""" + + partner_model_tuning_spec: Optional[PartnerModelTuningSpecDict] + """Tuning Spec for open sourced and third party Partner models.""" + + pipeline_job: Optional[str] + """Output only. The resource name of the PipelineJob associated with the TuningJob. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`.""" + + experiment: Optional[str] + """Output only. The Experiment associated with this TuningJob.""" + + labels: Optional[dict[str, str]] + """Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.""" + + tuned_model_display_name: Optional[str] + """Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters.""" + + +TuningJobOrDict = Union[TuningJob, TuningJobDict] + + +class ListTuningJobsConfig(_common.BaseModel): + """Configuration for the list tuning jobs method.""" + + page_size: Optional[int] = Field(default=None, description="""""") + page_token: Optional[str] = Field(default=None, description="""""") + filter: Optional[str] = Field(default=None, description="""""") + + +class ListTuningJobsConfigDict(TypedDict, total=False): + """Configuration for the list tuning jobs method.""" + + page_size: Optional[int] + """""" + + page_token: Optional[str] + """""" + + filter: Optional[str] + """""" + + +ListTuningJobsConfigOrDict = Union[ + ListTuningJobsConfig, ListTuningJobsConfigDict +] + + +class _ListTuningJobsParameters(_common.BaseModel): + """Parameters for the list tuning jobs method.""" + + config: Optional[ListTuningJobsConfig] = Field( + default=None, description="""""" + ) + + +class _ListTuningJobsParametersDict(TypedDict, total=False): + """Parameters for the list tuning jobs method.""" + + config: Optional[ListTuningJobsConfigDict] + """""" + + +_ListTuningJobsParametersOrDict = Union[ + _ListTuningJobsParameters, _ListTuningJobsParametersDict +] + + +class ListTuningJobsResponse(_common.BaseModel): + """Response for the list tuning jobs method.""" + + next_page_token: Optional[str] = Field( + default=None, + description="""A token to retrieve the next page of results. Pass to ListTuningJobsRequest.page_token to obtain that page.""", + ) + tuning_jobs: Optional[list[TuningJob]] = Field( + default=None, description="""List of TuningJobs in the requested page.""" + ) + + +class ListTuningJobsResponseDict(TypedDict, total=False): + """Response for the list tuning jobs method.""" + + next_page_token: Optional[str] + """A token to retrieve the next page of results. 
Pass to ListTuningJobsRequest.page_token to obtain that page.""" + + tuning_jobs: Optional[list[TuningJobDict]] + """List of TuningJobs in the requested page.""" + + +ListTuningJobsResponseOrDict = Union[ + ListTuningJobsResponse, ListTuningJobsResponseDict +] + + +class TuningExample(_common.BaseModel): + + text_input: Optional[str] = Field( + default=None, description="""Text model input.""" + ) + output: Optional[str] = Field( + default=None, description="""The expected model output.""" + ) + + +class TuningExampleDict(TypedDict, total=False): + + text_input: Optional[str] + """Text model input.""" + + output: Optional[str] + """The expected model output.""" + + +TuningExampleOrDict = Union[TuningExample, TuningExampleDict] + + +class TuningDataset(_common.BaseModel): + """Supervised fine-tuning training dataset.""" + + gcs_uri: Optional[str] = Field( + default=None, + description="""GCS URI of the file containing training dataset in JSONL format.""", + ) + examples: Optional[list[TuningExample]] = Field( + default=None, + description="""Inline examples with simple input/output text.""", + ) + + +class TuningDatasetDict(TypedDict, total=False): + """Supervised fine-tuning training dataset.""" + + gcs_uri: Optional[str] + """GCS URI of the file containing training dataset in JSONL format.""" + + examples: Optional[list[TuningExampleDict]] + """Inline examples with simple input/output text.""" + + +TuningDatasetOrDict = Union[TuningDataset, TuningDatasetDict] + + +class TuningValidationDataset(_common.BaseModel): + + gcs_uri: Optional[str] = Field( + default=None, + description="""GCS URI of the file containing validation dataset in JSONL format.""", + ) + + +class TuningValidationDatasetDict(TypedDict, total=False): + + gcs_uri: Optional[str] + """GCS URI of the file containing validation dataset in JSONL format.""" + + +TuningValidationDatasetOrDict = Union[ + TuningValidationDataset, TuningValidationDatasetDict +] + + +class CreateTuningJobConfig(_common.BaseModel): + """Supervised fine-tuning job creation request - optional fields.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + validation_dataset: Optional[TuningValidationDataset] = Field( + default=None, + description="""Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + tuned_model_display_name: Optional[str] = Field( + default=None, + description="""The display name of the tuned Model. The name can be up to 128 characters long and can consist of any UTF-8 characters.""", + ) + description: Optional[str] = Field( + default=None, description="""The description of the TuningJob""" + ) + epoch_count: Optional[int] = Field( + default=None, + description="""Number of complete passes the model makes over the entire training dataset during training.""", + ) + learning_rate_multiplier: Optional[float] = Field( + default=None, + description="""Multiplier for adjusting the default learning rate.""", + ) + adapter_size: Optional[AdapterSize] = Field( + default=None, description="""Adapter size for tuning.""" + ) + batch_size: Optional[int] = Field( + default=None, + description="""The batch size hyperparameter for tuning. If not set, a default of 4 or 16 will be used based on the number of training examples.""", + ) + learning_rate: Optional[float] = Field( + default=None, + description="""The learning rate hyperparameter for tuning.
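# --- Editor's example (a sketch; paths and texts are placeholders):
# TuningDataset accepts either a GCS JSONL file or inline TuningExample
# pairs, the two ways of supplying training data defined just above.
from google.genai import types

from_gcs = types.TuningDataset(gcs_uri='gs://my-bucket/train.jsonl')
inline = types.TuningDataset(
    examples=[
        types.TuningExample(text_input='Why is the sky blue?',
                            output='Because of Rayleigh scattering.'),
    ]
)
# --- end editor's example ---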
If not set, a default of 0.001 or 0.0002 will be calculated based on the number of training examples.""", + ) + + +class CreateTuningJobConfigDict(TypedDict, total=False): + """Supervised fine-tuning job creation request - optional fields.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + validation_dataset: Optional[TuningValidationDatasetDict] + """Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""" + + tuned_model_display_name: Optional[str] + """The display name of the tuned Model. The name can be up to 128 characters long and can consist of any UTF-8 characters.""" + + description: Optional[str] + """The description of the TuningJob""" + + epoch_count: Optional[int] + """Number of complete passes the model makes over the entire training dataset during training.""" + + learning_rate_multiplier: Optional[float] + """Multiplier for adjusting the default learning rate.""" + + adapter_size: Optional[AdapterSize] + """Adapter size for tuning.""" + + batch_size: Optional[int] + """The batch size hyperparameter for tuning. If not set, a default of 4 or 16 will be used based on the number of training examples.""" + + learning_rate: Optional[float] + """The learning rate hyperparameter for tuning. If not set, a default of 0.001 or 0.0002 will be calculated based on the number of training examples.""" + + +CreateTuningJobConfigOrDict = Union[ + CreateTuningJobConfig, CreateTuningJobConfigDict +] + + +class _CreateTuningJobParameters(_common.BaseModel): + """Supervised fine-tuning job creation parameters - optional fields.""" + + base_model: Optional[str] = Field( + default=None, + description="""The base model that is being tuned, e.g., "gemini-1.0-pro-002".""", + ) + training_dataset: Optional[TuningDataset] = Field( + default=None, + description="""Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + config: Optional[CreateTuningJobConfig] = Field( + default=None, description="""Configuration for the tuning job.""" + ) + + +class _CreateTuningJobParametersDict(TypedDict, total=False): + """Supervised fine-tuning job creation parameters - optional fields.""" + + base_model: Optional[str] + """The base model that is being tuned, e.g., "gemini-1.0-pro-002".""" + + training_dataset: Optional[TuningDatasetDict] + """Cloud Storage path to file containing training dataset for tuning.
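# --- Editor's example: a hedged sketch of passing the config above to the
# SDK's tuning entry point (assumed to be client.tunings.tune, as in the
# google-genai package); the model name and URI are placeholders.
from google import genai
from google.genai import types

client = genai.Client()
job = client.tunings.tune(
    base_model='gemini-1.0-pro-002',
    training_dataset=types.TuningDataset(gcs_uri='gs://my-bucket/train.jsonl'),
    config=types.CreateTuningJobConfig(
        epoch_count=1,
        tuned_model_display_name='my-tuned-model',
    ),
)
# --- end editor's example ---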
The dataset must be formatted as a JSONL file.""" + + config: Optional[CreateTuningJobConfigDict] + """Configuration for the tuning job.""" + + +_CreateTuningJobParametersOrDict = Union[ + _CreateTuningJobParameters, _CreateTuningJobParametersDict +] + + +class TuningJobOrOperation(_common.BaseModel): + """A tuning job or a long-running operation that resolves to a tuning job.""" + + tuning_job: Optional[TuningJob] = Field(default=None, description="""""") + + +class TuningJobOrOperationDict(TypedDict, total=False): + """A tuning job or a long-running operation that resolves to a tuning job.""" + + tuning_job: Optional[TuningJobDict] + """""" + + +TuningJobOrOperationOrDict = Union[ + TuningJobOrOperation, TuningJobOrOperationDict +] + + +class DistillationDataset(_common.BaseModel): + """Training dataset.""" + + gcs_uri: Optional[str] = Field( + default=None, + description="""GCS URI of the file containing training dataset in JSONL format.""", + ) + + +class DistillationDatasetDict(TypedDict, total=False): + """Training dataset.""" + + gcs_uri: Optional[str] + """GCS URI of the file containing training dataset in JSONL format.""" + + +DistillationDatasetOrDict = Union[DistillationDataset, DistillationDatasetDict] + + +class DistillationValidationDataset(_common.BaseModel): + """Validation dataset.""" + + gcs_uri: Optional[str] = Field( + default=None, + description="""GCS URI of the file containing validation dataset in JSONL format.""", + ) + + +class DistillationValidationDatasetDict(TypedDict, total=False): + """Validation dataset.""" + + gcs_uri: Optional[str] + """GCS URI of the file containing validation dataset in JSONL format.""" + + +DistillationValidationDatasetOrDict = Union[ + DistillationValidationDataset, DistillationValidationDatasetDict +] + + +class CreateDistillationJobConfig(_common.BaseModel): + """Distillation job creation request - optional fields.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + validation_dataset: Optional[DistillationValidationDataset] = Field( + default=None, + description="""Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + tuned_model_display_name: Optional[str] = Field( + default=None, + description="""The display name of the tuned Model. The name can be up to 128 characters long and can consist of any UTF-8 characters.""", + ) + epoch_count: Optional[int] = Field( + default=None, + description="""Number of complete passes the model makes over the entire training dataset during training.""", + ) + learning_rate_multiplier: Optional[float] = Field( + default=None, + description="""Multiplier for adjusting the default learning rate.""", + ) + adapter_size: Optional[AdapterSize] = Field( + default=None, description="""Adapter size for tuning.""" + ) + pipeline_root_directory: Optional[str] = Field( + default=None, + description="""The resource name of the PipelineJob associated with the TuningJob. Format:`projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`.""", + ) + + +class CreateDistillationJobConfigDict(TypedDict, total=False): + """Distillation job creation request - optional fields.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + validation_dataset: Optional[DistillationValidationDatasetDict] + """Cloud Storage path to file containing validation dataset for tuning.
The dataset must be formatted as a JSONL file.""" + + tuned_model_display_name: Optional[str] + """The display name of the tuned Model. The name can be up to 128 characters long and can consist of any UTF-8 characters.""" + + epoch_count: Optional[int] + """Number of complete passes the model makes over the entire training dataset during training.""" + + learning_rate_multiplier: Optional[float] + """Multiplier for adjusting the default learning rate.""" + + adapter_size: Optional[AdapterSize] + """Adapter size for tuning.""" + + pipeline_root_directory: Optional[str] + """The resource name of the PipelineJob associated with the TuningJob. Format:`projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`.""" + + +CreateDistillationJobConfigOrDict = Union[ + CreateDistillationJobConfig, CreateDistillationJobConfigDict +] + + +class _CreateDistillationJobParameters(_common.BaseModel): + """Distillation job creation parameters - optional fields.""" + + student_model: Optional[str] = Field( + default=None, + description="""The student model that is being tuned, e.g. ``google/gemma-2b-1.1-it``.""", + ) + teacher_model: Optional[str] = Field( + default=None, + description="""The teacher model that is being distilled from, e.g. ``gemini-1.0-pro-002``.""", + ) + training_dataset: Optional[DistillationDataset] = Field( + default=None, + description="""Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + config: Optional[CreateDistillationJobConfig] = Field( + default=None, description="""Configuration for the distillation job.""" + ) + + +class _CreateDistillationJobParametersDict(TypedDict, total=False): + """Distillation job creation parameters - optional fields.""" + + student_model: Optional[str] + """The student model that is being tuned, e.g. ``google/gemma-2b-1.1-it``.""" + + teacher_model: Optional[str] + """The teacher model that is being distilled from, e.g. ``gemini-1.0-pro-002``.""" + + training_dataset: Optional[DistillationDatasetDict] + """Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""" + + config: Optional[CreateDistillationJobConfigDict] + """Configuration for the distillation job.""" + + +_CreateDistillationJobParametersOrDict = Union[ + _CreateDistillationJobParameters, _CreateDistillationJobParametersDict +] + + +class CreateCachedContentConfig(_common.BaseModel): + """Class for configuring optional cached content creation parameters.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + ttl: Optional[str] = Field( + default=None, + description="""The TTL for this resource. The expiration time is computed: now + TTL.""", + ) + expire_time: Optional[datetime.datetime] = Field( + default=None, + description="""Timestamp of when this resource is considered expired.""", + ) + display_name: Optional[str] = Field( + default=None, + description="""The user-generated meaningful display name of the cached content. + """, + ) + contents: Optional[ContentListUnion] = Field( + default=None, + description="""The content to cache. + """, + ) + system_instruction: Optional[ContentUnion] = Field( + default=None, + description="""Developer set system instruction. + """, + ) + tools: Optional[list[Tool]] = Field( + default=None, + description="""A list of `Tools` the model may use to generate the next response. 
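# --- Editor's example: these request types subclass _common.BaseModel (a
# pydantic model), so a distillation config can be built and serialized
# directly. Whether the client exposes a public distillation entry point
# depends on the SDK version, so only type construction is sketched here;
# all values are placeholders.
from google.genai import types

distill_config = types.CreateDistillationJobConfig(
    tuned_model_display_name='distilled-model',
    epoch_count=3,
    validation_dataset=types.DistillationValidationDataset(
        gcs_uri='gs://my-bucket/val.jsonl'
    ),
)
print(distill_config.model_dump(exclude_none=True))  # pydantic v2 serialization
# --- end editor's example ---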
+ """, + ) + tool_config: Optional[ToolConfig] = Field( + default=None, + description="""Configuration for the tools to use. This config is shared for all tools. + """, + ) + + +class CreateCachedContentConfigDict(TypedDict, total=False): + """Class for configuring optional cached content creation parameters.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + ttl: Optional[str] + """The TTL for this resource. The expiration time is computed: now + TTL.""" + + expire_time: Optional[datetime.datetime] + """Timestamp of when this resource is considered expired.""" + + display_name: Optional[str] + """The user-generated meaningful display name of the cached content. + """ + + contents: Optional[ContentListUnionDict] + """The content to cache. + """ + + system_instruction: Optional[ContentUnionDict] + """Developer set system instruction. + """ + + tools: Optional[list[ToolDict]] + """A list of `Tools` the model may use to generate the next response. + """ + + tool_config: Optional[ToolConfigDict] + """Configuration for the tools to use. This config is shared for all tools. + """ + + +CreateCachedContentConfigOrDict = Union[ + CreateCachedContentConfig, CreateCachedContentConfigDict +] + + +class _CreateCachedContentParameters(_common.BaseModel): + """Parameters for caches.create method.""" + + model: Optional[str] = Field( + default=None, + description="""ID of the model to use. Example: gemini-1.5-flash""", + ) + config: Optional[CreateCachedContentConfig] = Field( + default=None, + description="""Configuration that contains optional parameters. + """, + ) + + +class _CreateCachedContentParametersDict(TypedDict, total=False): + """Parameters for caches.create method.""" + + model: Optional[str] + """ID of the model to use. Example: gemini-1.5-flash""" + + config: Optional[CreateCachedContentConfigDict] + """Configuration that contains optional parameters. 
+ """ + + +_CreateCachedContentParametersOrDict = Union[ + _CreateCachedContentParameters, _CreateCachedContentParametersDict +] + + +class CachedContentUsageMetadata(_common.BaseModel): + """Metadata on the usage of the cached content.""" + + audio_duration_seconds: Optional[int] = Field( + default=None, description="""Duration of audio in seconds.""" + ) + image_count: Optional[int] = Field( + default=None, description="""Number of images.""" + ) + text_count: Optional[int] = Field( + default=None, description="""Number of text characters.""" + ) + total_token_count: Optional[int] = Field( + default=None, + description="""Total number of tokens that the cached content consumes.""", + ) + video_duration_seconds: Optional[int] = Field( + default=None, description="""Duration of video in seconds.""" + ) + + +class CachedContentUsageMetadataDict(TypedDict, total=False): + """Metadata on the usage of the cached content.""" + + audio_duration_seconds: Optional[int] + """Duration of audio in seconds.""" + + image_count: Optional[int] + """Number of images.""" + + text_count: Optional[int] + """Number of text characters.""" + + total_token_count: Optional[int] + """Total number of tokens that the cached content consumes.""" + + video_duration_seconds: Optional[int] + """Duration of video in seconds.""" + + +CachedContentUsageMetadataOrDict = Union[ + CachedContentUsageMetadata, CachedContentUsageMetadataDict +] + + +class CachedContent(_common.BaseModel): + """A resource used in LLM queries for users to explicitly specify what to cache.""" + + name: Optional[str] = Field( + default=None, + description="""The server-generated resource name of the cached content.""", + ) + display_name: Optional[str] = Field( + default=None, + description="""The user-generated meaningful display name of the cached content.""", + ) + model: Optional[str] = Field( + default=None, + description="""The name of the publisher model to use for cached content.""", + ) + create_time: Optional[datetime.datetime] = Field( + default=None, description="""Creatation time of the cache entry.""" + ) + update_time: Optional[datetime.datetime] = Field( + default=None, + description="""When the cache entry was last updated in UTC time.""", + ) + expire_time: Optional[datetime.datetime] = Field( + default=None, description="""Expiration time of the cached content.""" + ) + usage_metadata: Optional[CachedContentUsageMetadata] = Field( + default=None, + description="""Metadata on the usage of the cached content.""", + ) + + +class CachedContentDict(TypedDict, total=False): + """A resource used in LLM queries for users to explicitly specify what to cache.""" + + name: Optional[str] + """The server-generated resource name of the cached content.""" + + display_name: Optional[str] + """The user-generated meaningful display name of the cached content.""" + + model: Optional[str] + """The name of the publisher model to use for cached content.""" + + create_time: Optional[datetime.datetime] + """Creatation time of the cache entry.""" + + update_time: Optional[datetime.datetime] + """When the cache entry was last updated in UTC time.""" + + expire_time: Optional[datetime.datetime] + """Expiration time of the cached content.""" + + usage_metadata: Optional[CachedContentUsageMetadataDict] + """Metadata on the usage of the cached content.""" + + +CachedContentOrDict = Union[CachedContent, CachedContentDict] + + +class GetCachedContentConfig(_common.BaseModel): + """Optional parameters for caches.get method.""" + + http_options: Optional[dict[str, 
Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class GetCachedContentConfigDict(TypedDict, total=False): + """Optional parameters for caches.get method.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + +GetCachedContentConfigOrDict = Union[ + GetCachedContentConfig, GetCachedContentConfigDict +] + + +class _GetCachedContentParameters(_common.BaseModel): + """Parameters for caches.get method.""" + + name: Optional[str] = Field( + default=None, + description="""The server-generated resource name of the cached content. + """, + ) + config: Optional[GetCachedContentConfig] = Field( + default=None, + description="""Optional parameters for the request. + """, + ) + + +class _GetCachedContentParametersDict(TypedDict, total=False): + """Parameters for caches.get method.""" + + name: Optional[str] + """The server-generated resource name of the cached content. + """ + + config: Optional[GetCachedContentConfigDict] + """Optional parameters for the request. + """ + + +_GetCachedContentParametersOrDict = Union[ + _GetCachedContentParameters, _GetCachedContentParametersDict +] + + +class DeleteCachedContentConfig(_common.BaseModel): + """Optional parameters for caches.delete method.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class DeleteCachedContentConfigDict(TypedDict, total=False): + """Optional parameters for caches.delete method.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + +DeleteCachedContentConfigOrDict = Union[ + DeleteCachedContentConfig, DeleteCachedContentConfigDict +] + + +class _DeleteCachedContentParameters(_common.BaseModel): + """Parameters for caches.delete method.""" + + name: Optional[str] = Field( + default=None, + description="""The server-generated resource name of the cached content. + """, + ) + config: Optional[DeleteCachedContentConfig] = Field( + default=None, + description="""Optional parameters for the request. + """, + ) + + +class _DeleteCachedContentParametersDict(TypedDict, total=False): + """Parameters for caches.delete method.""" + + name: Optional[str] + """The server-generated resource name of the cached content. + """ + + config: Optional[DeleteCachedContentConfigDict] + """Optional parameters for the request. + """ + + +_DeleteCachedContentParametersOrDict = Union[ + _DeleteCachedContentParameters, _DeleteCachedContentParametersDict +] + + +class DeleteCachedContentResponse(_common.BaseModel): + """Empty response for caches.delete method.""" + + pass + + +class DeleteCachedContentResponseDict(TypedDict, total=False): + """Empty response for caches.delete method.""" + + pass + + +DeleteCachedContentResponseOrDict = Union[ + DeleteCachedContentResponse, DeleteCachedContentResponseDict +] + + +class UpdateCachedContentConfig(_common.BaseModel): + """Optional parameters for caches.update method.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + ttl: Optional[str] = Field( + default=None, + description="""The TTL for this resource. 
The expiration time is computed: now + TTL.""", + ) + expire_time: Optional[datetime.datetime] = Field( + default=None, + description="""Timestamp of when this resource is considered expired.""", + ) + + +class UpdateCachedContentConfigDict(TypedDict, total=False): + """Optional parameters for caches.update method.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + ttl: Optional[str] + """The TTL for this resource. The expiration time is computed: now + TTL.""" + + expire_time: Optional[datetime.datetime] + """Timestamp of when this resource is considered expired.""" + + +UpdateCachedContentConfigOrDict = Union[ + UpdateCachedContentConfig, UpdateCachedContentConfigDict +] + + +class _UpdateCachedContentParameters(_common.BaseModel): + + name: Optional[str] = Field( + default=None, + description="""The server-generated resource name of the cached content. + """, + ) + config: Optional[UpdateCachedContentConfig] = Field( + default=None, + description="""Configuration that contains optional parameters. + """, + ) + + +class _UpdateCachedContentParametersDict(TypedDict, total=False): + + name: Optional[str] + """The server-generated resource name of the cached content. + """ + + config: Optional[UpdateCachedContentConfigDict] + """Configuration that contains optional parameters. + """ + + +_UpdateCachedContentParametersOrDict = Union[ + _UpdateCachedContentParameters, _UpdateCachedContentParametersDict +] + + +class ListCachedContentsConfig(_common.BaseModel): + """Config for caches.list method.""" + + page_size: Optional[int] = Field(default=None, description="""""") + page_token: Optional[str] = Field(default=None, description="""""") + + +class ListCachedContentsConfigDict(TypedDict, total=False): + """Config for caches.list method.""" + + page_size: Optional[int] + """""" + + page_token: Optional[str] + """""" + + +ListCachedContentsConfigOrDict = Union[ + ListCachedContentsConfig, ListCachedContentsConfigDict +] + + +class _ListCachedContentsParameters(_common.BaseModel): + """Parameters for caches.list method.""" + + config: Optional[ListCachedContentsConfig] = Field( + default=None, + description="""Configuration that contains optional parameters. + """, + ) + + +class _ListCachedContentsParametersDict(TypedDict, total=False): + """Parameters for caches.list method.""" + + config: Optional[ListCachedContentsConfigDict] + """Configuration that contains optional parameters. + """ + + +_ListCachedContentsParametersOrDict = Union[ + _ListCachedContentsParameters, _ListCachedContentsParametersDict +] + + +class ListCachedContentsResponse(_common.BaseModel): + + next_page_token: Optional[str] = Field(default=None, description="""""") + cached_contents: Optional[list[CachedContent]] = Field( + default=None, + description="""List of cached contents. + """, + ) + + +class ListCachedContentsResponseDict(TypedDict, total=False): + + next_page_token: Optional[str] + """""" + + cached_contents: Optional[list[CachedContentDict]] + """List of cached contents. 
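# --- Editor's example: a sketch of the update/list surface these configs
# describe (the resource name is a placeholder; method names are assumed
# from this SDK's caches module).
from google import genai
from google.genai import types

client = genai.Client()
client.caches.update(
    name='cachedContents/example-id',
    config=types.UpdateCachedContentConfig(ttl='7200s'),  # extend the expiry
)
# ListCachedContentsConfig carries page_size/page_token; the client's pager
# follows ListCachedContentsResponse.next_page_token for you.
for item in client.caches.list(config={'page_size': 5}):
    print(item.name, item.expire_time)
# --- end editor's example ---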
+ """ + + +ListCachedContentsResponseOrDict = Union[ + ListCachedContentsResponse, ListCachedContentsResponseDict +] + + +class ListFilesConfig(_common.BaseModel): + """Used to override the default configuration.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + page_size: Optional[int] = Field(default=None, description="""""") + page_token: Optional[str] = Field(default=None, description="""""") + + +class ListFilesConfigDict(TypedDict, total=False): + """Used to override the default configuration.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + page_size: Optional[int] + """""" + + page_token: Optional[str] + """""" + + +ListFilesConfigOrDict = Union[ListFilesConfig, ListFilesConfigDict] + + +class _ListFilesParameters(_common.BaseModel): + """Generates the parameters for the list method.""" + + config: Optional[ListFilesConfig] = Field( + default=None, + description="""Used to override the default configuration.""", + ) + + +class _ListFilesParametersDict(TypedDict, total=False): + """Generates the parameters for the list method.""" + + config: Optional[ListFilesConfigDict] + """Used to override the default configuration.""" + + +_ListFilesParametersOrDict = Union[ + _ListFilesParameters, _ListFilesParametersDict +] + + +class ListFilesResponse(_common.BaseModel): + """Response for the list files method.""" + + next_page_token: Optional[str] = Field( + default=None, description="""A token to retrieve next page of results.""" + ) + files: Optional[list[File]] = Field( + default=None, description="""The list of files.""" + ) + + +class ListFilesResponseDict(TypedDict, total=False): + """Response for the list files method.""" + + next_page_token: Optional[str] + """A token to retrieve next page of results.""" + + files: Optional[list[FileDict]] + """The list of files.""" + + +ListFilesResponseOrDict = Union[ListFilesResponse, ListFilesResponseDict] + + +class CreateFileConfig(_common.BaseModel): + """Used to override the default configuration.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class CreateFileConfigDict(TypedDict, total=False): + """Used to override the default configuration.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + +CreateFileConfigOrDict = Union[CreateFileConfig, CreateFileConfigDict] + + +class _CreateFileParameters(_common.BaseModel): + """Generates the parameters for the private _create method.""" + + file: Optional[File] = Field( + default=None, + description="""The file to be uploaded. + mime_type: (Required) The MIME type of the file. Must be provided. + name: (Optional) The name of the file in the destination (e.g. + 'files/sample-image'). + display_name: (Optional) The display name of the file. + """, + ) + config: Optional[CreateFileConfig] = Field( + default=None, + description="""Used to override the default configuration.""", + ) + + +class _CreateFileParametersDict(TypedDict, total=False): + """Generates the parameters for the private _create method.""" + + file: Optional[FileDict] + """The file to be uploaded. + mime_type: (Required) The MIME type of the file. Must be provided. + name: (Optional) The name of the file in the destination (e.g. + 'files/sample-image'). + display_name: (Optional) The display name of the file. 
+ """ + + config: Optional[CreateFileConfigDict] + """Used to override the default configuration.""" + + +_CreateFileParametersOrDict = Union[ + _CreateFileParameters, _CreateFileParametersDict +] + + +class CreateFileResponse(_common.BaseModel): + """Response for the create file method.""" + + pass + + +class CreateFileResponseDict(TypedDict, total=False): + """Response for the create file method.""" + + pass + + +CreateFileResponseOrDict = Union[CreateFileResponse, CreateFileResponseDict] + + +class GetFileConfig(_common.BaseModel): + """Used to override the default configuration.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class GetFileConfigDict(TypedDict, total=False): + """Used to override the default configuration.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + +GetFileConfigOrDict = Union[GetFileConfig, GetFileConfigDict] + + +class _GetFileParameters(_common.BaseModel): + """Generates the parameters for the get method.""" + + name: Optional[str] = Field( + default=None, + description="""The name identifier for the file to retrieve.""", + ) + config: Optional[GetFileConfig] = Field( + default=None, + description="""Used to override the default configuration.""", + ) + + +class _GetFileParametersDict(TypedDict, total=False): + """Generates the parameters for the get method.""" + + name: Optional[str] + """The name identifier for the file to retrieve.""" + + config: Optional[GetFileConfigDict] + """Used to override the default configuration.""" + + +_GetFileParametersOrDict = Union[_GetFileParameters, _GetFileParametersDict] + + +class DeleteFileConfig(_common.BaseModel): + """Used to override the default configuration.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class DeleteFileConfigDict(TypedDict, total=False): + """Used to override the default configuration.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + +DeleteFileConfigOrDict = Union[DeleteFileConfig, DeleteFileConfigDict] + + +class _DeleteFileParameters(_common.BaseModel): + """Generates the parameters for the get method.""" + + name: Optional[str] = Field( + default=None, + description="""The name identifier for the file to be deleted.""", + ) + config: Optional[DeleteFileConfig] = Field( + default=None, + description="""Used to override the default configuration.""", + ) + + +class _DeleteFileParametersDict(TypedDict, total=False): + """Generates the parameters for the get method.""" + + name: Optional[str] + """The name identifier for the file to be deleted.""" + + config: Optional[DeleteFileConfigDict] + """Used to override the default configuration.""" + + +_DeleteFileParametersOrDict = Union[ + _DeleteFileParameters, _DeleteFileParametersDict +] + + +class DeleteFileResponse(_common.BaseModel): + """Response for the delete file method.""" + + pass + + +class DeleteFileResponseDict(TypedDict, total=False): + """Response for the delete file method.""" + + pass + + +DeleteFileResponseOrDict = Union[DeleteFileResponse, DeleteFileResponseDict] + + +class BatchJobSource(_common.BaseModel): + """Config class for `src` parameter.""" + + format: Optional[str] = Field( + default=None, + description="""Storage format of the input files. Must be one of: + 'jsonl', 'bigquery'. 
+ """, + ) + gcs_uri: Optional[list[str]] = Field( + default=None, + description="""The Google Cloud Storage URIs to input files. + """, + ) + bigquery_uri: Optional[str] = Field( + default=None, + description="""The BigQuery URI to input table. + """, + ) + + +class BatchJobSourceDict(TypedDict, total=False): + """Config class for `src` parameter.""" + + format: Optional[str] + """Storage format of the input files. Must be one of: + 'jsonl', 'bigquery'. + """ + + gcs_uri: Optional[list[str]] + """The Google Cloud Storage URIs to input files. + """ + + bigquery_uri: Optional[str] + """The BigQuery URI to input table. + """ + + +BatchJobSourceOrDict = Union[BatchJobSource, BatchJobSourceDict] + + +class BatchJobDestination(_common.BaseModel): + """Config class for `des` parameter.""" + + format: Optional[str] = Field( + default=None, + description="""Storage format of the output files. Must be one of: + 'jsonl', 'bigquery'. + """, + ) + gcs_uri: Optional[str] = Field( + default=None, + description="""The Google Cloud Storage URI to the output file. + """, + ) + bigquery_uri: Optional[str] = Field( + default=None, + description="""The BigQuery URI to the output table. + """, + ) + + +class BatchJobDestinationDict(TypedDict, total=False): + """Config class for `des` parameter.""" + + format: Optional[str] + """Storage format of the output files. Must be one of: + 'jsonl', 'bigquery'. + """ + + gcs_uri: Optional[str] + """The Google Cloud Storage URI to the output file. + """ + + bigquery_uri: Optional[str] + """The BigQuery URI to the output table. + """ + + +BatchJobDestinationOrDict = Union[BatchJobDestination, BatchJobDestinationDict] + + +class CreateBatchJobConfig(_common.BaseModel): + """Config class for optional parameters.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + display_name: Optional[str] = Field( + default=None, + description="""The user-defined name of this BatchJob. + """, + ) + dest: Optional[str] = Field( + default=None, + description="""GCS or BigQuery URI prefix for the output predictions. Example: + "gs://path/to/output/data" or "bq://projectId.bqDatasetId.bqTableId". + """, + ) + + +class CreateBatchJobConfigDict(TypedDict, total=False): + """Config class for optional parameters.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + display_name: Optional[str] + """The user-defined name of this BatchJob. + """ + + dest: Optional[str] + """GCS or BigQuery URI prefix for the output predictions. Example: + "gs://path/to/output/data" or "bq://projectId.bqDatasetId.bqTableId". + """ + + +CreateBatchJobConfigOrDict = Union[ + CreateBatchJobConfig, CreateBatchJobConfigDict +] + + +class _CreateBatchJobParameters(_common.BaseModel): + """Config class for batches.create parameters.""" + + model: Optional[str] = Field( + default=None, + description="""The name of the model to produces the predictions via the BatchJob. + """, + ) + src: Optional[str] = Field( + default=None, + description="""GCS URI(-s) or BigQuery URI to your input data to run batch job. + Example: "gs://path/to/input/data" or "bq://projectId.bqDatasetId.bqTableId". + """, + ) + config: Optional[CreateBatchJobConfig] = Field( + default=None, + description="""Optional parameters for creating a BatchJob. 
+ """, + ) + + +class _CreateBatchJobParametersDict(TypedDict, total=False): + """Config class for batches.create parameters.""" + + model: Optional[str] + """The name of the model to produces the predictions via the BatchJob. + """ + + src: Optional[str] + """GCS URI(-s) or BigQuery URI to your input data to run batch job. + Example: "gs://path/to/input/data" or "bq://projectId.bqDatasetId.bqTableId". + """ + + config: Optional[CreateBatchJobConfigDict] + """Optional parameters for creating a BatchJob. + """ + + +_CreateBatchJobParametersOrDict = Union[ + _CreateBatchJobParameters, _CreateBatchJobParametersDict +] + + +class JobError(_common.BaseModel): + """Config class for the job error.""" + + details: Optional[list[str]] = Field( + default=None, + description="""A list of messages that carry the error details. There is a common set of message types for APIs to use.""", + ) + code: Optional[int] = Field(default=None, description="""The status code.""") + message: Optional[str] = Field( + default=None, + description="""A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the `details` field.""", + ) + + +class JobErrorDict(TypedDict, total=False): + """Config class for the job error.""" + + details: Optional[list[str]] + """A list of messages that carry the error details. There is a common set of message types for APIs to use.""" + + code: Optional[int] + """The status code.""" + + message: Optional[str] + """A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the `details` field.""" + + +JobErrorOrDict = Union[JobError, JobErrorDict] + + +class BatchJob(_common.BaseModel): + """Config class for batches.create return value.""" + + name: Optional[str] = Field( + default=None, description="""Output only. Resource name of the Job.""" + ) + display_name: Optional[str] = Field( + default=None, description="""The user-defined name of this Job.""" + ) + state: Optional[JobState] = Field( + default=None, + description="""Output only. The detailed state of the job.""", + ) + error: Optional[JobError] = Field( + default=None, + description="""Output only. Only populated when the job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED.""", + ) + create_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. Time when the Job was created.""", + ) + start_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. Time when the Job for the first time entered the `JOB_STATE_RUNNING` state.""", + ) + end_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. Time when the Job entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.""", + ) + update_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. Time when the Job was most recently updated.""", + ) + model: Optional[str] = Field( + default=None, + description="""The name of the model that produces the predictions via the BatchJob. + """, + ) + src: Optional[BatchJobSource] = Field( + default=None, + description="""Configuration for the input data. + """, + ) + dest: Optional[BatchJobDestination] = Field( + default=None, + description="""Configuration for the output data. + """, + ) + + +class BatchJobDict(TypedDict, total=False): + """Config class for batches.create return value.""" + + name: Optional[str] + """Output only. 
Resource name of the Job.""" + + display_name: Optional[str] + """The user-defined name of this Job.""" + + state: Optional[JobState] + """Output only. The detailed state of the job.""" + + error: Optional[JobErrorDict] + """Output only. Only populated when the job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED.""" + + create_time: Optional[datetime.datetime] + """Output only. Time when the Job was created.""" + + start_time: Optional[datetime.datetime] + """Output only. Time when the Job for the first time entered the `JOB_STATE_RUNNING` state.""" + + end_time: Optional[datetime.datetime] + """Output only. Time when the Job entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.""" + + update_time: Optional[datetime.datetime] + """Output only. Time when the Job was most recently updated.""" + + model: Optional[str] + """The name of the model that produces the predictions via the BatchJob. + """ + + src: Optional[BatchJobSourceDict] + """Configuration for the input data. + """ + + dest: Optional[BatchJobDestinationDict] + """Configuration for the output data. + """ + + +BatchJobOrDict = Union[BatchJob, BatchJobDict] + + +class GetBatchJobConfig(_common.BaseModel): + """Optional parameters.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class GetBatchJobConfigDict(TypedDict, total=False): + """Optional parameters.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + +GetBatchJobConfigOrDict = Union[GetBatchJobConfig, GetBatchJobConfigDict] + + +class _GetBatchJobParameters(_common.BaseModel): + """Config class for batches.get parameters.""" + + name: Optional[str] = Field( + default=None, + description="""A fully-qualified BatchJob resource name or ID. + Example: "projects/.../locations/.../batchPredictionJobs/456" + or "456" when project and location are initialized in the client. + """, + ) + config: Optional[GetBatchJobConfig] = Field( + default=None, description="""Optional parameters for the request.""" + ) + + +class _GetBatchJobParametersDict(TypedDict, total=False): + """Config class for batches.get parameters.""" + + name: Optional[str] + """A fully-qualified BatchJob resource name or ID. + Example: "projects/.../locations/.../batchPredictionJobs/456" + or "456" when project and location are initialized in the client. + """ + + config: Optional[GetBatchJobConfigDict] + """Optional parameters for the request.""" + + +_GetBatchJobParametersOrDict = Union[ + _GetBatchJobParameters, _GetBatchJobParametersDict +] + + +class CancelBatchJobConfig(_common.BaseModel): + """Optional parameters.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class CancelBatchJobConfigDict(TypedDict, total=False): + """Optional parameters.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + +CancelBatchJobConfigOrDict = Union[ + CancelBatchJobConfig, CancelBatchJobConfigDict +] + + +class _CancelBatchJobParameters(_common.BaseModel): + """Config class for batches.cancel parameters.""" + + name: Optional[str] = Field( + default=None, + description="""A fully-qualified BatchJob resource name or ID. + Example: "projects/.../locations/.../batchPredictionJobs/456" + or "456" when project and location are initialized in the client. 
+ """, + ) + config: Optional[CancelBatchJobConfig] = Field( + default=None, description="""Optional parameters for the request.""" + ) + + +class _CancelBatchJobParametersDict(TypedDict, total=False): + """Config class for batches.cancel parameters.""" + + name: Optional[str] + """A fully-qualified BatchJob resource name or ID. + Example: "projects/.../locations/.../batchPredictionJobs/456" + or "456" when project and location are initialized in the client. + """ + + config: Optional[CancelBatchJobConfigDict] + """Optional parameters for the request.""" + + +_CancelBatchJobParametersOrDict = Union[ + _CancelBatchJobParameters, _CancelBatchJobParametersDict +] + + +class ListBatchJobConfig(_common.BaseModel): + """Config class for optional parameters.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + page_size: Optional[int] = Field(default=None, description="""""") + page_token: Optional[str] = Field(default=None, description="""""") + filter: Optional[str] = Field(default=None, description="""""") + + +class ListBatchJobConfigDict(TypedDict, total=False): + """Config class for optional parameters.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + page_size: Optional[int] + """""" + + page_token: Optional[str] + """""" + + filter: Optional[str] + """""" + + +ListBatchJobConfigOrDict = Union[ListBatchJobConfig, ListBatchJobConfigDict] + + +class _ListBatchJobParameters(_common.BaseModel): + """Config class for batches.list parameters.""" + + config: Optional[ListBatchJobConfig] = Field(default=None, description="""""") + + +class _ListBatchJobParametersDict(TypedDict, total=False): + """Config class for batches.list parameters.""" + + config: Optional[ListBatchJobConfigDict] + """""" + + +_ListBatchJobParametersOrDict = Union[ + _ListBatchJobParameters, _ListBatchJobParametersDict +] + + +class ListBatchJobResponse(_common.BaseModel): + """Config class for batches.list return value.""" + + next_page_token: Optional[str] = Field(default=None, description="""""") + batch_jobs: Optional[list[BatchJob]] = Field(default=None, description="""""") + + +class ListBatchJobResponseDict(TypedDict, total=False): + """Config class for batches.list return value.""" + + next_page_token: Optional[str] + """""" + + batch_jobs: Optional[list[BatchJobDict]] + """""" + + +ListBatchJobResponseOrDict = Union[ + ListBatchJobResponse, ListBatchJobResponseDict +] + + +class _DeleteBatchJobParameters(_common.BaseModel): + """Config class for batches.delete parameters.""" + + name: Optional[str] = Field( + default=None, + description="""A fully-qualified BatchJob resource name or ID. + Example: "projects/.../locations/.../batchPredictionJobs/456" + or "456" when project and location are initialized in the client. + """, + ) + + +class _DeleteBatchJobParametersDict(TypedDict, total=False): + """Config class for batches.delete parameters.""" + + name: Optional[str] + """A fully-qualified BatchJob resource name or ID. + Example: "projects/.../locations/.../batchPredictionJobs/456" + or "456" when project and location are initialized in the client. 
+ """ + + +_DeleteBatchJobParametersOrDict = Union[ + _DeleteBatchJobParameters, _DeleteBatchJobParametersDict +] + + +class DeleteResourceJob(_common.BaseModel): + """Config class for the return value of delete operation.""" + + name: Optional[str] = Field(default=None, description="""""") + done: Optional[bool] = Field(default=None, description="""""") + error: Optional[JobError] = Field(default=None, description="""""") + + +class DeleteResourceJobDict(TypedDict, total=False): + """Config class for the return value of delete operation.""" + + name: Optional[str] + """""" + + done: Optional[bool] + """""" + + error: Optional[JobErrorDict] + """""" + + +DeleteResourceJobOrDict = Union[DeleteResourceJob, DeleteResourceJobDict] + + +class TestTableItem(_common.BaseModel): + + name: Optional[str] = Field( + default=None, + description="""The name of the test. This is used to derive the replay id.""", + ) + parameters: Optional[dict[str, Any]] = Field( + default=None, + description="""The parameters to the test. Use pydantic models.""", + ) + exception_if_mldev: Optional[str] = Field( + default=None, + description="""Expects an exception for MLDev matching the string.""", + ) + exception_if_vertex: Optional[str] = Field( + default=None, + description="""Expects an exception for Vertex matching the string.""", + ) + override_replay_id: Optional[str] = Field( + default=None, + description="""Use if you don't want to use the default replay id which is derived from the test name.""", + ) + has_union: Optional[bool] = Field( + default=None, + description="""True if the parameters contain an unsupported union type. This test will be skipped for languages that do not support the union type.""", + ) + skip_in_api_mode: Optional[str] = Field( + default=None, + description="""When set to a reason string, this test will be skipped in the API mode. Use this flag for tests that can not be reproduced with the real API. E.g. a test that deletes a resource.""", + ) + + +class TestTableItemDict(TypedDict, total=False): + + name: Optional[str] + """The name of the test. This is used to derive the replay id.""" + + parameters: Optional[dict[str, Any]] + """The parameters to the test. Use pydantic models.""" + + exception_if_mldev: Optional[str] + """Expects an exception for MLDev matching the string.""" + + exception_if_vertex: Optional[str] + """Expects an exception for Vertex matching the string.""" + + override_replay_id: Optional[str] + """Use if you don't want to use the default replay id which is derived from the test name.""" + + has_union: Optional[bool] + """True if the parameters contain an unsupported union type. This test will be skipped for languages that do not support the union type.""" + + skip_in_api_mode: Optional[str] + """When set to a reason string, this test will be skipped in the API mode. Use this flag for tests that can not be reproduced with the real API. E.g. 
a test that deletes a resource.""" + + +TestTableItemOrDict = Union[TestTableItem, TestTableItemDict] + + +class TestTableFile(_common.BaseModel): + + comment: Optional[str] = Field(default=None, description="""""") + test_method: Optional[str] = Field(default=None, description="""""") + parameter_names: Optional[list[str]] = Field(default=None, description="""""") + test_table: Optional[list[TestTableItem]] = Field( + default=None, description="""""" + ) + + +class TestTableFileDict(TypedDict, total=False): + + comment: Optional[str] + """""" + + test_method: Optional[str] + """""" + + parameter_names: Optional[list[str]] + """""" + + test_table: Optional[list[TestTableItemDict]] + """""" + + +TestTableFileOrDict = Union[TestTableFile, TestTableFileDict] + + +class ReplayRequest(_common.BaseModel): + """Represents a single request in a replay.""" + + method: Optional[str] = Field(default=None, description="""""") + url: Optional[str] = Field(default=None, description="""""") + headers: Optional[dict[str, str]] = Field(default=None, description="""""") + body_segments: Optional[list[dict[str, Any]]] = Field( + default=None, description="""""" + ) + + +class ReplayRequestDict(TypedDict, total=False): + """Represents a single request in a replay.""" + + method: Optional[str] + """""" + + url: Optional[str] + """""" + + headers: Optional[dict[str, str]] + """""" + + body_segments: Optional[list[dict[str, Any]]] + """""" + + +ReplayRequestOrDict = Union[ReplayRequest, ReplayRequestDict] + + +class ReplayResponse(_common.BaseModel): + """Represents a single response in a replay.""" + + status_code: Optional[int] = Field(default=None, description="""""") + headers: Optional[dict[str, str]] = Field(default=None, description="""""") + body_segments: Optional[list[dict[str, Any]]] = Field( + default=None, description="""""" + ) + sdk_response_segments: Optional[list[dict[str, Any]]] = Field( + default=None, description="""""" + ) + + +class ReplayResponseDict(TypedDict, total=False): + """Represents a single response in a replay.""" + + status_code: Optional[int] + """""" + + headers: Optional[dict[str, str]] + """""" + + body_segments: Optional[list[dict[str, Any]]] + """""" + + sdk_response_segments: Optional[list[dict[str, Any]]] + """""" + + +ReplayResponseOrDict = Union[ReplayResponse, ReplayResponseDict] + + +class ReplayInteraction(_common.BaseModel): + """Represents a single interaction, request and response in a replay.""" + + request: Optional[ReplayRequest] = Field(default=None, description="""""") + response: Optional[ReplayResponse] = Field(default=None, description="""""") + + +class ReplayInteractionDict(TypedDict, total=False): + """Represents a single interaction, request and response in a replay.""" + + request: Optional[ReplayRequestDict] + """""" + + response: Optional[ReplayResponseDict] + """""" + + +ReplayInteractionOrDict = Union[ReplayInteraction, ReplayInteractionDict] + + +class ReplayFile(_common.BaseModel): + """Represents a recorded session.""" + + replay_id: Optional[str] = Field(default=None, description="""""") + interactions: Optional[list[ReplayInteraction]] = Field( + default=None, description="""""" + ) + + +class ReplayFileDict(TypedDict, total=False): + """Represents a recorded session.""" + + replay_id: Optional[str] + """""" + + interactions: Optional[list[ReplayInteractionDict]] + """""" + + +ReplayFileOrDict = Union[ReplayFile, ReplayFileDict] + + +class UploadFileConfig(_common.BaseModel): + """Used to override the default configuration.""" + + 
http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + name: Optional[str] = Field( + default=None, + description="""The name of the file in the destination (e.g., 'files/sample-image'). If not provided, one will be generated.""", + ) + mime_type: Optional[str] = Field( + default=None, + description="""The MIME type of the file. If not provided, it will be inferred from the file extension.""", + ) + display_name: Optional[str] = Field( + default=None, description="""Optional display name of the file.""" + ) + + +class UploadFileConfigDict(TypedDict, total=False): + """Used to override the default configuration.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + name: Optional[str] + """The name of the file in the destination (e.g., 'files/sample-image'). If not provided, one will be generated.""" + + mime_type: Optional[str] + """The MIME type of the file. If not provided, it will be inferred from the file extension.""" + + display_name: Optional[str] + """Optional display name of the file.""" + + +UploadFileConfigOrDict = Union[UploadFileConfig, UploadFileConfigDict] + + +class DownloadFileConfig(_common.BaseModel): + """Used to override the default configuration.""" + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class DownloadFileConfigDict(TypedDict, total=False): + """Used to override the default configuration.""" + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + +DownloadFileConfigOrDict = Union[DownloadFileConfig, DownloadFileConfigDict] + + +class UpscaleImageConfig(_common.BaseModel): + """Configuration for upscaling an image. + + For more information on this configuration, refer to + the `Imagen API reference documentation + <https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api>`_. + """ + + http_options: Optional[dict[str, Any]] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + include_rai_reason: Optional[bool] = Field( + default=None, + description="""Whether to include a reason for filtered-out images in the + response.""", + ) + output_mime_type: Optional[str] = Field( + default=None, + description="""The image format that the output should be saved as.""", + ) + output_compression_quality: Optional[int] = Field( + default=None, + description="""The level of compression if the ``output_mime_type`` is + ``image/jpeg``.""", + ) + + +class UpscaleImageConfigDict(TypedDict, total=False): + """Configuration for upscaling an image. + + For more information on this configuration, refer to + the `Imagen API reference documentation + <https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api>`_.
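# --- Editor's example: UploadFileConfig in use; the upload method and its
# keyword (file=) vary across versions of this SDK, so treat them as
# assumptions, and the local path is a placeholder.
from google import genai
from google.genai import types

client = genai.Client()
uploaded = client.files.upload(
    file='sample-image.png',
    config=types.UploadFileConfig(
        display_name='Sample image',
        mime_type='image/png',  # otherwise inferred from the extension
    ),
)
print(uploaded.name)
# --- end editor's example ---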
+ """ + + http_options: Optional[dict[str, Any]] + """Used to override HTTP request options.""" + + include_rai_reason: Optional[bool] + """Whether to include a reason for filtered-out images in the + response.""" + + output_mime_type: Optional[str] + """The image format that the output should be saved as.""" + + output_compression_quality: Optional[int] + """The level of compression if the ``output_mime_type`` is + ``image/jpeg``.""" + + +UpscaleImageConfigOrDict = Union[UpscaleImageConfig, UpscaleImageConfigDict] + + +class UpscaleImageParameters(_common.BaseModel): + """User-facing config UpscaleImageParameters.""" + + model: Optional[str] = Field( + default=None, description="""The model to use.""" + ) + image: Optional[Image] = Field( + default=None, description="""The input image to upscale.""" + ) + upscale_factor: Optional[str] = Field( + default=None, + description="""The factor to upscale the image (x2 or x4).""", + ) + config: Optional[UpscaleImageConfig] = Field( + default=None, description="""Configuration for upscaling.""" + ) + + +class UpscaleImageParametersDict(TypedDict, total=False): + """User-facing config UpscaleImageParameters.""" + + model: Optional[str] + """The model to use.""" + + image: Optional[ImageDict] + """The input image to upscale.""" + + upscale_factor: Optional[str] + """The factor to upscale the image (x2 or x4).""" + + config: Optional[UpscaleImageConfigDict] + """Configuration for upscaling.""" + + +UpscaleImageParametersOrDict = Union[ + UpscaleImageParameters, UpscaleImageParametersDict +] + + +class RawReferenceImage(_common.BaseModel): + """Class that represents a Raw reference image. + + A raw reference image represents the base image to edit, provided by the user. + It can optionally be provided in addition to a mask reference image or + a style reference image. + """ + + reference_image: Optional[Image] = Field( + default=None, + description="""The reference image for the editing operation.""", + ) + reference_id: Optional[int] = Field( + default=None, description="""The id of the reference image.""" + ) + reference_type: Optional[str] = Field( + default=None, description="""The type of the reference image.""" + ) + + def __init__( + self, + reference_image: Optional[Image] = None, + reference_id: Optional[int] = None, + ): + super().__init__( + reference_image=reference_image, + reference_id=reference_id, + reference_type='REFERENCE_TYPE_RAW', + ) + + +class RawReferenceImageDict(TypedDict, total=False): + """Class that represents a Raw reference image. + + A raw reference image represents the base image to edit, provided by the user. + It can optionally be provided in addition to a mask reference image or + a style reference image. + """ + + reference_image: Optional[ImageDict] + """The reference image for the editing operation.""" + + reference_id: Optional[int] + """The id of the reference image.""" + + reference_type: Optional[str] + """The type of the reference image.""" + + +RawReferenceImageOrDict = Union[RawReferenceImage, RawReferenceImageDict] + + +class MaskReferenceImage(_common.BaseModel): + """Class that represents a Mask reference image. + + This encapsulates either a mask image provided by the user and configs for + the user provided mask, or only config parameters for the model to generate + a mask. + + A mask image is an image whose non-zero values indicate where to edit the base + image. If the user provides a mask image, the mask must be in the same + dimensions as the raw image. 
+ """ + + reference_image: Optional[Image] = Field( + default=None, + description="""The reference image for the editing operation.""", + ) + reference_id: Optional[int] = Field( + default=None, description="""The id of the reference image.""" + ) + reference_type: Optional[str] = Field( + default=None, description="""The type of the reference image.""" + ) + config: Optional[MaskReferenceConfig] = Field( + default=None, + description="""Configuration for the mask reference image.""", + ) + """Re-map config to mask_reference_config to send to API.""" + mask_image_config: Optional['MaskReferenceConfig'] = Field( + default=None, description="""""" + ) + + def __init__( + self, + reference_image: Optional[Image] = None, + reference_id: Optional[int] = None, + config: Optional['MaskReferenceConfig'] = None, + ): + super().__init__( + reference_image=reference_image, + reference_id=reference_id, + reference_type='REFERENCE_TYPE_MASK', + ) + self.mask_image_config = config + + +class MaskReferenceImageDict(TypedDict, total=False): + """Class that represents a Mask reference image. + + This encapsulates either a mask image provided by the user and configs for + the user provided mask, or only config parameters for the model to generate + a mask. + + A mask image is an image whose non-zero values indicate where to edit the base + image. If the user provides a mask image, the mask must be in the same + dimensions as the raw image. + """ + + reference_image: Optional[ImageDict] + """The reference image for the editing operation.""" + + reference_id: Optional[int] + """The id of the reference image.""" + + reference_type: Optional[str] + """The type of the reference image.""" + + config: Optional[MaskReferenceConfigDict] + """Configuration for the mask reference image.""" + + +MaskReferenceImageOrDict = Union[MaskReferenceImage, MaskReferenceImageDict] + + +class ControlReferenceImage(_common.BaseModel): + """Class that represents a Control reference image. + + The image of the control reference image is either a control image provided + by the user, or a regular image which the backend will use to generate a + control image of. In the case of the latter, the + enable_control_image_computation field in the config should be set to True. + + A control image is an image that represents a sketch image of areas for the + model to fill in based on the prompt. + """ + + reference_image: Optional[Image] = Field( + default=None, + description="""The reference image for the editing operation.""", + ) + reference_id: Optional[int] = Field( + default=None, description="""The id of the reference image.""" + ) + reference_type: Optional[str] = Field( + default=None, description="""The type of the reference image.""" + ) + config: Optional[ControlReferenceConfig] = Field( + default=None, + description="""Configuration for the control reference image.""", + ) + """Re-map config to control_reference_config to send to API.""" + control_image_config: Optional['ControlReferenceConfig'] = Field( + default=None, description="""""" + ) + + def __init__( + self, + reference_image: Optional[Image] = None, + reference_id: Optional[int] = None, + config: Optional['ControlReferenceConfig'] = None, + ): + super().__init__( + reference_image=reference_image, + reference_id=reference_id, + reference_type='REFERENCE_TYPE_CONTROL', + ) + self.control_image_config = config + + +class ControlReferenceImageDict(TypedDict, total=False): + """Class that represents a Control reference image. 
+
+
+class ControlReferenceImage(_common.BaseModel):
+  """Class that represents a Control reference image.
+
+  The image of the control reference image is either a control image provided
+  by the user, or a regular image that the backend will use to generate a
+  control image. In the case of the latter, the
+  enable_control_image_computation field in the config should be set to True.
+
+  A control image is an image that represents a sketch image of areas for the
+  model to fill in based on the prompt.
+  """
+
+  reference_image: Optional[Image] = Field(
+      default=None,
+      description="""The reference image for the editing operation.""",
+  )
+  reference_id: Optional[int] = Field(
+      default=None, description="""The id of the reference image."""
+  )
+  reference_type: Optional[str] = Field(
+      default=None, description="""The type of the reference image."""
+  )
+  config: Optional[ControlReferenceConfig] = Field(
+      default=None,
+      description="""Configuration for the control reference image.""",
+  )
+  """Re-map config to control_reference_config to send to API."""
+  control_image_config: Optional['ControlReferenceConfig'] = Field(
+      default=None, description=""""""
+  )
+
+  def __init__(
+      self,
+      reference_image: Optional[Image] = None,
+      reference_id: Optional[int] = None,
+      config: Optional['ControlReferenceConfig'] = None,
+  ):
+    super().__init__(
+        reference_image=reference_image,
+        reference_id=reference_id,
+        reference_type='REFERENCE_TYPE_CONTROL',
+    )
+    self.control_image_config = config
+
+
+class ControlReferenceImageDict(TypedDict, total=False):
+  """Class that represents a Control reference image.
+
+  The image of the control reference image is either a control image provided
+  by the user, or a regular image that the backend will use to generate a
+  control image. In the case of the latter, the
+  enable_control_image_computation field in the config should be set to True.
+
+  A control image is an image that represents a sketch image of areas for the
+  model to fill in based on the prompt.
+  """
+
+  reference_image: Optional[ImageDict]
+  """The reference image for the editing operation."""
+
+  reference_id: Optional[int]
+  """The id of the reference image."""
+
+  reference_type: Optional[str]
+  """The type of the reference image."""
+
+  config: Optional[ControlReferenceConfigDict]
+  """Configuration for the control reference image."""
+
+
+ControlReferenceImageOrDict = Union[
+    ControlReferenceImage, ControlReferenceImageDict
+]
+
+
+class StyleReferenceImage(_common.BaseModel):
+  """Class that represents a Style reference image.
+
+  This encapsulates a style reference image provided by the user, and
+  additionally optional config parameters for the style reference image.
+
+  A raw reference image can also be provided as a destination for the style to
+  be applied to.
+  """
+
+  reference_image: Optional[Image] = Field(
+      default=None,
+      description="""The reference image for the editing operation.""",
+  )
+  reference_id: Optional[int] = Field(
+      default=None, description="""The id of the reference image."""
+  )
+  reference_type: Optional[str] = Field(
+      default=None, description="""The type of the reference image."""
+  )
+  config: Optional[StyleReferenceConfig] = Field(
+      default=None,
+      description="""Configuration for the style reference image.""",
+  )
+  """Re-map config to style_reference_config to send to API."""
+  style_image_config: Optional['StyleReferenceConfig'] = Field(
+      default=None, description=""""""
+  )
+
+  def __init__(
+      self,
+      reference_image: Optional[Image] = None,
+      reference_id: Optional[int] = None,
+      config: Optional['StyleReferenceConfig'] = None,
+  ):
+    super().__init__(
+        reference_image=reference_image,
+        reference_id=reference_id,
+        reference_type='REFERENCE_TYPE_STYLE',
+    )
+    self.style_image_config = config
+
+
+class StyleReferenceImageDict(TypedDict, total=False):
+  """Class that represents a Style reference image.
+
+  This encapsulates a style reference image provided by the user, and
+  additionally optional config parameters for the style reference image.
+
+  A raw reference image can also be provided as a destination for the style to
+  be applied to.
+  """
+
+  reference_image: Optional[ImageDict]
+  """The reference image for the editing operation."""
+
+  reference_id: Optional[int]
+  """The id of the reference image."""
+
+  reference_type: Optional[str]
+  """The type of the reference image."""
+
+  config: Optional[StyleReferenceConfigDict]
+  """Configuration for the style reference image."""
+
+
+StyleReferenceImageOrDict = Union[StyleReferenceImage, StyleReferenceImageDict]
+
+
+class SubjectReferenceImage(_common.BaseModel):
+  """Class that represents a Subject reference image.
+
+  This encapsulates a subject reference image provided by the user, and
+  additionally optional config parameters for the subject reference image.
+
+  A raw reference image can also be provided as a destination for the subject to
+  be applied to.
+ """ + + reference_image: Optional[Image] = Field( + default=None, + description="""The reference image for the editing operation.""", + ) + reference_id: Optional[int] = Field( + default=None, description="""The id of the reference image.""" + ) + reference_type: Optional[str] = Field( + default=None, description="""The type of the reference image.""" + ) + config: Optional[SubjectReferenceConfig] = Field( + default=None, + description="""Configuration for the subject reference image.""", + ) + """Re-map config to subject_reference_config to send to API.""" + subject_image_config: Optional['SubjectReferenceConfig'] = Field( + default=None, description="""""" + ) + + def __init__( + self, + reference_image: Optional[Image] = None, + reference_id: Optional[int] = None, + config: Optional['SubjectReferenceConfig'] = None, + ): + super().__init__( + reference_image=reference_image, + reference_id=reference_id, + reference_type='REFERENCE_TYPE_SUBJECT', + ) + self.subject_image_config = config + + +class SubjectReferenceImageDict(TypedDict, total=False): + """Class that represents a Subject reference image. + + This encapsulates a subject reference image provided by the user, and + additionally optional config parameters for the subject reference image. + + A raw reference image can also be provided as a destination for the subject to + be applied to. + """ + + reference_image: Optional[ImageDict] + """The reference image for the editing operation.""" + + reference_id: Optional[int] + """The id of the reference image.""" + + reference_type: Optional[str] + """The type of the reference image.""" + + config: Optional[SubjectReferenceConfigDict] + """Configuration for the subject reference image.""" + + +SubjectReferenceImageOrDict = Union[ + SubjectReferenceImage, SubjectReferenceImageDict +] + + +class LiveServerSetupComplete(_common.BaseModel): + """Sent in response to a `LiveGenerateContentSetup` message from the client.""" + + pass + + +class LiveServerSetupCompleteDict(TypedDict, total=False): + """Sent in response to a `LiveGenerateContentSetup` message from the client.""" + + pass + + +LiveServerSetupCompleteOrDict = Union[ + LiveServerSetupComplete, LiveServerSetupCompleteDict +] + + +class LiveServerContent(_common.BaseModel): + """Incremental server update generated by the model in response to client messages. + + Content is generated as quickly as possible, and not in real time. Clients + may choose to buffer and play it out in real time. + """ + + model_turn: Optional[Content] = Field( + default=None, + description="""The content that the model has generated as part of the current conversation with the user.""", + ) + turn_complete: Optional[bool] = Field( + default=None, + description="""If true, indicates that the model is done generating. Generation will only start in response to additional client messages. Can be set alongside `content`, indicating that the `content` is the last in the turn.""", + ) + interrupted: Optional[bool] = Field( + default=None, + description="""If true, indicates that a client message has interrupted current model generation. If the client is playing out the content in realtime, this is a good signal to stop and empty the current queue. If the client is playing out the content in realtime, this is a good signal to stop and empty the current playback queue.""", + ) + + +class LiveServerContentDict(TypedDict, total=False): + """Incremental server update generated by the model in response to client messages. 
+
+  Content is generated as quickly as possible, and not in real time. Clients
+  may choose to buffer and play it out in real time.
+  """
+
+  model_turn: Optional[ContentDict]
+  """The content that the model has generated as part of the current conversation with the user."""
+
+  turn_complete: Optional[bool]
+  """If true, indicates that the model is done generating. Generation will only start in response to additional client messages. Can be set alongside `content`, indicating that the `content` is the last in the turn."""
+
+  interrupted: Optional[bool]
+  """If true, indicates that a client message has interrupted current model generation. If the client is playing out the content in realtime, this is a good signal to stop and empty the current playback queue."""
+
+
+LiveServerContentOrDict = Union[LiveServerContent, LiveServerContentDict]
+
+
+class LiveServerToolCall(_common.BaseModel):
+  """Request for the client to execute the `function_calls` and return the responses with the matching `id`s."""
+
+  function_calls: Optional[list[FunctionCall]] = Field(
+      default=None, description="""The function calls to be executed."""
+  )
+
+
+class LiveServerToolCallDict(TypedDict, total=False):
+  """Request for the client to execute the `function_calls` and return the responses with the matching `id`s."""
+
+  function_calls: Optional[list[FunctionCallDict]]
+  """The function calls to be executed."""
+
+
+LiveServerToolCallOrDict = Union[LiveServerToolCall, LiveServerToolCallDict]
+
+
+class LiveServerToolCallCancellation(_common.BaseModel):
+  """Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should not have been executed and should be cancelled.
+
+  If there were side-effects to those tool calls, clients may attempt to undo
+  the tool calls. This message occurs only in cases where the clients interrupt
+  server turns.
+  """
+
+  ids: Optional[list[str]] = Field(
+      default=None, description="""The ids of the tool calls to be cancelled."""
+  )
+
+
+class LiveServerToolCallCancellationDict(TypedDict, total=False):
+  """Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should not have been executed and should be cancelled.
+
+  If there were side-effects to those tool calls, clients may attempt to undo
+  the tool calls. This message occurs only in cases where the clients interrupt
+  server turns.
+ """ + + ids: Optional[list[str]] + """The ids of the tool calls to be cancelled.""" + + +LiveServerToolCallCancellationOrDict = Union[ + LiveServerToolCallCancellation, LiveServerToolCallCancellationDict +] + + +class LiveServerMessage(_common.BaseModel): + """Response message for API call.""" + + setup_complete: Optional[LiveServerSetupComplete] = Field( + default=None, + description="""Sent in response to a `LiveClientSetup` message from the client.""", + ) + server_content: Optional[LiveServerContent] = Field( + default=None, + description="""Content generated by the model in response to client messages.""", + ) + tool_call: Optional[LiveServerToolCall] = Field( + default=None, + description="""Request for the client to execute the `function_calls` and return the responses with the matching `id`s.""", + ) + tool_call_cancellation: Optional[LiveServerToolCallCancellation] = Field( + default=None, + description="""Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should have been not executed and should be cancelled.""", + ) + + @property + def text(self) -> Optional[str]: + """Returns the concatenation of all text parts in the response.""" + if ( + not self.server_content + or not self.server_content + or not self.server_content.model_turn + or not self.server_content.model_turn.parts + ): + return None + text = '' + for part in self.server_content.model_turn.parts: + if isinstance(part.text, str): + if isinstance(part.thought, bool) and part.thought: + continue + text += part.text + return text if text else None + + @property + def data(self) -> Optional[bytes]: + """Returns the concatenation of all inline data parts in the response.""" + if ( + not self.server_content + or not self.server_content + or not self.server_content.model_turn + or not self.server_content.model_turn.parts + ): + return None + concatenated_data = b'' + for part in self.server_content.model_turn.parts: + if part.inline_data and isinstance(part.inline_data.data, bytes): + concatenated_data += part.inline_data.data + return concatenated_data if len(concatenated_data) > 0 else None + + +class LiveServerMessageDict(TypedDict, total=False): + """Response message for API call.""" + + setup_complete: Optional[LiveServerSetupCompleteDict] + """Sent in response to a `LiveClientSetup` message from the client.""" + + server_content: Optional[LiveServerContentDict] + """Content generated by the model in response to client messages.""" + + tool_call: Optional[LiveServerToolCallDict] + """Request for the client to execute the `function_calls` and return the responses with the matching `id`s.""" + + tool_call_cancellation: Optional[LiveServerToolCallCancellationDict] + """Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should have been not executed and should be cancelled.""" + + +LiveServerMessageOrDict = Union[LiveServerMessage, LiveServerMessageDict] + + +class LiveClientSetup(_common.BaseModel): + """Message contains configuration that will apply for the duration of the streaming session.""" + + model: Optional[str] = Field( + default=None, + description=""" + The fully qualified name of the publisher model or tuned model endpoint to + use. + """, + ) + generation_config: Optional[GenerationConfig] = Field( + default=None, + description="""The generation configuration for the session.""", + ) + system_instruction: Optional[Content] = Field( + default=None, + description="""The user provided system instructions for the model. 
+
+
+class LiveClientSetup(_common.BaseModel):
+  """Message contains configuration that will apply for the duration of the streaming session."""
+
+  model: Optional[str] = Field(
+      default=None,
+      description="""
+      The fully qualified name of the publisher model or tuned model endpoint to
+      use.
+      """,
+  )
+  generation_config: Optional[GenerationConfig] = Field(
+      default=None,
+      description="""The generation configuration for the session.""",
+  )
+  system_instruction: Optional[Content] = Field(
+      default=None,
+      description="""The user provided system instructions for the model.
+      Note: only text should be used in parts and content in each part will be
+      in a separate paragraph.""",
+  )
+  tools: Optional[list[Tool]] = Field(
+      default=None,
+      description=""" A list of `Tools` the model may use to generate the next response.
+
+      A `Tool` is a piece of code that enables the system to interact with
+      external systems to perform an action, or set of actions, outside of
+      knowledge and scope of the model.""",
+  )
+
+
+class LiveClientSetupDict(TypedDict, total=False):
+  """Message contains configuration that will apply for the duration of the streaming session."""
+
+  model: Optional[str]
+  """
+  The fully qualified name of the publisher model or tuned model endpoint to
+  use.
+  """
+
+  generation_config: Optional[GenerationConfigDict]
+  """The generation configuration for the session."""
+
+  system_instruction: Optional[ContentDict]
+  """The user provided system instructions for the model.
+  Note: only text should be used in parts and content in each part will be
+  in a separate paragraph."""
+
+  tools: Optional[list[ToolDict]]
+  """ A list of `Tools` the model may use to generate the next response.
+
+  A `Tool` is a piece of code that enables the system to interact with
+  external systems to perform an action, or set of actions, outside of
+  knowledge and scope of the model."""
+
+
+LiveClientSetupOrDict = Union[LiveClientSetup, LiveClientSetupDict]
+
+
+class LiveClientContent(_common.BaseModel):
+  """Incremental update of the current conversation delivered from the client.
+
+  All the content here will unconditionally be appended to the conversation
+  history and used as part of the prompt to the model to generate content.
+
+  A message here will interrupt any current model generation.
+  """
+
+  turns: Optional[list[Content]] = Field(
+      default=None,
+      description="""The content appended to the current conversation with the model.
+
+      For single-turn queries, this is a single instance. For multi-turn
+      queries, this is a repeated field that contains conversation history +
+      latest request.
+      """,
+  )
+  turn_complete: Optional[bool] = Field(
+      default=None,
+      description="""If true, indicates that the server content generation should start with
+      the currently accumulated prompt. Otherwise, the server will await
+      additional messages before starting generation.""",
+  )
+
+
+class LiveClientContentDict(TypedDict, total=False):
+  """Incremental update of the current conversation delivered from the client.
+
+  All the content here will unconditionally be appended to the conversation
+  history and used as part of the prompt to the model to generate content.
+
+  A message here will interrupt any current model generation.
+  """
+
+  turns: Optional[list[ContentDict]]
+  """The content appended to the current conversation with the model.
+
+  For single-turn queries, this is a single instance. For multi-turn
+  queries, this is a repeated field that contains conversation history +
+  latest request.
+  """
+
+  turn_complete: Optional[bool]
+  """If true, indicates that the server content generation should start with
+  the currently accumulated prompt. Otherwise, the server will await
+  additional messages before starting generation."""
+
+
+LiveClientContentOrDict = Union[LiveClientContent, LiveClientContentDict]
+
+
+class LiveClientRealtimeInput(_common.BaseModel):
+  """User input that is sent in real time.
+
+  This is different from `ClientContentUpdate` in a few ways:
+  - Can be sent continuously without interrupting model generation.
+  - If there is a need to mix data interleaved across the
+    `ClientContentUpdate` and the `RealtimeUpdate`, the server will attempt to
+    optimize for the best response, but there are no guarantees.
+  - End of turn is not explicitly specified, but is rather derived from user
+    activity, e.g. end of speech.
+  - Even before the end of turn, the data will be processed incrementally
+    to optimize for a fast start of the response from the model.
+  - Is always assumed to be the user's input (cannot be used to populate
+    conversation history).
+  """
+
+  media_chunks: Optional[list[Blob]] = Field(
+      default=None, description="""Inlined bytes data for media input."""
+  )
+
+
+class LiveClientRealtimeInputDict(TypedDict, total=False):
+  """User input that is sent in real time.
+
+  This is different from `ClientContentUpdate` in a few ways:
+  - Can be sent continuously without interrupting model generation.
+  - If there is a need to mix data interleaved across the
+    `ClientContentUpdate` and the `RealtimeUpdate`, the server will attempt to
+    optimize for the best response, but there are no guarantees.
+  - End of turn is not explicitly specified, but is rather derived from user
+    activity, e.g. end of speech.
+  - Even before the end of turn, the data will be processed incrementally
+    to optimize for a fast start of the response from the model.
+  - Is always assumed to be the user's input (cannot be used to populate
+    conversation history).
+  """
+
+  media_chunks: Optional[list[BlobDict]]
+  """Inlined bytes data for media input."""
+
+
+LiveClientRealtimeInputOrDict = Union[
+    LiveClientRealtimeInput, LiveClientRealtimeInputDict
+]
+
+
+class LiveClientToolResponse(_common.BaseModel):
+  """Client generated response to a `ToolCall` received from the server.
+
+  Individual `FunctionResponse` objects are matched to the respective
+  `FunctionCall` objects by the `id` field.
+
+  Note that in the unary and server-streaming GenerateContent APIs function
+  calling happens by exchanging the `Content` parts, while in the bidi
+  GenerateContent APIs function calling happens over this dedicated set of
+  messages.
+  """
+
+  function_responses: Optional[list[FunctionResponse]] = Field(
+      default=None, description="""The response to the function calls."""
+  )
+
+
+class LiveClientToolResponseDict(TypedDict, total=False):
+  """Client generated response to a `ToolCall` received from the server.
+
+  Individual `FunctionResponse` objects are matched to the respective
+  `FunctionCall` objects by the `id` field.
+
+  Note that in the unary and server-streaming GenerateContent APIs function
+  calling happens by exchanging the `Content` parts, while in the bidi
+  GenerateContent APIs function calling happens over this dedicated set of
+  messages.
+  """
+
+  function_responses: Optional[list[FunctionResponseDict]]
+  """The response to the function calls."""
+
+
+LiveClientToolResponseOrDict = Union[
+    LiveClientToolResponse, LiveClientToolResponseDict
+]
+
+
+class LiveClientMessage(_common.BaseModel):
+  """Messages sent by the client in the API call."""
+
+  setup: Optional[LiveClientSetup] = Field(
+      default=None,
+      description="""Message to be sent by the system when connecting to the API.
+      SDK users should not send this message.""",
+  )
+  client_content: Optional[LiveClientContent] = Field(
+      default=None,
+      description="""Incremental update of the current conversation delivered from the client.""",
+  )
+  realtime_input: Optional[LiveClientRealtimeInput] = Field(
+      default=None, description="""User input that is sent in real time."""
+  )
+  tool_response: Optional[LiveClientToolResponse] = Field(
+      default=None,
+      description="""Response to a `ToolCallMessage` received from the server.""",
+  )
+
+
+class LiveClientMessageDict(TypedDict, total=False):
+  """Messages sent by the client in the API call."""
+
+  setup: Optional[LiveClientSetupDict]
+  """Message to be sent by the system when connecting to the API. SDK users should not send this message."""
+
+  client_content: Optional[LiveClientContentDict]
+  """Incremental update of the current conversation delivered from the client."""
+
+  realtime_input: Optional[LiveClientRealtimeInputDict]
+  """User input that is sent in real time."""
+
+  tool_response: Optional[LiveClientToolResponseDict]
+  """Response to a `ToolCallMessage` received from the server."""
+
+
+LiveClientMessageOrDict = Union[LiveClientMessage, LiveClientMessageDict]
+
+
+class LiveConnectConfig(_common.BaseModel):
+  """Config class for the session."""
+
+  generation_config: Optional[GenerationConfig] = Field(
+      default=None,
+      description="""The generation configuration for the session.""",
+  )
+  response_modalities: Optional[list[Modality]] = Field(
+      default=None,
+      description="""The requested modalities of the response. Represents the set of
+      modalities that the model can return. Defaults to AUDIO if not specified.
+      """,
+  )
+  speech_config: Optional[SpeechConfig] = Field(
+      default=None,
+      description="""The speech generation configuration.
+      """,
+  )
+  system_instruction: Optional[Content] = Field(
+      default=None,
+      description="""The user provided system instructions for the model.
+      Note: only text should be used in parts and content in each part will be
+      in a separate paragraph.""",
+  )
+  tools: Optional[list[Tool]] = Field(
+      default=None,
+      description="""A list of `Tools` the model may use to generate the next response.
+
+      A `Tool` is a piece of code that enables the system to interact with
+      external systems to perform an action, or set of actions, outside of
+      knowledge and scope of the model.""",
+  )
+
+
+class LiveConnectConfigDict(TypedDict, total=False):
+  """Config class for the session."""
+
+  generation_config: Optional[GenerationConfigDict]
+  """The generation configuration for the session."""
+
+  response_modalities: Optional[list[Modality]]
+  """The requested modalities of the response. Represents the set of
+  modalities that the model can return. Defaults to AUDIO if not specified.
+  """
+
+  speech_config: Optional[SpeechConfigDict]
+  """The speech generation configuration.
+  """
+
+  system_instruction: Optional[ContentDict]
+  """The user provided system instructions for the model.
+  Note: only text should be used in parts and content in each part will be
+  in a separate paragraph."""
+
+  tools: Optional[list[ToolDict]]
+  """A list of `Tools` the model may use to generate the next response.
+
+  A `Tool` is a piece of code that enables the system to interact with
+  external systems to perform an action, or set of actions, outside of
+  knowledge and scope of the model."""
+
+
+LiveConnectConfigOrDict = Union[LiveConnectConfig, LiveConnectConfigDict]
diff --git a/.venv/lib/python3.12/site-packages/google/genai/version.py b/.venv/lib/python3.12/site-packages/google/genai/version.py
new file mode 100644
index 00000000..f69c999e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/genai/version.py
@@ -0,0 +1,16 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+__version__ = '0.6.0'  # x-release-please-version
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/__init__.py b/.venv/lib/python3.12/site-packages/google/oauth2/__init__.py
new file mode 100644
index 00000000..accae965
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/__init__.py
@@ -0,0 +1,36 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google OAuth 2.0 Library for Python."""
+
+import sys
+import warnings
+
+
+class Python37DeprecationWarning(DeprecationWarning):  # pragma: NO COVER
+    """
+    Deprecation warning raised when Python 3.7 runtime is detected.
+    Python 3.7 support will be dropped after January 1, 2024.
+    """
+
+    pass
+
+
+# Checks if the current runtime is Python 3.7.
+if sys.version_info.major == 3 and sys.version_info.minor == 7:  # pragma: NO COVER
+    message = (
+        "After January 1, 2024, new releases of this library will drop support "
+        "for Python 3.7."
+    )
+    warnings.warn(message, Python37DeprecationWarning)
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/_client.py b/.venv/lib/python3.12/site-packages/google/oauth2/_client.py
new file mode 100644
index 00000000..5a9fc350
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/_client.py
@@ -0,0 +1,508 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 client.
+
+This is a client for interacting with an OAuth 2.0 authorization server's
+token endpoint.
+
+For more information about the token endpoint, see
+`Section 3.2 of rfc6749`_
+
+.. _Section 3.2 of rfc6749: https://tools.ietf.org/html/rfc6749#section-3.2
+"""
+
+import datetime
+import http.client as http_client
+import json
+import urllib
+
+from google.auth import _exponential_backoff
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import jwt
+from google.auth import metrics
+from google.auth import transport
+
+_URLENCODED_CONTENT_TYPE = "application/x-www-form-urlencoded"
+_JSON_CONTENT_TYPE = "application/json"
+_JWT_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+_REFRESH_GRANT_TYPE = "refresh_token"
+
+
+def _handle_error_response(response_data, retryable_error):
+    """Translates an error response into an exception.
+
+    Args:
+        response_data (Mapping | str): The decoded response data.
+        retryable_error (Optional[bool]): A boolean indicating if an error is retryable.
+            Defaults to False.
+
+    Raises:
+        google.auth.exceptions.RefreshError: The errors contained in response_data.
+    """
+
+    retryable_error = retryable_error if retryable_error else False
+
+    if isinstance(response_data, str):
+        raise exceptions.RefreshError(response_data, retryable=retryable_error)
+    try:
+        error_details = "{}: {}".format(
+            response_data["error"], response_data.get("error_description")
+        )
+    # If no details could be extracted, use the response data.
+    except (KeyError, ValueError):
+        error_details = json.dumps(response_data)
+
+    raise exceptions.RefreshError(
+        error_details, response_data, retryable=retryable_error
+    )
+
+
+def _can_retry(status_code, response_data):
+    """Checks if a request can be retried by inspecting the status code
+    and response body of the request.
+
+    Args:
+        status_code (int): The response status code.
+        response_data (Mapping | str): The decoded response data.
+
+    Returns:
+        bool: True if the response is retryable. False otherwise.
+    """
+    if status_code in transport.DEFAULT_RETRYABLE_STATUS_CODES:
+        return True
+
+    try:
+        # For a failed response, response_body could be a string
+        error_desc = response_data.get("error_description") or ""
+        error_code = response_data.get("error") or ""
+
+        if not isinstance(error_code, str) or not isinstance(error_desc, str):
+            return False
+
+        # Per OAuth 2.0 RFC https://www.rfc-editor.org/rfc/rfc6749.html#section-4.1.2.1
+        # This is needed because a redirect will not return a 500 status code.
+        retryable_error_descriptions = {
+            "internal_failure",
+            "server_error",
+            "temporarily_unavailable",
+        }
+
+        if any(e in retryable_error_descriptions for e in (error_code, error_desc)):
+            return True
+
+    except AttributeError:
+        pass
+
+    return False
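+
+
+# A small illustrative check (editorial): how the retry predicate above
+# behaves, assuming transport.DEFAULT_RETRYABLE_STATUS_CODES includes 500.
+# A 500 is retryable by status code alone; an OAuth error body is retryable
+# only when it names one of the transient error codes.
+assert _can_retry(500, {}) is True
+assert _can_retry(400, {"error": "temporarily_unavailable"}) is True
+assert _can_retry(400, {"error": "invalid_grant"}) is False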
+
+
+def _parse_expiry(response_data):
+    """Parses the expiry field from a response into a datetime.
+
+    Args:
+        response_data (Mapping): The JSON-parsed response data.
+
+    Returns:
+        Optional[datetime]: The expiration or ``None`` if no expiration was
+            specified.
+    """
+    expires_in = response_data.get("expires_in", None)
+
+    if expires_in is not None:
+        # Some services do not respect the OAuth 2.0 RFC and send expires_in as a
+        # JSON string.
+        if isinstance(expires_in, str):
+            expires_in = int(expires_in)
+
+        return _helpers.utcnow() + datetime.timedelta(seconds=expires_in)
+    else:
+        return None
+
+
+def _token_endpoint_request_no_throw(
+    request,
+    token_uri,
+    body,
+    access_token=None,
+    use_json=False,
+    can_retry=True,
+    headers=None,
+    **kwargs
+):
+    """Makes a request to the OAuth 2.0 authorization server's token endpoint.
+    This function doesn't throw on response errors.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        token_uri (str): The OAuth 2.0 authorization server's token endpoint
+            URI.
+        body (Mapping[str, str]): The parameters to send in the request body.
+        access_token (Optional(str)): The access token needed to make the request.
+        use_json (Optional(bool)): Use urlencoded format or json format for the
+            content type. The default value is False.
+        can_retry (bool): Enable or disable request retry behavior.
+        headers (Optional[Mapping[str, str]]): The headers for the request.
+        kwargs: Additional arguments passed on to the request method. The
+            kwargs will be passed to `requests.request` method, see:
+            https://docs.python-requests.org/en/latest/api/#requests.request.
+            For example, you can use `cert=("cert_pem_path", "key_pem_path")`
+            to set up client side SSL certificate, and use
+            `verify="ca_bundle_path"` to set up the CA certificates for server
+            side SSL certificate verification.
+
+    Returns:
+        Tuple(bool, Mapping[str, str], Optional[bool]): A boolean indicating
+          if the request is successful, a mapping for the JSON-decoded response
+          data and in the case of an error a boolean indicating if the error
+          is retryable.
+    """
+    if use_json:
+        headers_to_use = {"Content-Type": _JSON_CONTENT_TYPE}
+        body = json.dumps(body).encode("utf-8")
+    else:
+        headers_to_use = {"Content-Type": _URLENCODED_CONTENT_TYPE}
+        body = urllib.parse.urlencode(body).encode("utf-8")
+
+    if access_token:
+        headers_to_use["Authorization"] = "Bearer {}".format(access_token)
+
+    if headers:
+        headers_to_use.update(headers)
+
+    response_data = {}
+    retryable_error = False
+
+    retries = _exponential_backoff.ExponentialBackoff()
+    for _ in retries:
+        response = request(
+            method="POST", url=token_uri, headers=headers_to_use, body=body, **kwargs
+        )
+        response_body = (
+            response.data.decode("utf-8")
+            if hasattr(response.data, "decode")
+            else response.data
+        )
+
+        try:
+            # The response body should be JSON.
+            response_data = json.loads(response_body)
+        except ValueError:
+            response_data = response_body
+
+        if response.status == http_client.OK:
+            return True, response_data, None
+
+        retryable_error = _can_retry(
+            status_code=response.status, response_data=response_data
+        )
+
+        if not can_retry or not retryable_error:
+            return False, response_data, retryable_error
+
+    return False, response_data, retryable_error
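+
+
+# A minimal sketch (editorial): driving the helper above with the standard
+# requests-based transport. The request body values are illustrative
+# placeholders, so the call returns an error payload rather than a token.
+import google.auth.transport.requests
+
+_request = google.auth.transport.requests.Request()
+_ok, _data, _retryable = _token_endpoint_request_no_throw(
+    _request,
+    "https://oauth2.googleapis.com/token",
+    {
+        "grant_type": _REFRESH_GRANT_TYPE,
+        "refresh_token": "<redacted>",
+        "client_id": "<redacted>",
+        "client_secret": "<redacted>",
+    },
+)
+# _ok is False here; _data carries the decoded OAuth error response.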
+
+
+def _token_endpoint_request(
+    request,
+    token_uri,
+    body,
+    access_token=None,
+    use_json=False,
+    can_retry=True,
+    headers=None,
+    **kwargs
+):
+    """Makes a request to the OAuth 2.0 authorization server's token endpoint.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        token_uri (str): The OAuth 2.0 authorization server's token endpoint
+            URI.
+        body (Mapping[str, str]): The parameters to send in the request body.
+        access_token (Optional(str)): The access token needed to make the request.
+        use_json (Optional(bool)): Use urlencoded format or json format for the
+            content type. The default value is False.
+        can_retry (bool): Enable or disable request retry behavior.
+        headers (Optional[Mapping[str, str]]): The headers for the request.
+        kwargs: Additional arguments passed on to the request method. The
+            kwargs will be passed to `requests.request` method, see:
+            https://docs.python-requests.org/en/latest/api/#requests.request.
+            For example, you can use `cert=("cert_pem_path", "key_pem_path")`
+            to set up client side SSL certificate, and use
+            `verify="ca_bundle_path"` to set up the CA certificates for server
+            side SSL certificate verification.
+
+    Returns:
+        Mapping[str, str]: The JSON-decoded response data.
+
+    Raises:
+        google.auth.exceptions.RefreshError: If the token endpoint returned
+            an error.
+    """
+
+    response_status_ok, response_data, retryable_error = _token_endpoint_request_no_throw(
+        request,
+        token_uri,
+        body,
+        access_token=access_token,
+        use_json=use_json,
+        can_retry=can_retry,
+        headers=headers,
+        **kwargs
+    )
+    if not response_status_ok:
+        _handle_error_response(response_data, retryable_error)
+    return response_data
+
+
+def jwt_grant(request, token_uri, assertion, can_retry=True):
+    """Implements the JWT Profile for OAuth 2.0 Authorization Grants.
+
+    For more details, see `rfc7523 section 4`_.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        token_uri (str): The OAuth 2.0 authorization server's token endpoint
+            URI.
+        assertion (str): The OAuth 2.0 assertion.
+        can_retry (bool): Enable or disable request retry behavior.
+
+    Returns:
+        Tuple[str, Optional[datetime], Mapping[str, str]]: The access token,
+            expiration, and additional data returned by the token endpoint.
+
+    Raises:
+        google.auth.exceptions.RefreshError: If the token endpoint returned
+            an error.
+
+    .. _rfc7523 section 4: https://tools.ietf.org/html/rfc7523#section-4
+    """
+    body = {"assertion": assertion, "grant_type": _JWT_GRANT_TYPE}
+
+    response_data = _token_endpoint_request(
+        request,
+        token_uri,
+        body,
+        can_retry=can_retry,
+        headers={
+            metrics.API_CLIENT_HEADER: metrics.token_request_access_token_sa_assertion()
+        },
+    )
+
+    try:
+        access_token = response_data["access_token"]
+    except KeyError as caught_exc:
+        new_exc = exceptions.RefreshError(
+            "No access token in response.", response_data, retryable=False
+        )
+        raise new_exc from caught_exc
+
+    expiry = _parse_expiry(response_data)
+
+    return access_token, expiry, response_data
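+
+
+# A minimal sketch (editorial): jwt_grant is what service account credentials
+# use under the hood when refreshed. The key file path and scope are
+# illustrative.
+import google.auth.transport.requests
+from google.oauth2 import service_account
+
+creds = service_account.Credentials.from_service_account_file(
+    "/path/to/key.json",
+    scopes=["https://www.googleapis.com/auth/cloud-platform"],
+)
+creds.refresh(google.auth.transport.requests.Request())  # calls jwt_grant
+print(creds.token, creds.expiry)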
+ """ + body = {"audience": audience, "includeEmail": "true", "useEmailAzp": "true"} + + response_data = _token_endpoint_request( + request, + iam_id_token_endpoint.replace( + credentials.DEFAULT_UNIVERSE_DOMAIN, universe_domain + ).format(signer_email), + body, + access_token=access_token, + use_json=True, + ) + + try: + id_token = response_data["token"] + except KeyError as caught_exc: + new_exc = exceptions.RefreshError( + "No ID token in response.", response_data, retryable=False + ) + raise new_exc from caught_exc + + payload = jwt.decode(id_token, verify=False) + expiry = datetime.datetime.utcfromtimestamp(payload["exp"]) + + return id_token, expiry + + +def id_token_jwt_grant(request, token_uri, assertion, can_retry=True): + """Implements the JWT Profile for OAuth 2.0 Authorization Grants, but + requests an OpenID Connect ID Token instead of an access token. + + This is a variant on the standard JWT Profile that is currently unique + to Google. This was added for the benefit of authenticating to services + that require ID Tokens instead of access tokens or JWT bearer tokens. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + token_uri (str): The OAuth 2.0 authorization server's token endpoint + URI. + assertion (str): JWT token signed by a service account. The token's + payload must include a ``target_audience`` claim. + can_retry (bool): Enable or disable request retry behavior. + + Returns: + Tuple[str, Optional[datetime], Mapping[str, str]]: + The (encoded) Open ID Connect ID Token, expiration, and additional + data returned by the endpoint. + + Raises: + google.auth.exceptions.RefreshError: If the token endpoint returned + an error. + """ + body = {"assertion": assertion, "grant_type": _JWT_GRANT_TYPE} + + response_data = _token_endpoint_request( + request, + token_uri, + body, + can_retry=can_retry, + headers={ + metrics.API_CLIENT_HEADER: metrics.token_request_id_token_sa_assertion() + }, + ) + + try: + id_token = response_data["id_token"] + except KeyError as caught_exc: + new_exc = exceptions.RefreshError( + "No ID token in response.", response_data, retryable=False + ) + raise new_exc from caught_exc + + payload = jwt.decode(id_token, verify=False) + expiry = datetime.datetime.utcfromtimestamp(payload["exp"]) + + return id_token, expiry, response_data + + +def _handle_refresh_grant_response(response_data, refresh_token): + """Extract tokens from refresh grant response. + + Args: + response_data (Mapping[str, str]): Refresh grant response data. + refresh_token (str): Current refresh token. + + Returns: + Tuple[str, str, Optional[datetime], Mapping[str, str]]: The access token, + refresh token, expiration, and additional data returned by the token + endpoint. If response_data doesn't have refresh token, then the current + refresh token will be returned. + + Raises: + google.auth.exceptions.RefreshError: If the token endpoint returned + an error. 
+ """ + try: + access_token = response_data["access_token"] + except KeyError as caught_exc: + new_exc = exceptions.RefreshError( + "No access token in response.", response_data, retryable=False + ) + raise new_exc from caught_exc + + refresh_token = response_data.get("refresh_token", refresh_token) + expiry = _parse_expiry(response_data) + + return access_token, refresh_token, expiry, response_data + + +def refresh_grant( + request, + token_uri, + refresh_token, + client_id, + client_secret, + scopes=None, + rapt_token=None, + can_retry=True, +): + """Implements the OAuth 2.0 refresh token grant. + + For more details, see `rfc678 section 6`_. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + token_uri (str): The OAuth 2.0 authorizations server's token endpoint + URI. + refresh_token (str): The refresh token to use to get a new access + token. + client_id (str): The OAuth 2.0 application's client ID. + client_secret (str): The Oauth 2.0 appliaction's client secret. + scopes (Optional(Sequence[str])): Scopes to request. If present, all + scopes must be authorized for the refresh token. Useful if refresh + token has a wild card scope (e.g. + 'https://www.googleapis.com/auth/any-api'). + rapt_token (Optional(str)): The reauth Proof Token. + can_retry (bool): Enable or disable request retry behavior. + + Returns: + Tuple[str, str, Optional[datetime], Mapping[str, str]]: The access + token, new or current refresh token, expiration, and additional data + returned by the token endpoint. + + Raises: + google.auth.exceptions.RefreshError: If the token endpoint returned + an error. + + .. _rfc6748 section 6: https://tools.ietf.org/html/rfc6749#section-6 + """ + body = { + "grant_type": _REFRESH_GRANT_TYPE, + "client_id": client_id, + "client_secret": client_secret, + "refresh_token": refresh_token, + } + if scopes: + body["scope"] = " ".join(scopes) + if rapt_token: + body["rapt"] = rapt_token + + response_data = _token_endpoint_request( + request, token_uri, body, can_retry=can_retry + ) + return _handle_refresh_grant_response(response_data, refresh_token) diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/_client_async.py b/.venv/lib/python3.12/site-packages/google/oauth2/_client_async.py new file mode 100644 index 00000000..8867f0a5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/oauth2/_client_async.py @@ -0,0 +1,286 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""OAuth 2.0 async client. + +This is a client for interacting with an OAuth 2.0 authorization server's +token endpoint. + +For more information about the token endpoint, see +`Section 3.1 of rfc6749`_ + +.. 
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/_client_async.py b/.venv/lib/python3.12/site-packages/google/oauth2/_client_async.py
new file mode 100644
index 00000000..8867f0a5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/_client_async.py
@@ -0,0 +1,286 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 async client.
+
+This is a client for interacting with an OAuth 2.0 authorization server's
+token endpoint.
+
+For more information about the token endpoint, see
+`Section 3.2 of rfc6749`_
+
+.. _Section 3.2 of rfc6749: https://tools.ietf.org/html/rfc6749#section-3.2
+"""
+
+import datetime
+import http.client as http_client
+import json
+import urllib
+
+from google.auth import _exponential_backoff
+from google.auth import exceptions
+from google.auth import jwt
+from google.oauth2 import _client as client
+
+
+async def _token_endpoint_request_no_throw(
+    request, token_uri, body, access_token=None, use_json=False, can_retry=True
+):
+    """Makes a request to the OAuth 2.0 authorization server's token endpoint.
+    This function doesn't throw on response errors.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        token_uri (str): The OAuth 2.0 authorization server's token endpoint
+            URI.
+        body (Mapping[str, str]): The parameters to send in the request body.
+        access_token (Optional(str)): The access token needed to make the request.
+        use_json (Optional(bool)): Use urlencoded format or json format for the
+            content type. The default value is False.
+        can_retry (bool): Enable or disable request retry behavior.
+
+    Returns:
+        Tuple(bool, Mapping[str, str], Optional[bool]): A boolean indicating
+          if the request is successful, a mapping for the JSON-decoded response
+          data and in the case of an error a boolean indicating if the error
+          is retryable.
+    """
+    if use_json:
+        headers = {"Content-Type": client._JSON_CONTENT_TYPE}
+        body = json.dumps(body).encode("utf-8")
+    else:
+        headers = {"Content-Type": client._URLENCODED_CONTENT_TYPE}
+        body = urllib.parse.urlencode(body).encode("utf-8")
+
+    if access_token:
+        headers["Authorization"] = "Bearer {}".format(access_token)
+
+    response_data = {}
+    retryable_error = False
+
+    retries = _exponential_backoff.ExponentialBackoff()
+    for _ in retries:
+        response = await request(
+            method="POST", url=token_uri, headers=headers, body=body
+        )
+
+        # Using response.data.read() resulted in zlib decompression errors.
+        # This may require future investigation.
+        response_body1 = await response.content()
+
+        response_body = (
+            response_body1.decode("utf-8")
+            if hasattr(response_body1, "decode")
+            else response_body1
+        )
+
+        try:
+            response_data = json.loads(response_body)
+        except ValueError:
+            response_data = response_body
+
+        if response.status == http_client.OK:
+            return True, response_data, None
+
+        retryable_error = client._can_retry(
+            status_code=response.status, response_data=response_data
+        )
+
+        if not can_retry or not retryable_error:
+            return False, response_data, retryable_error
+
+    return False, response_data, retryable_error
+
+
+async def _token_endpoint_request(
+    request, token_uri, body, access_token=None, use_json=False, can_retry=True
+):
+    """Makes a request to the OAuth 2.0 authorization server's token endpoint.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        token_uri (str): The OAuth 2.0 authorization server's token endpoint
+            URI.
+        body (Mapping[str, str]): The parameters to send in the request body.
+        access_token (Optional(str)): The access token needed to make the request.
+        use_json (Optional(bool)): Use urlencoded format or json format for the
+            content type. The default value is False.
+        can_retry (bool): Enable or disable request retry behavior.
+
+    Returns:
+        Mapping[str, str]: The JSON-decoded response data.
+
+    Raises:
+        google.auth.exceptions.RefreshError: If the token endpoint returned
+            an error.
+ """ + + response_status_ok, response_data, retryable_error = await _token_endpoint_request_no_throw( + request, + token_uri, + body, + access_token=access_token, + use_json=use_json, + can_retry=can_retry, + ) + if not response_status_ok: + client._handle_error_response(response_data, retryable_error) + return response_data + + +async def jwt_grant(request, token_uri, assertion, can_retry=True): + """Implements the JWT Profile for OAuth 2.0 Authorization Grants. + + For more details, see `rfc7523 section 4`_. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + token_uri (str): The OAuth 2.0 authorizations server's token endpoint + URI. + assertion (str): The OAuth 2.0 assertion. + can_retry (bool): Enable or disable request retry behavior. + + Returns: + Tuple[str, Optional[datetime], Mapping[str, str]]: The access token, + expiration, and additional data returned by the token endpoint. + + Raises: + google.auth.exceptions.RefreshError: If the token endpoint returned + an error. + + .. _rfc7523 section 4: https://tools.ietf.org/html/rfc7523#section-4 + """ + body = {"assertion": assertion, "grant_type": client._JWT_GRANT_TYPE} + + response_data = await _token_endpoint_request( + request, token_uri, body, can_retry=can_retry + ) + + try: + access_token = response_data["access_token"] + except KeyError as caught_exc: + new_exc = exceptions.RefreshError( + "No access token in response.", response_data, retryable=False + ) + raise new_exc from caught_exc + + expiry = client._parse_expiry(response_data) + + return access_token, expiry, response_data + + +async def id_token_jwt_grant(request, token_uri, assertion, can_retry=True): + """Implements the JWT Profile for OAuth 2.0 Authorization Grants, but + requests an OpenID Connect ID Token instead of an access token. + + This is a variant on the standard JWT Profile that is currently unique + to Google. This was added for the benefit of authenticating to services + that require ID Tokens instead of access tokens or JWT bearer tokens. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + token_uri (str): The OAuth 2.0 authorization server's token endpoint + URI. + assertion (str): JWT token signed by a service account. The token's + payload must include a ``target_audience`` claim. + can_retry (bool): Enable or disable request retry behavior. + + Returns: + Tuple[str, Optional[datetime], Mapping[str, str]]: + The (encoded) Open ID Connect ID Token, expiration, and additional + data returned by the endpoint. + + Raises: + google.auth.exceptions.RefreshError: If the token endpoint returned + an error. + """ + body = {"assertion": assertion, "grant_type": client._JWT_GRANT_TYPE} + + response_data = await _token_endpoint_request( + request, token_uri, body, can_retry=can_retry + ) + + try: + id_token = response_data["id_token"] + except KeyError as caught_exc: + new_exc = exceptions.RefreshError( + "No ID token in response.", response_data, retryable=False + ) + raise new_exc from caught_exc + + payload = jwt.decode(id_token, verify=False) + expiry = datetime.datetime.utcfromtimestamp(payload["exp"]) + + return id_token, expiry, response_data + + +async def refresh_grant( + request, + token_uri, + refresh_token, + client_id, + client_secret, + scopes=None, + rapt_token=None, + can_retry=True, +): + """Implements the OAuth 2.0 refresh token grant. + + For more details, see `rfc678 section 6`_. 
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        token_uri (str): The OAuth 2.0 authorization server's token endpoint
+            URI.
+        refresh_token (str): The refresh token to use to get a new access
+            token.
+        client_id (str): The OAuth 2.0 application's client ID.
+        client_secret (str): The OAuth 2.0 application's client secret.
+        scopes (Optional(Sequence[str])): Scopes to request. If present, all
+            scopes must be authorized for the refresh token. Useful if refresh
+            token has a wild card scope (e.g.
+            'https://www.googleapis.com/auth/any-api').
+        rapt_token (Optional(str)): The reauth Proof Token.
+        can_retry (bool): Enable or disable request retry behavior.
+
+    Returns:
+        Tuple[str, Optional[str], Optional[datetime], Mapping[str, str]]: The
+            access token, new or current refresh token, expiration, and additional data
+            returned by the token endpoint.
+
+    Raises:
+        google.auth.exceptions.RefreshError: If the token endpoint returned
+            an error.
+
+    .. _rfc6749 section 6: https://tools.ietf.org/html/rfc6749#section-6
+    """
+    body = {
+        "grant_type": client._REFRESH_GRANT_TYPE,
+        "client_id": client_id,
+        "client_secret": client_secret,
+        "refresh_token": refresh_token,
+    }
+    if scopes:
+        body["scope"] = " ".join(scopes)
+    if rapt_token:
+        body["rapt"] = rapt_token
+
+    response_data = await _token_endpoint_request(
+        request, token_uri, body, can_retry=can_retry
+    )
+    return client._handle_refresh_grant_response(response_data, refresh_token)
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/_credentials_async.py b/.venv/lib/python3.12/site-packages/google/oauth2/_credentials_async.py
new file mode 100644
index 00000000..b5561aae
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/_credentials_async.py
@@ -0,0 +1,118 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 Async Credentials.
+
+This module provides credentials based on OAuth 2.0 access and refresh tokens.
+These credentials usually access resources on behalf of a user (resource
+owner).
+
+Specifically, this is intended to use access tokens acquired using the
+`Authorization Code grant`_ and can refresh those tokens using an
+optional `refresh token`_.
+
+Obtaining the initial access and refresh token is outside of the scope of this
+module. Consult `rfc6749 section 4.1`_ for complete details on the
+Authorization Code grant flow.
+
+.. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1
+.. _refresh token: https://tools.ietf.org/html/rfc6749#section-6
+.. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1
+"""
+
+from google.auth import _credentials_async as credentials
+from google.auth import _helpers
+from google.auth import exceptions
+from google.oauth2 import _reauth_async as reauth
+from google.oauth2 import credentials as oauth2_credentials
+
+
+class Credentials(oauth2_credentials.Credentials):
+    """Credentials using OAuth 2.0 access and refresh tokens.
+
+    The credentials are considered immutable. If you want to modify the
+    quota project, use :meth:`with_quota_project` or ::
+
+        credentials = credentials.with_quota_project('myproject-123')
+    """
+
+    @_helpers.copy_docstring(credentials.Credentials)
+    async def refresh(self, request):
+        if (
+            self._refresh_token is None
+            or self._token_uri is None
+            or self._client_id is None
+            or self._client_secret is None
+        ):
+            raise exceptions.RefreshError(
+                "The credentials do not contain the necessary fields needed to "
+                "refresh the access token. You must specify refresh_token, "
+                "token_uri, client_id, and client_secret."
+            )
+
+        (
+            access_token,
+            refresh_token,
+            expiry,
+            grant_response,
+            rapt_token,
+        ) = await reauth.refresh_grant(
+            request,
+            self._token_uri,
+            self._refresh_token,
+            self._client_id,
+            self._client_secret,
+            scopes=self._scopes,
+            rapt_token=self._rapt_token,
+            enable_reauth_refresh=self._enable_reauth_refresh,
+        )
+
+        self.token = access_token
+        self.expiry = expiry
+        self._refresh_token = refresh_token
+        self._id_token = grant_response.get("id_token")
+        self._rapt_token = rapt_token
+
+        if self._scopes and "scope" in grant_response:
+            requested_scopes = frozenset(self._scopes)
+            granted_scopes = frozenset(grant_response["scope"].split())
+            scopes_requested_but_not_granted = requested_scopes - granted_scopes
+            if scopes_requested_but_not_granted:
+                raise exceptions.RefreshError(
+                    "Not all requested scopes were granted by the "
+                    "authorization server, missing scopes {}.".format(
+                        ", ".join(scopes_requested_but_not_granted)
+                    )
+                )
+
+    @_helpers.copy_docstring(credentials.Credentials)
+    async def before_request(self, request, method, url, headers):
+        if not self.valid:
+            await self.refresh(request)
+        self.apply(headers)
+
+
+class UserAccessTokenCredentials(oauth2_credentials.UserAccessTokenCredentials):
+    """Access token credentials for user account.
+
+    Obtain the access token for a given user account or the current active
+    user account with the ``gcloud auth print-access-token`` command.
+
+    Args:
+        account (Optional[str]): Account to get the access token for. If not
+            specified, the current active account will be used.
+        quota_project_id (Optional[str]): The project ID used for quota
+            and billing.
+
+    """
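+
+
+# A minimal sketch (editorial): refreshing the async user credentials defined
+# above. Token values are illustrative placeholders; an aiohttp-based
+# transport request, as used in this module's docstrings, is required.
+import asyncio
+
+from google.auth.transport import aiohttp_requests
+
+
+async def refresh_user_credentials():
+    creds = Credentials(
+        None,  # no access token yet
+        refresh_token="<stored-refresh-token>",
+        token_uri="https://oauth2.googleapis.com/token",
+        client_id="<client-id>",
+        client_secret="<client-secret>",
+    )
+    await creds.refresh(aiohttp_requests.Request())
+    print(creds.token, creds.expiry)
+
+
+asyncio.run(refresh_user_credentials())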
+ +A general purpose ID Token verifier is available as :func:`verify_token`. + +Example:: + + from google.oauth2 import _id_token_async + from google.auth.transport import aiohttp_requests + + request = aiohttp_requests.Request() + + id_info = await _id_token_async.verify_oauth2_token( + token, request, 'my-client-id.example.com') + + if id_info['iss'] != 'https://accounts.google.com': + raise ValueError('Wrong issuer.') + + userid = id_info['sub'] + +By default, this will re-fetch certificates for each verification. Because +Google's public keys are only changed infrequently (on the order of once per +day), you may wish to take advantage of caching to reduce latency and the +potential for network errors. This can be accomplished using an external +library like `CacheControl`_ to create a cache-aware +:class:`google.auth.transport.Request`:: + + import cachecontrol + import google.auth.transport.requests + import requests + + session = requests.session() + cached_session = cachecontrol.CacheControl(session) + request = google.auth.transport.requests.Request(session=cached_session) + +.. _OpenID Connect ID Tokens: + http://openid.net/specs/openid-connect-core-1_0.html#IDToken +.. _CacheControl: https://cachecontrol.readthedocs.io +""" + +import http.client as http_client +import json +import os + +from google.auth import environment_vars +from google.auth import exceptions +from google.auth import jwt +from google.auth.transport import requests +from google.oauth2 import id_token as sync_id_token + + +async def _fetch_certs(request, certs_url): + """Fetches certificates. + + Google-style certificate endpoints return JSON in the format of + ``{'key id': 'x509 certificate'}``. + + Args: + request (google.auth.transport.Request): The object used to make + HTTP requests. This must be an aiohttp request. + certs_url (str): The certificate endpoint URL. + + Returns: + Mapping[str, str]: A mapping of public key ID to x.509 certificate + data. + """ + response = await request(certs_url, method="GET") + + if response.status != http_client.OK: + raise exceptions.TransportError( + "Could not fetch certificates at {}".format(certs_url) + ) + + data = await response.content() + + return json.loads(data) + + +async def verify_token( + id_token, + request, + audience=None, + certs_url=sync_id_token._GOOGLE_OAUTH2_CERTS_URL, + clock_skew_in_seconds=0, +): + """Verifies an ID token and returns the decoded token. + + Args: + id_token (Union[str, bytes]): The encoded token. + request (google.auth.transport.Request): The object used to make + HTTP requests. This must be an aiohttp request. + audience (str): The audience that this token is intended for. If None + then the audience is not verified. + certs_url (str): The URL that specifies the certificates to use to + verify the token. This URL should return JSON in the format of + ``{'key id': 'x509 certificate'}``. + clock_skew_in_seconds (int): The clock skew used for `iat` and `exp` + validation. + + Returns: + Mapping[str, Any]: The decoded token. + """ + certs = await _fetch_certs(request, certs_url) + + return jwt.decode( + id_token, + certs=certs, + audience=audience, + clock_skew_in_seconds=clock_skew_in_seconds, + ) + + +async def verify_oauth2_token( + id_token, request, audience=None, clock_skew_in_seconds=0 +): + """Verifies an ID Token issued by Google's OAuth 2.0 authorization server. + + Args: + id_token (Union[str, bytes]): The encoded token. + request (google.auth.transport.Request): The object used to make + HTTP requests.
This must be an aiohttp request. + audience (str): The audience that this token is intended for. This is + typically your application's OAuth 2.0 client ID. If None then the + audience is not verified. + clock_skew_in_seconds (int): The clock skew used for `iat` and `exp` + validation. + + Returns: + Mapping[str, Any]: The decoded token. + + Raises: + exceptions.GoogleAuthError: If the issuer is invalid. + """ + idinfo = await verify_token( + id_token, + request, + audience=audience, + certs_url=sync_id_token._GOOGLE_OAUTH2_CERTS_URL, + clock_skew_in_seconds=clock_skew_in_seconds, + ) + + if idinfo["iss"] not in sync_id_token._GOOGLE_ISSUERS: + raise exceptions.GoogleAuthError( + "Wrong issuer. 'iss' should be one of the following: {}".format( + sync_id_token._GOOGLE_ISSUERS + ) + ) + + return idinfo + + +async def verify_firebase_token( + id_token, request, audience=None, clock_skew_in_seconds=0 +): + """Verifies an ID Token issued by Firebase Authentication. + + Args: + id_token (Union[str, bytes]): The encoded token. + request (google.auth.transport.Request): The object used to make + HTTP requests. This must be an aiohttp request. + audience (str): The audience that this token is intended for. This is + typically your Firebase application ID. If None then the audience + is not verified. + clock_skew_in_seconds (int): The clock skew used for `iat` and `exp` + validation. + + Returns: + Mapping[str, Any]: The decoded token. + """ + return await verify_token( + id_token, + request, + audience=audience, + certs_url=sync_id_token._GOOGLE_APIS_CERTS_URL, + clock_skew_in_seconds=clock_skew_in_seconds, + ) + + +async def fetch_id_token(request, audience): + """Fetch the ID Token from the current environment. + + This function acquires the ID token from the environment in the following order. + See https://google.aip.dev/auth/4110. + + 1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set + to the path of a valid service account JSON file, then the ID token is + acquired using these service account credentials. + 2. If the application is running in Compute Engine, App Engine or Cloud Run, + then the ID token is obtained from the metadata server. + 3. If the metadata server doesn't exist and no valid service account credentials + are found, :class:`~google.auth.exceptions.DefaultCredentialsError` will + be raised. + + Example:: + + import google.oauth2._id_token_async + import google.auth.transport.aiohttp_requests + + request = google.auth.transport.aiohttp_requests.Request() + target_audience = "https://pubsub.googleapis.com" + + id_token = await google.oauth2._id_token_async.fetch_id_token(request, target_audience) + + Args: + request (google.auth.transport.aiohttp_requests.Request): A callable used to make + HTTP requests. + audience (str): The audience that this ID token is intended for. + + Returns: + str: The ID token. + + Raises: + ~google.auth.exceptions.DefaultCredentialsError: + If the metadata server doesn't exist and no valid service account + credentials are found. + """ + # 1. Try to get credentials from the GOOGLE_APPLICATION_CREDENTIALS environment + # variable. + credentials_filename = os.environ.get(environment_vars.CREDENTIALS) + if credentials_filename: + if not ( + os.path.exists(credentials_filename) + and os.path.isfile(credentials_filename) + ): + raise exceptions.DefaultCredentialsError( + "GOOGLE_APPLICATION_CREDENTIALS path is either not found or invalid."
+ ) + + try: + with open(credentials_filename, "r") as f: + from google.oauth2 import _service_account_async as service_account + + info = json.load(f) + if info.get("type") == "service_account": + credentials = service_account.IDTokenCredentials.from_service_account_info( + info, target_audience=audience + ) + await credentials.refresh(request) + return credentials.token + except ValueError as caught_exc: + new_exc = exceptions.DefaultCredentialsError( + "GOOGLE_APPLICATION_CREDENTIALS is not valid service account credentials.", + caught_exc, + ) + raise new_exc from caught_exc + + # 2. Try to fetch the ID token from the metadata server if it exists. The code works + # for GAE and Cloud Run metadata server as well. + try: + from google.auth import compute_engine + from google.auth.compute_engine import _metadata + + request_new = requests.Request() + if _metadata.ping(request_new): + credentials = compute_engine.IDTokenCredentials( + request_new, audience, use_metadata_identity_endpoint=True + ) + credentials.refresh(request_new) + return credentials.token + except (ImportError, exceptions.TransportError): + pass + + raise exceptions.DefaultCredentialsError( + "Neither the metadata server nor valid service account credentials are found." + ) diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/_reauth_async.py b/.venv/lib/python3.12/site-packages/google/oauth2/_reauth_async.py new file mode 100644 index 00000000..de3675c5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/oauth2/_reauth_async.py @@ -0,0 +1,328 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A module that provides functions for handling rapt authentication. + +Reauth is a process of obtaining additional authentication (such as password, +security token, etc.) while refreshing OAuth 2.0 credentials for a user. + +Credentials that use the Reauth flow must have the reauth scope, +``https://www.googleapis.com/auth/accounts.reauth``. + +This module provides a high-level function for executing the Reauth process, +:func:`refresh_grant`, and lower-level helpers for doing the individual +steps of the reauth process. + +Those steps are: + +1. Obtaining a list of challenges from the reauth server. +2. Running through each challenge and sending the result back to the reauth + server. +3. Refreshing the access token using the returned rapt token. +""" + +import sys + +from google.auth import exceptions +from google.oauth2 import _client +from google.oauth2 import _client_async +from google.oauth2 import challenges +from google.oauth2 import reauth + + +async def _get_challenges( + request, supported_challenge_types, access_token, requested_scopes=None +): + """Makes the initial request to the reauth API to get the challenges. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. This must be an aiohttp request. + supported_challenge_types (Sequence[str]): list of challenge names + supported by the manager. + access_token (str): Access token with reauth scopes.
+ requested_scopes (Optional(Sequence[str])): Authorized scopes for the credentials. + + Returns: + dict: The response from the reauth API. + """ + body = {"supportedChallengeTypes": supported_challenge_types} + if requested_scopes: + body["oauthScopesForDomainPolicyLookup"] = requested_scopes + + return await _client_async._token_endpoint_request( + request, + reauth._REAUTH_API + ":start", + body, + access_token=access_token, + use_json=True, + ) + + +async def _send_challenge_result( + request, session_id, challenge_id, client_input, access_token +): + """Attempt to refresh access token by sending next challenge result. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. This must be an aiohttp request. + session_id (str): session id returned by the initial reauth call. + challenge_id (str): challenge id returned by the initial reauth call. + client_input: dict with a challenge-specific client input. For example: + ``{'credential': password}`` for password challenge. + access_token (str): Access token with reauth scopes. + + Returns: + dict: The response from the reauth API. + """ + body = { + "sessionId": session_id, + "challengeId": challenge_id, + "action": "RESPOND", + "proposalResponse": client_input, + } + + return await _client_async._token_endpoint_request( + request, + reauth._REAUTH_API + "/{}:continue".format(session_id), + body, + access_token=access_token, + use_json=True, + ) + + +async def _run_next_challenge(msg, request, access_token): + """Get the next challenge from msg and run it. + + Args: + msg (dict): Reauth API response body (either from the initial request to + https://reauth.googleapis.com/v2/sessions:start or from sending the + previous challenge response to + https://reauth.googleapis.com/v2/sessions/id:continue) + request (google.auth.transport.Request): A callable used to make + HTTP requests. This must be an aiohttp request. + access_token (str): reauth access token + + Returns: + dict: The response from the reauth API. + + Raises: + google.auth.exceptions.ReauthError: if reauth failed. + """ + for challenge in msg["challenges"]: + if challenge["status"] != "READY": + # Skip non-activated challenges. + continue + c = challenges.AVAILABLE_CHALLENGES.get(challenge["challengeType"], None) + if not c: + raise exceptions.ReauthFailError( + "Unsupported challenge type {0}. Supported types: {1}".format( + challenge["challengeType"], + ",".join(list(challenges.AVAILABLE_CHALLENGES.keys())), + ) + ) + if not c.is_locally_eligible: + raise exceptions.ReauthFailError( + "Challenge {0} is not locally eligible".format( + challenge["challengeType"] + ) + ) + client_input = c.obtain_challenge_input(challenge) + if not client_input: + return None + return await _send_challenge_result( + request, + msg["sessionId"], + challenge["challengeId"], + client_input, + access_token, + ) + return None + + +async def _obtain_rapt(request, access_token, requested_scopes): + """Given an http request method and reauth access token, get rapt token. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. This must be an aiohttp request. + access_token (str): reauth access token + requested_scopes (Sequence[str]): scopes required by the client application + + Returns: + str: The rapt token. 
+ + Raises: + google.auth.exceptions.ReauthError: if reauth failed + """ + msg = await _get_challenges( + request, + list(challenges.AVAILABLE_CHALLENGES.keys()), + access_token, + requested_scopes, + ) + + if msg["status"] == reauth._AUTHENTICATED: + return msg["encodedProofOfReauthToken"] + + for _ in range(0, reauth.RUN_CHALLENGE_RETRY_LIMIT): + if not ( + msg["status"] == reauth._CHALLENGE_REQUIRED + or msg["status"] == reauth._CHALLENGE_PENDING + ): + raise exceptions.ReauthFailError( + "Reauthentication challenge failed due to API error: {}".format( + msg["status"] + ) + ) + + if not reauth.is_interactive(): + raise exceptions.ReauthFailError( + "Reauthentication challenge could not be answered because you are not" + " in an interactive session." + ) + + msg = await _run_next_challenge(msg, request, access_token) + + if msg["status"] == reauth._AUTHENTICATED: + return msg["encodedProofOfReauthToken"] + + # If we got here it means we didn't get authenticated. + raise exceptions.ReauthFailError("Failed to obtain rapt token.") + + +async def get_rapt_token( + request, client_id, client_secret, refresh_token, token_uri, scopes=None +): + """Given an http request method and refresh_token, get rapt token. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. This must be an aiohttp request. + client_id (str): client id to get access token for reauth scope. + client_secret (str): client secret for the client_id + refresh_token (str): refresh token to refresh access token + token_uri (str): uri to refresh access token + scopes (Optional(Sequence[str])): scopes required by the client application + + Returns: + str: The rapt token. + Raises: + google.auth.exceptions.RefreshError: If reauth failed. + """ + sys.stderr.write("Reauthentication required.\n") + + # Get access token for reauth. + access_token, _, _, _ = await _client_async.refresh_grant( + request=request, + client_id=client_id, + client_secret=client_secret, + refresh_token=refresh_token, + token_uri=token_uri, + scopes=[reauth._REAUTH_SCOPE], + ) + + # Get rapt token from reauth API. + rapt_token = await _obtain_rapt(request, access_token, requested_scopes=scopes) + + return rapt_token + + +async def refresh_grant( + request, + token_uri, + refresh_token, + client_id, + client_secret, + scopes=None, + rapt_token=None, + enable_reauth_refresh=False, +): + """Implements the reauthentication flow. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. This must be an aiohttp request. + token_uri (str): The OAuth 2.0 authorization server's token endpoint + URI. + refresh_token (str): The refresh token to use to get a new access + token. + client_id (str): The OAuth 2.0 application's client ID. + client_secret (str): The OAuth 2.0 application's client secret. + scopes (Optional(Sequence[str])): Scopes to request. If present, all + scopes must be authorized for the refresh token. Useful if refresh + token has a wild card scope (e.g. + 'https://www.googleapis.com/auth/any-api'). + rapt_token (Optional(str)): The rapt token for reauth. + enable_reauth_refresh (Optional[bool]): Whether reauth refresh flow + should be used. The default value is False. This option is for + gcloud only, other users should use the default value. + + Returns: + Tuple[str, Optional[str], Optional[datetime], Mapping[str, str], str]: The + access token, new refresh token, expiration, the additional data + returned by the token endpoint, and the rapt token.
+ + Raises: + google.auth.exceptions.RefreshError: If the token endpoint returned + an error. + """ + body = { + "grant_type": _client._REFRESH_GRANT_TYPE, + "client_id": client_id, + "client_secret": client_secret, + "refresh_token": refresh_token, + } + if scopes: + body["scope"] = " ".join(scopes) + if rapt_token: + body["rapt"] = rapt_token + + response_status_ok, response_data, retryable_error = await _client_async._token_endpoint_request_no_throw( + request, token_uri, body + ) + if ( + not response_status_ok + and response_data.get("error") == reauth._REAUTH_NEEDED_ERROR + and ( + response_data.get("error_subtype") + == reauth._REAUTH_NEEDED_ERROR_INVALID_RAPT + or response_data.get("error_subtype") + == reauth._REAUTH_NEEDED_ERROR_RAPT_REQUIRED + ) + ): + if not enable_reauth_refresh: + raise exceptions.RefreshError( + "Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate." + ) + + rapt_token = await get_rapt_token( + request, client_id, client_secret, refresh_token, token_uri, scopes=scopes + ) + body["rapt"] = rapt_token + ( + response_status_ok, + response_data, + retryable_error, + ) = await _client_async._token_endpoint_request_no_throw( + request, token_uri, body + ) + + if not response_status_ok: + _client._handle_error_response(response_data, retryable_error) + refresh_response = _client._handle_refresh_grant_response( + response_data, refresh_token + ) + return refresh_response + (rapt_token,) diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/_service_account_async.py b/.venv/lib/python3.12/site-packages/google/oauth2/_service_account_async.py new file mode 100644 index 00000000..cfd315a7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/oauth2/_service_account_async.py @@ -0,0 +1,132 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Service Accounts: JSON Web Token (JWT) Profile for OAuth 2.0 + +NOTE: This file adds asynchronous refresh methods to both credentials +classes, and therefore async/await syntax is required when calling these +methods when using service account credentials with asynchronous +functionality. Otherwise, all other methods are inherited from the regular +service account credentials module, google.oauth2.service_account. + +""" + +from google.auth import _credentials_async as credentials_async +from google.auth import _helpers +from google.oauth2 import _client_async +from google.oauth2 import service_account + + +class Credentials( + service_account.Credentials, credentials_async.Scoped, credentials_async.Credentials +): + """Service account credentials. + + Usually, you'll create these credentials with one of the helper + constructors.
To create credentials using a Google service account + private key JSON file:: + + credentials = _service_account_async.Credentials.from_service_account_file( + 'service-account.json') + + Or if you already have the service account file loaded:: + + service_account_info = json.load(open('service_account.json')) + credentials = _service_account_async.Credentials.from_service_account_info( + service_account_info) + + Both helper methods pass on arguments to the constructor, so you can + specify additional scopes and a subject if necessary:: + + credentials = _service_account_async.Credentials.from_service_account_file( + 'service-account.json', + scopes=['email'], + subject='user@example.com') + + The credentials are considered immutable. If you want to modify the scopes + or the subject used for delegation, use :meth:`with_scopes` or + :meth:`with_subject`:: + + scoped_credentials = credentials.with_scopes(['email']) + delegated_credentials = credentials.with_subject(subject) + + To add a quota project, use :meth:`with_quota_project`:: + + credentials = credentials.with_quota_project('myproject-123') + """ + + @_helpers.copy_docstring(credentials_async.Credentials) + async def refresh(self, request): + assertion = self._make_authorization_grant_assertion() + access_token, expiry, _ = await _client_async.jwt_grant( + request, self._token_uri, assertion + ) + self.token = access_token + self.expiry = expiry + + +class IDTokenCredentials( + service_account.IDTokenCredentials, + credentials_async.Signing, + credentials_async.Credentials, +): + """Open ID Connect ID Token-based service account credentials. + + These credentials are largely similar to :class:`.Credentials`, but instead + of using an OAuth 2.0 Access Token as the bearer token, they use an Open + ID Connect ID Token as the bearer token. These credentials are useful when + communicating to services that require ID Tokens and can not accept access + tokens. + + Usually, you'll create these credentials with one of the helper + constructors. To create credentials using a Google service account + private key JSON file:: + + credentials = ( + _service_account_async.IDTokenCredentials.from_service_account_file( + 'service-account.json')) + + Or if you already have the service account file loaded:: + + service_account_info = json.load(open('service_account.json')) + credentials = ( + _service_account_async.IDTokenCredentials.from_service_account_info( + service_account_info)) + + Both helper methods pass on arguments to the constructor, so you can + specify additional scopes and a subject if necessary:: + + credentials = ( + _service_account_async.IDTokenCredentials.from_service_account_file( + 'service-account.json', + scopes=['email'], + subject='user@example.com')) + + The credentials are considered immutable. 
If you want to modify the scopes + or the subject used for delegation, use :meth:`with_scopes` or + :meth:`with_subject`:: + + scoped_credentials = credentials.with_scopes(['email']) + delegated_credentials = credentials.with_subject(subject) + + """ + + @_helpers.copy_docstring(credentials_async.Credentials) + async def refresh(self, request): + assertion = self._make_authorization_grant_assertion() + access_token, expiry, _ = await _client_async.id_token_jwt_grant( + request, self._token_uri, assertion + ) + self.token = access_token + self.expiry = expiry diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/challenges.py b/.venv/lib/python3.12/site-packages/google/oauth2/challenges.py new file mode 100644 index 00000000..6468498b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/oauth2/challenges.py @@ -0,0 +1,281 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Challenges for reauthentication. +""" + +import abc +import base64 +import getpass +import sys + +from google.auth import _helpers +from google.auth import exceptions +from google.oauth2 import webauthn_handler_factory +from google.oauth2.webauthn_types import ( + AuthenticationExtensionsClientInputs, + GetRequest, + PublicKeyCredentialDescriptor, +) + + +REAUTH_ORIGIN = "https://accounts.google.com" +SAML_CHALLENGE_MESSAGE = ( + "Please run `gcloud auth login` to complete reauthentication with SAML." +) +WEBAUTHN_TIMEOUT_MS = 120000 # Two minute timeout + + +def get_user_password(text): + """Get password from user. + + Override this function with different logic if you are using this library + outside a CLI. + + Args: + text (str): message for the password prompt. + + Returns: + str: password string. + """ + return getpass.getpass(text) + + +class ReauthChallenge(metaclass=abc.ABCMeta): + """Base class for reauth challenges.""" + + @property + @abc.abstractmethod + def name(self): # pragma: NO COVER + """Returns the name of the challenge.""" + raise NotImplementedError("name property must be implemented") + + @property + @abc.abstractmethod + def is_locally_eligible(self): # pragma: NO COVER + """Returns true if a challenge is supported locally on this machine.""" + raise NotImplementedError("is_locally_eligible property must be implemented") + + @abc.abstractmethod + def obtain_challenge_input(self, metadata): # pragma: NO COVER + """Performs logic required to obtain credentials and returns it. + + Args: + metadata (Mapping): challenge metadata returned in the 'challenges' field in + the initial reauth request. Includes the 'challengeType' field + and other challenge-specific fields. + + Returns: + response that will be sent to the reauth service as the content of + the 'proposalResponse' field in the request body. Usually a dict + with the keys specific to the challenge. For example, + ``{'credential': password}`` for password challenge.
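+ + A hypothetical minimal override (a sketch only; a concrete + subclass must also implement ``name`` and ``is_locally_eligible``):: + + def obtain_challenge_input(self, metadata): + return {'credential': get_user_password('PIN: ')}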
+ """ + raise NotImplementedError("obtain_challenge_input method must be implemented") + + +class PasswordChallenge(ReauthChallenge): + """Challenge that asks for user's password.""" + + @property + def name(self): + return "PASSWORD" + + @property + def is_locally_eligible(self): + return True + + @_helpers.copy_docstring(ReauthChallenge) + def obtain_challenge_input(self, unused_metadata): + passwd = get_user_password("Please enter your password:") + if not passwd: + passwd = " " # avoid the server crashing in case of no password :D + return {"credential": passwd} + + +class SecurityKeyChallenge(ReauthChallenge): + """Challenge that asks for user's security key touch.""" + + @property + def name(self): + return "SECURITY_KEY" + + @property + def is_locally_eligible(self): + return True + + @_helpers.copy_docstring(ReauthChallenge) + def obtain_challenge_input(self, metadata): + # Check if there is an available Webauthn Handler, if not use pyu2f + try: + factory = webauthn_handler_factory.WebauthnHandlerFactory() + webauthn_handler = factory.get_handler() + if webauthn_handler is not None: + sys.stderr.write("Please insert and touch your security key\n") + return self._obtain_challenge_input_webauthn(metadata, webauthn_handler) + except Exception: + # Attempt pyu2f if exception in webauthn flow + pass + + try: + import pyu2f.convenience.authenticator # type: ignore + import pyu2f.errors # type: ignore + import pyu2f.model # type: ignore + except ImportError: + raise exceptions.ReauthFailError( + "pyu2f dependency is required to use Security key reauth feature. " + "It can be installed via `pip install pyu2f` or `pip install google-auth[reauth]`." + ) + sk = metadata["securityKey"] + challenges = sk["challenges"] + # Read both 'applicationId' and 'relyingPartyId', if they are the same, use + # applicationId, if they are different, use relyingPartyId first and retry + # with applicationId + application_id = sk["applicationId"] + relying_party_id = sk["relyingPartyId"] + + if application_id != relying_party_id: + application_parameters = [relying_party_id, application_id] + else: + application_parameters = [application_id] + + challenge_data = [] + for c in challenges: + kh = c["keyHandle"].encode("ascii") + key = pyu2f.model.RegisteredKey(bytearray(base64.urlsafe_b64decode(kh))) + challenge = c["challenge"].encode("ascii") + challenge = base64.urlsafe_b64decode(challenge) + challenge_data.append({"key": key, "challenge": challenge}) + + # Track number of tries to suppress error message until all application_parameters + # are tried. 
+ tries = 0 + for app_id in application_parameters: + try: + tries += 1 + api = pyu2f.convenience.authenticator.CreateCompositeAuthenticator( + REAUTH_ORIGIN + ) + response = api.Authenticate( + app_id, challenge_data, print_callback=sys.stderr.write + ) + return {"securityKey": response} + except pyu2f.errors.U2FError as e: + if e.code == pyu2f.errors.U2FError.DEVICE_INELIGIBLE: + # Only show error if all app_ids have been tried + if tries == len(application_parameters): + sys.stderr.write("Ineligible security key.\n") + return None + continue + if e.code == pyu2f.errors.U2FError.TIMEOUT: + sys.stderr.write( + "Timed out while waiting for security key touch.\n" + ) + else: + raise e + except pyu2f.errors.PluginError as e: + sys.stderr.write("Plugin error: {}.\n".format(e)) + continue + except pyu2f.errors.NoDeviceFoundError: + sys.stderr.write("No security key found.\n") + return None + + def _obtain_challenge_input_webauthn(self, metadata, webauthn_handler): + sk = metadata.get("securityKey") + if sk is None: + raise exceptions.InvalidValue("securityKey is None") + challenges = sk.get("challenges") + application_id = sk.get("applicationId") + relying_party_id = sk.get("relyingPartyId") + if challenges is None or len(challenges) < 1: + raise exceptions.InvalidValue("challenges is None or empty") + if application_id is None: + raise exceptions.InvalidValue("application_id is None") + if relying_party_id is None: + raise exceptions.InvalidValue("relying_party_id is None") + + allow_credentials = [] + for challenge in challenges: + kh = challenge.get("keyHandle") + if kh is None: + raise exceptions.InvalidValue("keyHandle is None") + key_handle = self._unpadded_urlsafe_b64recode(kh) + allow_credentials.append(PublicKeyCredentialDescriptor(id=key_handle)) + + extension = AuthenticationExtensionsClientInputs(appid=application_id) + + challenge = challenges[0].get("challenge") + if challenge is None: + raise exceptions.InvalidValue("challenge is None") + + get_request = GetRequest( + origin=REAUTH_ORIGIN, + rpid=relying_party_id, + challenge=self._unpadded_urlsafe_b64recode(challenge), + timeout_ms=WEBAUTHN_TIMEOUT_MS, + allow_credentials=allow_credentials, + user_verification="required", + extensions=extension, + ) + + try: + get_response = webauthn_handler.get(get_request) + except Exception as e: + sys.stderr.write("Webauthn Error: {}.\n".format(e)) + raise e + + response = { + "clientData": get_response.response.client_data_json, + "authenticatorData": get_response.response.authenticator_data, + "signatureData": get_response.response.signature, + "applicationId": application_id, + "keyHandle": get_response.id, + "securityKeyReplyType": 2, + } + return {"securityKey": response} + + def _unpadded_urlsafe_b64recode(self, s): + """Converts standard b64 encoded string to url safe b64 encoded string + with no padding.""" + b = base64.urlsafe_b64decode(s) + return base64.urlsafe_b64encode(b).decode().rstrip("=") + + +class SamlChallenge(ReauthChallenge): + """Challenge that asks the users to browse to their ID Providers. + + Currently the SAML challenge is not supported. When obtaining the challenge + input, an exception will be raised to instruct the users to run + `gcloud auth login` for reauthentication. + """ + + @property + def name(self): + return "SAML" + + @property + def is_locally_eligible(self): + return True + + def obtain_challenge_input(self, metadata): + # Magic Arch has not fully supported returning a proper redirect URL + # for programmatic SAML users today.
So we error out here and request + users to use gcloud to complete a login. + raise exceptions.ReauthSamlChallengeFailError(SAML_CHALLENGE_MESSAGE) + + +AVAILABLE_CHALLENGES = { + challenge.name: challenge + for challenge in [SecurityKeyChallenge(), PasswordChallenge(), SamlChallenge()] +} diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/credentials.py b/.venv/lib/python3.12/site-packages/google/oauth2/credentials.py new file mode 100644 index 00000000..6e158089 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/oauth2/credentials.py @@ -0,0 +1,614 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""OAuth 2.0 Credentials. + +This module provides credentials based on OAuth 2.0 access and refresh tokens. +These credentials usually access resources on behalf of a user (resource +owner). + +Specifically, this is intended to use access tokens acquired using the +`Authorization Code grant`_ and can refresh those tokens using an +optional `refresh token`_. + +Obtaining the initial access and refresh token is outside of the scope of this +module. Consult `rfc6749 section 4.1`_ for complete details on the +Authorization Code grant flow. + +.. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1 +.. _refresh token: https://tools.ietf.org/html/rfc6749#section-6 +.. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1 +""" + +from datetime import datetime +import io +import json +import logging +import warnings + +from google.auth import _cloud_sdk +from google.auth import _helpers +from google.auth import credentials +from google.auth import exceptions +from google.auth import metrics +from google.oauth2 import reauth + +_LOGGER = logging.getLogger(__name__) + + +# The Google OAuth 2.0 token endpoint. Used for authorized user credentials. +_GOOGLE_OAUTH2_TOKEN_ENDPOINT = "https://oauth2.googleapis.com/token" + +# The Google OAuth 2.0 token info endpoint. Used for getting token info JSON from access tokens. +_GOOGLE_OAUTH2_TOKEN_INFO_ENDPOINT = "https://oauth2.googleapis.com/tokeninfo" + + +class Credentials(credentials.ReadOnlyScoped, credentials.CredentialsWithQuotaProject): + """Credentials using OAuth 2.0 access and refresh tokens. + + The credentials are considered immutable except the tokens and the token + expiry, which are updated after refresh. If you want to modify the quota + project, use :meth:`with_quota_project` or :: + + credentials = credentials.with_quota_project('myproject-123') + + Reauth is disabled by default. To enable reauth, set the + `enable_reauth_refresh` parameter to True in the constructor. Note that + the reauth feature is intended for gcloud use only. + If reauth is enabled, the `pyu2f` dependency has to be installed in order to use the security + key reauth feature. The dependency can be installed via `pip install pyu2f` or `pip install + google-auth[reauth]`.
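+ + A minimal construction sketch (every client value below is a + placeholder; the token endpoint is this module's default):: + + creds = Credentials( + token=None, + refresh_token='<refresh token>', + token_uri='https://oauth2.googleapis.com/token', + client_id='<client id>', + client_secret='<client secret>', + scopes=['https://www.googleapis.com/auth/cloud-platform'], + )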
+ """ + + def __init__( + self, + token, + refresh_token=None, + id_token=None, + token_uri=None, + client_id=None, + client_secret=None, + scopes=None, + default_scopes=None, + quota_project_id=None, + expiry=None, + rapt_token=None, + refresh_handler=None, + enable_reauth_refresh=False, + granted_scopes=None, + trust_boundary=None, + universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN, + account=None, + ): + """ + Args: + token (Optional(str)): The OAuth 2.0 access token. Can be None + if refresh information is provided. + refresh_token (str): The OAuth 2.0 refresh token. If specified, + credentials can be refreshed. + id_token (str): The Open ID Connect ID Token. + token_uri (str): The OAuth 2.0 authorization server's token + endpoint URI. Must be specified for refresh, can be left as + None if the token can not be refreshed. + client_id (str): The OAuth 2.0 client ID. Must be specified for + refresh, can be left as None if the token can not be refreshed. + client_secret(str): The OAuth 2.0 client secret. Must be specified + for refresh, can be left as None if the token can not be + refreshed. + scopes (Sequence[str]): The scopes used to obtain authorization. + This parameter is used by :meth:`has_scopes`. OAuth 2.0 + credentials can not request additional scopes after + authorization. The scopes must be derivable from the refresh + token if refresh information is provided (e.g. The refresh + token scopes are a superset of this or contain a wild card + scope like 'https://www.googleapis.com/auth/any-api'). + default_scopes (Sequence[str]): Default scopes passed by a + Google client library. Use 'scopes' for user-defined scopes. + quota_project_id (Optional[str]): The project ID used for quota and billing. + This project may be different from the project used to + create the credentials. + rapt_token (Optional[str]): The reauth Proof Token. + refresh_handler (Optional[Callable[[google.auth.transport.Request, Sequence[str]], [str, datetime]]]): + A callable which takes in the HTTP request callable and the list of + OAuth scopes and when called returns an access token string for the + requested scopes and its expiry datetime. This is useful when no + refresh tokens are provided and tokens are obtained by calling + some external process on demand. It is particularly useful for + retrieving downscoped tokens from a token broker. + enable_reauth_refresh (Optional[bool]): Whether reauth refresh flow + should be used. This flag is for gcloud to use only. + granted_scopes (Optional[Sequence[str]]): The scopes that were consented/granted by the user. + This could be different from the requested scopes and it could be empty if granted + and requested scopes were same. + trust_boundary (str): String representation of trust boundary meta. + universe_domain (Optional[str]): The universe domain. The default + universe domain is googleapis.com. + account (Optional[str]): The account associated with the credential. 
+ """ + super(Credentials, self).__init__() + self.token = token + self.expiry = expiry + self._refresh_token = refresh_token + self._id_token = id_token + self._scopes = scopes + self._default_scopes = default_scopes + self._granted_scopes = granted_scopes + self._token_uri = token_uri + self._client_id = client_id + self._client_secret = client_secret + self._quota_project_id = quota_project_id + self._rapt_token = rapt_token + self.refresh_handler = refresh_handler + self._enable_reauth_refresh = enable_reauth_refresh + self._trust_boundary = trust_boundary + self._universe_domain = universe_domain or credentials.DEFAULT_UNIVERSE_DOMAIN + self._account = account or "" + self._cred_file_path = None + + def __getstate__(self): + """A __getstate__ method must exist for the __setstate__ to be called + This is identical to the default implementation. + See https://docs.python.org/3.7/library/pickle.html#object.__setstate__ + """ + state_dict = self.__dict__.copy() + # Remove _refresh_handler function as there are limitations pickling and + # unpickling certain callables (lambda, functools.partial instances) + # because they need to be importable. + # Instead, the refresh_handler setter should be used to repopulate this. + if "_refresh_handler" in state_dict: + del state_dict["_refresh_handler"] + + if "_refresh_worker" in state_dict: + del state_dict["_refresh_worker"] + return state_dict + + def __setstate__(self, d): + """Credentials pickled with older versions of the class do not have + all the attributes.""" + self.token = d.get("token") + self.expiry = d.get("expiry") + self._refresh_token = d.get("_refresh_token") + self._id_token = d.get("_id_token") + self._scopes = d.get("_scopes") + self._default_scopes = d.get("_default_scopes") + self._granted_scopes = d.get("_granted_scopes") + self._token_uri = d.get("_token_uri") + self._client_id = d.get("_client_id") + self._client_secret = d.get("_client_secret") + self._quota_project_id = d.get("_quota_project_id") + self._rapt_token = d.get("_rapt_token") + self._enable_reauth_refresh = d.get("_enable_reauth_refresh") + self._trust_boundary = d.get("_trust_boundary") + self._universe_domain = ( + d.get("_universe_domain") or credentials.DEFAULT_UNIVERSE_DOMAIN + ) + self._cred_file_path = d.get("_cred_file_path") + # The refresh_handler setter should be used to repopulate this. + self._refresh_handler = None + self._refresh_worker = None + self._use_non_blocking_refresh = d.get("_use_non_blocking_refresh", False) + self._account = d.get("_account", "") + + @property + def refresh_token(self): + """Optional[str]: The OAuth 2.0 refresh token.""" + return self._refresh_token + + @property + def scopes(self): + """Optional[str]: The OAuth 2.0 permission scopes.""" + return self._scopes + + @property + def granted_scopes(self): + """Optional[Sequence[str]]: The OAuth 2.0 permission scopes that were granted by the user.""" + return self._granted_scopes + + @property + def token_uri(self): + """Optional[str]: The OAuth 2.0 authorization server's token endpoint + URI.""" + return self._token_uri + + @property + def id_token(self): + """Optional[str]: The Open ID Connect ID Token. + + Depending on the authorization server and the scopes requested, this + may be populated when credentials are obtained and updated when + :meth:`refresh` is called. This token is a JWT. It can be verified + and decoded using :func:`google.oauth2.id_token.verify_oauth2_token`. 
+ """ + return self._id_token + + @property + def client_id(self): + """Optional[str]: The OAuth 2.0 client ID.""" + return self._client_id + + @property + def client_secret(self): + """Optional[str]: The OAuth 2.0 client secret.""" + return self._client_secret + + @property + def requires_scopes(self): + """False: OAuth 2.0 credentials have their scopes set when + the initial token is requested and can not be changed.""" + return False + + @property + def rapt_token(self): + """Optional[str]: The reauth Proof Token.""" + return self._rapt_token + + @property + def refresh_handler(self): + """Returns the refresh handler if available. + + Returns: + Optional[Callable[[google.auth.transport.Request, Sequence[str]], [str, datetime]]]: + The current refresh handler. + """ + return self._refresh_handler + + @refresh_handler.setter + def refresh_handler(self, value): + """Updates the current refresh handler. + + Args: + value (Optional[Callable[[google.auth.transport.Request, Sequence[str]], [str, datetime]]]): + The updated value of the refresh handler. + + Raises: + TypeError: If the value is not a callable or None. + """ + if not callable(value) and value is not None: + raise TypeError("The provided refresh_handler is not a callable or None.") + self._refresh_handler = value + + @property + def account(self): + """str: The user account associated with the credential. If the account is unknown an empty string is returned.""" + return self._account + + def _make_copy(self): + cred = self.__class__( + self.token, + refresh_token=self.refresh_token, + id_token=self.id_token, + token_uri=self.token_uri, + client_id=self.client_id, + client_secret=self.client_secret, + scopes=self.scopes, + default_scopes=self.default_scopes, + granted_scopes=self.granted_scopes, + quota_project_id=self.quota_project_id, + rapt_token=self.rapt_token, + enable_reauth_refresh=self._enable_reauth_refresh, + trust_boundary=self._trust_boundary, + universe_domain=self._universe_domain, + account=self._account, + ) + cred._cred_file_path = self._cred_file_path + return cred + + @_helpers.copy_docstring(credentials.Credentials) + def get_cred_info(self): + if self._cred_file_path: + cred_info = { + "credential_source": self._cred_file_path, + "credential_type": "user credentials", + } + if self.account: + cred_info["principal"] = self.account + return cred_info + return None + + @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + cred = self._make_copy() + cred._quota_project_id = quota_project_id + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithTokenUri) + def with_token_uri(self, token_uri): + cred = self._make_copy() + cred._token_uri = token_uri + return cred + + def with_account(self, account): + """Returns a copy of these credentials with a modified account. + + Args: + account (str): The account to set + + Returns: + google.oauth2.credentials.Credentials: A new credentials instance. 
+ """ + cred = self._make_copy() + cred._account = account + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithUniverseDomain) + def with_universe_domain(self, universe_domain): + cred = self._make_copy() + cred._universe_domain = universe_domain + return cred + + def _metric_header_for_usage(self): + return metrics.CRED_TYPE_USER + + @_helpers.copy_docstring(credentials.Credentials) + def refresh(self, request): + if self._universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN: + raise exceptions.RefreshError( + "User credential refresh is only supported in the default " + "googleapis.com universe domain, but the current universe " + "domain is {}. If you created the credential with an access " + "token, it's likely that the provided token is expired now, " + "please update your code with a valid token.".format( + self._universe_domain + ) + ) + + scopes = self._scopes if self._scopes is not None else self._default_scopes + # Use refresh handler if available and no refresh token is + # available. This is useful in general when tokens are obtained by calling + # some external process on demand. It is particularly useful for retrieving + # downscoped tokens from a token broker. + if self._refresh_token is None and self.refresh_handler: + token, expiry = self.refresh_handler(request, scopes=scopes) + # Validate returned data. + if not isinstance(token, str): + raise exceptions.RefreshError( + "The refresh_handler returned token is not a string." + ) + if not isinstance(expiry, datetime): + raise exceptions.RefreshError( + "The refresh_handler returned expiry is not a datetime object." + ) + if _helpers.utcnow() >= expiry - _helpers.REFRESH_THRESHOLD: + raise exceptions.RefreshError( + "The credentials returned by the refresh_handler are " + "already expired." + ) + self.token = token + self.expiry = expiry + return + + if ( + self._refresh_token is None + or self._token_uri is None + or self._client_id is None + or self._client_secret is None + ): + raise exceptions.RefreshError( + "The credentials do not contain the necessary fields need to " + "refresh the access token. You must specify refresh_token, " + "token_uri, client_id, and client_secret." + ) + + ( + access_token, + refresh_token, + expiry, + grant_response, + rapt_token, + ) = reauth.refresh_grant( + request, + self._token_uri, + self._refresh_token, + self._client_id, + self._client_secret, + scopes=scopes, + rapt_token=self._rapt_token, + enable_reauth_refresh=self._enable_reauth_refresh, + ) + + self.token = access_token + self.expiry = expiry + self._refresh_token = refresh_token + self._id_token = grant_response.get("id_token") + self._rapt_token = rapt_token + + if scopes and "scope" in grant_response: + requested_scopes = frozenset(scopes) + self._granted_scopes = grant_response["scope"].split() + granted_scopes = frozenset(self._granted_scopes) + scopes_requested_but_not_granted = requested_scopes - granted_scopes + if scopes_requested_but_not_granted: + # User might be presented with unbundled scopes at the time of + # consent. So it is a valid scenario to not have all the requested + # scopes as part of granted scopes but log a warning in case the + # developer wants to debug the scenario. 
+ _LOGGER.warning( + "Not all requested scopes were granted by the " + "authorization server, missing scopes {}.".format( + ", ".join(scopes_requested_but_not_granted) + ) + ) + + @classmethod + def from_authorized_user_info(cls, info, scopes=None): + """Creates a Credentials instance from parsed authorized user info. + + Args: + info (Mapping[str, str]): The authorized user info in Google + format. + scopes (Sequence[str]): Optional list of scopes to include in the + credentials. + + Returns: + google.oauth2.credentials.Credentials: The constructed + credentials. + + Raises: + ValueError: If the info is not in the expected format. + """ + keys_needed = set(("refresh_token", "client_id", "client_secret")) + missing = keys_needed.difference(info.keys()) + + if missing: + raise ValueError( + "Authorized user info was not in the expected format, missing " + "fields {}.".format(", ".join(missing)) + ) + + # access token expiry (datetime obj); auto-expire if not saved + expiry = info.get("expiry") + if expiry: + expiry = datetime.strptime( + expiry.rstrip("Z").split(".")[0], "%Y-%m-%dT%H:%M:%S" + ) + else: + expiry = _helpers.utcnow() - _helpers.REFRESH_THRESHOLD + + # process scopes, which needs to be a seq + if scopes is None and "scopes" in info: + scopes = info.get("scopes") + if isinstance(scopes, str): + scopes = scopes.split(" ") + + return cls( + token=info.get("token"), + refresh_token=info.get("refresh_token"), + token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT, # always overrides + scopes=scopes, + client_id=info.get("client_id"), + client_secret=info.get("client_secret"), + quota_project_id=info.get("quota_project_id"), # may not exist + expiry=expiry, + rapt_token=info.get("rapt_token"), # may not exist + trust_boundary=info.get("trust_boundary"), # may not exist + universe_domain=info.get("universe_domain"), # may not exist + account=info.get("account", ""), # may not exist + ) + + @classmethod + def from_authorized_user_file(cls, filename, scopes=None): + """Creates a Credentials instance from an authorized user json file. + + Args: + filename (str): The path to the authorized user json file. + scopes (Sequence[str]): Optional list of scopes to include in the + credentials. + + Returns: + google.oauth2.credentials.Credentials: The constructed + credentials. + + Raises: + ValueError: If the file is not in the expected format. + """ + with io.open(filename, "r", encoding="utf-8") as json_file: + data = json.load(json_file) + return cls.from_authorized_user_info(data, scopes) + + def to_json(self, strip=None): + """Utility function that creates a JSON representation of a Credentials + object. + + Args: + strip (Sequence[str]): Optional list of members to exclude from the + generated JSON. + + Returns: + str: A JSON representation of this instance. When converted into + a dictionary, it can be passed to from_authorized_user_info() + to create a new credential instance. 
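+ + A round-trip sketch (``creds`` is a placeholder instance; ``json`` + is the standard library module):: + + blob = creds.to_json(strip=['token']) + restored = Credentials.from_authorized_user_info(json.loads(blob))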
+ """ + prep = { + "token": self.token, + "refresh_token": self.refresh_token, + "token_uri": self.token_uri, + "client_id": self.client_id, + "client_secret": self.client_secret, + "scopes": self.scopes, + "rapt_token": self.rapt_token, + "universe_domain": self._universe_domain, + "account": self._account, + } + if self.expiry: # flatten expiry timestamp + prep["expiry"] = self.expiry.isoformat() + "Z" + + # Remove empty entries (those which are None) + prep = {k: v for k, v in prep.items() if v is not None} + + # Remove entries that explicitely need to be removed + if strip is not None: + prep = {k: v for k, v in prep.items() if k not in strip} + + return json.dumps(prep) + + +class UserAccessTokenCredentials(credentials.CredentialsWithQuotaProject): + """Access token credentials for user account. + + Obtain the access token for a given user account or the current active + user account with the ``gcloud auth print-access-token`` command. + + Args: + account (Optional[str]): Account to get the access token for. If not + specified, the current active account will be used. + quota_project_id (Optional[str]): The project ID used for quota + and billing. + """ + + def __init__(self, account=None, quota_project_id=None): + warnings.warn( + "UserAccessTokenCredentials is deprecated, please use " + "google.oauth2.credentials.Credentials instead. To use " + "that credential type, simply run " + "`gcloud auth application-default login` and let the " + "client libraries pick up the application default credentials." + ) + super(UserAccessTokenCredentials, self).__init__() + self._account = account + self._quota_project_id = quota_project_id + + def with_account(self, account): + """Create a new instance with the given account. + + Args: + account (str): Account to get the access token for. + + Returns: + google.oauth2.credentials.UserAccessTokenCredentials: The created + credentials with the given account. + """ + return self.__class__(account=account, quota_project_id=self._quota_project_id) + + @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + return self.__class__(account=self._account, quota_project_id=quota_project_id) + + def refresh(self, request): + """Refreshes the access token. + + Args: + request (google.auth.transport.Request): This argument is required + by the base class interface but not used in this implementation, + so just set it to `None`. + + Raises: + google.auth.exceptions.UserAccessTokenError: If the access token + refresh failed. + """ + self.token = _cloud_sdk.get_auth_access_token(self._account) + + @_helpers.copy_docstring(credentials.Credentials) + def before_request(self, request, method, url, headers): + self.refresh(request) + self.apply(headers) diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/gdch_credentials.py b/.venv/lib/python3.12/site-packages/google/oauth2/gdch_credentials.py new file mode 100644 index 00000000..7410cfc2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/oauth2/gdch_credentials.py @@ -0,0 +1,251 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Experimental GDCH credentials support. +""" + +import datetime + +from google.auth import _helpers +from google.auth import _service_account_info +from google.auth import credentials +from google.auth import exceptions +from google.auth import jwt +from google.oauth2 import _client + + +TOKEN_EXCHANGE_TYPE = "urn:ietf:params:oauth:token-type:token-exchange" +ACCESS_TOKEN_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token" +SERVICE_ACCOUNT_TOKEN_TYPE = "urn:k8s:params:oauth:token-type:serviceaccount" +JWT_LIFETIME = datetime.timedelta(seconds=3600) # 1 hour + + +class ServiceAccountCredentials(credentials.Credentials): + """Credentials for GDCH (`Google Distributed Cloud Hosted`_) for service + account users. + + .. _Google Distributed Cloud Hosted: + https://cloud.google.com/blog/topics/hybrid-cloud/\ + announcing-google-distributed-cloud-edge-and-hosted + + To create a GDCH service account credential, first create a JSON file of + the following format:: + + { + "type": "gdch_service_account", + "format_version": "1", + "project": "<project name>", + "private_key_id": "<key id>", + "private_key": "-----BEGIN EC PRIVATE KEY-----\n<key bytes>\n-----END EC PRIVATE KEY-----\n", + "name": "<service identity name>", + "ca_cert_path": "<CA cert path>", + "token_uri": "https://service-identity.<Domain>/authenticate" + } + + The "format_version" field stands for the format of the JSON file. For now + it is always "1". The `private_key_id` and `private_key` are used for signing. + The `ca_cert_path` is used for token server TLS certificate verification. + + After the JSON file is created, set `GOOGLE_APPLICATION_CREDENTIALS` environment + variable to the JSON file path, then use the following code to create the + credential:: + + import google.auth + + credential, _ = google.auth.default() + credential = credential.with_gdch_audience("<the audience>") + + We can also create the credential directly:: + + from google.oauth2 import gdch_credentials + + credential = gdch_credentials.ServiceAccountCredentials.from_service_account_file("<the json file path>") + credential = credential.with_gdch_audience("<the audience>") + + The token is obtained in the following way. This class first creates a + self-signed JWT. It uses the `name` value as the `iss` and `sub` claim, and + the `token_uri` as the `aud` claim, and signs the JWT with the `private_key`. + It then sends the JWT to the `token_uri` to exchange a final token for + `audience`. + """ + + def __init__( + self, signer, service_identity_name, project, audience, token_uri, ca_cert_path + ): + """ + Args: + signer (google.auth.crypt.Signer): The signer used to sign JWTs. + service_identity_name (str): The service identity name. It will be + used as the `iss` and `sub` claim in the self-signed JWT. + project (str): The project. + audience (str): The audience for the final token. + token_uri (str): The token server uri. + ca_cert_path (str): The CA cert path for token server side TLS + certificate verification. If the token server uses well known + CA, then this parameter can be `None`.
+ """ + super(ServiceAccountCredentials, self).__init__() + self._signer = signer + self._service_identity_name = service_identity_name + self._project = project + self._audience = audience + self._token_uri = token_uri + self._ca_cert_path = ca_cert_path + + def _create_jwt(self): + now = _helpers.utcnow() + expiry = now + JWT_LIFETIME + iss_sub_value = "system:serviceaccount:{}:{}".format( + self._project, self._service_identity_name + ) + + payload = { + "iss": iss_sub_value, + "sub": iss_sub_value, + "aud": self._token_uri, + "iat": _helpers.datetime_to_secs(now), + "exp": _helpers.datetime_to_secs(expiry), + } + + return _helpers.from_bytes(jwt.encode(self._signer, payload)) + + @_helpers.copy_docstring(credentials.Credentials) + def refresh(self, request): + import google.auth.transport.requests + + if not isinstance(request, google.auth.transport.requests.Request): + raise exceptions.RefreshError( + "For GDCH service account credentials, request must be a google.auth.transport.requests.Request object" + ) + + # Create a self signed JWT, and do token exchange. + jwt_token = self._create_jwt() + request_body = { + "grant_type": TOKEN_EXCHANGE_TYPE, + "audience": self._audience, + "requested_token_type": ACCESS_TOKEN_TOKEN_TYPE, + "subject_token": jwt_token, + "subject_token_type": SERVICE_ACCOUNT_TOKEN_TYPE, + } + response_data = _client._token_endpoint_request( + request, + self._token_uri, + request_body, + access_token=None, + use_json=True, + verify=self._ca_cert_path, + ) + + self.token, _, self.expiry, _ = _client._handle_refresh_grant_response( + response_data, None + ) + + def with_gdch_audience(self, audience): + """Create a copy of GDCH credentials with the specified audience. + + Args: + audience (str): The intended audience for GDCH credentials. + """ + return self.__class__( + self._signer, + self._service_identity_name, + self._project, + audience, + self._token_uri, + self._ca_cert_path, + ) + + @classmethod + def _from_signer_and_info(cls, signer, info): + """Creates a Credentials instance from a signer and service account + info. + + Args: + signer (google.auth.crypt.Signer): The signer used to sign JWTs. + info (Mapping[str, str]): The service account info. + + Returns: + google.oauth2.gdch_credentials.ServiceAccountCredentials: The constructed + credentials. + + Raises: + ValueError: If the info is not in the expected format. + """ + if info["format_version"] != "1": + raise ValueError("Only format version 1 is supported") + + return cls( + signer, + info["name"], # service_identity_name + info["project"], + None, # audience + info["token_uri"], + info.get("ca_cert_path", None), + ) + + @classmethod + def from_service_account_info(cls, info): + """Creates a Credentials instance from parsed service account info. + + Args: + info (Mapping[str, str]): The service account info in Google + format. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.oauth2.gdch_credentials.ServiceAccountCredentials: The constructed + credentials. + + Raises: + ValueError: If the info is not in the expected format. + """ + signer = _service_account_info.from_dict( + info, + require=[ + "format_version", + "private_key_id", + "private_key", + "name", + "project", + "token_uri", + ], + use_rsa_signer=False, + ) + return cls._from_signer_and_info(signer, info) + + @classmethod + def from_service_account_file(cls, filename): + """Creates a Credentials instance from a service account json file. + + Args: + filename (str): The path to the service account json file. 
+
+        Returns:
+            google.oauth2.gdch_credentials.ServiceAccountCredentials: The constructed
+                credentials.
+        """
+        info, signer = _service_account_info.from_filename(
+            filename,
+            require=[
+                "format_version",
+                "private_key_id",
+                "private_key",
+                "name",
+                "project",
+                "token_uri",
+            ],
+            use_rsa_signer=False,
+        )
+        return cls._from_signer_and_info(signer, info)
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/id_token.py b/.venv/lib/python3.12/site-packages/google/oauth2/id_token.py
new file mode 100644
index 00000000..b68ab6b3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/id_token.py
@@ -0,0 +1,358 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google ID Token helpers.
+
+Provides support for verifying `OpenID Connect ID Tokens`_, especially ones
+generated by Google infrastructure.
+
+To parse and verify an ID Token issued by Google's OAuth 2.0 authorization
+server use :func:`verify_oauth2_token`. To verify an ID Token issued by
+Firebase, use :func:`verify_firebase_token`.
+
+A general purpose ID Token verifier is available as :func:`verify_token`.
+
+Example::
+
+    from google.oauth2 import id_token
+    from google.auth.transport import requests
+
+    request = requests.Request()
+
+    id_info = id_token.verify_oauth2_token(
+        token, request, 'my-client-id.example.com')
+
+    userid = id_info['sub']
+
+By default, this will re-fetch certificates for each verification. Because
+Google's public keys are only changed infrequently (on the order of once per
+day), you may wish to take advantage of caching to reduce latency and the
+potential for network errors. This can be accomplished using an external
+library like `CacheControl`_ to create a cache-aware
+:class:`google.auth.transport.Request`::
+
+    import cachecontrol
+    import google.auth.transport.requests
+    import requests
+
+    session = requests.session()
+    cached_session = cachecontrol.CacheControl(session)
+    request = google.auth.transport.requests.Request(session=cached_session)
+
+.. _OpenID Connect ID Tokens:
+    http://openid.net/specs/openid-connect-core-1_0.html#IDToken
+.. _CacheControl: https://cachecontrol.readthedocs.io
+"""
+
+import http.client as http_client
+import json
+import os
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import jwt
+
+
+# The URL that provides public certificates for verifying ID tokens issued
+# by Google's OAuth 2.0 authorization server.
+_GOOGLE_OAUTH2_CERTS_URL = "https://www.googleapis.com/oauth2/v1/certs"
+
+# The URL that provides public certificates for verifying ID tokens issued
+# by Firebase and the Google APIs infrastructure.
+_GOOGLE_APIS_CERTS_URL = (
+    "https://www.googleapis.com/robot/v1/metadata/x509"
+    "/securetoken@system.gserviceaccount.com"
+)
+
+_GOOGLE_ISSUERS = ["accounts.google.com", "https://accounts.google.com"]
+
+
+def _fetch_certs(request, certs_url):
+    """Fetches certificates.
+
+    Google-style certificate endpoints return JSON in the format of
+    ``{'key id': 'x509 certificate'}`` or a certificate array according
+    to the JWK spec (see https://tools.ietf.org/html/rfc7517).
+
+    Args:
+        request (google.auth.transport.Request): The object used to make
+            HTTP requests.
+        certs_url (str): The certificate endpoint URL.
+
+    Returns:
+        Mapping[str, str] | Mapping[str, list]: A mapping of public keys
+            in x.509 or JWK spec.
+    """
+    response = request(certs_url, method="GET")
+
+    if response.status != http_client.OK:
+        raise exceptions.TransportError(
+            "Could not fetch certificates at {}".format(certs_url)
+        )
+
+    return json.loads(response.data.decode("utf-8"))
+
+
+def verify_token(
+    id_token,
+    request,
+    audience=None,
+    certs_url=_GOOGLE_OAUTH2_CERTS_URL,
+    clock_skew_in_seconds=0,
+):
+    """Verifies an ID token and returns the decoded token.
+
+    Args:
+        id_token (Union[str, bytes]): The encoded token.
+        request (google.auth.transport.Request): The object used to make
+            HTTP requests.
+        audience (str or list): The audience or audiences that this token is
+            intended for. If None, then the audience is not verified.
+        certs_url (str): The URL that specifies the certificates to use to
+            verify the token. This URL should return JSON in the format of
+            ``{'key id': 'x509 certificate'}`` or a certificate array according to
+            the JWK spec (see https://tools.ietf.org/html/rfc7517).
+        clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+            validation.
+
+    Returns:
+        Mapping[str, Any]: The decoded token.
+    """
+    certs = _fetch_certs(request, certs_url)
+
+    if "keys" in certs:
+        try:
+            import jwt as jwt_lib  # type: ignore
+        except ImportError as caught_exc:  # pragma: NO COVER
+            raise ImportError(
+                "The pyjwt library is not installed, please install the pyjwt package to use the jwk certs format."
+            ) from caught_exc
+        jwks_client = jwt_lib.PyJWKClient(certs_url)
+        signing_key = jwks_client.get_signing_key_from_jwt(id_token)
+        return jwt_lib.decode(
+            id_token,
+            signing_key.key,
+            algorithms=[signing_key.algorithm_name],
+            audience=audience,
+        )
+    else:
+        return jwt.decode(
+            id_token,
+            certs=certs,
+            audience=audience,
+            clock_skew_in_seconds=clock_skew_in_seconds,
+        )
+
+
+def verify_oauth2_token(id_token, request, audience=None, clock_skew_in_seconds=0):
+    """Verifies an ID Token issued by Google's OAuth 2.0 authorization server.
+
+    Args:
+        id_token (Union[str, bytes]): The encoded token.
+        request (google.auth.transport.Request): The object used to make
+            HTTP requests.
+        audience (str): The audience that this token is intended for. This is
+            typically your application's OAuth 2.0 client ID. If None, then the
+            audience is not verified.
+        clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+            validation.
+
+    Returns:
+        Mapping[str, Any]: The decoded token.
+
+    Raises:
+        exceptions.GoogleAuthError: If the issuer is invalid.
+        ValueError: If token verification fails.
+    """
+    idinfo = verify_token(
+        id_token,
+        request,
+        audience=audience,
+        certs_url=_GOOGLE_OAUTH2_CERTS_URL,
+        clock_skew_in_seconds=clock_skew_in_seconds,
+    )
+
+    if idinfo["iss"] not in _GOOGLE_ISSUERS:
+        raise exceptions.GoogleAuthError(
+            "Wrong issuer. 'iss' should be one of the following: {}".format(
+                _GOOGLE_ISSUERS
+            )
+        )
+
+    return idinfo
+
+
+def verify_firebase_token(id_token, request, audience=None, clock_skew_in_seconds=0):
+    """Verifies an ID Token issued by Firebase Authentication.
+
+    Args:
+        id_token (Union[str, bytes]): The encoded token.
+        request (google.auth.transport.Request): The object used to make
+            HTTP requests.
+        audience (str): The audience that this token is intended for. This is
+            typically your Firebase application ID. If None, then the audience
+            is not verified.
+        clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+            validation.
+
+    Returns:
+        Mapping[str, Any]: The decoded token.
+    """
+    return verify_token(
+        id_token,
+        request,
+        audience=audience,
+        certs_url=_GOOGLE_APIS_CERTS_URL,
+        clock_skew_in_seconds=clock_skew_in_seconds,
+    )
+
+
+def fetch_id_token_credentials(audience, request=None):
+    """Create the ID Token credentials from the current environment.
+
+    This function acquires an ID token from the environment in the following
+    order. See https://google.aip.dev/auth/4110.
+
+    1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
+       to the path of a valid service account JSON file, then the ID token is
+       acquired using those service account credentials.
+    2. If the application is running in Compute Engine, App Engine or Cloud Run,
+       then the ID token is obtained from the metadata server.
+    3. If the metadata server doesn't exist and no valid service account
+       credentials are found, :class:`~google.auth.exceptions.DefaultCredentialsError`
+       will be raised.
+
+    Example::
+
+        import google.oauth2.id_token
+        import google.auth.transport.requests
+
+        request = google.auth.transport.requests.Request()
+        target_audience = "https://pubsub.googleapis.com"
+
+        # Create ID token credentials.
+        credentials = google.oauth2.id_token.fetch_id_token_credentials(target_audience, request=request)
+
+        # Refresh the credential to obtain an ID token.
+        credentials.refresh(request)
+
+        id_token = credentials.token
+        id_token_expiry = credentials.expiry
+
+    Args:
+        audience (str): The audience that this ID token is intended for.
+        request (Optional[google.auth.transport.Request]): A callable used to make
+            HTTP requests. A request object will be created if not provided.
+
+    Returns:
+        google.auth.credentials.Credentials: The ID token credentials.
+
+    Raises:
+        ~google.auth.exceptions.DefaultCredentialsError:
+            If the metadata server doesn't exist and no valid service account
+            credentials are found.
+    """
+    # 1. Try to get credentials from the GOOGLE_APPLICATION_CREDENTIALS environment
+    # variable.
+    credentials_filename = os.environ.get(environment_vars.CREDENTIALS)
+    if credentials_filename:
+        if not (
+            os.path.exists(credentials_filename)
+            and os.path.isfile(credentials_filename)
+        ):
+            raise exceptions.DefaultCredentialsError(
+                "GOOGLE_APPLICATION_CREDENTIALS path is either not found or invalid."
+            )
+
+        try:
+            with open(credentials_filename, "r") as f:
+                from google.oauth2 import service_account
+
+                info = json.load(f)
+                if info.get("type") == "service_account":
+                    return service_account.IDTokenCredentials.from_service_account_info(
+                        info, target_audience=audience
+                    )
+        except ValueError as caught_exc:
+            new_exc = exceptions.DefaultCredentialsError(
+                "GOOGLE_APPLICATION_CREDENTIALS is not valid service account credentials.",
+                caught_exc,
+            )
+            raise new_exc from caught_exc
+
+    # 2. Try to fetch the ID token from the metadata server if it exists. The
+    # code works for the GAE and Cloud Run metadata server as well.
+    try:
+        from google.auth import compute_engine
+        from google.auth.compute_engine import _metadata
+
+        # Create a request object if not provided.
+        if not request:
+            import google.auth.transport.requests
+
+            request = google.auth.transport.requests.Request()
+
+        if _metadata.ping(request):
+            return compute_engine.IDTokenCredentials(
+                request, audience, use_metadata_identity_endpoint=True
+            )
+    except (ImportError, exceptions.TransportError):
+        pass
+
+    raise exceptions.DefaultCredentialsError(
+        "Neither metadata server nor valid service account credentials are found."
+    )
+
+
+def fetch_id_token(request, audience):
+    """Fetch the ID Token from the current environment.
+
+    This function acquires an ID token from the environment in the following
+    order. See https://google.aip.dev/auth/4110.
+
+    1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
+       to the path of a valid service account JSON file, then the ID token is
+       acquired using those service account credentials.
+    2. If the application is running in Compute Engine, App Engine or Cloud Run,
+       then the ID token is obtained from the metadata server.
+    3. If the metadata server doesn't exist and no valid service account
+       credentials are found, :class:`~google.auth.exceptions.DefaultCredentialsError`
+       will be raised.
+
+    Example::
+
+        import google.oauth2.id_token
+        import google.auth.transport.requests
+
+        request = google.auth.transport.requests.Request()
+        target_audience = "https://pubsub.googleapis.com"
+
+        id_token = google.oauth2.id_token.fetch_id_token(request, target_audience)
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        audience (str): The audience that this ID token is intended for.
+
+    Returns:
+        str: The ID token.
+
+    Raises:
+        ~google.auth.exceptions.DefaultCredentialsError:
+            If the metadata server doesn't exist and no valid service account
+            credentials are found.
+    """
+    id_token_credentials = fetch_id_token_credentials(audience, request=request)
+    id_token_credentials.refresh(request)
+    return id_token_credentials.token
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/py.typed b/.venv/lib/python3.12/site-packages/google/oauth2/py.typed
new file mode 100644
index 00000000..d82ed62c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-oauth2 package uses inline types.
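The id_token helpers in the file above cover both halves of a typical flow: one process mints an ID token for a target audience, and the receiving service verifies it. A minimal sketch, assuming Application Default Credentials (a service account file or a metadata server) are available in the calling environment; the audience URL is a hypothetical placeholder::

    import google.auth.transport.requests
    from google.oauth2 import id_token

    request = google.auth.transport.requests.Request()
    audience = "https://my-service.example.com"  # hypothetical audience

    # Caller side: acquire an ID token for the target service.
    token = id_token.fetch_id_token(request, audience)

    # Receiver side: verify the signature, expiry, issuer and audience claim;
    # raises ValueError / GoogleAuthError on failure.
    claims = id_token.verify_oauth2_token(token, request, audience=audience)
    print(claims["sub"], claims["aud"])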
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/reauth.py b/.venv/lib/python3.12/site-packages/google/oauth2/reauth.py
new file mode 100644
index 00000000..1e39e0bc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/reauth.py
@@ -0,0 +1,369 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A module that provides functions for handling rapt authentication.
+
+Reauth is a process of obtaining additional authentication (such as a password,
+security token, etc.) while refreshing OAuth 2.0 credentials for a user.
+
+Credentials that use the Reauth flow must have the reauth scope,
+``https://www.googleapis.com/auth/accounts.reauth``.
+
+This module provides a high-level function for executing the Reauth process,
+:func:`refresh_grant`, and lower-level helpers for doing the individual
+steps of the reauth process.
+
+Those steps are:
+
+1. Obtaining a list of challenges from the reauth server.
+2. Running through each challenge and sending the result back to the reauth
+   server.
+3. Refreshing the access token using the returned rapt token.
+"""
+
+import sys
+
+from google.auth import exceptions
+from google.auth import metrics
+from google.oauth2 import _client
+from google.oauth2 import challenges
+
+
+_REAUTH_SCOPE = "https://www.googleapis.com/auth/accounts.reauth"
+_REAUTH_API = "https://reauth.googleapis.com/v2/sessions"
+
+_REAUTH_NEEDED_ERROR = "invalid_grant"
+_REAUTH_NEEDED_ERROR_INVALID_RAPT = "invalid_rapt"
+_REAUTH_NEEDED_ERROR_RAPT_REQUIRED = "rapt_required"
+
+_AUTHENTICATED = "AUTHENTICATED"
+_CHALLENGE_REQUIRED = "CHALLENGE_REQUIRED"
+_CHALLENGE_PENDING = "CHALLENGE_PENDING"
+
+
+# Override this global variable to set a custom maximum number of rounds of
+# reauth challenges that should be run.
+RUN_CHALLENGE_RETRY_LIMIT = 5
+
+
+def is_interactive():
+    """Check if we are in an interactive environment.
+
+    Override this function with different logic if you are using this library
+    outside a CLI.
+
+    If the rapt token needs refreshing, the user needs to answer the challenges.
+    If the user is not in an interactive environment, the challenges cannot
+    be answered and we would just wait for the timeout for no reason.
+
+    Returns:
+        bool: True if this is an interactive environment, False otherwise.
+    """
+
+    return sys.stdin.isatty()
+
+
+def _get_challenges(
+    request, supported_challenge_types, access_token, requested_scopes=None
+):
+    """Does the initial request to the reauth API to get the challenges.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        supported_challenge_types (Sequence[str]): list of challenge names
+            supported by the manager.
+        access_token (str): Access token with reauth scopes.
+        requested_scopes (Optional(Sequence[str])): Authorized scopes for the credentials.
+
+    Returns:
+        dict: The response from the reauth API.
+ """ + body = {"supportedChallengeTypes": supported_challenge_types} + if requested_scopes: + body["oauthScopesForDomainPolicyLookup"] = requested_scopes + metrics_header = {metrics.API_CLIENT_HEADER: metrics.reauth_start()} + + return _client._token_endpoint_request( + request, + _REAUTH_API + ":start", + body, + access_token=access_token, + use_json=True, + headers=metrics_header, + ) + + +def _send_challenge_result( + request, session_id, challenge_id, client_input, access_token +): + """Attempt to refresh access token by sending next challenge result. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + session_id (str): session id returned by the initial reauth call. + challenge_id (str): challenge id returned by the initial reauth call. + client_input: dict with a challenge-specific client input. For example: + ``{'credential': password}`` for password challenge. + access_token (str): Access token with reauth scopes. + + Returns: + dict: The response from the reauth API. + """ + body = { + "sessionId": session_id, + "challengeId": challenge_id, + "action": "RESPOND", + "proposalResponse": client_input, + } + metrics_header = {metrics.API_CLIENT_HEADER: metrics.reauth_continue()} + + return _client._token_endpoint_request( + request, + _REAUTH_API + "/{}:continue".format(session_id), + body, + access_token=access_token, + use_json=True, + headers=metrics_header, + ) + + +def _run_next_challenge(msg, request, access_token): + """Get the next challenge from msg and run it. + + Args: + msg (dict): Reauth API response body (either from the initial request to + https://reauth.googleapis.com/v2/sessions:start or from sending the + previous challenge response to + https://reauth.googleapis.com/v2/sessions/id:continue) + request (google.auth.transport.Request): A callable used to make + HTTP requests. + access_token (str): reauth access token + + Returns: + dict: The response from the reauth API. + + Raises: + google.auth.exceptions.ReauthError: if reauth failed. + """ + for challenge in msg["challenges"]: + if challenge["status"] != "READY": + # Skip non-activated challenges. + continue + c = challenges.AVAILABLE_CHALLENGES.get(challenge["challengeType"], None) + if not c: + raise exceptions.ReauthFailError( + "Unsupported challenge type {0}. Supported types: {1}".format( + challenge["challengeType"], + ",".join(list(challenges.AVAILABLE_CHALLENGES.keys())), + ) + ) + if not c.is_locally_eligible: + raise exceptions.ReauthFailError( + "Challenge {0} is not locally eligible".format( + challenge["challengeType"] + ) + ) + client_input = c.obtain_challenge_input(challenge) + if not client_input: + return None + return _send_challenge_result( + request, + msg["sessionId"], + challenge["challengeId"], + client_input, + access_token, + ) + return None + + +def _obtain_rapt(request, access_token, requested_scopes): + """Given an http request method and reauth access token, get rapt token. + + Args: + request (google.auth.transport.Request): A callable used to make + HTTP requests. + access_token (str): reauth access token + requested_scopes (Sequence[str]): scopes required by the client application + + Returns: + str: The rapt token. 
+
+    Raises:
+        google.auth.exceptions.ReauthError: if reauth failed
+    """
+    msg = _get_challenges(
+        request,
+        list(challenges.AVAILABLE_CHALLENGES.keys()),
+        access_token,
+        requested_scopes,
+    )
+
+    if msg["status"] == _AUTHENTICATED:
+        return msg["encodedProofOfReauthToken"]
+
+    for _ in range(0, RUN_CHALLENGE_RETRY_LIMIT):
+        if not (
+            msg["status"] == _CHALLENGE_REQUIRED or msg["status"] == _CHALLENGE_PENDING
+        ):
+            raise exceptions.ReauthFailError(
+                "Reauthentication challenge failed due to API error: {}".format(
+                    msg["status"]
+                )
+            )
+
+        if not is_interactive():
+            raise exceptions.ReauthFailError(
+                "Reauthentication challenge could not be answered because you are not"
+                " in an interactive session."
+            )
+
+        msg = _run_next_challenge(msg, request, access_token)
+
+        if not msg:
+            raise exceptions.ReauthFailError("Failed to obtain rapt token.")
+        if msg["status"] == _AUTHENTICATED:
+            return msg["encodedProofOfReauthToken"]
+
+    # If we got here it means we didn't get authenticated.
+    raise exceptions.ReauthFailError("Failed to obtain rapt token.")
+
+
+def get_rapt_token(
+    request, client_id, client_secret, refresh_token, token_uri, scopes=None
+):
+    """Given an http request method and refresh_token, get rapt token.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        client_id (str): client id to get access token for reauth scope.
+        client_secret (str): client secret for the client_id
+        refresh_token (str): refresh token to refresh access token
+        token_uri (str): uri to refresh access token
+        scopes (Optional(Sequence[str])): scopes required by the client application
+
+    Returns:
+        str: The rapt token.
+
+    Raises:
+        google.auth.exceptions.RefreshError: If reauth failed.
+    """
+    sys.stderr.write("Reauthentication required.\n")
+
+    # Get access token for reauth.
+    access_token, _, _, _ = _client.refresh_grant(
+        request=request,
+        client_id=client_id,
+        client_secret=client_secret,
+        refresh_token=refresh_token,
+        token_uri=token_uri,
+        scopes=[_REAUTH_SCOPE],
+    )
+
+    # Get rapt token from reauth API.
+    rapt_token = _obtain_rapt(request, access_token, requested_scopes=scopes)
+    sys.stderr.write("Reauthentication successful.\n")
+
+    return rapt_token
+
+
+def refresh_grant(
+    request,
+    token_uri,
+    refresh_token,
+    client_id,
+    client_secret,
+    scopes=None,
+    rapt_token=None,
+    enable_reauth_refresh=False,
+):
+    """Implements the reauthentication flow.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        token_uri (str): The OAuth 2.0 authorization server's token endpoint
+            URI.
+        refresh_token (str): The refresh token to use to get a new access
+            token.
+        client_id (str): The OAuth 2.0 application's client ID.
+        client_secret (str): The OAuth 2.0 application's client secret.
+        scopes (Optional(Sequence[str])): Scopes to request. If present, all
+            scopes must be authorized for the refresh token. Useful if the
+            refresh token has a wildcard scope (e.g.
+            'https://www.googleapis.com/auth/any-api').
+        rapt_token (Optional(str)): The rapt token for reauth.
+        enable_reauth_refresh (Optional[bool]): Whether the reauth refresh flow
+            should be used. The default value is False. This option is for
+            gcloud only; other users should use the default value.
+
+    Returns:
+        Tuple[str, Optional[str], Optional[datetime], Mapping[str, str], str]: The
+            access token, new refresh token, expiration, the additional data
+            returned by the token endpoint, and the rapt token.
+
+    Raises:
+        google.auth.exceptions.RefreshError: If the token endpoint returned
+            an error.
+    """
+    body = {
+        "grant_type": _client._REFRESH_GRANT_TYPE,
+        "client_id": client_id,
+        "client_secret": client_secret,
+        "refresh_token": refresh_token,
+    }
+    if scopes:
+        body["scope"] = " ".join(scopes)
+    if rapt_token:
+        body["rapt"] = rapt_token
+    metrics_header = {metrics.API_CLIENT_HEADER: metrics.token_request_user()}
+
+    response_status_ok, response_data, retryable_error = _client._token_endpoint_request_no_throw(
+        request, token_uri, body, headers=metrics_header
+    )
+
+    if not response_status_ok and isinstance(response_data, str):
+        raise exceptions.RefreshError(response_data, retryable=False)
+
+    if (
+        not response_status_ok
+        and response_data.get("error") == _REAUTH_NEEDED_ERROR
+        and (
+            response_data.get("error_subtype") == _REAUTH_NEEDED_ERROR_INVALID_RAPT
+            or response_data.get("error_subtype") == _REAUTH_NEEDED_ERROR_RAPT_REQUIRED
+        )
+    ):
+        if not enable_reauth_refresh:
+            raise exceptions.RefreshError(
+                "Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate."
+            )
+
+        rapt_token = get_rapt_token(
+            request, client_id, client_secret, refresh_token, token_uri, scopes=scopes
+        )
+        body["rapt"] = rapt_token
+        (
+            response_status_ok,
+            response_data,
+            retryable_error,
+        ) = _client._token_endpoint_request_no_throw(
+            request, token_uri, body, headers=metrics_header
+        )
+
+    if not response_status_ok:
+        _client._handle_error_response(response_data, retryable_error)
+    return _client._handle_refresh_grant_response(response_data, refresh_token) + (
+        rapt_token,
+    )
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/service_account.py b/.venv/lib/python3.12/site-packages/google/oauth2/service_account.py
new file mode 100644
index 00000000..3e84194a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/service_account.py
@@ -0,0 +1,847 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Service Accounts: JSON Web Token (JWT) Profile for OAuth 2.0
+
+This module implements the JWT Profile for OAuth 2.0 Authorization Grants
+as defined by `RFC 7523`_ with particular support for how this RFC is
+implemented in Google's infrastructure. Google refers to these credentials
+as *Service Accounts*.
+
+Service accounts are used for server-to-server communication, such as
+interactions between a web application server and a Google service. The
+service account belongs to your application instead of to an individual end
+user. In contrast to other OAuth 2.0 profiles, no users are involved and your
+application "acts" as the service account.
+
+Typically an application uses a service account when the application uses
+Google APIs to work with its own data rather than a user's data. For example,
+an application that uses Google Cloud Datastore for data persistence would use
+a service account to authenticate its calls to the Google Cloud Datastore API.
+However, an application that needs to access a user's Drive documents would
+use the normal OAuth 2.0 profile.
+
+Additionally, Google Apps domain administrators can grant service accounts
+`domain-wide delegation`_ authority to access user data on behalf of users in
+the domain.
+
+This profile uses a JWT to acquire an OAuth 2.0 access token. The JWT is used
+in place of the usual authorization token returned during the standard
+OAuth 2.0 Authorization Code grant. The JWT is only used for this purpose, as
+the acquired access token is used as the bearer token when making requests
+using these credentials.
+
+This profile differs from the normal OAuth 2.0 profile because no user consent
+step is required. The use of the private key allows this profile to assert
+identity directly.
+
+This profile also differs from the :mod:`google.auth.jwt` authentication
+because the JWT credentials use the JWT directly as the bearer token. This
+profile instead only uses the JWT to obtain an OAuth 2.0 access token. The
+obtained OAuth 2.0 access token is used as the bearer token.
+
+Domain-wide delegation
+----------------------
+
+Domain-wide delegation allows a service account to access user data on
+behalf of any user in a Google Apps domain without consent from the user.
+For example, an application that uses the Google Calendar API to add events to
+the calendars of all users in a Google Apps domain would use a service account
+to access the Google Calendar API on behalf of users.
+
+The Google Apps administrator must explicitly authorize the service account to
+do this. This authorization step is referred to as "delegating domain-wide
+authority" to a service account.
+
+You can use domain-wide delegation by creating a set of credentials with a
+specific subject using :meth:`~Credentials.with_subject`.
+
+.. _RFC 7523: https://tools.ietf.org/html/rfc7523
+"""
+
+import copy
+import datetime
+
+from google.auth import _helpers
+from google.auth import _service_account_info
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import iam
+from google.auth import jwt
+from google.auth import metrics
+from google.oauth2 import _client
+
+_DEFAULT_TOKEN_LIFETIME_SECS = 3600  # 1 hour in seconds
+_GOOGLE_OAUTH2_TOKEN_ENDPOINT = "https://oauth2.googleapis.com/token"
+
+
+class Credentials(
+    credentials.Signing,
+    credentials.Scoped,
+    credentials.CredentialsWithQuotaProject,
+    credentials.CredentialsWithTokenUri,
+):
+    """Service account credentials
+
+    Usually, you'll create these credentials with one of the helper
+    constructors. To create credentials using a Google service account
+    private key JSON file::
+
+        credentials = service_account.Credentials.from_service_account_file(
+            'service-account.json')
+
+    Or if you already have the service account file loaded::
+
+        service_account_info = json.load(open('service_account.json'))
+        credentials = service_account.Credentials.from_service_account_info(
+            service_account_info)
+
+    Both helper methods pass on arguments to the constructor, so you can
+    specify additional scopes and a subject if necessary::
+
+        credentials = service_account.Credentials.from_service_account_file(
+            'service-account.json',
+            scopes=['email'],
+            subject='user@example.com')
+
+    The credentials are considered immutable.
+    If you want to modify the scopes
+    or the subject used for delegation, use :meth:`with_scopes` or
+    :meth:`with_subject`::
+
+        scoped_credentials = credentials.with_scopes(['email'])
+        delegated_credentials = credentials.with_subject(subject)
+
+    To add a quota project, use :meth:`with_quota_project`::
+
+        credentials = credentials.with_quota_project('myproject-123')
+    """
+
+    def __init__(
+        self,
+        signer,
+        service_account_email,
+        token_uri,
+        scopes=None,
+        default_scopes=None,
+        subject=None,
+        project_id=None,
+        quota_project_id=None,
+        additional_claims=None,
+        always_use_jwt_access=False,
+        universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN,
+        trust_boundary=None,
+    ):
+        """
+        Args:
+            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+            service_account_email (str): The service account's email.
+            scopes (Sequence[str]): User-defined scopes to request during the
+                authorization grant.
+            default_scopes (Sequence[str]): Default scopes passed by a
+                Google client library. Use 'scopes' for user-defined scopes.
+            token_uri (str): The OAuth 2.0 Token URI.
+            subject (str): For domain-wide delegation, the email address of the
+                user for which to request delegated access.
+            project_id (str): Project ID associated with the service account
+                credential.
+            quota_project_id (Optional[str]): The project ID used for quota and
+                billing.
+            additional_claims (Mapping[str, str]): Any additional claims for
+                the JWT assertion used in the authorization grant.
+            always_use_jwt_access (Optional[bool]): Whether a self signed JWT
+                should always be used.
+            universe_domain (str): The universe domain. The default
+                universe domain is googleapis.com. For the default value, a
+                self signed jwt is used for token refresh.
+            trust_boundary (str): String representation of trust boundary meta.
+
+        .. note:: Typically one of the helper constructors
+            :meth:`from_service_account_file` or
+            :meth:`from_service_account_info` are used instead of calling the
+            constructor directly.
+        """
+        super(Credentials, self).__init__()
+
+        self._cred_file_path = None
+        self._scopes = scopes
+        self._default_scopes = default_scopes
+        self._signer = signer
+        self._service_account_email = service_account_email
+        self._subject = subject
+        self._project_id = project_id
+        self._quota_project_id = quota_project_id
+        self._token_uri = token_uri
+        self._always_use_jwt_access = always_use_jwt_access
+        self._universe_domain = universe_domain or credentials.DEFAULT_UNIVERSE_DOMAIN
+
+        if universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN:
+            self._always_use_jwt_access = True
+
+        self._jwt_credentials = None
+
+        if additional_claims is not None:
+            self._additional_claims = additional_claims
+        else:
+            self._additional_claims = {}
+        self._trust_boundary = {"locations": [], "encoded_locations": "0x0"}
+
+    @classmethod
+    def _from_signer_and_info(cls, signer, info, **kwargs):
+        """Creates a Credentials instance from a signer and service account
+        info.
+
+        Args:
+            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+            info (Mapping[str, str]): The service account info.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            google.auth.jwt.Credentials: The constructed credentials.
+
+        Raises:
+            ValueError: If the info is not in the expected format.
+ """ + return cls( + signer, + service_account_email=info["client_email"], + token_uri=info["token_uri"], + project_id=info.get("project_id"), + universe_domain=info.get( + "universe_domain", credentials.DEFAULT_UNIVERSE_DOMAIN + ), + trust_boundary=info.get("trust_boundary"), + **kwargs, + ) + + @classmethod + def from_service_account_info(cls, info, **kwargs): + """Creates a Credentials instance from parsed service account info. + + Args: + info (Mapping[str, str]): The service account info in Google + format. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.service_account.Credentials: The constructed + credentials. + + Raises: + ValueError: If the info is not in the expected format. + """ + signer = _service_account_info.from_dict( + info, require=["client_email", "token_uri"] + ) + return cls._from_signer_and_info(signer, info, **kwargs) + + @classmethod + def from_service_account_file(cls, filename, **kwargs): + """Creates a Credentials instance from a service account json file. + + Args: + filename (str): The path to the service account json file. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.service_account.Credentials: The constructed + credentials. + """ + info, signer = _service_account_info.from_filename( + filename, require=["client_email", "token_uri"] + ) + return cls._from_signer_and_info(signer, info, **kwargs) + + @property + def service_account_email(self): + """The service account email.""" + return self._service_account_email + + @property + def project_id(self): + """Project ID associated with this credential.""" + return self._project_id + + @property + def requires_scopes(self): + """Checks if the credentials requires scopes. + + Returns: + bool: True if there are no scopes set otherwise False. + """ + return True if not self._scopes else False + + def _make_copy(self): + cred = self.__class__( + self._signer, + service_account_email=self._service_account_email, + scopes=copy.copy(self._scopes), + default_scopes=copy.copy(self._default_scopes), + token_uri=self._token_uri, + subject=self._subject, + project_id=self._project_id, + quota_project_id=self._quota_project_id, + additional_claims=self._additional_claims.copy(), + always_use_jwt_access=self._always_use_jwt_access, + universe_domain=self._universe_domain, + ) + cred._cred_file_path = self._cred_file_path + return cred + + @_helpers.copy_docstring(credentials.Scoped) + def with_scopes(self, scopes, default_scopes=None): + cred = self._make_copy() + cred._scopes = scopes + cred._default_scopes = default_scopes + return cred + + def with_always_use_jwt_access(self, always_use_jwt_access): + """Create a copy of these credentials with the specified always_use_jwt_access value. + + Args: + always_use_jwt_access (bool): Whether always use self signed JWT or not. + + Returns: + google.auth.service_account.Credentials: A new credentials + instance. + Raises: + google.auth.exceptions.InvalidValue: If the universe domain is not + default and always_use_jwt_access is False. 
+ """ + cred = self._make_copy() + if ( + cred._universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN + and not always_use_jwt_access + ): + raise exceptions.InvalidValue( + "always_use_jwt_access should be True for non-default universe domain" + ) + cred._always_use_jwt_access = always_use_jwt_access + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithUniverseDomain) + def with_universe_domain(self, universe_domain): + cred = self._make_copy() + cred._universe_domain = universe_domain + if universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN: + cred._always_use_jwt_access = True + return cred + + def with_subject(self, subject): + """Create a copy of these credentials with the specified subject. + + Args: + subject (str): The subject claim. + + Returns: + google.auth.service_account.Credentials: A new credentials + instance. + """ + cred = self._make_copy() + cred._subject = subject + return cred + + def with_claims(self, additional_claims): + """Returns a copy of these credentials with modified claims. + + Args: + additional_claims (Mapping[str, str]): Any additional claims for + the JWT payload. This will be merged with the current + additional claims. + + Returns: + google.auth.service_account.Credentials: A new credentials + instance. + """ + new_additional_claims = copy.deepcopy(self._additional_claims) + new_additional_claims.update(additional_claims or {}) + cred = self._make_copy() + cred._additional_claims = new_additional_claims + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + cred = self._make_copy() + cred._quota_project_id = quota_project_id + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithTokenUri) + def with_token_uri(self, token_uri): + cred = self._make_copy() + cred._token_uri = token_uri + return cred + + def _make_authorization_grant_assertion(self): + """Create the OAuth 2.0 assertion. + + This assertion is used during the OAuth 2.0 grant to acquire an + access token. + + Returns: + bytes: The authorization grant assertion. + """ + now = _helpers.utcnow() + lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS) + expiry = now + lifetime + + payload = { + "iat": _helpers.datetime_to_secs(now), + "exp": _helpers.datetime_to_secs(expiry), + # The issuer must be the service account email. + "iss": self._service_account_email, + # The audience must be the auth token endpoint's URI + "aud": _GOOGLE_OAUTH2_TOKEN_ENDPOINT, + "scope": _helpers.scopes_to_string(self._scopes or ()), + } + + payload.update(self._additional_claims) + + # The subject can be a user email for domain-wide delegation. + if self._subject: + payload.setdefault("sub", self._subject) + + token = jwt.encode(self._signer, payload) + + return token + + def _use_self_signed_jwt(self): + # Since domain wide delegation doesn't work with self signed JWT. If + # subject exists, then we should not use self signed JWT. 
+        return self._subject is None and self._jwt_credentials is not None
+
+    def _metric_header_for_usage(self):
+        if self._use_self_signed_jwt():
+            return metrics.CRED_TYPE_SA_JWT
+        return metrics.CRED_TYPE_SA_ASSERTION
+
+    @_helpers.copy_docstring(credentials.Credentials)
+    def refresh(self, request):
+        if self._always_use_jwt_access and not self._jwt_credentials:
+            # If a self signed jwt should be used but the jwt credential is not
+            # created yet, try to create one with scopes.
+            self._create_self_signed_jwt(None)
+
+        if (
+            self._universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN
+            and self._subject
+        ):
+            raise exceptions.RefreshError(
+                "domain wide delegation is not supported for non-default universe domain"
+            )
+
+        if self._use_self_signed_jwt():
+            self._jwt_credentials.refresh(request)
+            self.token = self._jwt_credentials.token.decode()
+            self.expiry = self._jwt_credentials.expiry
+        else:
+            assertion = self._make_authorization_grant_assertion()
+            access_token, expiry, _ = _client.jwt_grant(
+                request, self._token_uri, assertion
+            )
+            self.token = access_token
+            self.expiry = expiry
+
+    def _create_self_signed_jwt(self, audience):
+        """Create a self-signed JWT from the credentials if requirements are met.
+
+        Args:
+            audience (str): The service URL. ``https://[API_ENDPOINT]/``
+        """
+        # https://google.aip.dev/auth/4111
+        if self._always_use_jwt_access:
+            if self._scopes:
+                additional_claims = {"scope": " ".join(self._scopes)}
+                if (
+                    self._jwt_credentials is None
+                    or self._jwt_credentials.additional_claims != additional_claims
+                ):
+                    self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+                        self, None, additional_claims=additional_claims
+                    )
+            elif audience:
+                if (
+                    self._jwt_credentials is None
+                    or self._jwt_credentials._audience != audience
+                ):
+                    self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+                        self, audience
+                    )
+            elif self._default_scopes:
+                additional_claims = {"scope": " ".join(self._default_scopes)}
+                if (
+                    self._jwt_credentials is None
+                    or additional_claims != self._jwt_credentials.additional_claims
+                ):
+                    self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+                        self, None, additional_claims=additional_claims
+                    )
+        elif not self._scopes and audience:
+            self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+                self, audience
+            )
+
+    @_helpers.copy_docstring(credentials.Signing)
+    def sign_bytes(self, message):
+        return self._signer.sign(message)
+
+    @property  # type: ignore
+    @_helpers.copy_docstring(credentials.Signing)
+    def signer(self):
+        return self._signer
+
+    @property  # type: ignore
+    @_helpers.copy_docstring(credentials.Signing)
+    def signer_email(self):
+        return self._service_account_email
+
+    @_helpers.copy_docstring(credentials.Credentials)
+    def get_cred_info(self):
+        if self._cred_file_path:
+            return {
+                "credential_source": self._cred_file_path,
+                "credential_type": "service account credentials",
+                "principal": self.service_account_email,
+            }
+        return None
+
+
+class IDTokenCredentials(
+    credentials.Signing,
+    credentials.CredentialsWithQuotaProject,
+    credentials.CredentialsWithTokenUri,
+):
+    """Open ID Connect ID Token-based service account credentials.
+
+    These credentials are largely similar to :class:`.Credentials`, but instead
+    of using an OAuth 2.0 Access Token as the bearer token, they use an Open
+    ID Connect ID Token as the bearer token. These credentials are useful when
+    communicating to services that require ID Tokens and cannot accept access
+    tokens.
+
+    Usually, you'll create these credentials with one of the helper
+    constructors. To create credentials using a Google service account
+    private key JSON file::
+
+        credentials = (
+            service_account.IDTokenCredentials.from_service_account_file(
+                'service-account.json'))
+
+    Or if you already have the service account file loaded::
+
+        service_account_info = json.load(open('service_account.json'))
+        credentials = (
+            service_account.IDTokenCredentials.from_service_account_info(
+                service_account_info))
+
+    Both helper methods pass on arguments to the constructor, so you can
+    specify additional scopes and a subject if necessary::
+
+        credentials = (
+            service_account.IDTokenCredentials.from_service_account_file(
+                'service-account.json',
+                scopes=['email'],
+                subject='user@example.com'))
+
+    The credentials are considered immutable. If you want to modify the scopes
+    or the subject used for delegation, use :meth:`with_scopes` or
+    :meth:`with_subject`::
+
+        scoped_credentials = credentials.with_scopes(['email'])
+        delegated_credentials = credentials.with_subject(subject)
+    """
+
+    def __init__(
+        self,
+        signer,
+        service_account_email,
+        token_uri,
+        target_audience,
+        additional_claims=None,
+        quota_project_id=None,
+        universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN,
+    ):
+        """
+        Args:
+            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+            service_account_email (str): The service account's email.
+            token_uri (str): The OAuth 2.0 Token URI.
+            target_audience (str): The intended audience for these credentials,
+                used when requesting the ID Token. The ID Token's ``aud`` claim
+                will be set to this string.
+            additional_claims (Mapping[str, str]): Any additional claims for
+                the JWT assertion used in the authorization grant.
+            quota_project_id (Optional[str]): The project ID used for quota and billing.
+            universe_domain (str): The universe domain. The default
+                universe domain is googleapis.com. For a non-default value, the
+                IAM ID token endpoint is used for token refresh. Note that the
+                iam.serviceAccountTokenCreator role is required to use the IAM
+                endpoint.
+        .. note:: Typically one of the helper constructors
+            :meth:`from_service_account_file` or
+            :meth:`from_service_account_info` are used instead of calling the
+            constructor directly.
+        """
+        super(IDTokenCredentials, self).__init__()
+        self._signer = signer
+        self._service_account_email = service_account_email
+        self._token_uri = token_uri
+        self._target_audience = target_audience
+        self._quota_project_id = quota_project_id
+        self._use_iam_endpoint = False
+
+        if not universe_domain:
+            self._universe_domain = credentials.DEFAULT_UNIVERSE_DOMAIN
+        else:
+            self._universe_domain = universe_domain
+        self._iam_id_token_endpoint = iam._IAM_IDTOKEN_ENDPOINT.replace(
+            "googleapis.com", self._universe_domain
+        )
+
+        if self._universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN:
+            self._use_iam_endpoint = True
+
+        if additional_claims is not None:
+            self._additional_claims = additional_claims
+        else:
+            self._additional_claims = {}
+
+    @classmethod
+    def _from_signer_and_info(cls, signer, info, **kwargs):
+        """Creates a credentials instance from a signer and service account
+        info.
+
+        Args:
+            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+            info (Mapping[str, str]): The service account info.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            google.auth.jwt.IDTokenCredentials: The constructed credentials.
+
+        Raises:
+            ValueError: If the info is not in the expected format.
+ """ + kwargs.setdefault("service_account_email", info["client_email"]) + kwargs.setdefault("token_uri", info["token_uri"]) + if "universe_domain" in info: + kwargs["universe_domain"] = info["universe_domain"] + return cls(signer, **kwargs) + + @classmethod + def from_service_account_info(cls, info, **kwargs): + """Creates a credentials instance from parsed service account info. + + Args: + info (Mapping[str, str]): The service account info in Google + format. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.service_account.IDTokenCredentials: The constructed + credentials. + + Raises: + ValueError: If the info is not in the expected format. + """ + signer = _service_account_info.from_dict( + info, require=["client_email", "token_uri"] + ) + return cls._from_signer_and_info(signer, info, **kwargs) + + @classmethod + def from_service_account_file(cls, filename, **kwargs): + """Creates a credentials instance from a service account json file. + + Args: + filename (str): The path to the service account json file. + kwargs: Additional arguments to pass to the constructor. + + Returns: + google.auth.service_account.IDTokenCredentials: The constructed + credentials. + """ + info, signer = _service_account_info.from_filename( + filename, require=["client_email", "token_uri"] + ) + return cls._from_signer_and_info(signer, info, **kwargs) + + def _make_copy(self): + cred = self.__class__( + self._signer, + service_account_email=self._service_account_email, + token_uri=self._token_uri, + target_audience=self._target_audience, + additional_claims=self._additional_claims.copy(), + quota_project_id=self.quota_project_id, + universe_domain=self._universe_domain, + ) + # _use_iam_endpoint is not exposed in the constructor + cred._use_iam_endpoint = self._use_iam_endpoint + return cred + + def with_target_audience(self, target_audience): + """Create a copy of these credentials with the specified target + audience. + + Args: + target_audience (str): The intended audience for these credentials, + used when requesting the ID Token. + + Returns: + google.auth.service_account.IDTokenCredentials: A new credentials + instance. + """ + cred = self._make_copy() + cred._target_audience = target_audience + return cred + + def _with_use_iam_endpoint(self, use_iam_endpoint): + """Create a copy of these credentials with the use_iam_endpoint value. + + Args: + use_iam_endpoint (bool): If True, IAM generateIdToken endpoint will + be used instead of the token_uri. Note that + iam.serviceAccountTokenCreator role is required to use the IAM + endpoint. The default value is False. This feature is currently + experimental and subject to change without notice. + + Returns: + google.auth.service_account.IDTokenCredentials: A new credentials + instance. + Raises: + google.auth.exceptions.InvalidValue: If the universe domain is not + default and use_iam_endpoint is False. 
+ """ + cred = self._make_copy() + if ( + cred._universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN + and not use_iam_endpoint + ): + raise exceptions.InvalidValue( + "use_iam_endpoint should be True for non-default universe domain" + ) + cred._use_iam_endpoint = use_iam_endpoint + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject) + def with_quota_project(self, quota_project_id): + cred = self._make_copy() + cred._quota_project_id = quota_project_id + return cred + + @_helpers.copy_docstring(credentials.CredentialsWithTokenUri) + def with_token_uri(self, token_uri): + cred = self._make_copy() + cred._token_uri = token_uri + return cred + + def _make_authorization_grant_assertion(self): + """Create the OAuth 2.0 assertion. + + This assertion is used during the OAuth 2.0 grant to acquire an + ID token. + + Returns: + bytes: The authorization grant assertion. + """ + now = _helpers.utcnow() + lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS) + expiry = now + lifetime + + payload = { + "iat": _helpers.datetime_to_secs(now), + "exp": _helpers.datetime_to_secs(expiry), + # The issuer must be the service account email. + "iss": self.service_account_email, + # The audience must be the auth token endpoint's URI + "aud": _GOOGLE_OAUTH2_TOKEN_ENDPOINT, + # The target audience specifies which service the ID token is + # intended for. + "target_audience": self._target_audience, + } + + payload.update(self._additional_claims) + + token = jwt.encode(self._signer, payload) + + return token + + def _refresh_with_iam_endpoint(self, request): + """Use IAM generateIdToken endpoint to obtain an ID token. + + It works as follows: + + 1. First we create a self signed jwt with + https://www.googleapis.com/auth/iam being the scope. + + 2. Next we use the self signed jwt as the access token, and make a POST + request to IAM generateIdToken endpoint. The request body is: + { + "audience": self._target_audience, + "includeEmail": "true", + "useEmailAzp": "true", + } + + If the request is succesfully, it will return {"token":"the ID token"}, + and we can extract the ID token and compute its expiry. 
+ """ + jwt_credentials = jwt.Credentials.from_signing_credentials( + self, + None, + additional_claims={"scope": "https://www.googleapis.com/auth/iam"}, + ) + jwt_credentials.refresh(request) + self.token, self.expiry = _client.call_iam_generate_id_token_endpoint( + request, + self._iam_id_token_endpoint, + self.signer_email, + self._target_audience, + jwt_credentials.token.decode(), + self._universe_domain, + ) + + @_helpers.copy_docstring(credentials.Credentials) + def refresh(self, request): + if self._use_iam_endpoint: + self._refresh_with_iam_endpoint(request) + else: + assertion = self._make_authorization_grant_assertion() + access_token, expiry, _ = _client.id_token_jwt_grant( + request, self._token_uri, assertion + ) + self.token = access_token + self.expiry = expiry + + @property + def service_account_email(self): + """The service account email.""" + return self._service_account_email + + @_helpers.copy_docstring(credentials.Signing) + def sign_bytes(self, message): + return self._signer.sign(message) + + @property # type: ignore + @_helpers.copy_docstring(credentials.Signing) + def signer(self): + return self._signer + + @property # type: ignore + @_helpers.copy_docstring(credentials.Signing) + def signer_email(self): + return self._service_account_email diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/sts.py b/.venv/lib/python3.12/site-packages/google/oauth2/sts.py new file mode 100644 index 00000000..ad396273 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/oauth2/sts.py @@ -0,0 +1,176 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""OAuth 2.0 Token Exchange Spec. + +This module defines a token exchange utility based on the `OAuth 2.0 Token +Exchange`_ spec. This will be mainly used to exchange external credentials +for GCP access tokens in workload identity pools to access Google APIs. + +The implementation will support various types of client authentication as +allowed in the spec. + +A deviation on the spec will be for additional Google specific options that +cannot be easily mapped to parameters defined in the RFC. + +The returned dictionary response will be based on the `rfc8693 section 2.2.1`_ +spec JSON response. + +.. _OAuth 2.0 Token Exchange: https://tools.ietf.org/html/rfc8693 +.. _rfc8693 section 2.2.1: https://tools.ietf.org/html/rfc8693#section-2.2.1 +""" + +import http.client as http_client +import json +import urllib + +from google.oauth2 import utils + + +_URLENCODED_HEADERS = {"Content-Type": "application/x-www-form-urlencoded"} + + +class Client(utils.OAuthClientAuthHandler): + """Implements the OAuth 2.0 token exchange spec based on + https://tools.ietf.org/html/rfc8693. + """ + + def __init__(self, token_exchange_endpoint, client_authentication=None): + """Initializes an STS client instance. + + Args: + token_exchange_endpoint (str): The token exchange endpoint. 
+ client_authentication (Optional[google.oauth2.utils.ClientAuthentication]):
+ The optional OAuth client authentication credentials if available.
+ """
+ super(Client, self).__init__(client_authentication)
+ self._token_exchange_endpoint = token_exchange_endpoint
+
+ def _make_request(self, request, headers, request_body):
+ # Initialize request headers.
+ request_headers = _URLENCODED_HEADERS.copy()
+
+ # Inject additional headers.
+ if headers:
+ for k, v in dict(headers).items():
+ request_headers[k] = v
+
+ # Apply OAuth client authentication.
+ self.apply_client_authentication_options(request_headers, request_body)
+
+ # Execute request.
+ response = request(
+ url=self._token_exchange_endpoint,
+ method="POST",
+ headers=request_headers,
+ body=urllib.parse.urlencode(request_body).encode("utf-8"),
+ )
+
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+
+ # If a non-200 response is received, translate it to an OAuthError exception.
+ if response.status != http_client.OK:
+ utils.handle_error_response(response_body)
+
+ response_data = json.loads(response_body)
+
+ # Return successful response.
+ return response_data
+
+ def exchange_token(
+ self,
+ request,
+ grant_type,
+ subject_token,
+ subject_token_type,
+ resource=None,
+ audience=None,
+ scopes=None,
+ requested_token_type=None,
+ actor_token=None,
+ actor_token_type=None,
+ additional_options=None,
+ additional_headers=None,
+ ):
+ """Exchanges the provided token for another type of token based on the
+ rfc8693 spec.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ grant_type (str): The OAuth 2.0 token exchange grant type.
+ subject_token (str): The OAuth 2.0 token exchange subject token.
+ subject_token_type (str): The OAuth 2.0 token exchange subject token type.
+ resource (Optional[str]): The optional OAuth 2.0 token exchange resource field.
+ audience (Optional[str]): The optional OAuth 2.0 token exchange audience field.
+ scopes (Optional[Sequence[str]]): The optional list of scopes to use.
+ requested_token_type (Optional[str]): The optional OAuth 2.0 token exchange requested
+ token type.
+ actor_token (Optional[str]): The optional OAuth 2.0 token exchange actor token.
+ actor_token_type (Optional[str]): The optional OAuth 2.0 token exchange actor token type.
+ additional_options (Optional[Mapping[str, str]]): The optional additional
+ non-standard, Google-specific options.
+ additional_headers (Optional[Mapping[str, str]]): The optional additional
+ headers to pass to the token exchange endpoint.
+
+ Returns:
+ Mapping[str, str]: The token exchange JSON-decoded response data containing
+ the requested token and its expiration time.
+
+ Raises:
+ google.auth.exceptions.OAuthError: If the token endpoint returned
+ an error.
+ """
+ # Initialize request body.
+ request_body = {
+ "grant_type": grant_type,
+ "resource": resource,
+ "audience": audience,
+ "scope": " ".join(scopes or []),
+ "requested_token_type": requested_token_type,
+ "subject_token": subject_token,
+ "subject_token_type": subject_token_type,
+ "actor_token": actor_token,
+ "actor_token_type": actor_token_type,
+ "options": None,
+ }
+ # Add additional non-standard options.
+ if additional_options:
+ request_body["options"] = urllib.parse.quote(json.dumps(additional_options))
+ # Remove empty fields in request body.
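+ # For example (illustrative): if only grant_type, subject_token and
+ # subject_token_type were supplied, every other key above is still None
+ # and is removed below, so the urlencoded body carries just those fields.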
+ for k, v in dict(request_body).items():
+ if v is None or v == "":
+ del request_body[k]
+
+ return self._make_request(request, additional_headers, request_body)
+
+ def refresh_token(self, request, refresh_token):
+ """Exchanges a refresh token for an access token based on the
+ RFC6749 spec.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ refresh_token (str): The OAuth 2.0 refresh token.
+ """
+
+ return self._make_request(
+ request,
+ None,
+ {"grant_type": "refresh_token", "refresh_token": refresh_token},
+ )
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/utils.py b/.venv/lib/python3.12/site-packages/google/oauth2/utils.py
new file mode 100644
index 00000000..d72ff191
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/utils.py
@@ -0,0 +1,168 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 Utilities.
+
+This module provides implementations for various OAuth 2.0 utilities.
+This includes `OAuth error handling`_ and
+`Client authentication for OAuth flows`_.
+
+OAuth error handling
+--------------------
+This will define interfaces for handling OAuth-related error responses as
+stated in `RFC 6749 section 5.2`_.
+This will include a common function to convert these HTTP error responses to a
+:class:`google.auth.exceptions.OAuthError` exception.
+
+
+Client authentication for OAuth flows
+-------------------------------------
+We introduce an interface for defining client authentication credentials based
+on `RFC 6749 section 2.3.1`_. This will expose the following
+capabilities:
+
+ * Ability to support basic authentication via request header.
+ * Ability to support bearer token authentication via request header.
+ * Ability to support client ID / secret authentication via request body.
+
+.. _RFC 6749 section 2.3.1: https://tools.ietf.org/html/rfc6749#section-2.3.1
+.. _RFC 6749 section 5.2: https://tools.ietf.org/html/rfc6749#section-5.2
+"""
+
+import abc
+import base64
+import enum
+import json
+
+from google.auth import exceptions
+
+
+# OAuth client authentication based on
+# https://tools.ietf.org/html/rfc6749#section-2.3.
+class ClientAuthType(enum.Enum):
+ basic = 1
+ request_body = 2
+
+
+class ClientAuthentication(object):
+ """Defines the client authentication credentials for basic and request-body
+ types based on https://tools.ietf.org/html/rfc6749#section-2.3.1.
+ """
+
+ def __init__(self, client_auth_type, client_id, client_secret=None):
+ """Instantiates a client authentication object containing the client ID
+ and secret credentials for basic and request-body auth.
+
+ Args:
+ client_auth_type (google.oauth2.utils.ClientAuthType): The
+ client authentication type.
+ client_id (str): The client ID.
+ client_secret (Optional[str]): The client secret.
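+
+ Example (illustrative; the ID and secret values are placeholders)::
+
+ auth = ClientAuthentication(
+ ClientAuthType.basic, "my-client-id", "my-client-secret"
+ )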
+ """ + self.client_auth_type = client_auth_type + self.client_id = client_id + self.client_secret = client_secret + + +class OAuthClientAuthHandler(metaclass=abc.ABCMeta): + """Abstract class for handling client authentication in OAuth-based + operations. + """ + + def __init__(self, client_authentication=None): + """Instantiates an OAuth client authentication handler. + + Args: + client_authentication (Optional[google.oauth2.utils.ClientAuthentication]): + The OAuth client authentication credentials if available. + """ + super(OAuthClientAuthHandler, self).__init__() + self._client_authentication = client_authentication + + def apply_client_authentication_options( + self, headers, request_body=None, bearer_token=None + ): + """Applies client authentication on the OAuth request's headers or POST + body. + + Args: + headers (Mapping[str, str]): The HTTP request header. + request_body (Optional[Mapping[str, str]]): The HTTP request body + dictionary. For requests that do not support request body, this + is None and will be ignored. + bearer_token (Optional[str]): The optional bearer token. + """ + # Inject authenticated header. + self._inject_authenticated_headers(headers, bearer_token) + # Inject authenticated request body. + if bearer_token is None: + self._inject_authenticated_request_body(request_body) + + def _inject_authenticated_headers(self, headers, bearer_token=None): + if bearer_token is not None: + headers["Authorization"] = "Bearer %s" % bearer_token + elif ( + self._client_authentication is not None + and self._client_authentication.client_auth_type is ClientAuthType.basic + ): + username = self._client_authentication.client_id + password = self._client_authentication.client_secret or "" + + credentials = base64.b64encode( + ("%s:%s" % (username, password)).encode() + ).decode() + headers["Authorization"] = "Basic %s" % credentials + + def _inject_authenticated_request_body(self, request_body): + if ( + self._client_authentication is not None + and self._client_authentication.client_auth_type + is ClientAuthType.request_body + ): + if request_body is None: + raise exceptions.OAuthError( + "HTTP request does not support request-body" + ) + else: + request_body["client_id"] = self._client_authentication.client_id + request_body["client_secret"] = ( + self._client_authentication.client_secret or "" + ) + + +def handle_error_response(response_body): + """Translates an error response from an OAuth operation into an + OAuthError exception. + + Args: + response_body (str): The decoded response data. + + Raises: + google.auth.exceptions.OAuthError + """ + try: + error_components = [] + error_data = json.loads(response_body) + + error_components.append("Error code {}".format(error_data["error"])) + if "error_description" in error_data: + error_components.append(": {}".format(error_data["error_description"])) + if "error_uri" in error_data: + error_components.append(" - {}".format(error_data["error_uri"])) + error_details = "".join(error_components) + # If no details could be extracted, use the response data. 
+ except (KeyError, ValueError):
+ error_details = response_body
+
+ raise exceptions.OAuthError(error_details, response_body)
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/webauthn_handler.py b/.venv/lib/python3.12/site-packages/google/oauth2/webauthn_handler.py
new file mode 100644
index 00000000..e27c7e09
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/webauthn_handler.py
@@ -0,0 +1,82 @@
+import abc
+import os
+import struct
+import subprocess
+
+from google.auth import exceptions
+from google.oauth2.webauthn_types import GetRequest, GetResponse
+
+
+class WebAuthnHandler(abc.ABC):
+ @abc.abstractmethod
+ def is_available(self) -> bool:
+ """Check whether this WebAuthn handler is available"""
+ raise NotImplementedError("is_available method must be implemented")
+
+ @abc.abstractmethod
+ def get(self, get_request: GetRequest) -> GetResponse:
+ """WebAuthn get (assertion)"""
+ raise NotImplementedError("get method must be implemented")
+
+
+class PluginHandler(WebAuthnHandler):
+ """Offloads a WebAuthn get request to a pluggable command-line tool.
+
+ Offloads WebAuthn get to a plugin which takes the form of a
+ command-line tool. The command-line tool is configurable via the
+ PluginHandler._ENV_VAR environment variable.
+
+ The WebAuthn plugin should implement the following interface:
+
+ Communication occurs over stdin/stdout, and messages are both sent and
+ received in the form:
+
+ [4 bytes - payload size (little-endian)][variable bytes - json payload]
+ """
+
+ _ENV_VAR = "GOOGLE_AUTH_WEBAUTHN_PLUGIN"
+
+ def is_available(self) -> bool:
+ try:
+ self._find_plugin()
+ except Exception:
+ return False
+ else:
+ return True
+
+ def get(self, get_request: GetRequest) -> GetResponse:
+ request_json = get_request.to_json()
+ cmd = self._find_plugin()
+ response_json = self._call_plugin(cmd, request_json)
+ return GetResponse.from_json(response_json)
+
+ def _call_plugin(self, cmd: str, input_json: str) -> str:
+ # Calculate length of input
+ input_length = len(input_json)
+ length_bytes_le = struct.pack("<I", input_length)
+ request = length_bytes_le + input_json.encode()
+
+ # Call plugin
+ process_result = subprocess.run(
+ [cmd], input=request, capture_output=True, check=True
+ )
+
+ # Check length of response
+ response_len_le = process_result.stdout[:4]
+ response_len = struct.unpack("<I", response_len_le)[0]
+ response = process_result.stdout[4:]
+ if response_len != len(response):
+ raise exceptions.MalformedError(
+ "Plugin response length {} does not match data {}".format(
+ response_len, len(response)
+ )
+ )
+ return response.decode()
+
+ def _find_plugin(self) -> str:
+ plugin_cmd = os.environ.get(PluginHandler._ENV_VAR)
+ if plugin_cmd is None:
+ raise exceptions.InvalidResource(
+ "{} env var is not set".format(PluginHandler._ENV_VAR)
+ )
+ return plugin_cmd
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/webauthn_handler_factory.py b/.venv/lib/python3.12/site-packages/google/oauth2/webauthn_handler_factory.py
new file mode 100644
index 00000000..184329fe
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/webauthn_handler_factory.py
@@ -0,0 +1,16 @@
+from typing import List, Optional
+
+from google.oauth2.webauthn_handler import PluginHandler, WebAuthnHandler
+
+
+class WebauthnHandlerFactory:
+ handlers: List[WebAuthnHandler]
+
+ def __init__(self):
+ self.handlers = [PluginHandler()]
+
+ def get_handler(self) -> Optional[WebAuthnHandler]:
+ for handler in self.handlers:
+ if handler.is_available():
+ return handler
+ return None
diff --git a/.venv/lib/python3.12/site-packages/google/oauth2/webauthn_types.py b/.venv/lib/python3.12/site-packages/google/oauth2/webauthn_types.py
new file mode 100644
index 00000000..7784e83d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/oauth2/webauthn_types.py
@@ -0,0 +1,156 @@
+from dataclasses import dataclass
+import json
+from typing import Any, Dict, List, Optional
+
+from google.auth import exceptions
+
+
+@dataclass(frozen=True)
+class PublicKeyCredentialDescriptor:
+ """Descriptor for a security-key-based credential.
+
+ https://www.w3.org/TR/webauthn-3/#dictionary-credential-descriptor
+
+ Args:
+ id: <url-safe base64-encoded> credential id (key handle).
+ transports: <'usb'|'nfc'|'ble'|'internal'> List of supported transports.
+ """
+
+ id: str
+ transports: Optional[List[str]] = None
+
+ def to_dict(self):
+ cred = {"type": "public-key", "id": self.id}
+ if self.transports:
+ cred["transports"] = self.transports
+ return cred
+
+
+@dataclass
+class AuthenticationExtensionsClientInputs:
+ """Client extensions inputs for WebAuthn extensions.
+
+ Args:
+ appid: App ID that can be asserted in addition to the rpid.
+ https://www.w3.org/TR/webauthn-3/#sctn-appid-extension
+ """
+
+ appid: Optional[str] = None
+
+ def to_dict(self):
+ extensions = {}
+ if self.appid:
+ extensions["appid"] = self.appid
+ return extensions
+
+
+@dataclass
+class GetRequest:
+ """WebAuthn get request.
+
+ Args:
+ origin: Origin where the WebAuthn get assertion takes place.
+ rpid: Relying Party ID.
+ challenge: <url-safe base64-encoded> raw challenge.
+ timeout_ms: Timeout in milliseconds.
+ allow_credentials: List of allowed credentials.
+ user_verification: <'required'|'preferred'|'discouraged'> User verification requirement.
+ extensions: WebAuthn authentication extensions inputs.
+ """
+
+ origin: str
+ rpid: str
+ challenge: str
+ timeout_ms: Optional[int] = None
+ allow_credentials: Optional[List[PublicKeyCredentialDescriptor]] = None
+ user_verification: Optional[str] = None
+ extensions: Optional[AuthenticationExtensionsClientInputs] = None
+
+ def to_json(self) -> str:
+ req_options: Dict[str, Any] = {"rpid": self.rpid, "challenge": self.challenge}
+ if self.timeout_ms:
+ req_options["timeout"] = self.timeout_ms
+ if self.allow_credentials:
+ req_options["allowCredentials"] = [
+ c.to_dict() for c in self.allow_credentials
+ ]
+ if self.user_verification:
+ req_options["userVerification"] = self.user_verification
+ if self.extensions:
+ req_options["extensions"] = self.extensions.to_dict()
+ return json.dumps(
+ {"type": "get", "origin": self.origin, "requestData": req_options}
+ )
+
+
+@dataclass(frozen=True)
+class AuthenticatorAssertionResponse:
+ """Authenticator response to a WebAuthn get (assertion) request.
+
+ https://www.w3.org/TR/webauthn-3/#authenticatorassertionresponse
+
+ Args:
+ client_data_json: <url-safe base64-encoded> client data JSON.
+ authenticator_data: <url-safe base64-encoded> authenticator data.
+ signature: <url-safe base64-encoded> signature.
+ user_handle: <url-safe base64-encoded> user handle.
+ """
+
+ client_data_json: str
+ authenticator_data: str
+ signature: str
+ user_handle: Optional[str]
+
+
+@dataclass(frozen=True)
+class GetResponse:
+ """WebAuthn get (assertion) response.
+
+ Args:
+ id: <url-safe base64-encoded> credential id (key handle).
+ response: The authenticator assertion response.
+ authenticator_attachment: <'cross-platform'|'platform'> The attachment status of the authenticator.
+ client_extension_results: WebAuthn authentication extensions output results in a dictionary. + """ + + id: str + response: AuthenticatorAssertionResponse + authenticator_attachment: Optional[str] + client_extension_results: Optional[Dict] + + @staticmethod + def from_json(json_str: str): + """Verify and construct GetResponse from a JSON string.""" + try: + resp_json = json.loads(json_str) + except ValueError: + raise exceptions.MalformedError("Invalid Get JSON response") + if resp_json.get("type") != "getResponse": + raise exceptions.MalformedError( + "Invalid Get response type: {}".format(resp_json.get("type")) + ) + pk_cred = resp_json.get("responseData") + if pk_cred is None: + if resp_json.get("error"): + raise exceptions.ReauthFailError( + "WebAuthn.get failure: {}".format(resp_json["error"]) + ) + else: + raise exceptions.MalformedError("Get response is empty") + if pk_cred.get("type") != "public-key": + raise exceptions.MalformedError( + "Invalid credential type: {}".format(pk_cred.get("type")) + ) + assertion_json = pk_cred["response"] + assertion_resp = AuthenticatorAssertionResponse( + client_data_json=assertion_json["clientDataJSON"], + authenticator_data=assertion_json["authenticatorData"], + signature=assertion_json["signature"], + user_handle=assertion_json.get("userHandle"), + ) + return GetResponse( + id=pk_cred["id"], + response=assertion_resp, + authenticator_attachment=pk_cred.get("authenticatorAttachment"), + client_extension_results=pk_cred.get("clientExtensionResults"), + ) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/__init__.py b/.venv/lib/python3.12/site-packages/google/protobuf/__init__.py new file mode 100644 index 00000000..8998f3d6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/__init__.py @@ -0,0 +1,10 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# Copyright 2007 Google Inc. All Rights Reserved. + +__version__ = '5.29.4' diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/any.py b/.venv/lib/python3.12/site-packages/google/protobuf/any.py new file mode 100644 index 00000000..81e7013e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/any.py @@ -0,0 +1,39 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Contains the Any helper APIs.""" + +from typing import Optional + +from google.protobuf import descriptor +from google.protobuf.message import Message + +from google.protobuf.any_pb2 import Any + + +def pack( + msg: Message, + type_url_prefix: Optional[str] = 'type.googleapis.com/', + deterministic: Optional[bool] = None, +) -> Any: + any_msg = Any() + any_msg.Pack( + msg=msg, type_url_prefix=type_url_prefix, deterministic=deterministic + ) + return any_msg + + +def unpack(any_msg: Any, msg: Message) -> bool: + return any_msg.Unpack(msg=msg) + + +def type_name(any_msg: Any) -> str: + return any_msg.TypeName() + + +def is_type(any_msg: Any, des: descriptor.Descriptor) -> bool: + return any_msg.Is(des) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/any_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/any_pb2.py new file mode 100644 index 00000000..dfecd634 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/any_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: google/protobuf/any.proto +# Protobuf Python Version: 5.29.4 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 4, + '', + 'google/protobuf/any.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n\x03\x41ny\x12\x19\n\x08type_url\x18\x01 \x01(\tR\x07typeUrl\x12\x14\n\x05value\x18\x02 \x01(\x0cR\x05valueBv\n\x13\x63om.google.protobufB\x08\x41nyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.any_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\010AnyProtoP\001Z,google.golang.org/protobuf/types/known/anypb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _globals['_ANY']._serialized_start=46 + _globals['_ANY']._serialized_end=100 +# @@protoc_insertion_point(module_scope) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/api_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/api_pb2.py new file mode 100644 index 00000000..c4cc5b9e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/api_pb2.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: google/protobuf/api.proto +# Protobuf Python Version: 5.29.4 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 4, + '', + 'google/protobuf/api.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2 +from google.protobuf import type_pb2 as google_dot_protobuf_dot_type__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/api.proto\x12\x0fgoogle.protobuf\x1a$google/protobuf/source_context.proto\x1a\x1agoogle/protobuf/type.proto\"\xc1\x02\n\x03\x41pi\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x31\n\x07methods\x18\x02 \x03(\x0b\x32\x17.google.protobuf.MethodR\x07methods\x12\x31\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.OptionR\x07options\x12\x18\n\x07version\x18\x04 \x01(\tR\x07version\x12\x45\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContextR\rsourceContext\x12.\n\x06mixins\x18\x06 \x03(\x0b\x32\x16.google.protobuf.MixinR\x06mixins\x12/\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.SyntaxR\x06syntax\"\xb2\x02\n\x06Method\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12(\n\x10request_type_url\x18\x02 \x01(\tR\x0erequestTypeUrl\x12+\n\x11request_streaming\x18\x03 \x01(\x08R\x10requestStreaming\x12*\n\x11response_type_url\x18\x04 \x01(\tR\x0fresponseTypeUrl\x12-\n\x12response_streaming\x18\x05 \x01(\x08R\x11responseStreaming\x12\x31\n\x07options\x18\x06 \x03(\x0b\x32\x17.google.protobuf.OptionR\x07options\x12/\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.SyntaxR\x06syntax\"/\n\x05Mixin\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n\x04root\x18\x02 \x01(\tR\x04rootBv\n\x13\x63om.google.protobufB\x08\x41piProtoP\x01Z,google.golang.org/protobuf/types/known/apipb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.api_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\010ApiProtoP\001Z,google.golang.org/protobuf/types/known/apipb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _globals['_API']._serialized_start=113 + _globals['_API']._serialized_end=434 + _globals['_METHOD']._serialized_start=437 + _globals['_METHOD']._serialized_end=743 + _globals['_MIXIN']._serialized_start=745 + _globals['_MIXIN']._serialized_end=792 +# @@protoc_insertion_point(module_scope) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/compiler/__init__.py b/.venv/lib/python3.12/site-packages/google/protobuf/compiler/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/compiler/__init__.py diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/compiler/plugin_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/compiler/plugin_pb2.py new file mode 
100644 index 00000000..2a4ee4ee --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/compiler/plugin_pb2.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: google/protobuf/compiler/plugin.proto +# Protobuf Python Version: 5.29.4 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 4, + '', + 'google/protobuf/compiler/plugin.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%google/protobuf/compiler/plugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"c\n\x07Version\x12\x14\n\x05major\x18\x01 \x01(\x05R\x05major\x12\x14\n\x05minor\x18\x02 \x01(\x05R\x05minor\x12\x14\n\x05patch\x18\x03 \x01(\x05R\x05patch\x12\x16\n\x06suffix\x18\x04 \x01(\tR\x06suffix\"\xcf\x02\n\x14\x43odeGeneratorRequest\x12(\n\x10\x66ile_to_generate\x18\x01 \x03(\tR\x0e\x66ileToGenerate\x12\x1c\n\tparameter\x18\x02 \x01(\tR\tparameter\x12\x43\n\nproto_file\x18\x0f \x03(\x0b\x32$.google.protobuf.FileDescriptorProtoR\tprotoFile\x12\\\n\x17source_file_descriptors\x18\x11 \x03(\x0b\x32$.google.protobuf.FileDescriptorProtoR\x15sourceFileDescriptors\x12L\n\x10\x63ompiler_version\x18\x03 \x01(\x0b\x32!.google.protobuf.compiler.VersionR\x0f\x63ompilerVersion\"\x85\x04\n\x15\x43odeGeneratorResponse\x12\x14\n\x05\x65rror\x18\x01 \x01(\tR\x05\x65rror\x12-\n\x12supported_features\x18\x02 \x01(\x04R\x11supportedFeatures\x12\'\n\x0fminimum_edition\x18\x03 \x01(\x05R\x0eminimumEdition\x12\'\n\x0fmaximum_edition\x18\x04 \x01(\x05R\x0emaximumEdition\x12H\n\x04\x66ile\x18\x0f \x03(\x0b\x32\x34.google.protobuf.compiler.CodeGeneratorResponse.FileR\x04\x66ile\x1a\xb1\x01\n\x04\x46ile\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\'\n\x0finsertion_point\x18\x02 \x01(\tR\x0einsertionPoint\x12\x18\n\x07\x63ontent\x18\x0f \x01(\tR\x07\x63ontent\x12R\n\x13generated_code_info\x18\x10 \x01(\x0b\x32\".google.protobuf.GeneratedCodeInfoR\x11generatedCodeInfo\"W\n\x07\x46\x65\x61ture\x12\x10\n\x0c\x46\x45\x41TURE_NONE\x10\x00\x12\x1b\n\x17\x46\x45\x41TURE_PROTO3_OPTIONAL\x10\x01\x12\x1d\n\x19\x46\x45\x41TURE_SUPPORTS_EDITIONS\x10\x02\x42r\n\x1c\x63om.google.protobuf.compilerB\x0cPluginProtosZ)google.golang.org/protobuf/types/pluginpb\xaa\x02\x18Google.Protobuf.Compiler') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.compiler.plugin_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\034com.google.protobuf.compilerB\014PluginProtosZ)google.golang.org/protobuf/types/pluginpb\252\002\030Google.Protobuf.Compiler' + _globals['_VERSION']._serialized_start=101 + _globals['_VERSION']._serialized_end=200 + _globals['_CODEGENERATORREQUEST']._serialized_start=203 + _globals['_CODEGENERATORREQUEST']._serialized_end=538 + 
_globals['_CODEGENERATORRESPONSE']._serialized_start=541 + _globals['_CODEGENERATORRESPONSE']._serialized_end=1058 + _globals['_CODEGENERATORRESPONSE_FILE']._serialized_start=792 + _globals['_CODEGENERATORRESPONSE_FILE']._serialized_end=969 + _globals['_CODEGENERATORRESPONSE_FEATURE']._serialized_start=971 + _globals['_CODEGENERATORRESPONSE_FEATURE']._serialized_end=1058 +# @@protoc_insertion_point(module_scope) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/descriptor.py b/.venv/lib/python3.12/site-packages/google/protobuf/descriptor.py new file mode 100644 index 00000000..d8c6a435 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/descriptor.py @@ -0,0 +1,1511 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Descriptors essentially contain exactly the information found in a .proto +file, in types that make this information accessible in Python. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import abc +import binascii +import os +import threading +import warnings + +from google.protobuf.internal import api_implementation + +_USE_C_DESCRIPTORS = False +if api_implementation.Type() != 'python': + # pylint: disable=protected-access + _message = api_implementation._c_module + # TODO: Remove this import after fix api_implementation + if _message is None: + from google.protobuf.pyext import _message + _USE_C_DESCRIPTORS = True + + +class Error(Exception): + """Base error for this module.""" + + +class TypeTransformationError(Error): + """Error transforming between python proto type and corresponding C++ type.""" + + +if _USE_C_DESCRIPTORS: + # This metaclass allows to override the behavior of code like + # isinstance(my_descriptor, FieldDescriptor) + # and make it return True when the descriptor is an instance of the extension + # type written in C++. + class DescriptorMetaclass(type): + + def __instancecheck__(cls, obj): + if super(DescriptorMetaclass, cls).__instancecheck__(obj): + return True + if isinstance(obj, cls._C_DESCRIPTOR_CLASS): + return True + return False +else: + # The standard metaclass; nothing changes. + DescriptorMetaclass = abc.ABCMeta + + +class _Lock(object): + """Wrapper class of threading.Lock(), which is allowed by 'with'.""" + + def __new__(cls): + self = object.__new__(cls) + self._lock = threading.Lock() # pylint: disable=protected-access + return self + + def __enter__(self): + self._lock.acquire() + + def __exit__(self, exc_type, exc_value, exc_tb): + self._lock.release() + + +_lock = threading.Lock() + + +def _Deprecated(name): + if _Deprecated.count > 0: + _Deprecated.count -= 1 + warnings.warn( + 'Call to deprecated create function %s(). Note: Create unlinked ' + 'descriptors is going to go away. Please use get/find descriptors from ' + 'generated code or query the descriptor_pool.' + % name, + category=DeprecationWarning, stacklevel=3) + +# These must match the values in descriptor.proto, but we can't use them +# directly because we sometimes need to reference them in feature helpers +# below *during* the build of descriptor.proto. 
+_FEATURESET_MESSAGE_ENCODING_DELIMITED = 2
+_FEATURESET_FIELD_PRESENCE_IMPLICIT = 2
+_FEATURESET_FIELD_PRESENCE_LEGACY_REQUIRED = 3
+_FEATURESET_REPEATED_FIELD_ENCODING_PACKED = 1
+_FEATURESET_ENUM_TYPE_CLOSED = 2
+
+# Deprecation warnings print at most 100 times, which should be enough for
+# users to notice without causing timeouts.
+_Deprecated.count = 100
+
+
+_internal_create_key = object()
+
+
+class DescriptorBase(metaclass=DescriptorMetaclass):
+
+ """Descriptors base class.
+
+ This class is the base of all descriptor classes. It provides common
+ options-related functionality.
+
+ Attributes:
+ has_options: True if the descriptor has non-default options. Usually it is
+ not necessary to read this -- just call GetOptions() which will happily
+ return the default instance. However, it's sometimes useful for
+ efficiency, and also useful inside the protobuf implementation to avoid
+ some bootstrapping issues.
+ file (FileDescriptor): Reference to file info.
+ """
+
+ if _USE_C_DESCRIPTORS:
+ # The class, or tuple of classes, that are considered as "virtual
+ # subclasses" of this descriptor class.
+ _C_DESCRIPTOR_CLASS = ()
+
+ def __init__(self, file, options, serialized_options, options_class_name):
+ """Initialize the descriptor given its options message and the name of the
+ class of the options message. The name of the class is required in case
+ the options message is None and has to be created.
+ """
+ self._features = None
+ self.file = file
+ self._options = options
+ self._loaded_options = None
+ self._options_class_name = options_class_name
+ self._serialized_options = serialized_options
+
+ # Does this descriptor have non-default options?
+ self.has_options = (self._options is not None) or (
+ self._serialized_options is not None
+ )
+
+ @property
+ @abc.abstractmethod
+ def _parent(self):
+ pass
+
+ def _InferLegacyFeatures(self, edition, options, features):
+ """Infers features from proto2/proto3 syntax so that editions logic can be used everywhere.
+
+ Args:
+ edition: The edition to infer features for.
+ options: The options for this descriptor that are being processed.
+ features: The feature set object to modify with inferred features.
+ """
+ pass
+
+ def _GetFeatures(self):
+ if not self._features:
+ self._LazyLoadOptions()
+ return self._features
+
+ def _ResolveFeatures(self, edition, raw_options):
+ """Resolves features from the raw options of this descriptor.
+
+ Args:
+ edition: The edition to use for feature defaults.
+ raw_options: The options for this descriptor that are being processed.
+
+ Returns:
+ A fully resolved feature set for making runtime decisions.
+ """
+ # pylint: disable=g-import-not-at-top
+ from google.protobuf import descriptor_pb2
+
+ if self._parent:
+ features = descriptor_pb2.FeatureSet()
+ features.CopyFrom(self._parent._GetFeatures())
+ else:
+ features = self.file.pool._CreateDefaultFeatures(edition)
+ unresolved = descriptor_pb2.FeatureSet()
+ unresolved.CopyFrom(raw_options.features)
+ self._InferLegacyFeatures(edition, raw_options, unresolved)
+ features.MergeFrom(unresolved)
+
+ # Use the feature cache to reduce memory bloat.
+ return self.file.pool._InternFeatures(features)
+
+ def _LazyLoadOptions(self):
+ """Lazily initializes descriptor options towards the end of the build."""
+ if self._loaded_options:
+ return
+
+ # pylint: disable=g-import-not-at-top
+ from google.protobuf import descriptor_pb2
+
+ if not hasattr(descriptor_pb2, self._options_class_name):
+ raise RuntimeError(
+ 'Unknown options class name %s!'
+ % self._options_class_name
+ )
+ options_class = getattr(descriptor_pb2, self._options_class_name)
+ features = None
+ edition = self.file._edition
+
+ if not self.has_options:
+ if not self._features:
+ features = self._ResolveFeatures(
+ descriptor_pb2.Edition.Value(edition), options_class()
+ )
+ with _lock:
+ self._loaded_options = options_class()
+ if not self._features:
+ self._features = features
+ else:
+ if not self._serialized_options:
+ options = self._options
+ else:
+ options = _ParseOptions(options_class(), self._serialized_options)
+
+ if not self._features:
+ features = self._ResolveFeatures(
+ descriptor_pb2.Edition.Value(edition), options
+ )
+ with _lock:
+ self._loaded_options = options
+ if not self._features:
+ self._features = features
+ if options.HasField('features'):
+ options.ClearField('features')
+ if not options.SerializeToString():
+ self._loaded_options = options_class()
+ self.has_options = False
+
+ def GetOptions(self):
+ """Retrieves descriptor options.
+
+ Returns:
+ The options set on this descriptor.
+ """
+ if not self._loaded_options:
+ self._LazyLoadOptions()
+ return self._loaded_options
+
+
+class _NestedDescriptorBase(DescriptorBase):
+ """Common class for descriptors that can be nested."""
+
+ def __init__(self, options, options_class_name, name, full_name,
+ file, containing_type, serialized_start=None,
+ serialized_end=None, serialized_options=None):
+ """Constructor.
+
+ Args:
+ options: Protocol message options or None to use default message options.
+ options_class_name (str): The class name of the above options.
+ name (str): Name of this protocol message type.
+ full_name (str): Fully-qualified name of this protocol message type, which
+ will include protocol "package" name and the name of any enclosing
+ types.
+ containing_type: if provided, this is a nested descriptor, with this
+ descriptor as parent, otherwise None.
+ serialized_start: The start index (inclusive) of the block in
+ file.serialized_pb that describes this descriptor.
+ serialized_end: The end index (exclusive) of the block in
+ file.serialized_pb that describes this descriptor.
+ serialized_options: Protocol message serialized options or None.
+ """
+ super(_NestedDescriptorBase, self).__init__(
+ file, options, serialized_options, options_class_name
+ )
+
+ self.name = name
+ # TODO: Add function to calculate full_name instead of having it in
+ # memory?
+ self.full_name = full_name
+ self.containing_type = containing_type
+
+ self._serialized_start = serialized_start
+ self._serialized_end = serialized_end
+
+ def CopyToProto(self, proto):
+ """Copies this to the matching proto in descriptor_pb2.
+
+ Args:
+ proto: An empty proto instance from descriptor_pb2.
+
+ Raises:
+ Error: If self couldn't be serialized, due to too few constructor
+ arguments.
+ """
+ if (self.file is not None and
+ self._serialized_start is not None and
+ self._serialized_end is not None):
+ proto.ParseFromString(self.file.serialized_pb[
+ self._serialized_start:self._serialized_end])
+ else:
+ raise Error('Descriptor does not contain serialization.')
+
+
+class Descriptor(_NestedDescriptorBase):
+
+ """Descriptor for a protocol message type.
+
+ Attributes:
+ name (str): Name of this protocol message type.
+ full_name (str): Fully-qualified name of this protocol message type,
+ which will include protocol "package" name and the name of any
+ enclosing types.
+ containing_type (Descriptor): Reference to the descriptor of the type
+ containing us, or None if this is top-level.
+ fields (list[FieldDescriptor]): Field descriptors for all fields in + this type. + fields_by_number (dict(int, FieldDescriptor)): Same + :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed + by "number" attribute in each FieldDescriptor. + fields_by_name (dict(str, FieldDescriptor)): Same + :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed by + "name" attribute in each :class:`FieldDescriptor`. + nested_types (list[Descriptor]): Descriptor references + for all protocol message types nested within this one. + nested_types_by_name (dict(str, Descriptor)): Same Descriptor + objects as in :attr:`nested_types`, but indexed by "name" attribute + in each Descriptor. + enum_types (list[EnumDescriptor]): :class:`EnumDescriptor` references + for all enums contained within this type. + enum_types_by_name (dict(str, EnumDescriptor)): Same + :class:`EnumDescriptor` objects as in :attr:`enum_types`, but + indexed by "name" attribute in each EnumDescriptor. + enum_values_by_name (dict(str, EnumValueDescriptor)): Dict mapping + from enum value name to :class:`EnumValueDescriptor` for that value. + extensions (list[FieldDescriptor]): All extensions defined directly + within this message type (NOT within a nested type). + extensions_by_name (dict(str, FieldDescriptor)): Same FieldDescriptor + objects as :attr:`extensions`, but indexed by "name" attribute of each + FieldDescriptor. + is_extendable (bool): Does this type define any extension ranges? + oneofs (list[OneofDescriptor]): The list of descriptors for oneof fields + in this message. + oneofs_by_name (dict(str, OneofDescriptor)): Same objects as in + :attr:`oneofs`, but indexed by "name" attribute. + file (FileDescriptor): Reference to file descriptor. + is_map_entry: If the message type is a map entry. + + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.Descriptor + + def __new__( + cls, + name=None, + full_name=None, + filename=None, + containing_type=None, + fields=None, + nested_types=None, + enum_types=None, + extensions=None, + options=None, + serialized_options=None, + is_extendable=True, + extension_ranges=None, + oneofs=None, + file=None, # pylint: disable=redefined-builtin + serialized_start=None, + serialized_end=None, + syntax=None, + is_map_entry=False, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindMessageTypeByName(full_name) + + # NOTE: The file argument redefining a builtin is nothing we can + # fix right now since we don't know how many clients already rely on the + # name of the argument. + def __init__(self, name, full_name, filename, containing_type, fields, + nested_types, enum_types, extensions, options=None, + serialized_options=None, + is_extendable=True, extension_ranges=None, oneofs=None, + file=None, serialized_start=None, serialized_end=None, # pylint: disable=redefined-builtin + syntax=None, is_map_entry=False, create_key=None): + """Arguments to __init__() are as described in the description + of Descriptor fields above. + + Note that filename is an obsolete argument, that is not used anymore. + Please use file.name to access this as an attribute. 
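+
+ Example (illustrative; descriptors are normally obtained from
+ generated code rather than constructed directly, and ``MyMessage``
+ is a hypothetical generated class)::
+
+ desc = MyMessage.DESCRIPTOR
+ field = desc.fields_by_name['my_field']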
+ """ + if create_key is not _internal_create_key: + _Deprecated('Descriptor') + + super(Descriptor, self).__init__( + options, 'MessageOptions', name, full_name, file, + containing_type, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + + # We have fields in addition to fields_by_name and fields_by_number, + # so that: + # 1. Clients can index fields by "order in which they're listed." + # 2. Clients can easily iterate over all fields with the terse + # syntax: for f in descriptor.fields: ... + self.fields = fields + for field in self.fields: + field.containing_type = self + field.file = file + self.fields_by_number = dict((f.number, f) for f in fields) + self.fields_by_name = dict((f.name, f) for f in fields) + self._fields_by_camelcase_name = None + + self.nested_types = nested_types + for nested_type in nested_types: + nested_type.containing_type = self + self.nested_types_by_name = dict((t.name, t) for t in nested_types) + + self.enum_types = enum_types + for enum_type in self.enum_types: + enum_type.containing_type = self + self.enum_types_by_name = dict((t.name, t) for t in enum_types) + self.enum_values_by_name = dict( + (v.name, v) for t in enum_types for v in t.values) + + self.extensions = extensions + for extension in self.extensions: + extension.extension_scope = self + self.extensions_by_name = dict((f.name, f) for f in extensions) + self.is_extendable = is_extendable + self.extension_ranges = extension_ranges + self.oneofs = oneofs if oneofs is not None else [] + self.oneofs_by_name = dict((o.name, o) for o in self.oneofs) + for oneof in self.oneofs: + oneof.containing_type = self + oneof.file = file + self._is_map_entry = is_map_entry + + @property + def _parent(self): + return self.containing_type or self.file + + @property + def fields_by_camelcase_name(self): + """Same FieldDescriptor objects as in :attr:`fields`, but indexed by + :attr:`FieldDescriptor.camelcase_name`. + """ + if self._fields_by_camelcase_name is None: + self._fields_by_camelcase_name = dict( + (f.camelcase_name, f) for f in self.fields) + return self._fields_by_camelcase_name + + def EnumValueName(self, enum, value): + """Returns the string name of an enum value. + + This is just a small helper method to simplify a common operation. + + Args: + enum: string name of the Enum. + value: int, value of the enum. + + Returns: + string name of the enum value. + + Raises: + KeyError if either the Enum doesn't exist or the value is not a valid + value for the enum. + """ + return self.enum_types_by_name[enum].values_by_number[value].name + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.DescriptorProto. + + Args: + proto: An empty descriptor_pb2.DescriptorProto. + """ + # This function is overridden to give a better doc comment. + super(Descriptor, self).CopyToProto(proto) + + +# TODO: We should have aggressive checking here, +# for example: +# * If you specify a repeated field, you should not be allowed +# to specify a default value. +# * [Other examples here as needed]. +# +# TODO: for this and other *Descriptor classes, we +# might also want to lock things down aggressively (e.g., +# prevent clients from setting the attributes). Having +# stronger invariants here in general will reduce the number +# of runtime checks we must do in reflection.py... +class FieldDescriptor(DescriptorBase): + + """Descriptor for a single field in a .proto file. + + Attributes: + name (str): Name of this field, exactly as it appears in .proto. 
+ full_name (str): Name of this field, including containing scope. This is + particularly relevant for extensions. + index (int): Dense, 0-indexed index giving the order that this + field textually appears within its message in the .proto file. + number (int): Tag number declared for this field in the .proto file. + + type (int): (One of the TYPE_* constants below) Declared type. + cpp_type (int): (One of the CPPTYPE_* constants below) C++ type used to + represent this field. + + label (int): (One of the LABEL_* constants below) Tells whether this + field is optional, required, or repeated. + has_default_value (bool): True if this field has a default value defined, + otherwise false. + default_value (Varies): Default value of this field. Only + meaningful for non-repeated scalar fields. Repeated fields + should always set this to [], and non-repeated composite + fields should always set this to None. + + containing_type (Descriptor): Descriptor of the protocol message + type that contains this field. Set by the Descriptor constructor + if we're passed into one. + Somewhat confusingly, for extension fields, this is the + descriptor of the EXTENDED message, not the descriptor + of the message containing this field. (See is_extension and + extension_scope below). + message_type (Descriptor): If a composite field, a descriptor + of the message type contained in this field. Otherwise, this is None. + enum_type (EnumDescriptor): If this field contains an enum, a + descriptor of that enum. Otherwise, this is None. + + is_extension: True iff this describes an extension field. + extension_scope (Descriptor): Only meaningful if is_extension is True. + Gives the message that immediately contains this extension field. + Will be None iff we're a top-level (file-level) extension field. + + options (descriptor_pb2.FieldOptions): Protocol message field options or + None to use default field options. + + containing_oneof (OneofDescriptor): If the field is a member of a oneof + union, contains its descriptor. Otherwise, None. + + file (FileDescriptor): Reference to file descriptor. + """ + + # Must be consistent with C++ FieldDescriptor::Type enum in + # descriptor.h. + # + # TODO: Find a way to eliminate this repetition. + TYPE_DOUBLE = 1 + TYPE_FLOAT = 2 + TYPE_INT64 = 3 + TYPE_UINT64 = 4 + TYPE_INT32 = 5 + TYPE_FIXED64 = 6 + TYPE_FIXED32 = 7 + TYPE_BOOL = 8 + TYPE_STRING = 9 + TYPE_GROUP = 10 + TYPE_MESSAGE = 11 + TYPE_BYTES = 12 + TYPE_UINT32 = 13 + TYPE_ENUM = 14 + TYPE_SFIXED32 = 15 + TYPE_SFIXED64 = 16 + TYPE_SINT32 = 17 + TYPE_SINT64 = 18 + MAX_TYPE = 18 + + # Must be consistent with C++ FieldDescriptor::CppType enum in + # descriptor.h. + # + # TODO: Find a way to eliminate this repetition. 
+ CPPTYPE_INT32 = 1 + CPPTYPE_INT64 = 2 + CPPTYPE_UINT32 = 3 + CPPTYPE_UINT64 = 4 + CPPTYPE_DOUBLE = 5 + CPPTYPE_FLOAT = 6 + CPPTYPE_BOOL = 7 + CPPTYPE_ENUM = 8 + CPPTYPE_STRING = 9 + CPPTYPE_MESSAGE = 10 + MAX_CPPTYPE = 10 + + _PYTHON_TO_CPP_PROTO_TYPE_MAP = { + TYPE_DOUBLE: CPPTYPE_DOUBLE, + TYPE_FLOAT: CPPTYPE_FLOAT, + TYPE_ENUM: CPPTYPE_ENUM, + TYPE_INT64: CPPTYPE_INT64, + TYPE_SINT64: CPPTYPE_INT64, + TYPE_SFIXED64: CPPTYPE_INT64, + TYPE_UINT64: CPPTYPE_UINT64, + TYPE_FIXED64: CPPTYPE_UINT64, + TYPE_INT32: CPPTYPE_INT32, + TYPE_SFIXED32: CPPTYPE_INT32, + TYPE_SINT32: CPPTYPE_INT32, + TYPE_UINT32: CPPTYPE_UINT32, + TYPE_FIXED32: CPPTYPE_UINT32, + TYPE_BYTES: CPPTYPE_STRING, + TYPE_STRING: CPPTYPE_STRING, + TYPE_BOOL: CPPTYPE_BOOL, + TYPE_MESSAGE: CPPTYPE_MESSAGE, + TYPE_GROUP: CPPTYPE_MESSAGE + } + + # Must be consistent with C++ FieldDescriptor::Label enum in + # descriptor.h. + # + # TODO: Find a way to eliminate this repetition. + LABEL_OPTIONAL = 1 + LABEL_REQUIRED = 2 + LABEL_REPEATED = 3 + MAX_LABEL = 3 + + # Must be consistent with C++ constants kMaxNumber, kFirstReservedNumber, + # and kLastReservedNumber in descriptor.h + MAX_FIELD_NUMBER = (1 << 29) - 1 + FIRST_RESERVED_FIELD_NUMBER = 19000 + LAST_RESERVED_FIELD_NUMBER = 19999 + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.FieldDescriptor + + def __new__(cls, name, full_name, index, number, type, cpp_type, label, + default_value, message_type, enum_type, containing_type, + is_extension, extension_scope, options=None, + serialized_options=None, + has_default_value=True, containing_oneof=None, json_name=None, + file=None, create_key=None): # pylint: disable=redefined-builtin + _message.Message._CheckCalledFromGeneratedFile() + if is_extension: + return _message.default_pool.FindExtensionByName(full_name) + else: + return _message.default_pool.FindFieldByName(full_name) + + def __init__(self, name, full_name, index, number, type, cpp_type, label, + default_value, message_type, enum_type, containing_type, + is_extension, extension_scope, options=None, + serialized_options=None, + has_default_value=True, containing_oneof=None, json_name=None, + file=None, create_key=None): # pylint: disable=redefined-builtin + """The arguments are as described in the description of FieldDescriptor + attributes above. + + Note that containing_type may be None, and may be set later if necessary + (to deal with circular references between message types, for example). + Likewise for extension_scope. 
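+
+ Example (illustrative; ``MyMessage`` and ``my_field`` are
+ hypothetical generated names)::
+
+ field = MyMessage.DESCRIPTOR.fields_by_name['my_field']
+ field.number # declared tag number
+ FieldDescriptor.ProtoTypeToCppProtoType(field.type) # CPPTYPE_* value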
+ """ + if create_key is not _internal_create_key: + _Deprecated('FieldDescriptor') + + super(FieldDescriptor, self).__init__( + file, options, serialized_options, 'FieldOptions' + ) + self.name = name + self.full_name = full_name + self._camelcase_name = None + if json_name is None: + self.json_name = _ToJsonName(name) + else: + self.json_name = json_name + self.index = index + self.number = number + self._type = type + self.cpp_type = cpp_type + self._label = label + self.has_default_value = has_default_value + self.default_value = default_value + self.containing_type = containing_type + self.message_type = message_type + self.enum_type = enum_type + self.is_extension = is_extension + self.extension_scope = extension_scope + self.containing_oneof = containing_oneof + if api_implementation.Type() == 'python': + self._cdescriptor = None + else: + if is_extension: + self._cdescriptor = _message.default_pool.FindExtensionByName(full_name) + else: + self._cdescriptor = _message.default_pool.FindFieldByName(full_name) + + @property + def _parent(self): + if self.containing_oneof: + return self.containing_oneof + if self.is_extension: + return self.extension_scope or self.file + return self.containing_type + + def _InferLegacyFeatures(self, edition, options, features): + # pylint: disable=g-import-not-at-top + from google.protobuf import descriptor_pb2 + + if edition >= descriptor_pb2.Edition.EDITION_2023: + return + + if self._label == FieldDescriptor.LABEL_REQUIRED: + features.field_presence = ( + descriptor_pb2.FeatureSet.FieldPresence.LEGACY_REQUIRED + ) + + if self._type == FieldDescriptor.TYPE_GROUP: + features.message_encoding = ( + descriptor_pb2.FeatureSet.MessageEncoding.DELIMITED + ) + + if options.HasField('packed'): + features.repeated_field_encoding = ( + descriptor_pb2.FeatureSet.RepeatedFieldEncoding.PACKED + if options.packed + else descriptor_pb2.FeatureSet.RepeatedFieldEncoding.EXPANDED + ) + + @property + def type(self): + if ( + self._GetFeatures().message_encoding + == _FEATURESET_MESSAGE_ENCODING_DELIMITED + and self.message_type + and not self.message_type.GetOptions().map_entry + and not self.containing_type.GetOptions().map_entry + ): + return FieldDescriptor.TYPE_GROUP + return self._type + + @type.setter + def type(self, val): + self._type = val + + @property + def label(self): + if ( + self._GetFeatures().field_presence + == _FEATURESET_FIELD_PRESENCE_LEGACY_REQUIRED + ): + return FieldDescriptor.LABEL_REQUIRED + return self._label + + @property + def camelcase_name(self): + """Camelcase name of this field. + + Returns: + str: the name in CamelCase. + """ + if self._camelcase_name is None: + self._camelcase_name = _ToCamelCase(self.name) + return self._camelcase_name + + @property + def has_presence(self): + """Whether the field distinguishes between unpopulated and default values. + + Raises: + RuntimeError: singular field that is not linked with message nor file. 
+ """ + if self.label == FieldDescriptor.LABEL_REPEATED: + return False + if ( + self.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE + or self.is_extension + or self.containing_oneof + ): + return True + + return ( + self._GetFeatures().field_presence + != _FEATURESET_FIELD_PRESENCE_IMPLICIT + ) + + @property + def is_packed(self): + """Returns if the field is packed.""" + if self.label != FieldDescriptor.LABEL_REPEATED: + return False + field_type = self.type + if (field_type == FieldDescriptor.TYPE_STRING or + field_type == FieldDescriptor.TYPE_GROUP or + field_type == FieldDescriptor.TYPE_MESSAGE or + field_type == FieldDescriptor.TYPE_BYTES): + return False + + return ( + self._GetFeatures().repeated_field_encoding + == _FEATURESET_REPEATED_FIELD_ENCODING_PACKED + ) + + @staticmethod + def ProtoTypeToCppProtoType(proto_type): + """Converts from a Python proto type to a C++ Proto Type. + + The Python ProtocolBuffer classes specify both the 'Python' datatype and the + 'C++' datatype - and they're not the same. This helper method should + translate from one to another. + + Args: + proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*) + Returns: + int: descriptor.FieldDescriptor.CPPTYPE_*, the C++ type. + Raises: + TypeTransformationError: when the Python proto type isn't known. + """ + try: + return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type] + except KeyError: + raise TypeTransformationError('Unknown proto_type: %s' % proto_type) + + +class EnumDescriptor(_NestedDescriptorBase): + + """Descriptor for an enum defined in a .proto file. + + Attributes: + name (str): Name of the enum type. + full_name (str): Full name of the type, including package name + and any enclosing type(s). + + values (list[EnumValueDescriptor]): List of the values + in this enum. + values_by_name (dict(str, EnumValueDescriptor)): Same as :attr:`values`, + but indexed by the "name" field of each EnumValueDescriptor. + values_by_number (dict(int, EnumValueDescriptor)): Same as :attr:`values`, + but indexed by the "number" field of each EnumValueDescriptor. + containing_type (Descriptor): Descriptor of the immediate containing + type of this enum, or None if this is an enum defined at the + top level in a .proto file. Set by Descriptor's constructor + if we're passed into one. + file (FileDescriptor): Reference to file descriptor. + options (descriptor_pb2.EnumOptions): Enum options message or + None to use default enum options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.EnumDescriptor + + def __new__(cls, name, full_name, filename, values, + containing_type=None, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindEnumTypeByName(full_name) + + def __init__(self, name, full_name, filename, values, + containing_type=None, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + """Arguments are as described in the attribute description above. + + Note that filename is an obsolete argument, that is not used anymore. + Please use file.name to access this as an attribute. 
+ """ + if create_key is not _internal_create_key: + _Deprecated('EnumDescriptor') + + super(EnumDescriptor, self).__init__( + options, 'EnumOptions', name, full_name, file, + containing_type, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + + self.values = values + for value in self.values: + value.file = file + value.type = self + self.values_by_name = dict((v.name, v) for v in values) + # Values are reversed to ensure that the first alias is retained. + self.values_by_number = dict((v.number, v) for v in reversed(values)) + + @property + def _parent(self): + return self.containing_type or self.file + + @property + def is_closed(self): + """Returns true whether this is a "closed" enum. + + This means that it: + - Has a fixed set of values, rather than being equivalent to an int32. + - Encountering values not in this set causes them to be treated as unknown + fields. + - The first value (i.e., the default) may be nonzero. + + WARNING: Some runtimes currently have a quirk where non-closed enums are + treated as closed when used as the type of fields defined in a + `syntax = proto2;` file. This quirk is not present in all runtimes; as of + writing, we know that: + + - C++, Java, and C++-based Python share this quirk. + - UPB and UPB-based Python do not. + - PHP and Ruby treat all enums as open regardless of declaration. + + Care should be taken when using this function to respect the target + runtime's enum handling quirks. + """ + return self._GetFeatures().enum_type == _FEATURESET_ENUM_TYPE_CLOSED + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.EnumDescriptorProto. + + Args: + proto (descriptor_pb2.EnumDescriptorProto): An empty descriptor proto. + """ + # This function is overridden to give a better doc comment. + super(EnumDescriptor, self).CopyToProto(proto) + + +class EnumValueDescriptor(DescriptorBase): + + """Descriptor for a single value within an enum. + + Attributes: + name (str): Name of this value. + index (int): Dense, 0-indexed index giving the order that this + value appears textually within its enum in the .proto file. + number (int): Actual number assigned to this enum value. + type (EnumDescriptor): :class:`EnumDescriptor` to which this value + belongs. Set by :class:`EnumDescriptor`'s constructor if we're + passed into one. + options (descriptor_pb2.EnumValueOptions): Enum value options message or + None to use default enum value options options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.EnumValueDescriptor + + def __new__(cls, name, index, number, + type=None, # pylint: disable=redefined-builtin + options=None, serialized_options=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + # There is no way we can build a complete EnumValueDescriptor with the + # given parameters (the name of the Enum is not known, for example). + # Fortunately generated files just pass it to the EnumDescriptor() + # constructor, which will ignore it, so returning None is good enough. 
+    return None
+
+  def __init__(self, name, index, number,
+               type=None,  # pylint: disable=redefined-builtin
+               options=None, serialized_options=None, create_key=None):
+    """Arguments are as described in the attribute description above."""
+    if create_key is not _internal_create_key:
+      _Deprecated('EnumValueDescriptor')
+
+    super(EnumValueDescriptor, self).__init__(
+        type.file if type else None,
+        options,
+        serialized_options,
+        'EnumValueOptions',
+    )
+    self.name = name
+    self.index = index
+    self.number = number
+    self.type = type
+
+  @property
+  def _parent(self):
+    return self.type
+
+
+class OneofDescriptor(DescriptorBase):
+  """Descriptor for a oneof field.
+
+  Attributes:
+    name (str): Name of the oneof field.
+    full_name (str): Full name of the oneof field, including package name.
+    index (int): 0-based index giving the order of the oneof field inside
+      its containing type.
+    containing_type (Descriptor): :class:`Descriptor` of the protocol message
+      type that contains this field. Set by the :class:`Descriptor` constructor
+      if we're passed into one.
+    fields (list[FieldDescriptor]): The list of field descriptors this
+      oneof can contain.
+  """
+
+  if _USE_C_DESCRIPTORS:
+    _C_DESCRIPTOR_CLASS = _message.OneofDescriptor
+
+  def __new__(
+      cls, name, full_name, index, containing_type, fields, options=None,
+      serialized_options=None, create_key=None):
+    _message.Message._CheckCalledFromGeneratedFile()
+    return _message.default_pool.FindOneofByName(full_name)
+
+  def __init__(
+      self, name, full_name, index, containing_type, fields, options=None,
+      serialized_options=None, create_key=None):
+    """Arguments are as described in the attribute description above."""
+    if create_key is not _internal_create_key:
+      _Deprecated('OneofDescriptor')
+
+    super(OneofDescriptor, self).__init__(
+        containing_type.file if containing_type else None,
+        options,
+        serialized_options,
+        'OneofOptions',
+    )
+    self.name = name
+    self.full_name = full_name
+    self.index = index
+    self.containing_type = containing_type
+    self.fields = fields
+
+  @property
+  def _parent(self):
+    return self.containing_type
+
+
+class ServiceDescriptor(_NestedDescriptorBase):
+
+  """Descriptor for a service.
+
+  Attributes:
+    name (str): Name of the service.
+    full_name (str): Full name of the service, including package name.
+    index (int): 0-indexed index giving the order that this service's
+      definition appears within the .proto file.
+    methods (list[MethodDescriptor]): List of methods provided by this
+      service.
+    methods_by_name (dict(str, MethodDescriptor)): Same
+      :class:`MethodDescriptor` objects as in :attr:`methods`, but
+      indexed by "name" attribute in each :class:`MethodDescriptor`.
+    options (descriptor_pb2.ServiceOptions): Service options message or
+      None to use default service options.
+    file (FileDescriptor): Reference to file info.
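+
+  A minimal usage sketch (``my_service_pb2``, ``MyService`` and ``MyMethod``
+  are hypothetical names, not part of this module)::
+
+    service_desc = my_service_pb2.DESCRIPTOR.services_by_name['MyService']
+    method_desc = service_desc.FindMethodByName('MyMethod')
+    request_descriptor = method_desc.input_type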
+ """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.ServiceDescriptor + + def __new__( + cls, + name=None, + full_name=None, + index=None, + methods=None, + options=None, + serialized_options=None, + file=None, # pylint: disable=redefined-builtin + serialized_start=None, + serialized_end=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access + return _message.default_pool.FindServiceByName(full_name) + + def __init__(self, name, full_name, index, methods, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + if create_key is not _internal_create_key: + _Deprecated('ServiceDescriptor') + + super(ServiceDescriptor, self).__init__( + options, 'ServiceOptions', name, full_name, file, + None, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + self.index = index + self.methods = methods + self.methods_by_name = dict((m.name, m) for m in methods) + # Set the containing service for each method in this service. + for method in self.methods: + method.file = self.file + method.containing_service = self + + @property + def _parent(self): + return self.file + + def FindMethodByName(self, name): + """Searches for the specified method, and returns its descriptor. + + Args: + name (str): Name of the method. + + Returns: + MethodDescriptor: The descriptor for the requested method. + + Raises: + KeyError: if the method cannot be found in the service. + """ + return self.methods_by_name[name] + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.ServiceDescriptorProto. + + Args: + proto (descriptor_pb2.ServiceDescriptorProto): An empty descriptor proto. + """ + # This function is overridden to give a better doc comment. + super(ServiceDescriptor, self).CopyToProto(proto) + + +class MethodDescriptor(DescriptorBase): + + """Descriptor for a method in a service. + + Attributes: + name (str): Name of the method within the service. + full_name (str): Full name of method. + index (int): 0-indexed index of the method inside the service. + containing_service (ServiceDescriptor): The service that contains this + method. + input_type (Descriptor): The descriptor of the message that this method + accepts. + output_type (Descriptor): The descriptor of the message that this method + returns. + client_streaming (bool): Whether this method uses client streaming. + server_streaming (bool): Whether this method uses server streaming. + options (descriptor_pb2.MethodOptions or None): Method options message, or + None to use default method options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.MethodDescriptor + + def __new__(cls, + name, + full_name, + index, + containing_service, + input_type, + output_type, + client_streaming=False, + server_streaming=False, + options=None, + serialized_options=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access + return _message.default_pool.FindMethodByName(full_name) + + def __init__(self, + name, + full_name, + index, + containing_service, + input_type, + output_type, + client_streaming=False, + server_streaming=False, + options=None, + serialized_options=None, + create_key=None): + """The arguments are as described in the description of MethodDescriptor + attributes above. + + Note that containing_service may be None, and may be set later if necessary. 
+ """ + if create_key is not _internal_create_key: + _Deprecated('MethodDescriptor') + + super(MethodDescriptor, self).__init__( + containing_service.file if containing_service else None, + options, + serialized_options, + 'MethodOptions', + ) + self.name = name + self.full_name = full_name + self.index = index + self.containing_service = containing_service + self.input_type = input_type + self.output_type = output_type + self.client_streaming = client_streaming + self.server_streaming = server_streaming + + @property + def _parent(self): + return self.containing_service + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.MethodDescriptorProto. + + Args: + proto (descriptor_pb2.MethodDescriptorProto): An empty descriptor proto. + + Raises: + Error: If self couldn't be serialized, due to too few constructor + arguments. + """ + if self.containing_service is not None: + from google.protobuf import descriptor_pb2 + service_proto = descriptor_pb2.ServiceDescriptorProto() + self.containing_service.CopyToProto(service_proto) + proto.CopyFrom(service_proto.method[self.index]) + else: + raise Error('Descriptor does not contain a service.') + + +class FileDescriptor(DescriptorBase): + """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto. + + Note that :attr:`enum_types_by_name`, :attr:`extensions_by_name`, and + :attr:`dependencies` fields are only set by the + :py:mod:`google.protobuf.message_factory` module, and not by the generated + proto code. + + Attributes: + name (str): Name of file, relative to root of source tree. + package (str): Name of the package + edition (Edition): Enum value indicating edition of the file + serialized_pb (bytes): Byte string of serialized + :class:`descriptor_pb2.FileDescriptorProto`. + dependencies (list[FileDescriptor]): List of other :class:`FileDescriptor` + objects this :class:`FileDescriptor` depends on. + public_dependencies (list[FileDescriptor]): A subset of + :attr:`dependencies`, which were declared as "public". + message_types_by_name (dict(str, Descriptor)): Mapping from message names to + their :class:`Descriptor`. + enum_types_by_name (dict(str, EnumDescriptor)): Mapping from enum names to + their :class:`EnumDescriptor`. + extensions_by_name (dict(str, FieldDescriptor)): Mapping from extension + names declared at file scope to their :class:`FieldDescriptor`. + services_by_name (dict(str, ServiceDescriptor)): Mapping from services' + names to their :class:`ServiceDescriptor`. + pool (DescriptorPool): The pool this descriptor belongs to. When not passed + to the constructor, the global default pool is used. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.FileDescriptor + + def __new__( + cls, + name, + package, + options=None, + serialized_options=None, + serialized_pb=None, + dependencies=None, + public_dependencies=None, + syntax=None, + edition=None, + pool=None, + create_key=None, + ): + # FileDescriptor() is called from various places, not only from generated + # files, to register dynamic proto files and messages. 
+ # pylint: disable=g-explicit-bool-comparison + if serialized_pb: + return _message.default_pool.AddSerializedFile(serialized_pb) + else: + return super(FileDescriptor, cls).__new__(cls) + + def __init__( + self, + name, + package, + options=None, + serialized_options=None, + serialized_pb=None, + dependencies=None, + public_dependencies=None, + syntax=None, + edition=None, + pool=None, + create_key=None, + ): + """Constructor.""" + if create_key is not _internal_create_key: + _Deprecated('FileDescriptor') + + super(FileDescriptor, self).__init__( + self, options, serialized_options, 'FileOptions' + ) + + if edition and edition != 'EDITION_UNKNOWN': + self._edition = edition + elif syntax == 'proto3': + self._edition = 'EDITION_PROTO3' + else: + self._edition = 'EDITION_PROTO2' + + if pool is None: + from google.protobuf import descriptor_pool + pool = descriptor_pool.Default() + self.pool = pool + self.message_types_by_name = {} + self.name = name + self.package = package + self.serialized_pb = serialized_pb + + self.enum_types_by_name = {} + self.extensions_by_name = {} + self.services_by_name = {} + self.dependencies = (dependencies or []) + self.public_dependencies = (public_dependencies or []) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.FileDescriptorProto. + + Args: + proto: An empty descriptor_pb2.FileDescriptorProto. + """ + proto.ParseFromString(self.serialized_pb) + + @property + def _parent(self): + return None + + +def _ParseOptions(message, string): + """Parses serialized options. + + This helper function is used to parse serialized options in generated + proto2 files. It must not be used outside proto2. + """ + message.ParseFromString(string) + return message + + +def _ToCamelCase(name): + """Converts name to camel-case and returns it.""" + capitalize_next = False + result = [] + + for c in name: + if c == '_': + if result: + capitalize_next = True + elif capitalize_next: + result.append(c.upper()) + capitalize_next = False + else: + result += c + + # Lower-case the first letter. + if result and result[0].isupper(): + result[0] = result[0].lower() + return ''.join(result) + + +def _OptionsOrNone(descriptor_proto): + """Returns the value of the field `options`, or None if it is not set.""" + if descriptor_proto.HasField('options'): + return descriptor_proto.options + else: + return None + + +def _ToJsonName(name): + """Converts name to Json name and returns it.""" + capitalize_next = False + result = [] + + for c in name: + if c == '_': + capitalize_next = True + elif capitalize_next: + result.append(c.upper()) + capitalize_next = False + else: + result += c + + return ''.join(result) + + +def MakeDescriptor( + desc_proto, + package='', + build_file_if_cpp=True, + syntax=None, + edition=None, + file_desc=None, +): + """Make a protobuf Descriptor given a DescriptorProto protobuf. + + Handles nested descriptors. Note that this is limited to the scope of defining + a message inside of another message. Composite fields can currently only be + resolved if the message is defined in the same scope as the field. + + Args: + desc_proto: The descriptor_pb2.DescriptorProto protobuf message. + package: Optional package name for the new message Descriptor (string). + build_file_if_cpp: Update the C++ descriptor pool if api matches. Set to + False on recursion, so no duplicates are created. + syntax: The syntax/semantics that should be used. Set to "proto3" to get + proto3 field presence semantics. 
+ edition: The edition that should be used if syntax is "edition". + file_desc: A FileDescriptor to place this descriptor into. + + Returns: + A Descriptor for protobuf messages. + """ + # pylint: disable=g-import-not-at-top + from google.protobuf import descriptor_pb2 + + # Generate a random name for this proto file to prevent conflicts with any + # imported ones. We need to specify a file name so the descriptor pool + # accepts our FileDescriptorProto, but it is not important what that file + # name is actually set to. + proto_name = binascii.hexlify(os.urandom(16)).decode('ascii') + + if package: + file_name = os.path.join(package.replace('.', '/'), proto_name + '.proto') + else: + file_name = proto_name + '.proto' + + if api_implementation.Type() != 'python' and build_file_if_cpp: + # The C++ implementation requires all descriptors to be backed by the same + # definition in the C++ descriptor pool. To do this, we build a + # FileDescriptorProto with the same definition as this descriptor and build + # it into the pool. + file_descriptor_proto = descriptor_pb2.FileDescriptorProto() + file_descriptor_proto.message_type.add().MergeFrom(desc_proto) + + if package: + file_descriptor_proto.package = package + file_descriptor_proto.name = file_name + + _message.default_pool.Add(file_descriptor_proto) + result = _message.default_pool.FindFileByName(file_descriptor_proto.name) + + if _USE_C_DESCRIPTORS: + return result.message_types_by_name[desc_proto.name] + + if file_desc is None: + file_desc = FileDescriptor( + pool=None, + name=file_name, + package=package, + syntax=syntax, + edition=edition, + options=None, + serialized_pb='', + dependencies=[], + public_dependencies=[], + create_key=_internal_create_key, + ) + full_message_name = [desc_proto.name] + if package: full_message_name.insert(0, package) + + # Create Descriptors for enum types + enum_types = {} + for enum_proto in desc_proto.enum_type: + full_name = '.'.join(full_message_name + [enum_proto.name]) + enum_desc = EnumDescriptor( + enum_proto.name, + full_name, + None, + [ + EnumValueDescriptor( + enum_val.name, + ii, + enum_val.number, + create_key=_internal_create_key, + ) + for ii, enum_val in enumerate(enum_proto.value) + ], + file=file_desc, + create_key=_internal_create_key, + ) + enum_types[full_name] = enum_desc + + # Create Descriptors for nested types + nested_types = {} + for nested_proto in desc_proto.nested_type: + full_name = '.'.join(full_message_name + [nested_proto.name]) + # Nested types are just those defined inside of the message, not all types + # used by fields in the message, so no loops are possible here. 
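+      # For example, given "message Outer { message Inner {} }", only Inner
+      # is visited here; fields whose types are defined elsewhere are
+      # resolved (or skipped, if non-local) in the field loop below.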
+ nested_desc = MakeDescriptor( + nested_proto, + package='.'.join(full_message_name), + build_file_if_cpp=False, + syntax=syntax, + edition=edition, + file_desc=file_desc, + ) + nested_types[full_name] = nested_desc + + fields = [] + for field_proto in desc_proto.field: + full_name = '.'.join(full_message_name + [field_proto.name]) + enum_desc = None + nested_desc = None + if field_proto.json_name: + json_name = field_proto.json_name + else: + json_name = None + if field_proto.HasField('type_name'): + type_name = field_proto.type_name + full_type_name = '.'.join(full_message_name + + [type_name[type_name.rfind('.')+1:]]) + if full_type_name in nested_types: + nested_desc = nested_types[full_type_name] + elif full_type_name in enum_types: + enum_desc = enum_types[full_type_name] + # Else type_name references a non-local type, which isn't implemented + field = FieldDescriptor( + field_proto.name, + full_name, + field_proto.number - 1, + field_proto.number, + field_proto.type, + FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type), + field_proto.label, + None, + nested_desc, + enum_desc, + None, + False, + None, + options=_OptionsOrNone(field_proto), + has_default_value=False, + json_name=json_name, + file=file_desc, + create_key=_internal_create_key, + ) + fields.append(field) + + desc_name = '.'.join(full_message_name) + return Descriptor( + desc_proto.name, + desc_name, + None, + None, + fields, + list(nested_types.values()), + list(enum_types.values()), + [], + options=_OptionsOrNone(desc_proto), + file=file_desc, + create_key=_internal_create_key, + ) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/descriptor_database.py b/.venv/lib/python3.12/site-packages/google/protobuf/descriptor_database.py new file mode 100644 index 00000000..46a893e3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/descriptor_database.py @@ -0,0 +1,154 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Provides a container for DescriptorProtos.""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +import warnings + + +class Error(Exception): + pass + + +class DescriptorDatabaseConflictingDefinitionError(Error): + """Raised when a proto is added with the same name & different descriptor.""" + + +class DescriptorDatabase(object): + """A container accepting FileDescriptorProtos and maps DescriptorProtos.""" + + def __init__(self): + self._file_desc_protos_by_file = {} + self._file_desc_protos_by_symbol = {} + + def Add(self, file_desc_proto): + """Adds the FileDescriptorProto and its types to this database. + + Args: + file_desc_proto: The FileDescriptorProto to add. + Raises: + DescriptorDatabaseConflictingDefinitionError: if an attempt is made to + add a proto with the same name but different definition than an + existing proto in the database. + """ + proto_name = file_desc_proto.name + if proto_name not in self._file_desc_protos_by_file: + self._file_desc_protos_by_file[proto_name] = file_desc_proto + elif self._file_desc_protos_by_file[proto_name] != file_desc_proto: + raise DescriptorDatabaseConflictingDefinitionError( + '%s already added, but with different descriptor.' % proto_name) + else: + return + + # Add all the top-level descriptors to the index. 
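+    # For example, a file with package "pkg" declaring
+    # "message M { enum E {} }" is indexed under "pkg.M" and "pkg.M.E"
+    # (via _ExtractSymbols below); top-level enums, extensions, and services
+    # are indexed under their package-qualified names.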
+    package = file_desc_proto.package
+    for message in file_desc_proto.message_type:
+      for name in _ExtractSymbols(message, package):
+        self._AddSymbol(name, file_desc_proto)
+    for enum in file_desc_proto.enum_type:
+      self._AddSymbol(('.'.join((package, enum.name))), file_desc_proto)
+      for enum_value in enum.value:
+        self._file_desc_protos_by_symbol[
+            '.'.join((package, enum_value.name))] = file_desc_proto
+    for extension in file_desc_proto.extension:
+      self._AddSymbol(('.'.join((package, extension.name))), file_desc_proto)
+    for service in file_desc_proto.service:
+      self._AddSymbol(('.'.join((package, service.name))), file_desc_proto)
+
+  def FindFileByName(self, name):
+    """Finds the file descriptor proto by file name.
+
+    Typically the file name is a relative path ending in a .proto file. The
+    proto with the given name must have been added to this database using the
+    Add method, or else an error will be raised.
+
+    Args:
+      name: The file name to find.
+
+    Returns:
+      The file descriptor proto matching the name.
+
+    Raises:
+      KeyError if no file by the given name was added.
+    """
+
+    return self._file_desc_protos_by_file[name]
+
+  def FindFileContainingSymbol(self, symbol):
+    """Finds the file descriptor proto containing the specified symbol.
+
+    The symbol should be a fully qualified name including the file descriptor's
+    package and any containing messages. Some examples:
+
+    'some.package.name.Message'
+    'some.package.name.Message.NestedEnum'
+    'some.package.name.Message.some_field'
+
+    The file descriptor proto containing the specified symbol must have been
+    added to this database using the Add method, or else an error will be
+    raised.
+
+    Args:
+      symbol: The fully qualified symbol name.
+
+    Returns:
+      The file descriptor proto containing the symbol.
+
+    Raises:
+      KeyError if no file contains the specified symbol.
+    """
+    try:
+      return self._file_desc_protos_by_symbol[symbol]
+    except KeyError:
+      # Fields, enum values, and nested extensions are not in
+      # _file_desc_protos_by_symbol. Try to find the top level
+      # descriptor. A non-existent nested symbol under a valid top-level
+      # descriptor can also be found this way. The behavior matches the
+      # protobuf C++ implementation.
+      top_level, _, _ = symbol.rpartition('.')
+      try:
+        return self._file_desc_protos_by_symbol[top_level]
+      except KeyError:
+        # Raise the original symbol as a KeyError for better diagnostics.
+        raise KeyError(symbol)
+
+  def FindFileContainingExtension(self, extendee_name, extension_number):
+    # TODO: implement this API.
+    return None
+
+  def FindAllExtensionNumbers(self, extendee_name):
+    # TODO: implement this API.
+    return []
+
+  def _AddSymbol(self, name, file_desc_proto):
+    if name in self._file_desc_protos_by_symbol:
+      warn_msg = ('Conflict register for file "' + file_desc_proto.name +
+                  '": ' + name +
+                  ' is already defined in file "' +
+                  self._file_desc_protos_by_symbol[name].name + '"')
+      warnings.warn(warn_msg, RuntimeWarning)
+    self._file_desc_protos_by_symbol[name] = file_desc_proto
+
+
+def _ExtractSymbols(desc_proto, package):
+  """Pulls out all the symbols from a descriptor proto.
+
+  Args:
+    desc_proto: The proto to extract symbols from.
+    package: The package containing the descriptor type.
+
+  Yields:
+    The fully qualified name found in the descriptor.
+  """
+  message_name = package + '.'
+ desc_proto.name if package else desc_proto.name + yield message_name + for nested_type in desc_proto.nested_type: + for symbol in _ExtractSymbols(nested_type, message_name): + yield symbol + for enum_type in desc_proto.enum_type: + yield '.'.join((message_name, enum_type.name)) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/descriptor_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/descriptor_pb2.py new file mode 100644 index 00000000..e9a6ce25 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/descriptor_pb2.py @@ -0,0 +1,3169 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: google/protobuf/descriptor.proto +# Protobuf Python Version: 5.29.4 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 4, + '', + 'google/protobuf/descriptor.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR = _descriptor.FileDescriptor( + name='google/protobuf/descriptor.proto', + package='google.protobuf', + syntax='proto2', + edition='EDITION_PROTO2', + serialized_options=b'\n\023com.google.protobufB\020DescriptorProtosH\001Z-google.golang.org/protobuf/types/descriptorpb\370\001\001\242\002\003GPB\252\002\032Google.Protobuf.Reflection', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n\x11\x46ileDescriptorSet\x12\x38\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProtoR\x04\x66ile*\x0c\x08\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n\x13\x46ileDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n\x07package\x18\x02 \x01(\tR\x07package\x12\x1e\n\ndependency\x18\x03 \x03(\tR\ndependency\x12+\n\x11public_dependency\x18\n \x03(\x05R\x10publicDependency\x12\'\n\x0fweak_dependency\x18\x0b \x03(\x05R\x0eweakDependency\x12\x43\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProtoR\x0bmessageType\x12\x41\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProtoR\x08\x65numType\x12\x41\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProtoR\x07service\x12\x43\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\textension\x12\x36\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptionsR\x07options\x12I\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n\x06syntax\x18\x0c \x01(\tR\x06syntax\x12\x32\n\x07\x65\x64ition\x18\x0e \x01(\x0e\x32\x18.google.protobuf.EditionR\x07\x65\x64ition\"\xb9\x06\n\x0f\x44\x65scriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12;\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\x05\x66ield\x12\x43\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\textension\x12\x41\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProtoR\nnestedType\x12\x41\n\tenum_type\x18\x04 
\x03(\x0b\x32$.google.protobuf.EnumDescriptorProtoR\x08\x65numType\x12X\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRangeR\x0e\x65xtensionRange\x12\x44\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x12\x39\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptionsR\x07options\x12U\n\x0ereserved_range\x18\t \x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n\rreserved_name\x18\n \x03(\tR\x0creservedName\x1az\n\x0e\x45xtensionRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 \x01(\x05R\x03\x65nd\x12@\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptionsR\x07options\x1a\x37\n\rReservedRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 \x01(\x05R\x03\x65nd\"\xcc\x04\n\x15\x45xtensionRangeOptions\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n\x0b\x64\x65\x63laration\x18\x02 \x03(\x0b\x32\x32.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\x0b\x64\x65\x63laration\x12\x37\n\x08\x66\x65\x61tures\x18\x32 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12m\n\x0cverification\x18\x03 \x01(\x0e\x32\x38.google.protobuf.ExtensionRangeOptions.VerificationState:\nUNVERIFIEDB\x03\x88\x01\x02R\x0cverification\x1a\x94\x01\n\x0b\x44\x65\x63laration\x12\x16\n\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n\tfull_name\x18\x02 \x01(\tR\x08\x66ullName\x12\x12\n\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n\x08reserved\x18\x05 \x01(\x08R\x08reserved\x12\x1a\n\x08repeated\x18\x06 \x01(\x08R\x08repeatedJ\x04\x08\x04\x10\x05\"4\n\x11VerificationState\x12\x0f\n\x0b\x44\x45\x43LARATION\x10\x00\x12\x0e\n\nUNVERIFIED\x10\x01*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xc1\x06\n\x14\x46ieldDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n\x06number\x18\x03 \x01(\x05R\x06number\x12\x41\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n\ttype_name\x18\x06 \x01(\tR\x08typeName\x12\x1a\n\x08\x65xtendee\x18\x02 \x01(\tR\x08\x65xtendee\x12#\n\rdefault_value\x18\x07 \x01(\tR\x0c\x64\x65\x66\x61ultValue\x12\x1f\n\x0boneof_index\x18\t \x01(\x05R\noneofIndex\x12\x1b\n\tjson_name\x18\n \x01(\tR\x08jsonName\x12\x37\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptionsR\x07options\x12\'\n\x0fproto3_optional\x18\x11 \x01(\x08R\x0eproto3Optional\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REPEATED\x10\x03\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\"c\n\x14OneofDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x37\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptionsR\x07options\"\xe3\x02\n\x13\x45numDescriptorProto\x12\x12\n\x04name\x18\x01 
\x01(\tR\x04name\x12?\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProtoR\x05value\x12\x36\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptionsR\x07options\x12]\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n\rreserved_name\x18\x05 \x03(\tR\x0creservedName\x1a;\n\x11\x45numReservedRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 \x01(\x05R\x03\x65nd\"\x83\x01\n\x18\x45numValueDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n\x06number\x18\x02 \x01(\x05R\x06number\x12;\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptionsR\x07options\"\xa7\x01\n\x16ServiceDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12>\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProtoR\x06method\x12\x39\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptionsR\x07options\"\x89\x02\n\x15MethodDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n\ninput_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n\x0boutput_type\x18\x03 \x01(\tR\noutputType\x12\x38\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptionsR\x07options\x12\x30\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lseR\x0f\x63lientStreaming\x12\x30\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lseR\x0fserverStreaming\"\xad\t\n\x0b\x46ileOptions\x12!\n\x0cjava_package\x18\x01 \x01(\tR\x0bjavaPackage\x12\x30\n\x14java_outer_classname\x18\x08 \x01(\tR\x12javaOuterClassname\x12\x35\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lseR\x11javaMultipleFiles\x12\x44\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n\x16java_string_check_utf8\x18\x1b \x01(\x08:\x05\x66\x61lseR\x13javaStringCheckUtf8\x12S\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\x0boptimizeFor\x12\x1d\n\ngo_package\x18\x0b \x01(\tR\tgoPackage\x12\x35\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lseR\x11\x63\x63GenericServices\x12\x39\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lseR\x13javaGenericServices\x12\x35\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lseR\x11pyGenericServices\x12%\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12.\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04trueR\x0e\x63\x63\x45nableArenas\x12*\n\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n\x10\x63sharp_namespace\x18% \x01(\tR\x0f\x63sharpNamespace\x12!\n\x0cswift_prefix\x18\' \x01(\tR\x0bswiftPrefix\x12(\n\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n\rphp_namespace\x18) \x01(\tR\x0cphpNamespace\x12\x34\n\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n\x0cruby_package\x18- \x01(\tR\x0brubyPackage\x12\x37\n\x08\x66\x65\x61tures\x18\x32 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08*\x10+J\x04\x08&\x10\'R\x14php_generic_services\"\xf4\x03\n\x0eMessageOptions\x12<\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lseR\x14messageSetWireFormat\x12L\n\x1fno_standard_descriptor_accessor\x18\x02 
\x01(\x08:\x05\x66\x61lseR\x1cnoStandardDescriptorAccessor\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12\x1b\n\tmap_entry\x18\x07 \x01(\x08R\x08mapEntry\x12V\n&deprecated_legacy_json_field_conflicts\x18\x0b \x01(\x08\x42\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x12\x37\n\x08\x66\x65\x61tures\x18\x0c \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\x9d\r\n\x0c\x46ieldOptions\x12\x41\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05\x63type\x12\x16\n\x06packed\x18\x02 \x01(\x08R\x06packed\x12G\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lseR\x04lazy\x12.\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lseR\x0eunverifiedLazy\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12\x19\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lseR\x04weak\x12(\n\x0c\x64\x65\x62ug_redact\x18\x10 \x01(\x08:\x05\x66\x61lseR\x0b\x64\x65\x62ugRedact\x12K\n\tretention\x18\x11 \x01(\x0e\x32-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n\x07targets\x18\x13 \x03(\x0e\x32..google.protobuf.FieldOptions.OptionTargetTypeR\x07targets\x12W\n\x10\x65\x64ition_defaults\x18\x14 \x03(\x0b\x32,.google.protobuf.FieldOptions.EditionDefaultR\x0f\x65\x64itionDefaults\x12\x37\n\x08\x66\x65\x61tures\x18\x15 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12U\n\x0f\x66\x65\x61ture_support\x18\x16 \x01(\x0b\x32,.google.protobuf.FieldOptions.FeatureSupportR\x0e\x66\x65\x61tureSupport\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n\x0e\x45\x64itionDefault\x12\x32\n\x07\x65\x64ition\x18\x03 \x01(\x0e\x32\x18.google.protobuf.EditionR\x07\x65\x64ition\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n\x0e\x46\x65\x61tureSupport\x12G\n\x12\x65\x64ition_introduced\x18\x01 \x01(\x0e\x32\x18.google.protobuf.EditionR\x11\x65\x64itionIntroduced\x12G\n\x12\x65\x64ition_deprecated\x18\x02 \x01(\x0e\x32\x18.google.protobuf.EditionR\x11\x65\x64itionDeprecated\x12/\n\x13\x64\x65precation_warning\x18\x03 \x01(\tR\x12\x64\x65precationWarning\x12\x41\n\x0f\x65\x64ition_removed\x18\x04 \x01(\x0e\x32\x18.google.protobuf.EditionR\x0e\x65\x64itionRemoved\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02\"U\n\x0fOptionRetention\x12\x15\n\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n\x11RETENTION_RUNTIME\x10\x01\x12\x14\n\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n\x10OptionTargetType\x12\x17\n\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n\x16TARGET_TYPE_ENUM_ENTRY\x10\x07\x12\x17\n\x13TARGET_TYPE_SERVICE\x10\x08\x12\x16\n\x12TARGET_TYPE_METHOD\x10\t*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x12\x10\x13\"\xac\x01\n\x0cOneofOptions\x12\x37\n\x08\x66\x65\x61tures\x18\x01 
\x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd1\x02\n\x0b\x45numOptions\x12\x1f\n\x0b\x61llow_alias\x18\x02 \x01(\x08R\nallowAlias\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12V\n&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\x08\x42\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x12\x37\n\x08\x66\x65\x61tures\x18\x07 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"\xd8\x02\n\x10\x45numValueOptions\x12%\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12\x37\n\x08\x66\x65\x61tures\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12(\n\x0c\x64\x65\x62ug_redact\x18\x03 \x01(\x08:\x05\x66\x61lseR\x0b\x64\x65\x62ugRedact\x12U\n\x0f\x66\x65\x61ture_support\x18\x04 \x01(\x0b\x32,.google.protobuf.FieldOptions.FeatureSupportR\x0e\x66\x65\x61tureSupport\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd5\x01\n\x0eServiceOptions\x12\x37\n\x08\x66\x65\x61tures\x18\" \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12%\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x99\x03\n\rMethodOptions\x12%\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12q\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x12\x37\n\x08\x66\x65\x61tures\x18# \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9a\x03\n\x13UninterpretedOption\x12\x41\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n\x0c\x64ouble_value\x18\x06 \x01(\x01R\x0b\x64oubleValue\x12!\n\x0cstring_value\x18\x07 \x01(\x0cR\x0bstringValue\x12\'\n\x0f\x61ggregate_value\x18\x08 \x01(\tR\x0e\x61ggregateValue\x1aJ\n\x08NamePart\x12\x1b\n\tname_part\x18\x01 \x02(\tR\x08namePart\x12!\n\x0cis_extension\x18\x02 \x02(\x08R\x0bisExtension\"\xa7\n\n\nFeatureSet\x12\x91\x01\n\x0e\x66ield_presence\x18\x01 \x01(\x0e\x32).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\x08\x45XPLICIT\x18\x84\x07\xa2\x01\r\x12\x08IMPLICIT\x18\xe7\x07\xa2\x01\r\x12\x08\x45XPLICIT\x18\xe8\x07\xb2\x01\x03\x08\xe8\x07R\rfieldPresence\x12l\n\tenum_type\x18\x02 
\x01(\x0e\x32$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\x0b\x12\x06\x43LOSED\x18\x84\x07\xa2\x01\t\x12\x04OPEN\x18\xe7\x07\xb2\x01\x03\x08\xe8\x07R\x08\x65numType\x12\x98\x01\n\x17repeated_field_encoding\x18\x03 \x01(\x0e\x32\x31.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\x08\x45XPANDED\x18\x84\x07\xa2\x01\x0b\x12\x06PACKED\x18\xe7\x07\xb2\x01\x03\x08\xe8\x07R\x15repeatedFieldEncoding\x12~\n\x0futf8_validation\x18\x04 \x01(\x0e\x32*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\x07\xa2\x01\x0b\x12\x06VERIFY\x18\xe7\x07\xb2\x01\x03\x08\xe8\x07R\x0eutf8Validation\x12~\n\x10message_encoding\x18\x05 \x01(\x0e\x32+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\x07\xb2\x01\x03\x08\xe8\x07R\x0fmessageEncoding\x12\x82\x01\n\x0bjson_format\x18\x06 \x01(\x0e\x32&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\x07\xa2\x01\n\x12\x05\x41LLOW\x18\xe7\x07\xb2\x01\x03\x08\xe8\x07R\njsonFormat\"\\\n\rFieldPresence\x12\x1a\n\x16\x46IELD_PRESENCE_UNKNOWN\x10\x00\x12\x0c\n\x08\x45XPLICIT\x10\x01\x12\x0c\n\x08IMPLICIT\x10\x02\x12\x13\n\x0fLEGACY_REQUIRED\x10\x03\"7\n\x08\x45numType\x12\x15\n\x11\x45NUM_TYPE_UNKNOWN\x10\x00\x12\x08\n\x04OPEN\x10\x01\x12\n\n\x06\x43LOSED\x10\x02\"V\n\x15RepeatedFieldEncoding\x12#\n\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n\n\x06PACKED\x10\x01\x12\x0c\n\x08\x45XPANDED\x10\x02\"I\n\x0eUtf8Validation\x12\x1b\n\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n\n\x06VERIFY\x10\x02\x12\x08\n\x04NONE\x10\x03\"\x04\x08\x01\x10\x01\"S\n\x0fMessageEncoding\x12\x1c\n\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n\x0fLENGTH_PREFIXED\x10\x01\x12\r\n\tDELIMITED\x10\x02\"H\n\nJsonFormat\x12\x17\n\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n\x05\x41LLOW\x10\x01\x12\x16\n\x12LEGACY_BEST_EFFORT\x10\x02*\x06\x08\xe8\x07\x10\x8bN*\x06\x08\x8bN\x10\x90N*\x06\x08\x90N\x10\x91NJ\x06\x08\xe7\x07\x10\xe8\x07\"\xef\x03\n\x12\x46\x65\x61tureSetDefaults\x12X\n\x08\x64\x65\x66\x61ults\x18\x01 \x03(\x0b\x32<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\x08\x64\x65\x66\x61ults\x12\x41\n\x0fminimum_edition\x18\x04 \x01(\x0e\x32\x18.google.protobuf.EditionR\x0eminimumEdition\x12\x41\n\x0fmaximum_edition\x18\x05 \x01(\x0e\x32\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n\x18\x46\x65\x61tureSetEditionDefault\x12\x32\n\x07\x65\x64ition\x18\x03 \x01(\x0e\x32\x18.google.protobuf.EditionR\x07\x65\x64ition\x12N\n\x14overridable_features\x18\x04 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12\x42\n\x0e\x66ixed_features\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03R\x08\x66\x65\x61tures\"\xb5\x02\n\x0eSourceCodeInfo\x12\x44\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.LocationR\x08location\x1a\xce\x01\n\x08Location\x12\x16\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01R\x04path\x12\x16\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01R\x04span\x12)\n\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\x0c\x08\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n\x11GeneratedCodeInfo\x12M\n\nannotation\x18\x01 
\x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.AnnotationR\nannotation\x1a\xeb\x01\n\nAnnotation\x12\x16\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01R\x04path\x12\x1f\n\x0bsource_file\x18\x02 \x01(\tR\nsourceFile\x12\x14\n\x05\x62\x65gin\x18\x03 \x01(\x05R\x05\x62\x65gin\x12\x10\n\x03\x65nd\x18\x04 \x01(\x05R\x03\x65nd\x12R\n\x08semantic\x18\x05 \x01(\x0e\x32\x36.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\x08semantic\"(\n\x08Semantic\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03SET\x10\x01\x12\t\n\x05\x41LIAS\x10\x02*\xa7\x02\n\x07\x45\x64ition\x12\x13\n\x0f\x45\x44ITION_UNKNOWN\x10\x00\x12\x13\n\x0e\x45\x44ITION_LEGACY\x10\x84\x07\x12\x13\n\x0e\x45\x44ITION_PROTO2\x10\xe6\x07\x12\x13\n\x0e\x45\x44ITION_PROTO3\x10\xe7\x07\x12\x11\n\x0c\x45\x44ITION_2023\x10\xe8\x07\x12\x11\n\x0c\x45\x44ITION_2024\x10\xe9\x07\x12\x17\n\x13\x45\x44ITION_1_TEST_ONLY\x10\x01\x12\x17\n\x13\x45\x44ITION_2_TEST_ONLY\x10\x02\x12\x1d\n\x17\x45\x44ITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n\x17\x45\x44ITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n\x17\x45\x44ITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n\x0b\x45\x44ITION_MAX\x10\xff\xff\xff\xff\x07\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection' + ) +else: + DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n\x11\x46ileDescriptorSet\x12\x38\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProtoR\x04\x66ile*\x0c\x08\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n\x13\x46ileDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n\x07package\x18\x02 \x01(\tR\x07package\x12\x1e\n\ndependency\x18\x03 \x03(\tR\ndependency\x12+\n\x11public_dependency\x18\n \x03(\x05R\x10publicDependency\x12\'\n\x0fweak_dependency\x18\x0b \x03(\x05R\x0eweakDependency\x12\x43\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProtoR\x0bmessageType\x12\x41\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProtoR\x08\x65numType\x12\x41\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProtoR\x07service\x12\x43\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\textension\x12\x36\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptionsR\x07options\x12I\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n\x06syntax\x18\x0c \x01(\tR\x06syntax\x12\x32\n\x07\x65\x64ition\x18\x0e \x01(\x0e\x32\x18.google.protobuf.EditionR\x07\x65\x64ition\"\xb9\x06\n\x0f\x44\x65scriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12;\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\x05\x66ield\x12\x43\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\textension\x12\x41\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProtoR\nnestedType\x12\x41\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProtoR\x08\x65numType\x12X\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRangeR\x0e\x65xtensionRange\x12\x44\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x12\x39\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptionsR\x07options\x12U\n\x0ereserved_range\x18\t \x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n\rreserved_name\x18\n 
\x03(\tR\x0creservedName\x1az\n\x0e\x45xtensionRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 \x01(\x05R\x03\x65nd\x12@\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptionsR\x07options\x1a\x37\n\rReservedRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 \x01(\x05R\x03\x65nd\"\xcc\x04\n\x15\x45xtensionRangeOptions\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n\x0b\x64\x65\x63laration\x18\x02 \x03(\x0b\x32\x32.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\x0b\x64\x65\x63laration\x12\x37\n\x08\x66\x65\x61tures\x18\x32 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12m\n\x0cverification\x18\x03 \x01(\x0e\x32\x38.google.protobuf.ExtensionRangeOptions.VerificationState:\nUNVERIFIEDB\x03\x88\x01\x02R\x0cverification\x1a\x94\x01\n\x0b\x44\x65\x63laration\x12\x16\n\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n\tfull_name\x18\x02 \x01(\tR\x08\x66ullName\x12\x12\n\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n\x08reserved\x18\x05 \x01(\x08R\x08reserved\x12\x1a\n\x08repeated\x18\x06 \x01(\x08R\x08repeatedJ\x04\x08\x04\x10\x05\"4\n\x11VerificationState\x12\x0f\n\x0b\x44\x45\x43LARATION\x10\x00\x12\x0e\n\nUNVERIFIED\x10\x01*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xc1\x06\n\x14\x46ieldDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n\x06number\x18\x03 \x01(\x05R\x06number\x12\x41\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n\ttype_name\x18\x06 \x01(\tR\x08typeName\x12\x1a\n\x08\x65xtendee\x18\x02 \x01(\tR\x08\x65xtendee\x12#\n\rdefault_value\x18\x07 \x01(\tR\x0c\x64\x65\x66\x61ultValue\x12\x1f\n\x0boneof_index\x18\t \x01(\x05R\noneofIndex\x12\x1b\n\tjson_name\x18\n \x01(\tR\x08jsonName\x12\x37\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptionsR\x07options\x12\'\n\x0fproto3_optional\x18\x11 \x01(\x08R\x0eproto3Optional\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REPEATED\x10\x03\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\"c\n\x14OneofDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x37\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptionsR\x07options\"\xe3\x02\n\x13\x45numDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12?\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProtoR\x05value\x12\x36\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptionsR\x07options\x12]\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n\rreserved_name\x18\x05 \x03(\tR\x0creservedName\x1a;\n\x11\x45numReservedRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 
\x01(\x05R\x03\x65nd\"\x83\x01\n\x18\x45numValueDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n\x06number\x18\x02 \x01(\x05R\x06number\x12;\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptionsR\x07options\"\xa7\x01\n\x16ServiceDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12>\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProtoR\x06method\x12\x39\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptionsR\x07options\"\x89\x02\n\x15MethodDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n\ninput_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n\x0boutput_type\x18\x03 \x01(\tR\noutputType\x12\x38\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptionsR\x07options\x12\x30\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lseR\x0f\x63lientStreaming\x12\x30\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lseR\x0fserverStreaming\"\xad\t\n\x0b\x46ileOptions\x12!\n\x0cjava_package\x18\x01 \x01(\tR\x0bjavaPackage\x12\x30\n\x14java_outer_classname\x18\x08 \x01(\tR\x12javaOuterClassname\x12\x35\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lseR\x11javaMultipleFiles\x12\x44\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n\x16java_string_check_utf8\x18\x1b \x01(\x08:\x05\x66\x61lseR\x13javaStringCheckUtf8\x12S\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\x0boptimizeFor\x12\x1d\n\ngo_package\x18\x0b \x01(\tR\tgoPackage\x12\x35\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lseR\x11\x63\x63GenericServices\x12\x39\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lseR\x13javaGenericServices\x12\x35\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lseR\x11pyGenericServices\x12%\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12.\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04trueR\x0e\x63\x63\x45nableArenas\x12*\n\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n\x10\x63sharp_namespace\x18% \x01(\tR\x0f\x63sharpNamespace\x12!\n\x0cswift_prefix\x18\' \x01(\tR\x0bswiftPrefix\x12(\n\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n\rphp_namespace\x18) \x01(\tR\x0cphpNamespace\x12\x34\n\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n\x0cruby_package\x18- \x01(\tR\x0brubyPackage\x12\x37\n\x08\x66\x65\x61tures\x18\x32 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08*\x10+J\x04\x08&\x10\'R\x14php_generic_services\"\xf4\x03\n\x0eMessageOptions\x12<\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lseR\x14messageSetWireFormat\x12L\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lseR\x1cnoStandardDescriptorAccessor\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12\x1b\n\tmap_entry\x18\x07 \x01(\x08R\x08mapEntry\x12V\n&deprecated_legacy_json_field_conflicts\x18\x0b \x01(\x08\x42\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x12\x37\n\x08\x66\x65\x61tures\x18\x0c \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12X\n\x14uninterpreted_option\x18\xe7\x07 
\x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\x9d\r\n\x0c\x46ieldOptions\x12\x41\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05\x63type\x12\x16\n\x06packed\x18\x02 \x01(\x08R\x06packed\x12G\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lseR\x04lazy\x12.\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lseR\x0eunverifiedLazy\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12\x19\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lseR\x04weak\x12(\n\x0c\x64\x65\x62ug_redact\x18\x10 \x01(\x08:\x05\x66\x61lseR\x0b\x64\x65\x62ugRedact\x12K\n\tretention\x18\x11 \x01(\x0e\x32-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n\x07targets\x18\x13 \x03(\x0e\x32..google.protobuf.FieldOptions.OptionTargetTypeR\x07targets\x12W\n\x10\x65\x64ition_defaults\x18\x14 \x03(\x0b\x32,.google.protobuf.FieldOptions.EditionDefaultR\x0f\x65\x64itionDefaults\x12\x37\n\x08\x66\x65\x61tures\x18\x15 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12U\n\x0f\x66\x65\x61ture_support\x18\x16 \x01(\x0b\x32,.google.protobuf.FieldOptions.FeatureSupportR\x0e\x66\x65\x61tureSupport\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n\x0e\x45\x64itionDefault\x12\x32\n\x07\x65\x64ition\x18\x03 \x01(\x0e\x32\x18.google.protobuf.EditionR\x07\x65\x64ition\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n\x0e\x46\x65\x61tureSupport\x12G\n\x12\x65\x64ition_introduced\x18\x01 \x01(\x0e\x32\x18.google.protobuf.EditionR\x11\x65\x64itionIntroduced\x12G\n\x12\x65\x64ition_deprecated\x18\x02 \x01(\x0e\x32\x18.google.protobuf.EditionR\x11\x65\x64itionDeprecated\x12/\n\x13\x64\x65precation_warning\x18\x03 \x01(\tR\x12\x64\x65precationWarning\x12\x41\n\x0f\x65\x64ition_removed\x18\x04 \x01(\x0e\x32\x18.google.protobuf.EditionR\x0e\x65\x64itionRemoved\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02\"U\n\x0fOptionRetention\x12\x15\n\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n\x11RETENTION_RUNTIME\x10\x01\x12\x14\n\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n\x10OptionTargetType\x12\x17\n\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n\x16TARGET_TYPE_ENUM_ENTRY\x10\x07\x12\x17\n\x13TARGET_TYPE_SERVICE\x10\x08\x12\x16\n\x12TARGET_TYPE_METHOD\x10\t*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x12\x10\x13\"\xac\x01\n\x0cOneofOptions\x12\x37\n\x08\x66\x65\x61tures\x18\x01 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd1\x02\n\x0b\x45numOptions\x12\x1f\n\x0b\x61llow_alias\x18\x02 \x01(\x08R\nallowAlias\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12V\n&deprecated_legacy_json_field_conflicts\x18\x06 
\x01(\x08\x42\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x12\x37\n\x08\x66\x65\x61tures\x18\x07 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"\xd8\x02\n\x10\x45numValueOptions\x12%\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12\x37\n\x08\x66\x65\x61tures\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12(\n\x0c\x64\x65\x62ug_redact\x18\x03 \x01(\x08:\x05\x66\x61lseR\x0b\x64\x65\x62ugRedact\x12U\n\x0f\x66\x65\x61ture_support\x18\x04 \x01(\x0b\x32,.google.protobuf.FieldOptions.FeatureSupportR\x0e\x66\x65\x61tureSupport\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd5\x01\n\x0eServiceOptions\x12\x37\n\x08\x66\x65\x61tures\x18\" \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12%\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x99\x03\n\rMethodOptions\x12%\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12q\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x12\x37\n\x08\x66\x65\x61tures\x18# \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x08\x66\x65\x61tures\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9a\x03\n\x13UninterpretedOption\x12\x41\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n\x0c\x64ouble_value\x18\x06 \x01(\x01R\x0b\x64oubleValue\x12!\n\x0cstring_value\x18\x07 \x01(\x0cR\x0bstringValue\x12\'\n\x0f\x61ggregate_value\x18\x08 \x01(\tR\x0e\x61ggregateValue\x1aJ\n\x08NamePart\x12\x1b\n\tname_part\x18\x01 \x02(\tR\x08namePart\x12!\n\x0cis_extension\x18\x02 \x02(\x08R\x0bisExtension\"\xa7\n\n\nFeatureSet\x12\x91\x01\n\x0e\x66ield_presence\x18\x01 \x01(\x0e\x32).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\x08\x45XPLICIT\x18\x84\x07\xa2\x01\r\x12\x08IMPLICIT\x18\xe7\x07\xa2\x01\r\x12\x08\x45XPLICIT\x18\xe8\x07\xb2\x01\x03\x08\xe8\x07R\rfieldPresence\x12l\n\tenum_type\x18\x02 \x01(\x0e\x32$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\x0b\x12\x06\x43LOSED\x18\x84\x07\xa2\x01\t\x12\x04OPEN\x18\xe7\x07\xb2\x01\x03\x08\xe8\x07R\x08\x65numType\x12\x98\x01\n\x17repeated_field_encoding\x18\x03 \x01(\x0e\x32\x31.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\x08\x45XPANDED\x18\x84\x07\xa2\x01\x0b\x12\x06PACKED\x18\xe7\x07\xb2\x01\x03\x08\xe8\x07R\x15repeatedFieldEncoding\x12~\n\x0futf8_validation\x18\x04 
\x01(\x0e\x32*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\x07\xa2\x01\x0b\x12\x06VERIFY\x18\xe7\x07\xb2\x01\x03\x08\xe8\x07R\x0eutf8Validation\x12~\n\x10message_encoding\x18\x05 \x01(\x0e\x32+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\x07\xb2\x01\x03\x08\xe8\x07R\x0fmessageEncoding\x12\x82\x01\n\x0bjson_format\x18\x06 \x01(\x0e\x32&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\x07\xa2\x01\n\x12\x05\x41LLOW\x18\xe7\x07\xb2\x01\x03\x08\xe8\x07R\njsonFormat\"\\\n\rFieldPresence\x12\x1a\n\x16\x46IELD_PRESENCE_UNKNOWN\x10\x00\x12\x0c\n\x08\x45XPLICIT\x10\x01\x12\x0c\n\x08IMPLICIT\x10\x02\x12\x13\n\x0fLEGACY_REQUIRED\x10\x03\"7\n\x08\x45numType\x12\x15\n\x11\x45NUM_TYPE_UNKNOWN\x10\x00\x12\x08\n\x04OPEN\x10\x01\x12\n\n\x06\x43LOSED\x10\x02\"V\n\x15RepeatedFieldEncoding\x12#\n\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n\n\x06PACKED\x10\x01\x12\x0c\n\x08\x45XPANDED\x10\x02\"I\n\x0eUtf8Validation\x12\x1b\n\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n\n\x06VERIFY\x10\x02\x12\x08\n\x04NONE\x10\x03\"\x04\x08\x01\x10\x01\"S\n\x0fMessageEncoding\x12\x1c\n\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n\x0fLENGTH_PREFIXED\x10\x01\x12\r\n\tDELIMITED\x10\x02\"H\n\nJsonFormat\x12\x17\n\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n\x05\x41LLOW\x10\x01\x12\x16\n\x12LEGACY_BEST_EFFORT\x10\x02*\x06\x08\xe8\x07\x10\x8bN*\x06\x08\x8bN\x10\x90N*\x06\x08\x90N\x10\x91NJ\x06\x08\xe7\x07\x10\xe8\x07\"\xef\x03\n\x12\x46\x65\x61tureSetDefaults\x12X\n\x08\x64\x65\x66\x61ults\x18\x01 \x03(\x0b\x32<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\x08\x64\x65\x66\x61ults\x12\x41\n\x0fminimum_edition\x18\x04 \x01(\x0e\x32\x18.google.protobuf.EditionR\x0eminimumEdition\x12\x41\n\x0fmaximum_edition\x18\x05 \x01(\x0e\x32\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n\x18\x46\x65\x61tureSetEditionDefault\x12\x32\n\x07\x65\x64ition\x18\x03 \x01(\x0e\x32\x18.google.protobuf.EditionR\x07\x65\x64ition\x12N\n\x14overridable_features\x18\x04 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12\x42\n\x0e\x66ixed_features\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03R\x08\x66\x65\x61tures\"\xb5\x02\n\x0eSourceCodeInfo\x12\x44\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.LocationR\x08location\x1a\xce\x01\n\x08Location\x12\x16\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01R\x04path\x12\x16\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01R\x04span\x12)\n\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\x0c\x08\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n\x11GeneratedCodeInfo\x12M\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.AnnotationR\nannotation\x1a\xeb\x01\n\nAnnotation\x12\x16\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01R\x04path\x12\x1f\n\x0bsource_file\x18\x02 \x01(\tR\nsourceFile\x12\x14\n\x05\x62\x65gin\x18\x03 \x01(\x05R\x05\x62\x65gin\x12\x10\n\x03\x65nd\x18\x04 \x01(\x05R\x03\x65nd\x12R\n\x08semantic\x18\x05 
\x01(\x0e\x32\x36.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\x08semantic\"(\n\x08Semantic\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03SET\x10\x01\x12\t\n\x05\x41LIAS\x10\x02*\xa7\x02\n\x07\x45\x64ition\x12\x13\n\x0f\x45\x44ITION_UNKNOWN\x10\x00\x12\x13\n\x0e\x45\x44ITION_LEGACY\x10\x84\x07\x12\x13\n\x0e\x45\x44ITION_PROTO2\x10\xe6\x07\x12\x13\n\x0e\x45\x44ITION_PROTO3\x10\xe7\x07\x12\x11\n\x0c\x45\x44ITION_2023\x10\xe8\x07\x12\x11\n\x0c\x45\x44ITION_2024\x10\xe9\x07\x12\x17\n\x13\x45\x44ITION_1_TEST_ONLY\x10\x01\x12\x17\n\x13\x45\x44ITION_2_TEST_ONLY\x10\x02\x12\x1d\n\x17\x45\x44ITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n\x17\x45\x44ITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n\x17\x45\x44ITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n\x0b\x45\x44ITION_MAX\x10\xff\xff\xff\xff\x07\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection') + +_globals = globals() +if not _descriptor._USE_C_DESCRIPTORS: + _EDITION = _descriptor.EnumDescriptor( + name='Edition', + full_name='google.protobuf.Edition', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='EDITION_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EDITION_LEGACY', index=1, number=900, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EDITION_PROTO2', index=2, number=998, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EDITION_PROTO3', index=3, number=999, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EDITION_2023', index=4, number=1000, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EDITION_2024', index=5, number=1001, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EDITION_1_TEST_ONLY', index=6, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EDITION_2_TEST_ONLY', index=7, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EDITION_99997_TEST_ONLY', index=8, number=99997, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EDITION_99998_TEST_ONLY', index=9, number=99998, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EDITION_99999_TEST_ONLY', index=10, number=99999, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EDITION_MAX', index=11, number=2147483647, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_EDITION) + + _EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE = _descriptor.EnumDescriptor( + name='VerificationState', + 
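+    # Illustrative aside (a hedged sketch, not part of the protoc output):
+    # this whole `if not _descriptor._USE_C_DESCRIPTORS:` branch runs only
+    # when the C/upb descriptor implementation is unavailable, and rebuilds
+    # every enum and message descriptor in pure Python from the serialized
+    # descriptor bytes loaded just above. Which implementation is active can
+    # be checked via an internal helper (internal API, so subject to change
+    # between protobuf releases):
+    #
+    #   >>> from google.protobuf.internal import api_implementation
+    #   >>> api_implementation.Type()   # one of 'upb', 'cpp', 'python'
+    #   'upb'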
full_name='google.protobuf.ExtensionRangeOptions.VerificationState', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='DECLARATION', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='UNVERIFIED', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE) + + _FIELDDESCRIPTORPROTO_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='google.protobuf.FieldDescriptorProto.Type', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TYPE_DOUBLE', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FLOAT', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_INT64', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_UINT64', index=3, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_INT32', index=4, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FIXED64', index=5, number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FIXED32', index=6, number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_BOOL', index=7, number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_STRING', index=8, number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_GROUP', index=9, number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_MESSAGE', index=10, number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_BYTES', index=11, number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_UINT32', index=12, number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_ENUM', index=13, number=14, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SFIXED32', index=14, number=15, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SFIXED64', index=15, number=16, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SINT32', index=16, 
number=17, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SINT64', index=17, number=18, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_TYPE) + + _FIELDDESCRIPTORPROTO_LABEL = _descriptor.EnumDescriptor( + name='Label', + full_name='google.protobuf.FieldDescriptorProto.Label', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='LABEL_OPTIONAL', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LABEL_REPEATED', index=1, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LABEL_REQUIRED', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_LABEL) + + _FILEOPTIONS_OPTIMIZEMODE = _descriptor.EnumDescriptor( + name='OptimizeMode', + full_name='google.protobuf.FileOptions.OptimizeMode', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='SPEED', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CODE_SIZE', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LITE_RUNTIME', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FILEOPTIONS_OPTIMIZEMODE) + + _FIELDOPTIONS_CTYPE = _descriptor.EnumDescriptor( + name='CType', + full_name='google.protobuf.FieldOptions.CType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='STRING', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CORD', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='STRING_PIECE', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_CTYPE) + + _FIELDOPTIONS_JSTYPE = _descriptor.EnumDescriptor( + name='JSType', + full_name='google.protobuf.FieldOptions.JSType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='JS_NORMAL', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='JS_STRING', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='JS_NUMBER', index=2, number=2, + serialized_options=None, + type=None, + 
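+      # A hedged reading note: in each EnumValueDescriptor above, `index` is
+      # the zero-based position in declaration order, while `number` is the
+      # actual proto enum value, so the two need not agree -- e.g. in the
+      # Label enum just built, LABEL_REPEATED sits at index=1 but carries
+      # number=3. The generated wrappers expose the number/name mapping:
+      #
+      #   >>> from google.protobuf import descriptor_pb2
+      #   >>> descriptor_pb2.FieldDescriptorProto.Label.Name(3)
+      #   'LABEL_REPEATED'
+      #   >>> descriptor_pb2.FieldDescriptorProto.LABEL_REPEATED
+      #   3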
create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_JSTYPE) + + _FIELDOPTIONS_OPTIONRETENTION = _descriptor.EnumDescriptor( + name='OptionRetention', + full_name='google.protobuf.FieldOptions.OptionRetention', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='RETENTION_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='RETENTION_RUNTIME', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='RETENTION_SOURCE', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_OPTIONRETENTION) + + _FIELDOPTIONS_OPTIONTARGETTYPE = _descriptor.EnumDescriptor( + name='OptionTargetType', + full_name='google.protobuf.FieldOptions.OptionTargetType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TARGET_TYPE_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TARGET_TYPE_FILE', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TARGET_TYPE_EXTENSION_RANGE', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TARGET_TYPE_MESSAGE', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TARGET_TYPE_FIELD', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TARGET_TYPE_ONEOF', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TARGET_TYPE_ENUM', index=6, number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TARGET_TYPE_ENUM_ENTRY', index=7, number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TARGET_TYPE_SERVICE', index=8, number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TARGET_TYPE_METHOD', index=9, number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_OPTIONTARGETTYPE) + + _METHODOPTIONS_IDEMPOTENCYLEVEL = _descriptor.EnumDescriptor( + name='IdempotencyLevel', + full_name='google.protobuf.MethodOptions.IdempotencyLevel', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='IDEMPOTENCY_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + 
_descriptor.EnumValueDescriptor( + name='NO_SIDE_EFFECTS', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='IDEMPOTENT', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_METHODOPTIONS_IDEMPOTENCYLEVEL) + + _FEATURESET_FIELDPRESENCE = _descriptor.EnumDescriptor( + name='FieldPresence', + full_name='google.protobuf.FeatureSet.FieldPresence', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='FIELD_PRESENCE_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EXPLICIT', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='IMPLICIT', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LEGACY_REQUIRED', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FEATURESET_FIELDPRESENCE) + + _FEATURESET_ENUMTYPE = _descriptor.EnumDescriptor( + name='EnumType', + full_name='google.protobuf.FeatureSet.EnumType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='ENUM_TYPE_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='OPEN', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CLOSED', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FEATURESET_ENUMTYPE) + + _FEATURESET_REPEATEDFIELDENCODING = _descriptor.EnumDescriptor( + name='RepeatedFieldEncoding', + full_name='google.protobuf.FeatureSet.RepeatedFieldEncoding', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='REPEATED_FIELD_ENCODING_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='PACKED', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='EXPANDED', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FEATURESET_REPEATEDFIELDENCODING) + + _FEATURESET_UTF8VALIDATION = _descriptor.EnumDescriptor( + name='Utf8Validation', + full_name='google.protobuf.FeatureSet.Utf8Validation', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='UTF8_VALIDATION_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='VERIFY', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='NONE', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FEATURESET_UTF8VALIDATION) + + _FEATURESET_MESSAGEENCODING = _descriptor.EnumDescriptor( + name='MessageEncoding', + full_name='google.protobuf.FeatureSet.MessageEncoding', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='MESSAGE_ENCODING_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LENGTH_PREFIXED', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='DELIMITED', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FEATURESET_MESSAGEENCODING) + + _FEATURESET_JSONFORMAT = _descriptor.EnumDescriptor( + name='JsonFormat', + full_name='google.protobuf.FeatureSet.JsonFormat', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='JSON_FORMAT_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ALLOW', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LEGACY_BEST_EFFORT', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FEATURESET_JSONFORMAT) + + _GENERATEDCODEINFO_ANNOTATION_SEMANTIC = _descriptor.EnumDescriptor( + name='Semantic', + full_name='google.protobuf.GeneratedCodeInfo.Annotation.Semantic', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='NONE', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='SET', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ALIAS', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_GENERATEDCODEINFO_ANNOTATION_SEMANTIC) + + + _FILEDESCRIPTORSET = _descriptor.Descriptor( + name='FileDescriptorSet', + full_name='google.protobuf.FileDescriptorSet', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='file', full_name='google.protobuf.FileDescriptorSet.file', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, 
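+      # Illustrative sketch (example values are made up): FileDescriptorSet,
+      # whose pure-Python Descriptor is being rebuilt here, is just a
+      # repeated list of FileDescriptorProto messages -- the container that
+      # protoc reads and writes with --descriptor_set_out. A minimal round
+      # trip through the generated message classes:
+      #
+      #   >>> from google.protobuf import descriptor_pb2
+      #   >>> fds = descriptor_pb2.FileDescriptorSet()
+      #   >>> f = fds.file.add(name='example.proto', package='demo')
+      #   >>> descriptor_pb2.FileDescriptorSet.FromString(
+      #   ...     fds.SerializeToString()) == fds
+      #   True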
+ is_extension=False, extension_scope=None, + serialized_options=None, json_name='file', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(536000000, 536000001), ], + oneofs=[ + ], + ) + + + _FILEDESCRIPTORPROTO = _descriptor.Descriptor( + name='FileDescriptorProto', + full_name='google.protobuf.FileDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.FileDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='package', full_name='google.protobuf.FileDescriptorProto.package', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='package', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='dependency', full_name='google.protobuf.FileDescriptorProto.dependency', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='dependency', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='public_dependency', full_name='google.protobuf.FileDescriptorProto.public_dependency', index=3, + number=10, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='publicDependency', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='weak_dependency', full_name='google.protobuf.FileDescriptorProto.weak_dependency', index=4, + number=11, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='weakDependency', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message_type', full_name='google.protobuf.FileDescriptorProto.message_type', index=5, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='messageType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enum_type', full_name='google.protobuf.FileDescriptorProto.enum_type', index=6, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='enumType', file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='service', full_name='google.protobuf.FileDescriptorProto.service', index=7, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='service', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension', full_name='google.protobuf.FileDescriptorProto.extension', index=8, + number=7, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='extension', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.FileDescriptorProto.options', index=9, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='source_code_info', full_name='google.protobuf.FileDescriptorProto.source_code_info', index=10, + number=9, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='sourceCodeInfo', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='syntax', full_name='google.protobuf.FileDescriptorProto.syntax', index=11, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='syntax', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='edition', full_name='google.protobuf.FileDescriptorProto.edition', index=12, + number=14, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='edition', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + + _DESCRIPTORPROTO_EXTENSIONRANGE = _descriptor.Descriptor( + name='ExtensionRange', + full_name='google.protobuf.DescriptorProto.ExtensionRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.DescriptorProto.ExtensionRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='start', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.DescriptorProto.ExtensionRange.end', 
index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='end', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.DescriptorProto.ExtensionRange.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + _DESCRIPTORPROTO_RESERVEDRANGE = _descriptor.Descriptor( + name='ReservedRange', + full_name='google.protobuf.DescriptorProto.ReservedRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.DescriptorProto.ReservedRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='start', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.DescriptorProto.ReservedRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='end', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + _DESCRIPTORPROTO = _descriptor.Descriptor( + name='DescriptorProto', + full_name='google.protobuf.DescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.DescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='field', full_name='google.protobuf.DescriptorProto.field', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='field', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension', full_name='google.protobuf.DescriptorProto.extension', index=2, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, 
json_name='extension', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='nested_type', full_name='google.protobuf.DescriptorProto.nested_type', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='nestedType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enum_type', full_name='google.protobuf.DescriptorProto.enum_type', index=4, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='enumType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension_range', full_name='google.protobuf.DescriptorProto.extension_range', index=5, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='extensionRange', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='oneof_decl', full_name='google.protobuf.DescriptorProto.oneof_decl', index=6, + number=8, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='oneofDecl', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.DescriptorProto.options', index=7, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_range', full_name='google.protobuf.DescriptorProto.reserved_range', index=8, + number=9, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='reservedRange', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_name', full_name='google.protobuf.DescriptorProto.reserved_name', index=9, + number=10, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='reservedName', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_DESCRIPTORPROTO_EXTENSIONRANGE, _DESCRIPTORPROTO_RESERVEDRANGE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + + _EXTENSIONRANGEOPTIONS_DECLARATION = _descriptor.Descriptor( + name='Declaration', + full_name='google.protobuf.ExtensionRangeOptions.Declaration', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + 
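+    # A short worked example (hedged; the names below are invented for the
+    # demo): the _DESCRIPTORPROTO descriptor finished just above shows how a
+    # message definition is itself described as data, with ExtensionRange and
+    # ReservedRange as nested types. Building one by hand:
+    #
+    #   >>> from google.protobuf import descriptor_pb2
+    #   >>> m = descriptor_pb2.DescriptorProto(name='Point')
+    #   >>> f = m.field.add(
+    #   ...     name='x', number=1,
+    #   ...     type=descriptor_pb2.FieldDescriptorProto.TYPE_INT32,
+    #   ...     label=descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL)
+    #   >>> r = m.reserved_range.add(start=100, end=200)  # end is exclusive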
fields=[ + _descriptor.FieldDescriptor( + name='number', full_name='google.protobuf.ExtensionRangeOptions.Declaration.number', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='number', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='full_name', full_name='google.protobuf.ExtensionRangeOptions.Declaration.full_name', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='fullName', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type', full_name='google.protobuf.ExtensionRangeOptions.Declaration.type', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='type', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved', full_name='google.protobuf.ExtensionRangeOptions.Declaration.reserved', index=3, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='reserved', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='repeated', full_name='google.protobuf.ExtensionRangeOptions.Declaration.repeated', index=4, + number=6, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='repeated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + _EXTENSIONRANGEOPTIONS = _descriptor.Descriptor( + name='ExtensionRangeOptions', + full_name='google.protobuf.ExtensionRangeOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.ExtensionRangeOptions.uninterpreted_option', index=0, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='declaration', full_name='google.protobuf.ExtensionRangeOptions.declaration', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\210\001\002', json_name='declaration', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + 
name='features', full_name='google.protobuf.ExtensionRangeOptions.features', index=2, + number=50, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='features', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='verification', full_name='google.protobuf.ExtensionRangeOptions.verification', index=3, + number=3, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\210\001\002', json_name='verification', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_EXTENSIONRANGEOPTIONS_DECLARATION, ], + enum_types=[ + _EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE, + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _FIELDDESCRIPTORPROTO = _descriptor.Descriptor( + name='FieldDescriptorProto', + full_name='google.protobuf.FieldDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.FieldDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='number', full_name='google.protobuf.FieldDescriptorProto.number', index=1, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='number', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='label', full_name='google.protobuf.FieldDescriptorProto.label', index=2, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='label', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type', full_name='google.protobuf.FieldDescriptorProto.type', index=3, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='type', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type_name', full_name='google.protobuf.FieldDescriptorProto.type_name', index=4, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='typeName', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extendee', full_name='google.protobuf.FieldDescriptorProto.extendee', index=5, + 
number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='extendee', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='default_value', full_name='google.protobuf.FieldDescriptorProto.default_value', index=6, + number=7, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='defaultValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='oneof_index', full_name='google.protobuf.FieldDescriptorProto.oneof_index', index=7, + number=9, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='oneofIndex', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='json_name', full_name='google.protobuf.FieldDescriptorProto.json_name', index=8, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='jsonName', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.FieldDescriptorProto.options', index=9, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='proto3_optional', full_name='google.protobuf.FieldDescriptorProto.proto3_optional', index=10, + number=17, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='proto3Optional', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FIELDDESCRIPTORPROTO_TYPE, + _FIELDDESCRIPTORPROTO_LABEL, + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + + _ONEOFDESCRIPTORPROTO = _descriptor.Descriptor( + name='OneofDescriptorProto', + full_name='google.protobuf.OneofDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.OneofDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.OneofDescriptorProto.options', index=1, + number=2, type=11, cpp_type=10, label=1, + 
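+      # Decoding note (editorial, based on the enums defined earlier in this
+      # module): the bare integers in these FieldDescriptor constructors are
+      # FieldDescriptorProto.Type and FieldDescriptor.CppType codes, e.g.
+      # type=9/cpp_type=9 is a string field, type=11/cpp_type=10 an embedded
+      # message, type=5/cpp_type=1 an int32, type=8/cpp_type=7 a bool, and
+      # type=14/cpp_type=8 an enum; label=1 means optional, label=3 repeated.
+      #
+      #   >>> from google.protobuf.descriptor import FieldDescriptor
+      #   >>> FieldDescriptor.CPPTYPE_STRING, FieldDescriptor.TYPE_STRING
+      #   (9, 9)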
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE = _descriptor.Descriptor( + name='EnumReservedRange', + full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='start', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='end', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + _ENUMDESCRIPTORPROTO = _descriptor.Descriptor( + name='EnumDescriptorProto', + full_name='google.protobuf.EnumDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.EnumDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='google.protobuf.EnumDescriptorProto.value', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='value', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.EnumDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_range', full_name='google.protobuf.EnumDescriptorProto.reserved_range', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, 
json_name='reservedRange', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_name', full_name='google.protobuf.EnumDescriptorProto.reserved_name', index=4, + number=5, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='reservedName', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + + _ENUMVALUEDESCRIPTORPROTO = _descriptor.Descriptor( + name='EnumValueDescriptorProto', + full_name='google.protobuf.EnumValueDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.EnumValueDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='number', full_name='google.protobuf.EnumValueDescriptorProto.number', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='number', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.EnumValueDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + + _SERVICEDESCRIPTORPROTO = _descriptor.Descriptor( + name='ServiceDescriptorProto', + full_name='google.protobuf.ServiceDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.ServiceDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='method', full_name='google.protobuf.ServiceDescriptorProto.method', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='method', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + 
name='options', full_name='google.protobuf.ServiceDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + + _METHODDESCRIPTORPROTO = _descriptor.Descriptor( + name='MethodDescriptorProto', + full_name='google.protobuf.MethodDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.MethodDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='input_type', full_name='google.protobuf.MethodDescriptorProto.input_type', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='inputType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='output_type', full_name='google.protobuf.MethodDescriptorProto.output_type', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='outputType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.MethodDescriptorProto.options', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='client_streaming', full_name='google.protobuf.MethodDescriptorProto.client_streaming', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='clientStreaming', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='server_streaming', full_name='google.protobuf.MethodDescriptorProto.server_streaming', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='serverStreaming', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + 
extension_ranges=[], + oneofs=[ + ], + ) + + + _FILEOPTIONS = _descriptor.Descriptor( + name='FileOptions', + full_name='google.protobuf.FileOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='java_package', full_name='google.protobuf.FileOptions.java_package', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='javaPackage', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_outer_classname', full_name='google.protobuf.FileOptions.java_outer_classname', index=1, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='javaOuterClassname', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_multiple_files', full_name='google.protobuf.FileOptions.java_multiple_files', index=2, + number=10, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='javaMultipleFiles', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_generate_equals_and_hash', full_name='google.protobuf.FileOptions.java_generate_equals_and_hash', index=3, + number=20, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\030\001', json_name='javaGenerateEqualsAndHash', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_string_check_utf8', full_name='google.protobuf.FileOptions.java_string_check_utf8', index=4, + number=27, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='javaStringCheckUtf8', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='optimize_for', full_name='google.protobuf.FileOptions.optimize_for', index=5, + number=9, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='optimizeFor', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='go_package', full_name='google.protobuf.FileOptions.go_package', index=6, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='goPackage', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cc_generic_services', full_name='google.protobuf.FileOptions.cc_generic_services', index=7, + number=16, type=8, 
cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='ccGenericServices', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_generic_services', full_name='google.protobuf.FileOptions.java_generic_services', index=8, + number=17, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='javaGenericServices', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='py_generic_services', full_name='google.protobuf.FileOptions.py_generic_services', index=9, + number=18, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='pyGenericServices', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.FileOptions.deprecated', index=10, + number=23, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cc_enable_arenas', full_name='google.protobuf.FileOptions.cc_enable_arenas', index=11, + number=31, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='ccEnableArenas', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='objc_class_prefix', full_name='google.protobuf.FileOptions.objc_class_prefix', index=12, + number=36, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='objcClassPrefix', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='csharp_namespace', full_name='google.protobuf.FileOptions.csharp_namespace', index=13, + number=37, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='csharpNamespace', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='swift_prefix', full_name='google.protobuf.FileOptions.swift_prefix', index=14, + number=39, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='swiftPrefix', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_class_prefix', full_name='google.protobuf.FileOptions.php_class_prefix', index=15, + number=40, type=9, cpp_type=9, label=1, 
+ has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='phpClassPrefix', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_namespace', full_name='google.protobuf.FileOptions.php_namespace', index=16, + number=41, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='phpNamespace', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_metadata_namespace', full_name='google.protobuf.FileOptions.php_metadata_namespace', index=17, + number=44, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='phpMetadataNamespace', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ruby_package', full_name='google.protobuf.FileOptions.ruby_package', index=18, + number=45, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='rubyPackage', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='features', full_name='google.protobuf.FileOptions.features', index=19, + number=50, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='features', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.FileOptions.uninterpreted_option', index=20, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FILEOPTIONS_OPTIMIZEMODE, + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _MESSAGEOPTIONS = _descriptor.Descriptor( + name='MessageOptions', + full_name='google.protobuf.MessageOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='message_set_wire_format', full_name='google.protobuf.MessageOptions.message_set_wire_format', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='messageSetWireFormat', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='no_standard_descriptor_accessor', full_name='google.protobuf.MessageOptions.no_standard_descriptor_accessor', 
index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='noStandardDescriptorAccessor', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.MessageOptions.deprecated', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='map_entry', full_name='google.protobuf.MessageOptions.map_entry', index=3, + number=7, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='mapEntry', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated_legacy_json_field_conflicts', full_name='google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts', index=4, + number=11, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\030\001', json_name='deprecatedLegacyJsonFieldConflicts', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='features', full_name='google.protobuf.MessageOptions.features', index=5, + number=12, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='features', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.MessageOptions.uninterpreted_option', index=6, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _FIELDOPTIONS_EDITIONDEFAULT = _descriptor.Descriptor( + name='EditionDefault', + full_name='google.protobuf.FieldOptions.EditionDefault', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='edition', full_name='google.protobuf.FieldOptions.EditionDefault.edition', index=0, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='edition', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='google.protobuf.FieldOptions.EditionDefault.value', index=1, + number=2, type=9, 
cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='value', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + _FIELDOPTIONS_FEATURESUPPORT = _descriptor.Descriptor( + name='FeatureSupport', + full_name='google.protobuf.FieldOptions.FeatureSupport', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='edition_introduced', full_name='google.protobuf.FieldOptions.FeatureSupport.edition_introduced', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='editionIntroduced', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='edition_deprecated', full_name='google.protobuf.FieldOptions.FeatureSupport.edition_deprecated', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='editionDeprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecation_warning', full_name='google.protobuf.FieldOptions.FeatureSupport.deprecation_warning', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='deprecationWarning', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='edition_removed', full_name='google.protobuf.FieldOptions.FeatureSupport.edition_removed', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='editionRemoved', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + _FIELDOPTIONS = _descriptor.Descriptor( + name='FieldOptions', + full_name='google.protobuf.FieldOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ctype', full_name='google.protobuf.FieldOptions.ctype', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='ctype', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='packed', full_name='google.protobuf.FieldOptions.packed', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='packed', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='jstype', full_name='google.protobuf.FieldOptions.jstype', index=2, + number=6, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='jstype', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='lazy', full_name='google.protobuf.FieldOptions.lazy', index=3, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='lazy', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='unverified_lazy', full_name='google.protobuf.FieldOptions.unverified_lazy', index=4, + number=15, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='unverifiedLazy', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.FieldOptions.deprecated', index=5, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='weak', full_name='google.protobuf.FieldOptions.weak', index=6, + number=10, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='weak', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='debug_redact', full_name='google.protobuf.FieldOptions.debug_redact', index=7, + number=16, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='debugRedact', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='retention', full_name='google.protobuf.FieldOptions.retention', index=8, + number=17, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='retention', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='targets', full_name='google.protobuf.FieldOptions.targets', index=9, + number=19, type=14, cpp_type=8, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='targets', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + 
name='edition_defaults', full_name='google.protobuf.FieldOptions.edition_defaults', index=10, + number=20, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='editionDefaults', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='features', full_name='google.protobuf.FieldOptions.features', index=11, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='features', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='feature_support', full_name='google.protobuf.FieldOptions.feature_support', index=12, + number=22, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='featureSupport', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.FieldOptions.uninterpreted_option', index=13, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_FIELDOPTIONS_EDITIONDEFAULT, _FIELDOPTIONS_FEATURESUPPORT, ], + enum_types=[ + _FIELDOPTIONS_CTYPE, + _FIELDOPTIONS_JSTYPE, + _FIELDOPTIONS_OPTIONRETENTION, + _FIELDOPTIONS_OPTIONTARGETTYPE, + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ONEOFOPTIONS = _descriptor.Descriptor( + name='OneofOptions', + full_name='google.protobuf.OneofOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='features', full_name='google.protobuf.OneofOptions.features', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='features', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.OneofOptions.uninterpreted_option', index=1, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ENUMOPTIONS = _descriptor.Descriptor( + name='EnumOptions', + full_name='google.protobuf.EnumOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + 
fields=[ + _descriptor.FieldDescriptor( + name='allow_alias', full_name='google.protobuf.EnumOptions.allow_alias', index=0, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='allowAlias', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.EnumOptions.deprecated', index=1, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated_legacy_json_field_conflicts', full_name='google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts', index=2, + number=6, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\030\001', json_name='deprecatedLegacyJsonFieldConflicts', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='features', full_name='google.protobuf.EnumOptions.features', index=3, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='features', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.EnumOptions.uninterpreted_option', index=4, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ENUMVALUEOPTIONS = _descriptor.Descriptor( + name='EnumValueOptions', + full_name='google.protobuf.EnumValueOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.EnumValueOptions.deprecated', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='features', full_name='google.protobuf.EnumValueOptions.features', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='features', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='debug_redact', 
full_name='google.protobuf.EnumValueOptions.debug_redact', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='debugRedact', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='feature_support', full_name='google.protobuf.EnumValueOptions.feature_support', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='featureSupport', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.EnumValueOptions.uninterpreted_option', index=4, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _SERVICEOPTIONS = _descriptor.Descriptor( + name='ServiceOptions', + full_name='google.protobuf.ServiceOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='features', full_name='google.protobuf.ServiceOptions.features', index=0, + number=34, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='features', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.ServiceOptions.deprecated', index=1, + number=33, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.ServiceOptions.uninterpreted_option', index=2, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _METHODOPTIONS = _descriptor.Descriptor( + name='MethodOptions', + full_name='google.protobuf.MethodOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.MethodOptions.deprecated', index=0, + number=33, type=8, cpp_type=7, label=1, + 
has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='idempotency_level', full_name='google.protobuf.MethodOptions.idempotency_level', index=1, + number=34, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='idempotencyLevel', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='features', full_name='google.protobuf.MethodOptions.features', index=2, + number=35, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='features', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.MethodOptions.uninterpreted_option', index=3, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _METHODOPTIONS_IDEMPOTENCYLEVEL, + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _UNINTERPRETEDOPTION_NAMEPART = _descriptor.Descriptor( + name='NamePart', + full_name='google.protobuf.UninterpretedOption.NamePart', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name_part', full_name='google.protobuf.UninterpretedOption.NamePart.name_part', index=0, + number=1, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='namePart', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='is_extension', full_name='google.protobuf.UninterpretedOption.NamePart.is_extension', index=1, + number=2, type=8, cpp_type=7, label=2, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='isExtension', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + _UNINTERPRETEDOPTION = _descriptor.Descriptor( + name='UninterpretedOption', + full_name='google.protobuf.UninterpretedOption', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.UninterpretedOption.name', index=0, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='identifier_value', full_name='google.protobuf.UninterpretedOption.identifier_value', index=1, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='identifierValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='positive_int_value', full_name='google.protobuf.UninterpretedOption.positive_int_value', index=2, + number=4, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='positiveIntValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='negative_int_value', full_name='google.protobuf.UninterpretedOption.negative_int_value', index=3, + number=5, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='negativeIntValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='double_value', full_name='google.protobuf.UninterpretedOption.double_value', index=4, + number=6, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='doubleValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='string_value', full_name='google.protobuf.UninterpretedOption.string_value', index=5, + number=7, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='stringValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='aggregate_value', full_name='google.protobuf.UninterpretedOption.aggregate_value', index=6, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='aggregateValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_UNINTERPRETEDOPTION_NAMEPART, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + + _FEATURESET = _descriptor.Descriptor( + name='FeatureSet', + full_name='google.protobuf.FeatureSet', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='field_presence', full_name='google.protobuf.FeatureSet.field_presence', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + serialized_options=b'\210\001\001\230\001\004\230\001\001\242\001\r\022\010EXPLICIT\030\204\007\242\001\r\022\010IMPLICIT\030\347\007\242\001\r\022\010EXPLICIT\030\350\007\262\001\003\010\350\007', json_name='fieldPresence', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enum_type', full_name='google.protobuf.FeatureSet.enum_type', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\210\001\001\230\001\006\230\001\001\242\001\013\022\006CLOSED\030\204\007\242\001\t\022\004OPEN\030\347\007\262\001\003\010\350\007', json_name='enumType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='repeated_field_encoding', full_name='google.protobuf.FeatureSet.repeated_field_encoding', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\210\001\001\230\001\004\230\001\001\242\001\r\022\010EXPANDED\030\204\007\242\001\013\022\006PACKED\030\347\007\262\001\003\010\350\007', json_name='repeatedFieldEncoding', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='utf8_validation', full_name='google.protobuf.FeatureSet.utf8_validation', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\210\001\001\230\001\004\230\001\001\242\001\t\022\004NONE\030\204\007\242\001\013\022\006VERIFY\030\347\007\262\001\003\010\350\007', json_name='utf8Validation', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message_encoding', full_name='google.protobuf.FeatureSet.message_encoding', index=4, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\210\001\001\230\001\004\230\001\001\242\001\024\022\017LENGTH_PREFIXED\030\204\007\262\001\003\010\350\007', json_name='messageEncoding', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='json_format', full_name='google.protobuf.FeatureSet.json_format', index=5, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\210\001\001\230\001\003\230\001\006\230\001\001\242\001\027\022\022LEGACY_BEST_EFFORT\030\204\007\242\001\n\022\005ALLOW\030\347\007\262\001\003\010\350\007', json_name='jsonFormat', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FEATURESET_FIELDPRESENCE, + _FEATURESET_ENUMTYPE, + _FEATURESET_REPEATEDFIELDENCODING, + _FEATURESET_UTF8VALIDATION, + _FEATURESET_MESSAGEENCODING, + _FEATURESET_JSONFORMAT, + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(1000, 9995), (9995, 10000), (10000, 10001), ], + oneofs=[ + ], + ) + + + 
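+  # The serialized_options blobs on the FeatureSet fields above are
+  # wire-format google.protobuf.FieldOptions payloads: they carry the
+  # retention/targets settings plus the per-edition feature defaults
+  # (e.g. field_presence flipping from EXPLICIT in the legacy edition to
+  # IMPLICIT in proto3 and back to EXPLICIT in edition 2023).
+  # A minimal decoding sketch, assuming this module is importable as
+  # google.protobuf.descriptor_pb2 (illustrative only, not executed here):
+  #
+  #   from google.protobuf import descriptor_pb2
+  #   field = descriptor_pb2.FeatureSet.DESCRIPTOR.fields_by_name['field_presence']
+  #   opts = field.GetOptions()          # a FieldOptions message
+  #   for d in opts.edition_defaults:    # one entry per edition listed above
+  #       print(d.edition, d.value)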
_FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT = _descriptor.Descriptor( + name='FeatureSetEditionDefault', + full_name='google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='edition', full_name='google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition', index=0, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='edition', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='overridable_features', full_name='google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features', index=1, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='overridableFeatures', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='fixed_features', full_name='google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features', index=2, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='fixedFeatures', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + _FEATURESETDEFAULTS = _descriptor.Descriptor( + name='FeatureSetDefaults', + full_name='google.protobuf.FeatureSetDefaults', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='defaults', full_name='google.protobuf.FeatureSetDefaults.defaults', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='defaults', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='minimum_edition', full_name='google.protobuf.FeatureSetDefaults.minimum_edition', index=1, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='minimumEdition', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='maximum_edition', full_name='google.protobuf.FeatureSetDefaults.maximum_edition', index=2, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='maximumEdition', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + 
extension_ranges=[], + oneofs=[ + ], + ) + + + _SOURCECODEINFO_LOCATION = _descriptor.Descriptor( + name='Location', + full_name='google.protobuf.SourceCodeInfo.Location', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='path', full_name='google.protobuf.SourceCodeInfo.Location.path', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\020\001', json_name='path', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='span', full_name='google.protobuf.SourceCodeInfo.Location.span', index=1, + number=2, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\020\001', json_name='span', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='leading_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_comments', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='leadingComments', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='trailing_comments', full_name='google.protobuf.SourceCodeInfo.Location.trailing_comments', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='trailingComments', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='leading_detached_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_detached_comments', index=4, + number=6, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='leadingDetachedComments', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + _SOURCECODEINFO = _descriptor.Descriptor( + name='SourceCodeInfo', + full_name='google.protobuf.SourceCodeInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='location', full_name='google.protobuf.SourceCodeInfo.location', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='location', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SOURCECODEINFO_LOCATION, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + extension_ranges=[(536000000, 536000001), ], + oneofs=[ + ], + ) + + + 
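+  # Location.path and Location.span both carry serialized_options=b'\020\001',
+  # i.e. an encoded FieldOptions with packed=true, mirroring the
+  # [packed = true] annotations on these repeated int32 fields in
+  # descriptor.proto. Per the comments in descriptor.proto, a path such as
+  # [4, 0, 2, 3] addresses message_type(0).field(3) of a
+  # FileDescriptorProto, and a span is (start_line, start_col, end_line,
+  # end_col), collapsing to three ints when the start and end lines coincide.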
_GENERATEDCODEINFO_ANNOTATION = _descriptor.Descriptor( + name='Annotation', + full_name='google.protobuf.GeneratedCodeInfo.Annotation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='path', full_name='google.protobuf.GeneratedCodeInfo.Annotation.path', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\020\001', json_name='path', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='source_file', full_name='google.protobuf.GeneratedCodeInfo.Annotation.source_file', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='sourceFile', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='begin', full_name='google.protobuf.GeneratedCodeInfo.Annotation.begin', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='begin', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.GeneratedCodeInfo.Annotation.end', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='end', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='semantic', full_name='google.protobuf.GeneratedCodeInfo.Annotation.semantic', index=4, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='semantic', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _GENERATEDCODEINFO_ANNOTATION_SEMANTIC, + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + _GENERATEDCODEINFO = _descriptor.Descriptor( + name='GeneratedCodeInfo', + full_name='google.protobuf.GeneratedCodeInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='annotation', full_name='google.protobuf.GeneratedCodeInfo.annotation', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, json_name='annotation', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_GENERATEDCODEINFO_ANNOTATION, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + ) + + _FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO + 
_FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS + _FILEDESCRIPTORPROTO.fields_by_name['source_code_info'].message_type = _SOURCECODEINFO + _FILEDESCRIPTORPROTO.fields_by_name['edition'].enum_type = _EDITION + _DESCRIPTORPROTO_EXTENSIONRANGE.fields_by_name['options'].message_type = _EXTENSIONRANGEOPTIONS + _DESCRIPTORPROTO_EXTENSIONRANGE.containing_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO_RESERVEDRANGE.containing_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE + _DESCRIPTORPROTO.fields_by_name['oneof_decl'].message_type = _ONEOFDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS + _DESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _DESCRIPTORPROTO_RESERVEDRANGE + _EXTENSIONRANGEOPTIONS_DECLARATION.containing_type = _EXTENSIONRANGEOPTIONS + _EXTENSIONRANGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _EXTENSIONRANGEOPTIONS.fields_by_name['declaration'].message_type = _EXTENSIONRANGEOPTIONS_DECLARATION + _EXTENSIONRANGEOPTIONS.fields_by_name['features'].message_type = _FEATURESET + _EXTENSIONRANGEOPTIONS.fields_by_name['verification'].enum_type = _EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE + _EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE.containing_type = _EXTENSIONRANGEOPTIONS + _FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = _FIELDDESCRIPTORPROTO_LABEL + _FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE + _FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS + _FIELDDESCRIPTORPROTO_TYPE.containing_type = _FIELDDESCRIPTORPROTO + _FIELDDESCRIPTORPROTO_LABEL.containing_type = _FIELDDESCRIPTORPROTO + _ONEOFDESCRIPTORPROTO.fields_by_name['options'].message_type = _ONEOFOPTIONS + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE.containing_type = _ENUMDESCRIPTORPROTO + _ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO + _ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS + _ENUMDESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE + _ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS + _SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO + _SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type = _SERVICEOPTIONS + _METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS + _FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE + _FILEOPTIONS.fields_by_name['features'].message_type = _FEATURESET + _FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FILEOPTIONS_OPTIMIZEMODE.containing_type = _FILEOPTIONS + 
_MESSAGEOPTIONS.fields_by_name['features'].message_type = _FEATURESET + _MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDOPTIONS_EDITIONDEFAULT.fields_by_name['edition'].enum_type = _EDITION + _FIELDOPTIONS_EDITIONDEFAULT.containing_type = _FIELDOPTIONS + _FIELDOPTIONS_FEATURESUPPORT.fields_by_name['edition_introduced'].enum_type = _EDITION + _FIELDOPTIONS_FEATURESUPPORT.fields_by_name['edition_deprecated'].enum_type = _EDITION + _FIELDOPTIONS_FEATURESUPPORT.fields_by_name['edition_removed'].enum_type = _EDITION + _FIELDOPTIONS_FEATURESUPPORT.containing_type = _FIELDOPTIONS + _FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE + _FIELDOPTIONS.fields_by_name['jstype'].enum_type = _FIELDOPTIONS_JSTYPE + _FIELDOPTIONS.fields_by_name['retention'].enum_type = _FIELDOPTIONS_OPTIONRETENTION + _FIELDOPTIONS.fields_by_name['targets'].enum_type = _FIELDOPTIONS_OPTIONTARGETTYPE + _FIELDOPTIONS.fields_by_name['edition_defaults'].message_type = _FIELDOPTIONS_EDITIONDEFAULT + _FIELDOPTIONS.fields_by_name['features'].message_type = _FEATURESET + _FIELDOPTIONS.fields_by_name['feature_support'].message_type = _FIELDOPTIONS_FEATURESUPPORT + _FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDOPTIONS_CTYPE.containing_type = _FIELDOPTIONS + _FIELDOPTIONS_JSTYPE.containing_type = _FIELDOPTIONS + _FIELDOPTIONS_OPTIONRETENTION.containing_type = _FIELDOPTIONS + _FIELDOPTIONS_OPTIONTARGETTYPE.containing_type = _FIELDOPTIONS + _ONEOFOPTIONS.fields_by_name['features'].message_type = _FEATURESET + _ONEOFOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _ENUMOPTIONS.fields_by_name['features'].message_type = _FEATURESET + _ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _ENUMVALUEOPTIONS.fields_by_name['features'].message_type = _FEATURESET + _ENUMVALUEOPTIONS.fields_by_name['feature_support'].message_type = _FIELDOPTIONS_FEATURESUPPORT + _ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _SERVICEOPTIONS.fields_by_name['features'].message_type = _FEATURESET + _SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _METHODOPTIONS.fields_by_name['idempotency_level'].enum_type = _METHODOPTIONS_IDEMPOTENCYLEVEL + _METHODOPTIONS.fields_by_name['features'].message_type = _FEATURESET + _METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _METHODOPTIONS_IDEMPOTENCYLEVEL.containing_type = _METHODOPTIONS + _UNINTERPRETEDOPTION_NAMEPART.containing_type = _UNINTERPRETEDOPTION + _UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART + _FEATURESET.fields_by_name['field_presence'].enum_type = _FEATURESET_FIELDPRESENCE + _FEATURESET.fields_by_name['enum_type'].enum_type = _FEATURESET_ENUMTYPE + _FEATURESET.fields_by_name['repeated_field_encoding'].enum_type = _FEATURESET_REPEATEDFIELDENCODING + _FEATURESET.fields_by_name['utf8_validation'].enum_type = _FEATURESET_UTF8VALIDATION + _FEATURESET.fields_by_name['message_encoding'].enum_type = _FEATURESET_MESSAGEENCODING + _FEATURESET.fields_by_name['json_format'].enum_type = _FEATURESET_JSONFORMAT + _FEATURESET_FIELDPRESENCE.containing_type = _FEATURESET + _FEATURESET_ENUMTYPE.containing_type = _FEATURESET + _FEATURESET_REPEATEDFIELDENCODING.containing_type = _FEATURESET + _FEATURESET_UTF8VALIDATION.containing_type = _FEATURESET + 
+ _FEATURESET_MESSAGEENCODING.containing_type = _FEATURESET
+ _FEATURESET_JSONFORMAT.containing_type = _FEATURESET
+ _FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT.fields_by_name['edition'].enum_type = _EDITION
+ _FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT.fields_by_name['overridable_features'].message_type = _FEATURESET
+ _FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT.fields_by_name['fixed_features'].message_type = _FEATURESET
+ _FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT.containing_type = _FEATURESETDEFAULTS
+ _FEATURESETDEFAULTS.fields_by_name['defaults'].message_type = _FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT
+ _FEATURESETDEFAULTS.fields_by_name['minimum_edition'].enum_type = _EDITION
+ _FEATURESETDEFAULTS.fields_by_name['maximum_edition'].enum_type = _EDITION
+ _SOURCECODEINFO_LOCATION.containing_type = _SOURCECODEINFO
+ _SOURCECODEINFO.fields_by_name['location'].message_type = _SOURCECODEINFO_LOCATION
+ _GENERATEDCODEINFO_ANNOTATION.fields_by_name['semantic'].enum_type = _GENERATEDCODEINFO_ANNOTATION_SEMANTIC
+ _GENERATEDCODEINFO_ANNOTATION.containing_type = _GENERATEDCODEINFO
+ _GENERATEDCODEINFO_ANNOTATION_SEMANTIC.containing_type = _GENERATEDCODEINFO_ANNOTATION
+ _GENERATEDCODEINFO.fields_by_name['annotation'].message_type = _GENERATEDCODEINFO_ANNOTATION
+ DESCRIPTOR.message_types_by_name['FileDescriptorSet'] = _FILEDESCRIPTORSET
+ DESCRIPTOR.message_types_by_name['FileDescriptorProto'] = _FILEDESCRIPTORPROTO
+ DESCRIPTOR.message_types_by_name['DescriptorProto'] = _DESCRIPTORPROTO
+ DESCRIPTOR.message_types_by_name['ExtensionRangeOptions'] = _EXTENSIONRANGEOPTIONS
+ DESCRIPTOR.message_types_by_name['FieldDescriptorProto'] = _FIELDDESCRIPTORPROTO
+ DESCRIPTOR.message_types_by_name['OneofDescriptorProto'] = _ONEOFDESCRIPTORPROTO
+ DESCRIPTOR.message_types_by_name['EnumDescriptorProto'] = _ENUMDESCRIPTORPROTO
+ DESCRIPTOR.message_types_by_name['EnumValueDescriptorProto'] = _ENUMVALUEDESCRIPTORPROTO
+ DESCRIPTOR.message_types_by_name['ServiceDescriptorProto'] = _SERVICEDESCRIPTORPROTO
+ DESCRIPTOR.message_types_by_name['MethodDescriptorProto'] = _METHODDESCRIPTORPROTO
+ DESCRIPTOR.message_types_by_name['FileOptions'] = _FILEOPTIONS
+ DESCRIPTOR.message_types_by_name['MessageOptions'] = _MESSAGEOPTIONS
+ DESCRIPTOR.message_types_by_name['FieldOptions'] = _FIELDOPTIONS
+ DESCRIPTOR.message_types_by_name['OneofOptions'] = _ONEOFOPTIONS
+ DESCRIPTOR.message_types_by_name['EnumOptions'] = _ENUMOPTIONS
+ DESCRIPTOR.message_types_by_name['EnumValueOptions'] = _ENUMVALUEOPTIONS
+ DESCRIPTOR.message_types_by_name['ServiceOptions'] = _SERVICEOPTIONS
+ DESCRIPTOR.message_types_by_name['MethodOptions'] = _METHODOPTIONS
+ DESCRIPTOR.message_types_by_name['UninterpretedOption'] = _UNINTERPRETEDOPTION
+ DESCRIPTOR.message_types_by_name['FeatureSet'] = _FEATURESET
+ DESCRIPTOR.message_types_by_name['FeatureSetDefaults'] = _FEATURESETDEFAULTS
+ DESCRIPTOR.message_types_by_name['SourceCodeInfo'] = _SOURCECODEINFO
+ DESCRIPTOR.message_types_by_name['GeneratedCodeInfo'] = _GENERATEDCODEINFO
+ DESCRIPTOR.enum_types_by_name['Edition'] = _EDITION
+ _sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+ class _ResolvedFeatures:
+   """Plain attribute bag holding resolved edition features, either copied
+   from another features object or supplied as keyword arguments."""
+   def __init__(self, features = None, **kwargs):
+     if features:
+       for k, v in features.FIELDS.items():
+         setattr(self, k, getattr(features, k))
+     else:
+       for k, v in kwargs.items():
+         setattr(self, k, v)
+ # descriptor.proto is itself a proto2 file, so every descriptor and field in
+ # this module resolves to the identical legacy feature set. It is constructed
+ # once here and shared below; the instance is never mutated after creation.
+ _PROTO2_FEATURES = _ResolvedFeatures(
+     field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,
+     enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,
+     repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,
+     utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,
+     message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,
+     json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number,
+ )
+ DESCRIPTOR._features = _PROTO2_FEATURES
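A quick gloss of what the shared `_PROTO2_FEATURES` defined above actually selects. The value names come from the FeatureSet enums in this file; the annotations are a summary of proto2 semantics, not upstream documentation:

    # Proto2 semantics, expressed as edition features.
    PROTO2_FEATURE_DEFAULTS = {
        "field_presence": "EXPLICIT",           # singular fields track explicit presence
        "enum_type": "CLOSED",                  # out-of-range enum values are not stored in the field
        "repeated_field_encoding": "EXPANDED",  # repeated scalars are not packed by default
        "utf8_validation": "NONE",              # string fields are not checked for valid UTF-8
        "message_encoding": "LENGTH_PREFIXED",  # submessages use length-delimited wire format
        "json_format": "LEGACY_BEST_EFFORT",    # JSON round-tripping is best-effort only
    }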
+ # Every message descriptor in this file, and each of its fields, carries the
+ # same proto2 feature set resolved above.
+ for _desc in (
+     _FILEDESCRIPTORSET, _FILEDESCRIPTORPROTO, _DESCRIPTORPROTO,
+     _DESCRIPTORPROTO_EXTENSIONRANGE, _DESCRIPTORPROTO_RESERVEDRANGE,
+     _EXTENSIONRANGEOPTIONS, _EXTENSIONRANGEOPTIONS_DECLARATION,
+     _FIELDDESCRIPTORPROTO, _ONEOFDESCRIPTORPROTO, _ENUMDESCRIPTORPROTO,
+     _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE, _ENUMVALUEDESCRIPTORPROTO,
+     _SERVICEDESCRIPTORPROTO, _METHODDESCRIPTORPROTO,
+ ):
+   _desc._features = _PROTO2_FEATURES
+   for _field in _desc.fields:
+     _field._features = _PROTO2_FEATURES
+ _FILEOPTIONS._features = _PROTO2_FEATURES
+ for _field in _FILEOPTIONS.fields[:16]:
+   _field._features = _PROTO2_FEATURES
+ _FILEOPTIONS.fields[16]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FILEOPTIONS.fields[17]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FILEOPTIONS.fields[18]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FILEOPTIONS.fields[19]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FILEOPTIONS.fields[20]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _MESSAGEOPTIONS._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _MESSAGEOPTIONS.fields[0]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _MESSAGEOPTIONS.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _MESSAGEOPTIONS.fields[2]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _MESSAGEOPTIONS.fields[3]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _MESSAGEOPTIONS.fields[4]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _MESSAGEOPTIONS.fields[5]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _MESSAGEOPTIONS.fields[6]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[2]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[3]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[4]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[5]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[6]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[7]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[8]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[9]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[10]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[11]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[12]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS.fields[13]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS_EDITIONDEFAULT._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS_EDITIONDEFAULT.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS_EDITIONDEFAULT.fields[1]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS_FEATURESUPPORT._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS_FEATURESUPPORT.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS_FEATURESUPPORT.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS_FEATURESUPPORT.fields[2]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FIELDOPTIONS_FEATURESUPPORT.fields[3]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ONEOFOPTIONS._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ONEOFOPTIONS.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ONEOFOPTIONS.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMOPTIONS._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMOPTIONS.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMOPTIONS.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMOPTIONS.fields[2]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMOPTIONS.fields[3]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMOPTIONS.fields[4]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMVALUEOPTIONS._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMVALUEOPTIONS.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMVALUEOPTIONS.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMVALUEOPTIONS.fields[2]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMVALUEOPTIONS.fields[3]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _ENUMVALUEOPTIONS.fields[4]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _SERVICEOPTIONS._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _SERVICEOPTIONS.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _SERVICEOPTIONS.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _SERVICEOPTIONS.fields[2]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _METHODOPTIONS._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _METHODOPTIONS.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _METHODOPTIONS.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _METHODOPTIONS.fields[2]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _METHODOPTIONS.fields[3]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _UNINTERPRETEDOPTION._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _UNINTERPRETEDOPTION.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _UNINTERPRETEDOPTION.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _UNINTERPRETEDOPTION.fields[2]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _UNINTERPRETEDOPTION.fields[3]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _UNINTERPRETEDOPTION.fields[4]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _UNINTERPRETEDOPTION.fields[5]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _UNINTERPRETEDOPTION.fields[6]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _UNINTERPRETEDOPTION_NAMEPART._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _UNINTERPRETEDOPTION_NAMEPART.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["LEGACY_REQUIRED"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _UNINTERPRETEDOPTION_NAMEPART.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["LEGACY_REQUIRED"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESET._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESET.fields[0]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESET.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESET.fields[2]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESET.fields[3]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESET.fields[4]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESET.fields[5]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESETDEFAULTS._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESETDEFAULTS.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESETDEFAULTS.fields[1]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESETDEFAULTS.fields[2]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT.fields[0]._features = _ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number) + _FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT.fields[1]._features = 
_ResolvedFeatures(field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name["EXPANDED"].number,utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number)
+  # All remaining descriptors resolve to the same proto2 feature set; a helper
+  # builds one fresh _ResolvedFeatures instance per descriptor, with PACKED
+  # substituted for EXPANDED on the three packed repeated fields.
+  def _proto2_features(repeated_field_encoding="EXPANDED"):
+    return _ResolvedFeatures(
+        field_presence=_FEATURESET_FIELDPRESENCE.values_by_name["EXPLICIT"].number,
+        enum_type=_FEATURESET_ENUMTYPE.values_by_name["CLOSED"].number,
+        repeated_field_encoding=_FEATURESET_REPEATEDFIELDENCODING.values_by_name[repeated_field_encoding].number,
+        utf8_validation=_FEATURESET_UTF8VALIDATION.values_by_name["NONE"].number,
+        message_encoding=_FEATURESET_MESSAGEENCODING.values_by_name["LENGTH_PREFIXED"].number,
+        json_format=_FEATURESET_JSONFORMAT.values_by_name["LEGACY_BEST_EFFORT"].number)
+  for _desc in (
+      _FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT.fields[2],
+      _SOURCECODEINFO, _SOURCECODEINFO.fields[0],
+      _SOURCECODEINFO_LOCATION, _SOURCECODEINFO_LOCATION.fields[2],
+      _SOURCECODEINFO_LOCATION.fields[3], _SOURCECODEINFO_LOCATION.fields[4],
+      _GENERATEDCODEINFO, _GENERATEDCODEINFO.fields[0],
+      _GENERATEDCODEINFO_ANNOTATION, _GENERATEDCODEINFO_ANNOTATION.fields[1],
+      _GENERATEDCODEINFO_ANNOTATION.fields[2],
+      _GENERATEDCODEINFO_ANNOTATION.fields[3],
+      _GENERATEDCODEINFO_ANNOTATION.fields[4]):
+    _desc._features = _proto2_features()
+  for _packed_field in (_SOURCECODEINFO_LOCATION.fields[0],
+                        _SOURCECODEINFO_LOCATION.fields[1],
+                        _GENERATEDCODEINFO_ANNOTATION.fields[0]):
+    _packed_field._features = _proto2_features("PACKED")
+  for _enum in (
+      _EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE, _FIELDDESCRIPTORPROTO_TYPE,
+      _FIELDDESCRIPTORPROTO_LABEL, _FILEOPTIONS_OPTIMIZEMODE,
+      _FIELDOPTIONS_CTYPE, _FIELDOPTIONS_JSTYPE, _FIELDOPTIONS_OPTIONRETENTION,
+      _FIELDOPTIONS_OPTIONTARGETTYPE, _METHODOPTIONS_IDEMPOTENCYLEVEL,
+      _FEATURESET_FIELDPRESENCE, _FEATURESET_ENUMTYPE,
+      _FEATURESET_REPEATEDFIELDENCODING, _FEATURESET_UTF8VALIDATION,
+      _FEATURESET_MESSAGEENCODING, _FEATURESET_JSONFORMAT,
+      _GENERATEDCODEINFO_ANNOTATION_SEMANTIC, _EDITION):
+    _enum._features = _proto2_features()
+    for _value in _enum.values:
+      _value._features = _proto2_features()
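+  # These are the feature values that protobuf Editions resolves for a proto2
+  # file (descriptor.proto itself is still proto2): explicit field presence,
+  # closed enums, expanded repeated encoding, no UTF-8 enforcement,
+  # length-prefixed submessages, and legacy best-effort JSON. An illustrative
+  # check only, not emitted by protoc:
+  #   span = _SOURCECODEINFO_LOCATION.fields[1]
+  #   assert span._features.repeated_field_encoding == \
+  #       _FEATURESET_REPEATEDFIELDENCODING.values_by_name["PACKED"].number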
+else:
+  _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.descriptor_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+  _globals['DESCRIPTOR']._loaded_options = None
+  _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\020DescriptorProtosH\001Z-google.golang.org/protobuf/types/descriptorpb\370\001\001\242\002\003GPB\252\002\032Google.Protobuf.Reflection'
+  _globals['_EXTENSIONRANGEOPTIONS'].fields_by_name['declaration']._loaded_options = None
+  _globals['_EXTENSIONRANGEOPTIONS'].fields_by_name['declaration']._serialized_options = b'\210\001\002'
+  _globals['_EXTENSIONRANGEOPTIONS'].fields_by_name['verification']._loaded_options = None
+  _globals['_EXTENSIONRANGEOPTIONS'].fields_by_name['verification']._serialized_options = b'\210\001\002'
+  _globals['_FILEOPTIONS'].fields_by_name['java_generate_equals_and_hash']._loaded_options = None
+  _globals['_FILEOPTIONS'].fields_by_name['java_generate_equals_and_hash']._serialized_options = b'\030\001'
+  _globals['_MESSAGEOPTIONS'].fields_by_name['deprecated_legacy_json_field_conflicts']._loaded_options = None
+  _globals['_MESSAGEOPTIONS'].fields_by_name['deprecated_legacy_json_field_conflicts']._serialized_options = b'\030\001'
+  _globals['_ENUMOPTIONS'].fields_by_name['deprecated_legacy_json_field_conflicts']._loaded_options = None
+  _globals['_ENUMOPTIONS'].fields_by_name['deprecated_legacy_json_field_conflicts']._serialized_options = b'\030\001'
+  _globals['_FEATURESET'].fields_by_name['field_presence']._loaded_options = None
+  _globals['_FEATURESET'].fields_by_name['field_presence']._serialized_options = b'\210\001\001\230\001\004\230\001\001\242\001\r\022\010EXPLICIT\030\204\007\242\001\r\022\010IMPLICIT\030\347\007\242\001\r\022\010EXPLICIT\030\350\007\262\001\003\010\350\007'
+  _globals['_FEATURESET'].fields_by_name['enum_type']._loaded_options = None
+  _globals['_FEATURESET'].fields_by_name['enum_type']._serialized_options = b'\210\001\001\230\001\006\230\001\001\242\001\013\022\006CLOSED\030\204\007\242\001\t\022\004OPEN\030\347\007\262\001\003\010\350\007'
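+  # The _serialized_options blobs in this block are the fields' option
+  # messages in protobuf wire format. Decoding a few by hand (illustrative
+  # reading, not part of the generated file): b'\030\001' is FieldOptions
+  # field 3 (deprecated) = true, b'\020\001' is field 2 (packed) = true, and
+  # b'\210\001\002' is field 17 (retention) = RETENTION_SOURCE.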
b'\210\001\001\230\001\006\230\001\001\242\001\013\022\006CLOSED\030\204\007\242\001\t\022\004OPEN\030\347\007\262\001\003\010\350\007' + _globals['_FEATURESET'].fields_by_name['repeated_field_encoding']._loaded_options = None + _globals['_FEATURESET'].fields_by_name['repeated_field_encoding']._serialized_options = b'\210\001\001\230\001\004\230\001\001\242\001\r\022\010EXPANDED\030\204\007\242\001\013\022\006PACKED\030\347\007\262\001\003\010\350\007' + _globals['_FEATURESET'].fields_by_name['utf8_validation']._loaded_options = None + _globals['_FEATURESET'].fields_by_name['utf8_validation']._serialized_options = b'\210\001\001\230\001\004\230\001\001\242\001\t\022\004NONE\030\204\007\242\001\013\022\006VERIFY\030\347\007\262\001\003\010\350\007' + _globals['_FEATURESET'].fields_by_name['message_encoding']._loaded_options = None + _globals['_FEATURESET'].fields_by_name['message_encoding']._serialized_options = b'\210\001\001\230\001\004\230\001\001\242\001\024\022\017LENGTH_PREFIXED\030\204\007\262\001\003\010\350\007' + _globals['_FEATURESET'].fields_by_name['json_format']._loaded_options = None + _globals['_FEATURESET'].fields_by_name['json_format']._serialized_options = b'\210\001\001\230\001\003\230\001\006\230\001\001\242\001\027\022\022LEGACY_BEST_EFFORT\030\204\007\242\001\n\022\005ALLOW\030\347\007\262\001\003\010\350\007' + _globals['_SOURCECODEINFO_LOCATION'].fields_by_name['path']._loaded_options = None + _globals['_SOURCECODEINFO_LOCATION'].fields_by_name['path']._serialized_options = b'\020\001' + _globals['_SOURCECODEINFO_LOCATION'].fields_by_name['span']._loaded_options = None + _globals['_SOURCECODEINFO_LOCATION'].fields_by_name['span']._serialized_options = b'\020\001' + _globals['_GENERATEDCODEINFO_ANNOTATION'].fields_by_name['path']._loaded_options = None + _globals['_GENERATEDCODEINFO_ANNOTATION'].fields_by_name['path']._serialized_options = b'\020\001' + _globals['_EDITION']._serialized_start=11873 + _globals['_EDITION']._serialized_end=12168 + _globals['_FILEDESCRIPTORSET']._serialized_start=53 + _globals['_FILEDESCRIPTORSET']._serialized_end=144 + _globals['_FILEDESCRIPTORPROTO']._serialized_start=147 + _globals['_FILEDESCRIPTORPROTO']._serialized_end=811 + _globals['_DESCRIPTORPROTO']._serialized_start=814 + _globals['_DESCRIPTORPROTO']._serialized_end=1639 + _globals['_DESCRIPTORPROTO_EXTENSIONRANGE']._serialized_start=1460 + _globals['_DESCRIPTORPROTO_EXTENSIONRANGE']._serialized_end=1582 + _globals['_DESCRIPTORPROTO_RESERVEDRANGE']._serialized_start=1584 + _globals['_DESCRIPTORPROTO_RESERVEDRANGE']._serialized_end=1639 + _globals['_EXTENSIONRANGEOPTIONS']._serialized_start=1642 + _globals['_EXTENSIONRANGEOPTIONS']._serialized_end=2230 + _globals['_EXTENSIONRANGEOPTIONS_DECLARATION']._serialized_start=2017 + _globals['_EXTENSIONRANGEOPTIONS_DECLARATION']._serialized_end=2165 + _globals['_EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE']._serialized_start=2167 + _globals['_EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE']._serialized_end=2219 + _globals['_FIELDDESCRIPTORPROTO']._serialized_start=2233 + _globals['_FIELDDESCRIPTORPROTO']._serialized_end=3066 + _globals['_FIELDDESCRIPTORPROTO_TYPE']._serialized_start=2687 + _globals['_FIELDDESCRIPTORPROTO_TYPE']._serialized_end=2997 + _globals['_FIELDDESCRIPTORPROTO_LABEL']._serialized_start=2999 + _globals['_FIELDDESCRIPTORPROTO_LABEL']._serialized_end=3066 + _globals['_ONEOFDESCRIPTORPROTO']._serialized_start=3068 + _globals['_ONEOFDESCRIPTORPROTO']._serialized_end=3167 + _globals['_ENUMDESCRIPTORPROTO']._serialized_start=3170 + 
_globals['_ENUMDESCRIPTORPROTO']._serialized_end=3525 + _globals['_ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE']._serialized_start=3466 + _globals['_ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE']._serialized_end=3525 + _globals['_ENUMVALUEDESCRIPTORPROTO']._serialized_start=3528 + _globals['_ENUMVALUEDESCRIPTORPROTO']._serialized_end=3659 + _globals['_SERVICEDESCRIPTORPROTO']._serialized_start=3662 + _globals['_SERVICEDESCRIPTORPROTO']._serialized_end=3829 + _globals['_METHODDESCRIPTORPROTO']._serialized_start=3832 + _globals['_METHODDESCRIPTORPROTO']._serialized_end=4097 + _globals['_FILEOPTIONS']._serialized_start=4100 + _globals['_FILEOPTIONS']._serialized_end=5297 + _globals['_FILEOPTIONS_OPTIMIZEMODE']._serialized_start=5194 + _globals['_FILEOPTIONS_OPTIMIZEMODE']._serialized_end=5252 + _globals['_MESSAGEOPTIONS']._serialized_start=5300 + _globals['_MESSAGEOPTIONS']._serialized_end=5800 + _globals['_FIELDOPTIONS']._serialized_start=5803 + _globals['_FIELDOPTIONS']._serialized_end=7496 + _globals['_FIELDOPTIONS_EDITIONDEFAULT']._serialized_start=6640 + _globals['_FIELDOPTIONS_EDITIONDEFAULT']._serialized_end=6730 + _globals['_FIELDOPTIONS_FEATURESUPPORT']._serialized_start=6733 + _globals['_FIELDOPTIONS_FEATURESUPPORT']._serialized_end=7011 + _globals['_FIELDOPTIONS_CTYPE']._serialized_start=7013 + _globals['_FIELDOPTIONS_CTYPE']._serialized_end=7060 + _globals['_FIELDOPTIONS_JSTYPE']._serialized_start=7062 + _globals['_FIELDOPTIONS_JSTYPE']._serialized_end=7115 + _globals['_FIELDOPTIONS_OPTIONRETENTION']._serialized_start=7117 + _globals['_FIELDOPTIONS_OPTIONRETENTION']._serialized_end=7202 + _globals['_FIELDOPTIONS_OPTIONTARGETTYPE']._serialized_start=7205 + _globals['_FIELDOPTIONS_OPTIONTARGETTYPE']._serialized_end=7473 + _globals['_ONEOFOPTIONS']._serialized_start=7499 + _globals['_ONEOFOPTIONS']._serialized_end=7671 + _globals['_ENUMOPTIONS']._serialized_start=7674 + _globals['_ENUMOPTIONS']._serialized_end=8011 + _globals['_ENUMVALUEOPTIONS']._serialized_start=8014 + _globals['_ENUMVALUEOPTIONS']._serialized_end=8358 + _globals['_SERVICEOPTIONS']._serialized_start=8361 + _globals['_SERVICEOPTIONS']._serialized_end=8574 + _globals['_METHODOPTIONS']._serialized_start=8577 + _globals['_METHODOPTIONS']._serialized_end=8986 + _globals['_METHODOPTIONS_IDEMPOTENCYLEVEL']._serialized_start=8895 + _globals['_METHODOPTIONS_IDEMPOTENCYLEVEL']._serialized_end=8975 + _globals['_UNINTERPRETEDOPTION']._serialized_start=8989 + _globals['_UNINTERPRETEDOPTION']._serialized_end=9399 + _globals['_UNINTERPRETEDOPTION_NAMEPART']._serialized_start=9325 + _globals['_UNINTERPRETEDOPTION_NAMEPART']._serialized_end=9399 + _globals['_FEATURESET']._serialized_start=9402 + _globals['_FEATURESET']._serialized_end=10721 + _globals['_FEATURESET_FIELDPRESENCE']._serialized_start=10218 + _globals['_FEATURESET_FIELDPRESENCE']._serialized_end=10310 + _globals['_FEATURESET_ENUMTYPE']._serialized_start=10312 + _globals['_FEATURESET_ENUMTYPE']._serialized_end=10367 + _globals['_FEATURESET_REPEATEDFIELDENCODING']._serialized_start=10369 + _globals['_FEATURESET_REPEATEDFIELDENCODING']._serialized_end=10455 + _globals['_FEATURESET_UTF8VALIDATION']._serialized_start=10457 + _globals['_FEATURESET_UTF8VALIDATION']._serialized_end=10530 + _globals['_FEATURESET_MESSAGEENCODING']._serialized_start=10532 + _globals['_FEATURESET_MESSAGEENCODING']._serialized_end=10615 + _globals['_FEATURESET_JSONFORMAT']._serialized_start=10617 + _globals['_FEATURESET_JSONFORMAT']._serialized_end=10689 + _globals['_FEATURESETDEFAULTS']._serialized_start=10724 
+ _globals['_FEATURESETDEFAULTS']._serialized_end=11219 + _globals['_FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT']._serialized_start=10971 + _globals['_FEATURESETDEFAULTS_FEATURESETEDITIONDEFAULT']._serialized_end=11219 + _globals['_SOURCECODEINFO']._serialized_start=11222 + _globals['_SOURCECODEINFO']._serialized_end=11531 + _globals['_SOURCECODEINFO_LOCATION']._serialized_start=11311 + _globals['_SOURCECODEINFO_LOCATION']._serialized_end=11517 + _globals['_GENERATEDCODEINFO']._serialized_start=11534 + _globals['_GENERATEDCODEINFO']._serialized_end=11870 + _globals['_GENERATEDCODEINFO_ANNOTATION']._serialized_start=11635 + _globals['_GENERATEDCODEINFO_ANNOTATION']._serialized_end=11870 + _globals['_GENERATEDCODEINFO_ANNOTATION_SEMANTIC']._serialized_start=11830 + _globals['_GENERATEDCODEINFO_ANNOTATION_SEMANTIC']._serialized_end=11870 +# @@protoc_insertion_point(module_scope) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/descriptor_pool.py b/.venv/lib/python3.12/site-packages/google/protobuf/descriptor_pool.py new file mode 100644 index 00000000..5545aed9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/descriptor_pool.py @@ -0,0 +1,1355 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Provides DescriptorPool to use as a container for proto2 descriptors. + +The DescriptorPool is used in conjunction with a DescriptorDatabase to maintain +a collection of protocol buffer descriptors for use when dynamically creating +message types at runtime. + +For most applications protocol buffers should be used via modules generated by +the protocol buffer compiler tool. This should only be used when the type of +protocol buffers used in an application or library cannot be predetermined. + +Below is a straightforward example of how to use this class:: + + pool = DescriptorPool() + file_descriptor_protos = [ ... ] + for file_descriptor_proto in file_descriptor_protos: + pool.Add(file_descriptor_proto) + my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType') + +The message descriptor can be used in conjunction with the message_factory +module in order to create a protocol buffer class that can be encoded and +decoded. + +If you want to get a Python class for the specified proto, use the +helper functions inside google.protobuf.message_factory +directly instead of this class. +""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +import collections + import threading + import warnings + +from google.protobuf import descriptor +from google.protobuf import descriptor_database +from google.protobuf import text_encoding +from google.protobuf.internal import python_edition_defaults +from google.protobuf.internal import python_message + +_USE_C_DESCRIPTORS = descriptor._USE_C_DESCRIPTORS # pylint: disable=protected-access + + +def _NormalizeFullyQualifiedName(name): + """Remove leading period from fully-qualified type name. + + Due to b/13860351 in descriptor_database.py, types in the root namespace are + generated with a leading period. This function removes that prefix. + + Args: + name (str): The fully-qualified symbol name. + + Returns: + str: The normalized fully-qualified symbol name.
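+ + For example, a hypothetical input '.some.package.Message' normalizes to 'some.package.Message'.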
+ """ + return name.lstrip('.') + + +def _OptionsOrNone(descriptor_proto): + """Returns the value of the field `options`, or None if it is not set.""" + if descriptor_proto.HasField('options'): + return descriptor_proto.options + else: + return None + + +def _IsMessageSetExtension(field): + return (field.is_extension and + field.containing_type.has_options and + field.containing_type.GetOptions().message_set_wire_format and + field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL) + +_edition_defaults_lock = threading.Lock() + + +class DescriptorPool(object): + """A collection of protobufs dynamically constructed by descriptor protos.""" + + if _USE_C_DESCRIPTORS: + + def __new__(cls, descriptor_db=None): + # pylint: disable=protected-access + return descriptor._message.DescriptorPool(descriptor_db) + + def __init__( + self, descriptor_db=None, use_deprecated_legacy_json_field_conflicts=False + ): + """Initializes a Pool of proto buffs. + + The descriptor_db argument to the constructor is provided to allow + specialized file descriptor proto lookup code to be triggered on demand. An + example would be an implementation which will read and compile a file + specified in a call to FindFileByName() and not require the call to Add() + at all. Results from this database will be cached internally here as well. + + Args: + descriptor_db: A secondary source of file descriptors. + use_deprecated_legacy_json_field_conflicts: Unused, for compatibility with + C++. + """ + + self._internal_db = descriptor_database.DescriptorDatabase() + self._descriptor_db = descriptor_db + self._descriptors = {} + self._enum_descriptors = {} + self._service_descriptors = {} + self._file_descriptors = {} + self._toplevel_extensions = {} + self._top_enum_values = {} + # We store extensions in two two-level mappings: The first key is the + # descriptor of the message being extended, the second key is the extension + # full name or its tag number. + self._extensions_by_name = collections.defaultdict(dict) + self._extensions_by_number = collections.defaultdict(dict) + self._serialized_edition_defaults = ( + python_edition_defaults._PROTOBUF_INTERNAL_PYTHON_EDITION_DEFAULTS + ) + self._edition_defaults = None + self._feature_cache = dict() + + def _CheckConflictRegister(self, desc, desc_name, file_name): + """Check if the descriptor name conflicts with another of the same name. + + Args: + desc: Descriptor of a message, enum, service, extension or enum value. + desc_name (str): the full name of desc. + file_name (str): The file name of descriptor. + """ + for register, descriptor_type in [ + (self._descriptors, descriptor.Descriptor), + (self._enum_descriptors, descriptor.EnumDescriptor), + (self._service_descriptors, descriptor.ServiceDescriptor), + (self._toplevel_extensions, descriptor.FieldDescriptor), + (self._top_enum_values, descriptor.EnumValueDescriptor)]: + if desc_name in register: + old_desc = register[desc_name] + if isinstance(old_desc, descriptor.EnumValueDescriptor): + old_file = old_desc.type.file.name + else: + old_file = old_desc.file.name + + if not isinstance(desc, descriptor_type) or ( + old_file != file_name): + error_msg = ('Conflict register for file "' + file_name + + '": ' + desc_name + + ' is already defined in file "' + + old_file + '". 
Please fix the conflict by adding a ' + 'package name to the proto file, or use a different ' + 'name for the duplicate.') + if isinstance(desc, descriptor.EnumValueDescriptor): + error_msg += ('\nNote: enum values appear as ' + 'siblings of the enum type instead of ' + 'children of it.') + + raise TypeError(error_msg) + + return + + def Add(self, file_desc_proto): + """Adds the FileDescriptorProto and its types to this pool. + + Args: + file_desc_proto (FileDescriptorProto): The file descriptor to add. + """ + + self._internal_db.Add(file_desc_proto) + + def AddSerializedFile(self, serialized_file_desc_proto): + """Adds the FileDescriptorProto and its types to this pool. + + Args: + serialized_file_desc_proto (bytes): A bytes string, serialization of the + :class:`FileDescriptorProto` to add. + + Returns: + FileDescriptor: Descriptor for the added file. + """ + + # pylint: disable=g-import-not-at-top + from google.protobuf import descriptor_pb2 + file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString( + serialized_file_desc_proto) + file_desc = self._ConvertFileProtoToFileDescriptor(file_desc_proto) + file_desc.serialized_pb = serialized_file_desc_proto + return file_desc + + # Never call this method. It is for internal usage only. + def _AddDescriptor(self, desc): + """Adds a Descriptor to the pool, non-recursively. + + If the Descriptor contains nested messages or enums, the caller must + explicitly register them. This method also registers the FileDescriptor + associated with the message. + + Args: + desc: A Descriptor. + """ + if not isinstance(desc, descriptor.Descriptor): + raise TypeError('Expected instance of descriptor.Descriptor.') + + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + + self._descriptors[desc.full_name] = desc + self._AddFileDescriptor(desc.file) + + # Never call this method. It is for internal usage only. + def _AddEnumDescriptor(self, enum_desc): + """Adds an EnumDescriptor to the pool. + + This method also registers the FileDescriptor associated with the enum. + + Args: + enum_desc: An EnumDescriptor. + """ + + if not isinstance(enum_desc, descriptor.EnumDescriptor): + raise TypeError('Expected instance of descriptor.EnumDescriptor.') + + file_name = enum_desc.file.name + self._CheckConflictRegister(enum_desc, enum_desc.full_name, file_name) + self._enum_descriptors[enum_desc.full_name] = enum_desc + + # Top enum values need to be indexed. + # Count the number of dots to see whether the enum is toplevel or nested + # in a message. We cannot use enum_desc.containing_type at this stage. + if enum_desc.file.package: + top_level = (enum_desc.full_name.count('.') + - enum_desc.file.package.count('.') == 1) + else: + top_level = enum_desc.full_name.count('.') == 0 + if top_level: + file_name = enum_desc.file.name + package = enum_desc.file.package + for enum_value in enum_desc.values: + full_name = _NormalizeFullyQualifiedName( + '.'.join((package, enum_value.name))) + self._CheckConflictRegister(enum_value, full_name, file_name) + self._top_enum_values[full_name] = enum_value + self._AddFileDescriptor(enum_desc.file) + + # Never call this method. It is for internal usage only. + def _AddServiceDescriptor(self, service_desc): + """Adds a ServiceDescriptor to the pool. + + Args: + service_desc: A ServiceDescriptor. 
+ """ + + if not isinstance(service_desc, descriptor.ServiceDescriptor): + raise TypeError('Expected instance of descriptor.ServiceDescriptor.') + + self._CheckConflictRegister(service_desc, service_desc.full_name, + service_desc.file.name) + self._service_descriptors[service_desc.full_name] = service_desc + + # Never call this method. It is for internal usage only. + def _AddExtensionDescriptor(self, extension): + """Adds a FieldDescriptor describing an extension to the pool. + + Args: + extension: A FieldDescriptor. + + Raises: + AssertionError: when another extension with the same number extends the + same message. + TypeError: when the specified extension is not a + descriptor.FieldDescriptor. + """ + if not (isinstance(extension, descriptor.FieldDescriptor) and + extension.is_extension): + raise TypeError('Expected an extension descriptor.') + + if extension.extension_scope is None: + self._CheckConflictRegister( + extension, extension.full_name, extension.file.name) + self._toplevel_extensions[extension.full_name] = extension + + try: + existing_desc = self._extensions_by_number[ + extension.containing_type][extension.number] + except KeyError: + pass + else: + if extension is not existing_desc: + raise AssertionError( + 'Extensions "%s" and "%s" both try to extend message type "%s" ' + 'with field number %d.' % + (extension.full_name, existing_desc.full_name, + extension.containing_type.full_name, extension.number)) + + self._extensions_by_number[extension.containing_type][ + extension.number] = extension + self._extensions_by_name[extension.containing_type][ + extension.full_name] = extension + + # Also register MessageSet extensions with the type name. + if _IsMessageSetExtension(extension): + self._extensions_by_name[extension.containing_type][ + extension.message_type.full_name] = extension + + if hasattr(extension.containing_type, '_concrete_class'): + python_message._AttachFieldHelpers( + extension.containing_type._concrete_class, extension) + + # Never call this method. It is for internal usage only. + def _InternalAddFileDescriptor(self, file_desc): + """Adds a FileDescriptor to the pool, non-recursively. + + If the FileDescriptor contains messages or enums, the caller must explicitly + register them. + + Args: + file_desc: A FileDescriptor. + """ + + self._AddFileDescriptor(file_desc) + + def _AddFileDescriptor(self, file_desc): + """Adds a FileDescriptor to the pool, non-recursively. + + If the FileDescriptor contains messages or enums, the caller must explicitly + register them. + + Args: + file_desc: A FileDescriptor. + """ + + if not isinstance(file_desc, descriptor.FileDescriptor): + raise TypeError('Expected instance of descriptor.FileDescriptor.') + self._file_descriptors[file_desc.name] = file_desc + + def FindFileByName(self, file_name): + """Gets a FileDescriptor by file name. + + Args: + file_name (str): The path to the file to get a descriptor for. + + Returns: + FileDescriptor: The descriptor for the named file. + + Raises: + KeyError: if the file cannot be found in the pool. 
+ """ + + try: + return self._file_descriptors[file_name] + except KeyError: + pass + + try: + file_proto = self._internal_db.FindFileByName(file_name) + except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileByName(file_name) + else: + raise error + if not file_proto: + raise KeyError('Cannot find a file named %s' % file_name) + return self._ConvertFileProtoToFileDescriptor(file_proto) + + def FindFileContainingSymbol(self, symbol): + """Gets the FileDescriptor for the file containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: Descriptor for the file that contains the specified + symbol. + + Raises: + KeyError: if the file cannot be found in the pool. + """ + + symbol = _NormalizeFullyQualifiedName(symbol) + try: + return self._InternalFindFileContainingSymbol(symbol) + except KeyError: + pass + + try: + # Try fallback database. Build and find again if possible. + self._FindFileContainingSymbolInDb(symbol) + return self._InternalFindFileContainingSymbol(symbol) + except KeyError: + raise KeyError('Cannot find a file containing %s' % symbol) + + def _InternalFindFileContainingSymbol(self, symbol): + """Gets the already built FileDescriptor containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: Descriptor for the file that contains the specified + symbol. + + Raises: + KeyError: if the file cannot be found in the pool. + """ + try: + return self._descriptors[symbol].file + except KeyError: + pass + + try: + return self._enum_descriptors[symbol].file + except KeyError: + pass + + try: + return self._service_descriptors[symbol].file + except KeyError: + pass + + try: + return self._top_enum_values[symbol].type.file + except KeyError: + pass + + try: + return self._toplevel_extensions[symbol].file + except KeyError: + pass + + # Try fields, enum values and nested extensions inside a message. + top_name, _, sub_name = symbol.rpartition('.') + try: + message = self.FindMessageTypeByName(top_name) + assert (sub_name in message.extensions_by_name or + sub_name in message.fields_by_name or + sub_name in message.enum_values_by_name) + return message.file + except (KeyError, AssertionError): + raise KeyError('Cannot find a file containing %s' % symbol) + + def FindMessageTypeByName(self, full_name): + """Loads the named descriptor from the pool. + + Args: + full_name (str): The full name of the descriptor to load. + + Returns: + Descriptor: The descriptor for the named type. + + Raises: + KeyError: if the message cannot be found in the pool. + """ + + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._descriptors[full_name] + + def FindEnumTypeByName(self, full_name): + """Loads the named enum descriptor from the pool. + + Args: + full_name (str): The full name of the enum descriptor to load. + + Returns: + EnumDescriptor: The enum descriptor for the named type. + + Raises: + KeyError: if the enum cannot be found in the pool. + """ + + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._enum_descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._enum_descriptors[full_name] + + def FindFieldByName(self, full_name): + """Loads the named field descriptor from the pool. + + Args: + full_name (str): The full name of the field descriptor to load. 
+ + Returns: + FieldDescriptor: The field descriptor for the named field. + + Raises: + KeyError: if the field cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + message_name, _, field_name = full_name.rpartition('.') + message_descriptor = self.FindMessageTypeByName(message_name) + return message_descriptor.fields_by_name[field_name] + + def FindOneofByName(self, full_name): + """Loads the named oneof descriptor from the pool. + + Args: + full_name (str): The full name of the oneof descriptor to load. + + Returns: + OneofDescriptor: The oneof descriptor for the named oneof. + + Raises: + KeyError: if the oneof cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + message_name, _, oneof_name = full_name.rpartition('.') + message_descriptor = self.FindMessageTypeByName(message_name) + return message_descriptor.oneofs_by_name[oneof_name] + + def FindExtensionByName(self, full_name): + """Loads the named extension descriptor from the pool. + + Args: + full_name (str): The full name of the extension descriptor to load. + + Returns: + FieldDescriptor: The field descriptor for the named extension. + + Raises: + KeyError: if the extension cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + try: + # The proto compiler does not give any link between the FileDescriptor + # and top-level extensions unless the FileDescriptorProto is added to + # the DescriptorDatabase, but this can impact memory usage. + # So we register these extensions by name explicitly. + return self._toplevel_extensions[full_name] + except KeyError: + pass + message_name, _, extension_name = full_name.rpartition('.') + try: + # Most extensions are nested inside a message. + scope = self.FindMessageTypeByName(message_name) + except KeyError: + # Some extensions are defined at file scope. + scope = self._FindFileContainingSymbolInDb(full_name) + return scope.extensions_by_name[extension_name] + + def FindExtensionByNumber(self, message_descriptor, number): + """Gets the extension of the specified message with the specified number. + + Extensions have to be registered to this pool by calling :func:`Add` or + :func:`AddExtensionDescriptor`. + + Args: + message_descriptor (Descriptor): descriptor of the extended message. + number (int): Number of the extension field. + + Returns: + FieldDescriptor: The descriptor for the extension. + + Raises: + KeyError: when no extension with the given number is known for the + specified message. + """ + try: + return self._extensions_by_number[message_descriptor][number] + except KeyError: + self._TryLoadExtensionFromDB(message_descriptor, number) + return self._extensions_by_number[message_descriptor][number] + + def FindAllExtensions(self, message_descriptor): + """Gets all the known extensions of a given message. + + Extensions have to be registered to this pool by calling + :func:`Add` or :func:`AddExtensionDescriptor`. + + Args: + message_descriptor (Descriptor): Descriptor of the extended message. + + Returns: + list[FieldDescriptor]: Field descriptors describing the extensions. + """ + # Fallback to descriptor db if FindAllExtensionNumbers is provided. 
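+ # (FindAllExtensionNumbers is an optional method on the descriptor + # database, which is why the hasattr() check below guards the call.)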
+ if self._descriptor_db and hasattr( + self._descriptor_db, 'FindAllExtensionNumbers'): + full_name = message_descriptor.full_name + all_numbers = self._descriptor_db.FindAllExtensionNumbers(full_name) + for number in all_numbers: + if number in self._extensions_by_number[message_descriptor]: + continue + self._TryLoadExtensionFromDB(message_descriptor, number) + + return list(self._extensions_by_number[message_descriptor].values()) + + def _TryLoadExtensionFromDB(self, message_descriptor, number): + """Try to load extensions from the descriptor db. + + Args: + message_descriptor: descriptor of the extended message. + number: the extension number that needs to be loaded. + """ + if not self._descriptor_db: + return + # Only supported when FindFileContainingExtension is provided. + if not hasattr( + self._descriptor_db, 'FindFileContainingExtension'): + return + + full_name = message_descriptor.full_name + file_proto = self._descriptor_db.FindFileContainingExtension( + full_name, number) + + if file_proto is None: + return + + try: + self._ConvertFileProtoToFileDescriptor(file_proto) + except Exception: + warn_msg = ('Unable to load proto file %s for extension number %d.' % + (file_proto.name, number)) + warnings.warn(warn_msg, RuntimeWarning) + + def FindServiceByName(self, full_name): + """Loads the named service descriptor from the pool. + + Args: + full_name (str): The full name of the service descriptor to load. + + Returns: + ServiceDescriptor: The service descriptor for the named service. + + Raises: + KeyError: if the service cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._service_descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._service_descriptors[full_name] + + def FindMethodByName(self, full_name): + """Loads the named service method descriptor from the pool. + + Args: + full_name (str): The full name of the method descriptor to load. + + Returns: + MethodDescriptor: The method descriptor for the service method. + + Raises: + KeyError: if the method cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + service_name, _, method_name = full_name.rpartition('.') + service_descriptor = self.FindServiceByName(service_name) + return service_descriptor.methods_by_name[method_name] + + def SetFeatureSetDefaults(self, defaults): + """Sets the default feature mappings used during the build. + + Args: + defaults: a FeatureSetDefaults message containing the new mappings. + """ + if self._edition_defaults is not None: + raise ValueError( + "Feature set defaults can't be changed once the pool has started" + ' building!' + ) + + # pylint: disable=g-import-not-at-top + from google.protobuf import descriptor_pb2 + + if not isinstance(defaults, descriptor_pb2.FeatureSetDefaults): + raise TypeError('SetFeatureSetDefaults called with invalid type') + + + if defaults.minimum_edition > defaults.maximum_edition: + raise ValueError( + 'Invalid edition range %s to %s' + % ( + descriptor_pb2.Edition.Name(defaults.minimum_edition), + descriptor_pb2.Edition.Name(defaults.maximum_edition), + ) + ) + + prev_edition = descriptor_pb2.Edition.EDITION_UNKNOWN + for d in defaults.defaults: + if d.edition == descriptor_pb2.Edition.EDITION_UNKNOWN: + raise ValueError('Invalid edition EDITION_UNKNOWN specified') + if prev_edition >= d.edition: + raise ValueError( + 'Feature set defaults are not strictly increasing. 
%s is greater' + ' than or equal to %s' + % ( + descriptor_pb2.Edition.Name(prev_edition), + descriptor_pb2.Edition.Name(d.edition), + ) + ) + prev_edition = d.edition + self._edition_defaults = defaults + + def _CreateDefaultFeatures(self, edition): + """Creates a FeatureSet message with defaults for a specific edition. + + Args: + edition: the edition to generate defaults for. + + Returns: + A FeatureSet message with defaults for a specific edition. + """ + # pylint: disable=g-import-not-at-top + from google.protobuf import descriptor_pb2 + + with _edition_defaults_lock: + if not self._edition_defaults: + self._edition_defaults = descriptor_pb2.FeatureSetDefaults() + self._edition_defaults.ParseFromString( + self._serialized_edition_defaults + ) + + if edition < self._edition_defaults.minimum_edition: + raise TypeError( + 'Edition %s is earlier than the minimum supported edition %s!' + % ( + descriptor_pb2.Edition.Name(edition), + descriptor_pb2.Edition.Name( + self._edition_defaults.minimum_edition + ), + ) + ) + if edition > self._edition_defaults.maximum_edition: + raise TypeError( + 'Edition %s is later than the maximum supported edition %s!' + % ( + descriptor_pb2.Edition.Name(edition), + descriptor_pb2.Edition.Name( + self._edition_defaults.maximum_edition + ), + ) + ) + found = None + for d in self._edition_defaults.defaults: + if d.edition > edition: + break + found = d + if found is None: + raise TypeError( + 'No valid default found for edition %s!' + % descriptor_pb2.Edition.Name(edition) + ) + + defaults = descriptor_pb2.FeatureSet() + defaults.CopyFrom(found.fixed_features) + defaults.MergeFrom(found.overridable_features) + return defaults + + def _InternFeatures(self, features): + serialized = features.SerializeToString() + with _edition_defaults_lock: + cached = self._feature_cache.get(serialized) + if cached is None: + self._feature_cache[serialized] = features + cached = features + return cached + + def _FindFileContainingSymbolInDb(self, symbol): + """Finds the file in descriptor DB containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: The file that contains the specified symbol. + + Raises: + KeyError: if the file cannot be found in the descriptor database. + """ + try: + file_proto = self._internal_db.FindFileContainingSymbol(symbol) + except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileContainingSymbol(symbol) + else: + raise error + if not file_proto: + raise KeyError('Cannot find a file containing %s' % symbol) + return self._ConvertFileProtoToFileDescriptor(file_proto) + + def _ConvertFileProtoToFileDescriptor(self, file_proto): + """Creates a FileDescriptor from a proto or returns a cached copy. + + This method also has the side effect of loading all the symbols found in + the file into the appropriate dictionaries in the pool. + + Args: + file_proto: The proto to convert. + + Returns: + A FileDescriptor matching the passed in proto. 
+ """ + if file_proto.name not in self._file_descriptors: + built_deps = list(self._GetDeps(file_proto.dependency)) + direct_deps = [self.FindFileByName(n) for n in file_proto.dependency] + public_deps = [direct_deps[i] for i in file_proto.public_dependency] + + # pylint: disable=g-import-not-at-top + from google.protobuf import descriptor_pb2 + + file_descriptor = descriptor.FileDescriptor( + pool=self, + name=file_proto.name, + package=file_proto.package, + syntax=file_proto.syntax, + edition=descriptor_pb2.Edition.Name(file_proto.edition), + options=_OptionsOrNone(file_proto), + serialized_pb=file_proto.SerializeToString(), + dependencies=direct_deps, + public_dependencies=public_deps, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key, + ) + scope = {} + + # This loop extracts all the message and enum types from all the + # dependencies of the file_proto. This is necessary to create the + # scope of available message types when defining the passed in + # file proto. + for dependency in built_deps: + scope.update(self._ExtractSymbols( + dependency.message_types_by_name.values())) + scope.update((_PrefixWithDot(enum.full_name), enum) + for enum in dependency.enum_types_by_name.values()) + + for message_type in file_proto.message_type: + message_desc = self._ConvertMessageDescriptor( + message_type, file_proto.package, file_descriptor, scope, + file_proto.syntax) + file_descriptor.message_types_by_name[message_desc.name] = ( + message_desc) + + for enum_type in file_proto.enum_type: + file_descriptor.enum_types_by_name[enum_type.name] = ( + self._ConvertEnumDescriptor(enum_type, file_proto.package, + file_descriptor, None, scope, True)) + + for index, extension_proto in enumerate(file_proto.extension): + extension_desc = self._MakeFieldDescriptor( + extension_proto, file_proto.package, index, file_descriptor, + is_extension=True) + extension_desc.containing_type = self._GetTypeFromScope( + file_descriptor.package, extension_proto.extendee, scope) + self._SetFieldType(extension_proto, extension_desc, + file_descriptor.package, scope) + file_descriptor.extensions_by_name[extension_desc.name] = ( + extension_desc) + + for desc_proto in file_proto.message_type: + self._SetAllFieldTypes(file_proto.package, desc_proto, scope) + + if file_proto.package: + desc_proto_prefix = _PrefixWithDot(file_proto.package) + else: + desc_proto_prefix = '' + + for desc_proto in file_proto.message_type: + desc = self._GetTypeFromScope( + desc_proto_prefix, desc_proto.name, scope) + file_descriptor.message_types_by_name[desc_proto.name] = desc + + for index, service_proto in enumerate(file_proto.service): + file_descriptor.services_by_name[service_proto.name] = ( + self._MakeServiceDescriptor(service_proto, index, scope, + file_proto.package, file_descriptor)) + + self._file_descriptors[file_proto.name] = file_descriptor + + # Add extensions to the pool + def AddExtensionForNested(message_type): + for nested in message_type.nested_types: + AddExtensionForNested(nested) + for extension in message_type.extensions: + self._AddExtensionDescriptor(extension) + + file_desc = self._file_descriptors[file_proto.name] + for extension in file_desc.extensions_by_name.values(): + self._AddExtensionDescriptor(extension) + for message_type in file_desc.message_types_by_name.values(): + AddExtensionForNested(message_type) + + return file_desc + + def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None, + scope=None, syntax=None): + """Adds the proto to the pool in the 
specified package. + + Args: + desc_proto: The descriptor_pb2.DescriptorProto protobuf message. + package: The package the proto should be located in. + file_desc: The file containing this message. + scope: Dict mapping short and full symbols to message and enum types. + syntax: string indicating syntax of the file ("proto2" or "proto3") + + Returns: + The added descriptor. + """ + + if package: + desc_name = '.'.join((package, desc_proto.name)) + else: + desc_name = desc_proto.name + + if file_desc is None: + file_name = None + else: + file_name = file_desc.name + + if scope is None: + scope = {} + + nested = [ + self._ConvertMessageDescriptor( + nested, desc_name, file_desc, scope, syntax) + for nested in desc_proto.nested_type] + enums = [ + self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, + scope, False) + for enum in desc_proto.enum_type] + fields = [self._MakeFieldDescriptor(field, desc_name, index, file_desc) + for index, field in enumerate(desc_proto.field)] + extensions = [ + self._MakeFieldDescriptor(extension, desc_name, index, file_desc, + is_extension=True) + for index, extension in enumerate(desc_proto.extension)] + oneofs = [ + # pylint: disable=g-complex-comprehension + descriptor.OneofDescriptor( + desc.name, + '.'.join((desc_name, desc.name)), + index, + None, + [], + _OptionsOrNone(desc), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + for index, desc in enumerate(desc_proto.oneof_decl) + ] + extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range] + if extension_ranges: + is_extendable = True + else: + is_extendable = False + desc = descriptor.Descriptor( + name=desc_proto.name, + full_name=desc_name, + filename=file_name, + containing_type=None, + fields=fields, + oneofs=oneofs, + nested_types=nested, + enum_types=enums, + extensions=extensions, + options=_OptionsOrNone(desc_proto), + is_extendable=is_extendable, + extension_ranges=extension_ranges, + file=file_desc, + serialized_start=None, + serialized_end=None, + is_map_entry=desc_proto.options.map_entry, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key, + ) + for nested in desc.nested_types: + nested.containing_type = desc + for enum in desc.enum_types: + enum.containing_type = desc + for field_index, field_desc in enumerate(desc_proto.field): + if field_desc.HasField('oneof_index'): + oneof_index = field_desc.oneof_index + oneofs[oneof_index].fields.append(fields[field_index]) + fields[field_index].containing_oneof = oneofs[oneof_index] + + scope[_PrefixWithDot(desc_name)] = desc + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._descriptors[desc_name] = desc + return desc + + def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None, + containing_type=None, scope=None, top_level=False): + """Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf. + + Args: + enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message. + package: Optional package name for the new EnumDescriptor. + file_desc: The file containing the enum descriptor. + containing_type: The type containing this enum. + scope: Scope containing available types. + top_level: If True, the enum is a top level symbol. If False, the enum + is defined inside a message. 
+ + Returns: + The added descriptor + """ + + if package: + enum_name = '.'.join((package, enum_proto.name)) + else: + enum_name = enum_proto.name + + if file_desc is None: + file_name = None + else: + file_name = file_desc.name + + values = [self._MakeEnumValueDescriptor(value, index) + for index, value in enumerate(enum_proto.value)] + desc = descriptor.EnumDescriptor(name=enum_proto.name, + full_name=enum_name, + filename=file_name, + file=file_desc, + values=values, + containing_type=containing_type, + options=_OptionsOrNone(enum_proto), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + scope['.%s' % enum_name] = desc + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._enum_descriptors[enum_name] = desc + + # Add top level enum values. + if top_level: + for value in values: + full_name = _NormalizeFullyQualifiedName( + '.'.join((package, value.name))) + self._CheckConflictRegister(value, full_name, file_name) + self._top_enum_values[full_name] = value + + return desc + + def _MakeFieldDescriptor(self, field_proto, message_name, index, + file_desc, is_extension=False): + """Creates a field descriptor from a FieldDescriptorProto. + + For message and enum type fields, this method will do a look up + in the pool for the appropriate descriptor for that type. If it + is unavailable, it will fall back to the _source function to + create it. If this type is still unavailable, construction will + fail. + + Args: + field_proto: The proto describing the field. + message_name: The name of the containing message. + index: Index of the field + file_desc: The file containing the field descriptor. + is_extension: Indication that this field is for an extension. + + Returns: + An initialized FieldDescriptor object + """ + + if message_name: + full_name = '.'.join((message_name, field_proto.name)) + else: + full_name = field_proto.name + + if field_proto.json_name: + json_name = field_proto.json_name + else: + json_name = None + + return descriptor.FieldDescriptor( + name=field_proto.name, + full_name=full_name, + index=index, + number=field_proto.number, + type=field_proto.type, + cpp_type=None, + message_type=None, + enum_type=None, + containing_type=None, + label=field_proto.label, + has_default_value=False, + default_value=None, + is_extension=is_extension, + extension_scope=None, + options=_OptionsOrNone(field_proto), + json_name=json_name, + file=file_desc, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _SetAllFieldTypes(self, package, desc_proto, scope): + """Sets all the descriptor's fields's types. + + This method also sets the containing types on any extensions. + + Args: + package: The current package of desc_proto. + desc_proto: The message descriptor to update. + scope: Enclosing scope of available types. 
+ """ + + package = _PrefixWithDot(package) + + main_desc = self._GetTypeFromScope(package, desc_proto.name, scope) + + if package == '.': + nested_package = _PrefixWithDot(desc_proto.name) + else: + nested_package = '.'.join([package, desc_proto.name]) + + for field_proto, field_desc in zip(desc_proto.field, main_desc.fields): + self._SetFieldType(field_proto, field_desc, nested_package, scope) + + for extension_proto, extension_desc in ( + zip(desc_proto.extension, main_desc.extensions)): + extension_desc.containing_type = self._GetTypeFromScope( + nested_package, extension_proto.extendee, scope) + self._SetFieldType(extension_proto, extension_desc, nested_package, scope) + + for nested_type in desc_proto.nested_type: + self._SetAllFieldTypes(nested_package, nested_type, scope) + + def _SetFieldType(self, field_proto, field_desc, package, scope): + """Sets the field's type, cpp_type, message_type and enum_type. + + Args: + field_proto: Data about the field in proto format. + field_desc: The descriptor to modify. + package: The package the field's container is in. + scope: Enclosing scope of available types. + """ + if field_proto.type_name: + desc = self._GetTypeFromScope(package, field_proto.type_name, scope) + else: + desc = None + + if not field_proto.HasField('type'): + if isinstance(desc, descriptor.Descriptor): + field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE + else: + field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM + + field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType( + field_proto.type) + + if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE + or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP): + field_desc.message_type = desc + + if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.enum_type = desc + + if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED: + field_desc.has_default_value = False + field_desc.default_value = [] + elif field_proto.HasField('default_value'): + field_desc.has_default_value = True + if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or + field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): + field_desc.default_value = float(field_proto.default_value) + elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: + field_desc.default_value = field_proto.default_value + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: + field_desc.default_value = field_proto.default_value.lower() == 'true' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.default_value = field_desc.enum_type.values_by_name[ + field_proto.default_value].number + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: + field_desc.default_value = text_encoding.CUnescape( + field_proto.default_value) + elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: + field_desc.default_value = None + else: + # All other types are of the "int" type. 
+ field_desc.default_value = int(field_proto.default_value) + else: + field_desc.has_default_value = False + if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or + field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): + field_desc.default_value = 0.0 + elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: + field_desc.default_value = u'' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: + field_desc.default_value = False + elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.default_value = field_desc.enum_type.values[0].number + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: + field_desc.default_value = b'' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: + field_desc.default_value = None + elif field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP: + field_desc.default_value = None + else: + # All other types are of the "int" type. + field_desc.default_value = 0 + + field_desc.type = field_proto.type + + def _MakeEnumValueDescriptor(self, value_proto, index): + """Creates an enum value descriptor object from an enum value proto. + + Args: + value_proto: The proto describing the enum value. + index: The index of the enum value. + + Returns: + An initialized EnumValueDescriptor object. + """ + + return descriptor.EnumValueDescriptor( + name=value_proto.name, + index=index, + number=value_proto.number, + options=_OptionsOrNone(value_proto), + type=None, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _MakeServiceDescriptor(self, service_proto, service_index, scope, + package, file_desc): + """Make a protobuf ServiceDescriptor given a ServiceDescriptorProto. + + Args: + service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message. + service_index: The index of the service in the File. + scope: Dict mapping short and full symbols to message and enum types. + package: Optional package name for the new ServiceDescriptor. + file_desc: The file containing the service descriptor. + + Returns: + The added descriptor. + """ + + if package: + service_name = '.'.join((package, service_proto.name)) + else: + service_name = service_proto.name + + methods = [self._MakeMethodDescriptor(method_proto, service_name, package, + scope, index) + for index, method_proto in enumerate(service_proto.method)] + desc = descriptor.ServiceDescriptor( + name=service_proto.name, + full_name=service_name, + index=service_index, + methods=methods, + options=_OptionsOrNone(service_proto), + file=file_desc, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._service_descriptors[service_name] = desc + return desc + + def _MakeMethodDescriptor(self, method_proto, service_name, package, scope, + index): + """Creates a method descriptor from a MethodDescriptorProto. + + Args: + method_proto: The proto describing the method. + service_name: The name of the containing service. + package: Optional package name to look up types in. + scope: Scope containing available types. + index: Index of the method in the service. + + Returns: + An initialized MethodDescriptor object. 
+ """ + full_name = '.'.join((service_name, method_proto.name)) + input_type = self._GetTypeFromScope( + package, method_proto.input_type, scope) + output_type = self._GetTypeFromScope( + package, method_proto.output_type, scope) + return descriptor.MethodDescriptor( + name=method_proto.name, + full_name=full_name, + index=index, + containing_service=None, + input_type=input_type, + output_type=output_type, + client_streaming=method_proto.client_streaming, + server_streaming=method_proto.server_streaming, + options=_OptionsOrNone(method_proto), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _ExtractSymbols(self, descriptors): + """Pulls out all the symbols from descriptor protos. + + Args: + descriptors: The messages to extract descriptors from. + Yields: + A two element tuple of the type name and descriptor object. + """ + + for desc in descriptors: + yield (_PrefixWithDot(desc.full_name), desc) + for symbol in self._ExtractSymbols(desc.nested_types): + yield symbol + for enum in desc.enum_types: + yield (_PrefixWithDot(enum.full_name), enum) + + def _GetDeps(self, dependencies, visited=None): + """Recursively finds dependencies for file protos. + + Args: + dependencies: The names of the files being depended on. + visited: The names of files already found. + + Yields: + Each direct and indirect dependency. + """ + + visited = visited or set() + for dependency in dependencies: + if dependency not in visited: + visited.add(dependency) + dep_desc = self.FindFileByName(dependency) + yield dep_desc + public_files = [d.name for d in dep_desc.public_dependencies] + yield from self._GetDeps(public_files, visited) + + def _GetTypeFromScope(self, package, type_name, scope): + """Finds a given type name in the current scope. + + Args: + package: The package the proto should be located in. + type_name: The name of the type to be found in the scope. + scope: Dict mapping short and full symbols to message and enum types. + + Returns: + The descriptor for the requested type. + """ + if type_name not in scope: + components = _PrefixWithDot(package).split('.') + while components: + possible_match = '.'.join(components + [type_name]) + if possible_match in scope: + type_name = possible_match + break + else: + components.pop(-1) + return scope[type_name] + + +def _PrefixWithDot(name): + return name if name.startswith('.') else '.%s' % name + + +if _USE_C_DESCRIPTORS: + # TODO: This pool could be constructed from Python code, when we + # support a flag like 'use_cpp_generated_pool=True'. + # pylint: disable=protected-access + _DEFAULT = descriptor._message.default_pool +else: + _DEFAULT = DescriptorPool() + + +def Default(): + return _DEFAULT diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/duration.py b/.venv/lib/python3.12/site-packages/google/protobuf/duration.py new file mode 100644 index 00000000..21f17bf7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/duration.py @@ -0,0 +1,100 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Contains the Duration helper APIs.""" + +import datetime + +from google.protobuf.duration_pb2 import Duration + + +def from_json_string(value: str) -> Duration: + """Converts a string to Duration. + + Args: + value: A string to be converted. 
The string must end with 's'. Any + fractional digits (or none) are accepted as long as they fit into + nanosecond precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s" + + Raises: + ValueError: On parsing problems. + """ + duration = Duration() + duration.FromJsonString(value) + return duration + + +def from_microseconds(micros: float) -> Duration: + """Converts microseconds to Duration.""" + duration = Duration() + duration.FromMicroseconds(micros) + return duration + + +def from_milliseconds(millis: float) -> Duration: + """Converts milliseconds to Duration.""" + duration = Duration() + duration.FromMilliseconds(millis) + return duration + + +def from_nanoseconds(nanos: float) -> Duration: + """Converts nanoseconds to Duration.""" + duration = Duration() + duration.FromNanoseconds(nanos) + return duration + + +def from_seconds(seconds: float) -> Duration: + """Converts seconds to Duration.""" + duration = Duration() + duration.FromSeconds(seconds) + return duration + + +def from_timedelta(td: datetime.timedelta) -> Duration: + """Converts timedelta to Duration.""" + duration = Duration() + duration.FromTimedelta(td) + return duration + + +def to_json_string(duration: Duration) -> str: + """Converts Duration to string format. + + Returns: + A string converted from the Duration. The string format will contain + 3, 6, or 9 fractional digits depending on the precision required to + represent the exact Duration value. For example: "1s", "1.010s", + "1.000000100s", "-3.100s" + """ + return duration.ToJsonString() + + +def to_microseconds(duration: Duration) -> int: + """Converts a Duration to microseconds.""" + return duration.ToMicroseconds() + + +def to_milliseconds(duration: Duration) -> int: + """Converts a Duration to milliseconds.""" + return duration.ToMilliseconds() + + +def to_nanoseconds(duration: Duration) -> int: + """Converts a Duration to nanoseconds.""" + return duration.ToNanoseconds() + + +def to_seconds(duration: Duration) -> int: + """Converts a Duration to seconds.""" + return duration.ToSeconds() + + +def to_timedelta(duration: Duration) -> datetime.timedelta: + """Converts Duration to timedelta.""" + return duration.ToTimedelta() diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/duration_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/duration_pb2.py new file mode 100644 index 00000000..e767a14c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/duration_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: google/protobuf/duration.proto +# Protobuf Python Version: 5.29.4 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 4, + '', + 'google/protobuf/duration.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n\x08\x44uration\x12\x18\n\x07seconds\x18\x01 \x01(\x03R\x07seconds\x12\x14\n\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n\x13\x63om.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.duration_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\rDurationProtoP\001Z1google.golang.org/protobuf/types/known/durationpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _globals['_DURATION']._serialized_start=51 + _globals['_DURATION']._serialized_end=109 +# @@protoc_insertion_point(module_scope) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/empty_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/empty_pb2.py new file mode 100644 index 00000000..41626d54 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/empty_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: google/protobuf/empty.proto +# Protobuf Python Version: 5.29.4 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 4, + '', + 'google/protobuf/empty.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05\x45mptyB}\n\x13\x63om.google.protobufB\nEmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.empty_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\nEmptyProtoP\001Z.google.golang.org/protobuf/types/known/emptypb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _globals['_EMPTY']._serialized_start=48 + _globals['_EMPTY']._serialized_end=55 +# @@protoc_insertion_point(module_scope) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/field_mask_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/field_mask_pb2.py new file mode 100644 index 00000000..122ba518 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/field_mask_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
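+# (Illustrative use of the empty_pb2 module generated above: Empty is the
+# conventional "no payload" request/response message and serializes to zero
+# bytes:
+#
+#   from google.protobuf import empty_pb2
+#   assert empty_pb2.Empty().SerializeToString() == b''
+# )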
+# NO CHECKED-IN PROTOBUF GENCODE +# source: google/protobuf/field_mask.proto +# Protobuf Python Version: 5.29.4 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 4, + '', + 'google/protobuf/field_mask.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"!\n\tFieldMask\x12\x14\n\x05paths\x18\x01 \x03(\tR\x05pathsB\x85\x01\n\x13\x63om.google.protobufB\x0e\x46ieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.field_mask_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\016FieldMaskProtoP\001Z2google.golang.org/protobuf/types/known/fieldmaskpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _globals['_FIELDMASK']._serialized_start=53 + _globals['_FIELDMASK']._serialized_end=86 +# @@protoc_insertion_point(module_scope) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/__init__.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/__init__.py new file mode 100644 index 00000000..e676e288 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/__init__.py @@ -0,0 +1,7 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/_parameterized.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/_parameterized.py new file mode 100644 index 00000000..4cb2cb1f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/_parameterized.py @@ -0,0 +1,420 @@ +#! /usr/bin/env python +# +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Adds support for parameterized tests to Python's unittest TestCase class. + +A parameterized test is a method in a test case that is invoked with different +argument tuples. + +A simple example: + + class AdditionExample(_parameterized.TestCase): + @_parameterized.parameters( + (1, 2, 3), + (4, 5, 9), + (1, 1, 3)) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + + +Each invocation is a separate test case and properly isolated just +like a normal test method, with its own setUp/tearDown cycle. 
In the
+example above, there are three separate testcases, one of which will
+fail due to an assertion error (1 + 1 != 3).
+
+Parameters for individual test cases can be tuples (with positional parameters)
+or dictionaries (with named parameters):
+
+  class AdditionExample(_parameterized.TestCase):
+    @_parameterized.parameters(
+       {'op1': 1, 'op2': 2, 'result': 3},
+       {'op1': 4, 'op2': 5, 'result': 9},
+    )
+    def testAddition(self, op1, op2, result):
+      self.assertEqual(result, op1 + op2)
+
+If a parameterized test fails, the error message will show the
+original test name (which is modified internally) and the arguments
+for the specific invocation, which are part of the string returned by
+the shortDescription() method on test cases.
+
+The id method of the test, used internally by the unittest framework,
+is also modified to show the arguments. To make sure that test names
+stay the same across several invocations, object representations like
+
+  >>> class Foo(object):
+  ...  pass
+  >>> repr(Foo())
+  '<__main__.Foo object at 0x23d8610>'
+
+are turned into '<__main__.Foo>'. For even more descriptive names,
+especially in test logs, you can use the named_parameters decorator. In
+this case, only tuples are supported, and the first parameter has to
+be a string (or an object that returns an apt name when converted via
+str()):
+
+  class NamedExample(_parameterized.TestCase):
+    @_parameterized.named_parameters(
+       ('Normal', 'aa', 'aaa', True),
+       ('EmptyPrefix', '', 'abc', True),
+       ('BothEmpty', '', '', True))
+    def testStartsWith(self, prefix, string, result):
+      self.assertEqual(result, string.startswith(prefix))
+
+Named tests also have the benefit that they can be run individually
+from the command line:
+
+  $ testmodule.py NamedExample.testStartsWithNormal
+  .
+  --------------------------------------------------------------------
+  Ran 1 test in 0.000s
+
+  OK
+
+Parameterized Classes
+=====================
+If invocation arguments are shared across test methods in a single
+TestCase class, instead of decorating all test methods
+individually, the class itself can be decorated:
+
+  @_parameterized.parameters(
+    (1, 2, 3),
+    (4, 5, 9))
+  class ArithmeticTest(_parameterized.TestCase):
+    def testAdd(self, arg1, arg2, result):
+      self.assertEqual(arg1 + arg2, result)
+
+    def testSubtract(self, arg1, arg2, result):
+      self.assertEqual(result - arg1, arg2)
+
+Inputs from Iterables
+=====================
+If parameters should be shared across several test cases, or are dynamically
+created from other sources, a single non-tuple iterable can be passed into
+the decorator.
This iterable will be used to obtain the test cases:
+
+  class AdditionExample(_parameterized.TestCase):
+    @_parameterized.parameters(
+      (c.op1, c.op2, c.result) for c in testcases
+    )
+    def testAddition(self, op1, op2, result):
+      self.assertEqual(result, op1 + op2)
+
+
+Single-Argument Test Methods
+============================
+If a test method takes only one argument, the single argument does not need to
+be wrapped into a tuple:
+
+  class NegativeNumberExample(_parameterized.TestCase):
+    @_parameterized.parameters(
+       -1, -3, -4, -5
+    )
+    def testIsNegative(self, arg):
+      self.assertTrue(IsNegative(arg))
+"""
+
+__author__ = 'tmarek@google.com (Torsten Marek)'
+
+import functools
+import re
+import types
+import unittest
+import uuid
+
+try:
+  # Since python 3
+  import collections.abc as collections_abc
+except ImportError:
+  # The collections ABC aliases were removed in Python 3.10.
+  import collections as collections_abc
+
+ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>')
+_SEPARATOR = uuid.uuid1().hex
+_FIRST_ARG = object()
+_ARGUMENT_REPR = object()
+
+
+def _CleanRepr(obj):
+  return ADDR_RE.sub(r'<\1>', repr(obj))
+
+
+# Helper function formerly from the unittest module, removed from it in
+# Python 2.7.
+def _StrClass(cls):
+  return '%s.%s' % (cls.__module__, cls.__name__)
+
+
+def _NonStringIterable(obj):
+  return (isinstance(obj, collections_abc.Iterable) and
+          not isinstance(obj, str))
+
+
+def _FormatParameterList(testcase_params):
+  if isinstance(testcase_params, collections_abc.Mapping):
+    return ', '.join('%s=%s' % (argname, _CleanRepr(value))
+                     for argname, value in testcase_params.items())
+  elif _NonStringIterable(testcase_params):
+    return ', '.join(map(_CleanRepr, testcase_params))
+  else:
+    return _FormatParameterList((testcase_params,))
+
+
+class _ParameterizedTestIter(object):
+  """Callable and iterable class for producing new test cases."""
+
+  def __init__(self, test_method, testcases, naming_type):
+    """Returns concrete test functions for a test and a list of parameters.
+
+    The naming_type is used to determine the name of the concrete
+    functions as reported by the unittest framework. If naming_type is
+    _FIRST_ARG, the testcases must be tuples, and the first element must
+    have a string representation that is a valid Python identifier.
+
+    Args:
+      test_method: The decorated test method.
+      testcases: (list of tuple/dict) A list of parameter
+                 tuples/dicts for individual test invocations.
+      naming_type: The test naming type, either _FIRST_ARG or _ARGUMENT_REPR.
+    """
+    self._test_method = test_method
+    self.testcases = testcases
+    self._naming_type = naming_type
+
+  def __call__(self, *args, **kwargs):
+    raise RuntimeError('You appear to be running a parameterized test case '
+                       'without having inherited from parameterized.'
+                       'TestCase. This is bad because none of '
+                       'your test cases are actually being run.')
+
+  def __iter__(self):
+    test_method = self._test_method
+    naming_type = self._naming_type
+
+    def MakeBoundParamTest(testcase_params):
+      @functools.wraps(test_method)
+      def BoundParamTest(self):
+        if isinstance(testcase_params, collections_abc.Mapping):
+          test_method(self, **testcase_params)
+        elif _NonStringIterable(testcase_params):
+          test_method(self, *testcase_params)
+        else:
+          test_method(self, testcase_params)
+
+      if naming_type is _FIRST_ARG:
+        # Signal the metaclass that the name of the test function is unique
+        # and descriptive.
+ BoundParamTest.__x_use_name__ = True + BoundParamTest.__name__ += str(testcase_params[0]) + testcase_params = testcase_params[1:] + elif naming_type is _ARGUMENT_REPR: + # __x_extra_id__ is used to pass naming information to the __new__ + # method of TestGeneratorMetaclass. + # The metaclass will make sure to create a unique, but nondescriptive + # name for this test. + BoundParamTest.__x_extra_id__ = '(%s)' % ( + _FormatParameterList(testcase_params),) + else: + raise RuntimeError('%s is not a valid naming type.' % (naming_type,)) + + BoundParamTest.__doc__ = '%s(%s)' % ( + BoundParamTest.__name__, _FormatParameterList(testcase_params)) + if test_method.__doc__: + BoundParamTest.__doc__ += '\n%s' % (test_method.__doc__,) + return BoundParamTest + return (MakeBoundParamTest(c) for c in self.testcases) + + +def _IsSingletonList(testcases): + """True iff testcases contains only a single non-tuple element.""" + return len(testcases) == 1 and not isinstance(testcases[0], tuple) + + +def _ModifyClass(class_object, testcases, naming_type): + assert not getattr(class_object, '_id_suffix', None), ( + 'Cannot add parameters to %s,' + ' which already has parameterized methods.' % (class_object,)) + class_object._id_suffix = id_suffix = {} + # We change the size of __dict__ while we iterate over it, + # which Python 3.x will complain about, so use copy(). + for name, obj in class_object.__dict__.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) + and isinstance(obj, types.FunctionType)): + delattr(class_object, name) + methods = {} + _UpdateClassDictForParamTestCase( + methods, id_suffix, name, + _ParameterizedTestIter(obj, testcases, naming_type)) + for name, meth in methods.items(): + setattr(class_object, name, meth) + + +def _ParameterDecorator(naming_type, testcases): + """Implementation of the parameterization decorators. + + Args: + naming_type: The naming type. + testcases: Testcase parameters. + + Returns: + A function for modifying the decorated object. + """ + def _Apply(obj): + if isinstance(obj, type): + _ModifyClass( + obj, + list(testcases) if not isinstance(testcases, collections_abc.Sequence) + else testcases, + naming_type) + return obj + else: + return _ParameterizedTestIter(obj, testcases, naming_type) + + if _IsSingletonList(testcases): + assert _NonStringIterable(testcases[0]), ( + 'Single parameter argument must be a non-string iterable') + testcases = testcases[0] + + return _Apply + + +def parameters(*testcases): # pylint: disable=invalid-name + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. + Args: + *testcases: Parameters for the decorated method, either a single + iterable, or a list of tuples/dicts/objects (for tests + with only one argument). + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _ParameterDecorator(_ARGUMENT_REPR, testcases) + + +def named_parameters(*testcases): # pylint: disable=invalid-name + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. The first element of + each parameter tuple should be a string and will be appended to the + name of the test method. + + Args: + *testcases: Parameters for the decorated method, either a single + iterable, or a list of tuples. + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _ParameterDecorator(_FIRST_ARG, testcases) + + +class TestGeneratorMetaclass(type): + """Metaclass for test cases with test generators. 
+ + A test generator is an iterable in a testcase that produces callables. These + callables must be single-argument methods. These methods are injected into + the class namespace and the original iterable is removed. If the name of the + iterable conforms to the test pattern, the injected methods will be picked + up as tests by the unittest framework. + + In general, it is supposed to be used in conjunction with the + parameters decorator. + """ + + def __new__(mcs, class_name, bases, dct): + dct['_id_suffix'] = id_suffix = {} + for name, obj in dct.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) and + _NonStringIterable(obj)): + iterator = iter(obj) + dct.pop(name) + _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator) + + return type.__new__(mcs, class_name, bases, dct) + + +def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator): + """Adds individual test cases to a dictionary. + + Args: + dct: The target dictionary. + id_suffix: The dictionary for mapping names to test IDs. + name: The original name of the test case. + iterator: The iterator generating the individual test cases. + """ + for idx, func in enumerate(iterator): + assert callable(func), 'Test generators must yield callables, got %r' % ( + func,) + if getattr(func, '__x_use_name__', False): + new_name = func.__name__ + else: + new_name = '%s%s%d' % (name, _SEPARATOR, idx) + assert new_name not in dct, ( + 'Name of parameterized test case "%s" not unique' % (new_name,)) + dct[new_name] = func + id_suffix[new_name] = getattr(func, '__x_extra_id__', '') + + +class TestCase(unittest.TestCase, metaclass=TestGeneratorMetaclass): + """Base class for test cases using the parameters decorator.""" + + def _OriginalName(self): + return self._testMethodName.split(_SEPARATOR)[0] + + def __str__(self): + return '%s (%s)' % (self._OriginalName(), _StrClass(self.__class__)) + + def id(self): # pylint: disable=invalid-name + """Returns the descriptive ID of the test. + + This is used internally by the unittesting framework to get a name + for the test to be used in reports. + + Returns: + The test id. + """ + return '%s.%s%s' % (_StrClass(self.__class__), + self._OriginalName(), + self._id_suffix.get(self._testMethodName, '')) + + +def CoopTestCase(other_base_class): + """Returns a new base class with a cooperative metaclass base. + + This enables the TestCase to be used in combination + with other base classes that have custom metaclasses, such as + mox.MoxTestBase. + + Only works with metaclasses that do not override type.__new__. + + Example: + + import google3 + import mox + + from google.protobuf.internal import _parameterized + + class ExampleTest(parameterized.CoopTestCase(mox.MoxTestBase)): + ... + + Args: + other_base_class: (class) A test case base class. + + Returns: + A new class object. + """ + metaclass = type( + 'CoopMetaclass', + (other_base_class.__metaclass__, + TestGeneratorMetaclass), {}) + return metaclass( + 'CoopTestCase', + (other_base_class, TestCase), {}) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/api_implementation.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/api_implementation.py new file mode 100644 index 00000000..b40446b4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/api_implementation.py @@ -0,0 +1,142 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
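+# (A runnable sketch of the _parameterized API defined in the previous file;
+# the test class and values are illustrative:
+#
+#   import unittest
+#   from google.protobuf.internal import _parameterized
+#
+#   class SquareTest(_parameterized.TestCase):
+#
+#     @_parameterized.parameters((2, 4), (3, 9), (-3, 9))
+#     def testSquare(self, n, expected):
+#       self.assertEqual(n * n, expected)
+#
+#   if __name__ == '__main__':
+#     unittest.main()
+# )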
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Determine which implementation of the protobuf API is used in this process.
+"""
+
+import importlib
+import os
+import sys
+import warnings
+
+_GOOGLE3_PYTHON_UPB_DEFAULT = True
+
+
+def _ApiVersionToImplementationType(api_version):
+  if api_version == 2:
+    return 'cpp'
+  if api_version == 1:
+    raise ValueError('api_version=1 is no longer supported.')
+  if api_version == 0:
+    return 'python'
+  return None
+
+
+_implementation_type = None
+try:
+  # pylint: disable=g-import-not-at-top
+  from google.protobuf.internal import _api_implementation
+  # The compile-time constants in the _api_implementation module can be used to
+  # switch to a certain implementation of the Python API at build time.
+  _implementation_type = _ApiVersionToImplementationType(
+      _api_implementation.api_version)
+except ImportError:
+  pass  # Unspecified by compiler flags.
+
+
+def _CanImport(mod_name):
+  try:
+    mod = importlib.import_module(mod_name)
+    # Work around a known issue in the classic bootstrap .par import hook.
+    if not mod:
+      raise ImportError(mod_name + ' import succeeded but was None')
+    return True
+  except ImportError:
+    return False
+
+
+if _implementation_type is None:
+  if _CanImport('google._upb._message'):
+    _implementation_type = 'upb'
+  elif _CanImport('google.protobuf.pyext._message'):
+    _implementation_type = 'cpp'
+  else:
+    _implementation_type = 'python'
+
+
+# This environment variable can be used to switch to a certain implementation
+# of the Python API, overriding the compile-time constants in the
+# _api_implementation module. Right now only 'python', 'cpp' and 'upb' are
+# valid values. Any other value raises an error.
+_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION',
+                                 _implementation_type)
+
+if _implementation_type not in ('python', 'cpp', 'upb'):
+  raise ValueError('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION {0} is not '
+                   'supported. Please set to \'python\', \'cpp\' or '
+                   '\'upb\'.'.format(_implementation_type))
+
+if 'PyPy' in sys.version and _implementation_type == 'cpp':
+  warnings.warn('PyPy does not work yet with cpp protocol buffers. '
+                'Falling back to the python implementation.')
+  _implementation_type = 'python'
+
+_c_module = None
+
+if _implementation_type == 'cpp':
+  try:
+    # pylint: disable=g-import-not-at-top
+    from google.protobuf.pyext import _message
+    sys.modules['google3.net.proto2.python.internal.cpp._message'] = _message
+    _c_module = _message
+    del _message
+  except ImportError:
+    # TODO: fall back to python
+    warnings.warn(
+        'Selected implementation cpp is not available.')
+    pass
+
+if _implementation_type == 'upb':
+  try:
+    # pylint: disable=g-import-not-at-top
+    from google._upb import _message
+    _c_module = _message
+    del _message
+  except ImportError:
+    warnings.warn('Selected implementation upb is not available. '
+                  'Falling back to the python implementation.')
+    _implementation_type = 'python'
+    pass
+
+# Detect if serialization should be deterministic by default
+try:
+  # The presence of this module in a build allows the proto implementation to
+  # be upgraded merely via build deps.
+  #
+  # NOTE: Merely importing this automatically enables deterministic proto
+  # serialization for C++ code, but we still need to export it as a boolean so
+  # that we can do the same for `_implementation_type == 'python'`.
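+  #
+  # (For illustration: the implementation choice made above can be inspected
+  # at runtime via Type(); the environment variable must be set before the
+  # first protobuf import:
+  #
+  #   import os
+  #   os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'
+  #   from google.protobuf.internal import api_implementation
+  #   assert api_implementation.Type() == 'python'
+  # )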
+ # + # NOTE2: It is possible for C++ code to enable deterministic serialization by + # default _without_ affecting Python code, if the C++ implementation is not in + # use by this module. That is intended behavior, so we don't actually expose + # this boolean outside of this module. + # + # pylint: disable=g-import-not-at-top,unused-import + from google.protobuf import enable_deterministic_proto_serialization + _python_deterministic_proto_serialization = True +except ImportError: + _python_deterministic_proto_serialization = False + + +# Usage of this function is discouraged. Clients shouldn't care which +# implementation of the API is in use. Note that there is no guarantee +# that differences between APIs will be maintained. +# Please don't use this function if possible. +def Type(): + return _implementation_type + + +# See comment on 'Type' above. +# TODO: Remove the API, it returns a constant. b/228102101 +def Version(): + return 2 + + +# For internal use only +def IsPythonDefaultSerializationDeterministic(): + return _python_deterministic_proto_serialization diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/builder.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/builder.py new file mode 100644 index 00000000..4c0f2873 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/builder.py @@ -0,0 +1,117 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Builds descriptors, message classes and services for generated _pb2.py. + +This file is only called in python generated _pb2.py files. It builds +descriptors, message classes and services that users can directly use +in generated code. +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + +from google.protobuf.internal import enum_type_wrapper +from google.protobuf.internal import python_message +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +_sym_db = _symbol_database.Default() + + +def BuildMessageAndEnumDescriptors(file_des, module): + """Builds message and enum descriptors. + + Args: + file_des: FileDescriptor of the .proto file + module: Generated _pb2 module + """ + + def BuildNestedDescriptors(msg_des, prefix): + for (name, nested_msg) in msg_des.nested_types_by_name.items(): + module_name = prefix + name.upper() + module[module_name] = nested_msg + BuildNestedDescriptors(nested_msg, module_name + '_') + for enum_des in msg_des.enum_types: + module[prefix + enum_des.name.upper()] = enum_des + + for (name, msg_des) in file_des.message_types_by_name.items(): + module_name = '_' + name.upper() + module[module_name] = msg_des + BuildNestedDescriptors(msg_des, module_name + '_') + + +def BuildTopDescriptorsAndMessages(file_des, module_name, module): + """Builds top level descriptors and message classes. 
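+
+  For example (illustrative file contents): a .proto file with a single
+  top-level message Foo leaves module['Foo'] holding the generated message
+  class, while its descriptor is exposed separately as module['_FOO'] by
+  BuildMessageAndEnumDescriptors above; the duration_pb2 module earlier in
+  this tree shows both names in use.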
+
+  Args:
+    file_des: FileDescriptor of the .proto file
+    module_name: str, the name of generated _pb2 module
+    module: Generated _pb2 module
+  """
+
+  def BuildMessage(msg_des):
+    create_dict = {}
+    for (name, nested_msg) in msg_des.nested_types_by_name.items():
+      create_dict[name] = BuildMessage(nested_msg)
+    create_dict['DESCRIPTOR'] = msg_des
+    create_dict['__module__'] = module_name
+    message_class = _reflection.GeneratedProtocolMessageType(
+        msg_des.name, (_message.Message,), create_dict)
+    _sym_db.RegisterMessage(message_class)
+    return message_class
+
+  # top level enums
+  for (name, enum_des) in file_des.enum_types_by_name.items():
+    module['_' + name.upper()] = enum_des
+    module[name] = enum_type_wrapper.EnumTypeWrapper(enum_des)
+    for enum_value in enum_des.values:
+      module[enum_value.name] = enum_value.number
+
+  # top level extensions
+  for (name, extension_des) in file_des.extensions_by_name.items():
+    module[name.upper() + '_FIELD_NUMBER'] = extension_des.number
+    module[name] = extension_des
+
+  # services
+  for (name, service) in file_des.services_by_name.items():
+    module['_' + name.upper()] = service
+
+  # Build messages.
+  for (name, msg_des) in file_des.message_types_by_name.items():
+    module[name] = BuildMessage(msg_des)
+
+
+def AddHelpersToExtensions(file_des):
+  """No-op kept so that old generated code keeps working with the new runtime.
+
+  Args:
+    file_des: FileDescriptor of the .proto file
+  """
+  # TODO: Remove this no-op
+  return
+
+
+def BuildServices(file_des, module_name, module):
+  """Builds services classes and services stub class.
+
+  Args:
+    file_des: FileDescriptor of the .proto file
+    module_name: str, the name of generated _pb2 module
+    module: Generated _pb2 module
+  """
+  # pylint: disable=g-import-not-at-top
+  from google.protobuf import service_reflection
+  # pylint: enable=g-import-not-at-top
+  for (name, service) in file_des.services_by_name.items():
+    module[name] = service_reflection.GeneratedServiceType(
+        name, (),
+        dict(DESCRIPTOR=service, __module__=module_name))
+    stub_name = name + '_Stub'
+    module[stub_name] = service_reflection.GeneratedServiceStubType(
+        stub_name, (module[name],),
+        dict(DESCRIPTOR=service, __module__=module_name))
diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/containers.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/containers.py
new file mode 100644
index 00000000..23357816
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/containers.py
@@ -0,0 +1,677 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Contains container classes to represent different protocol buffer types.
+
+This file defines container classes which represent categories of protocol
+buffer field types which need extra maintenance. Currently these categories
+are:
+
+- Repeated scalar fields - These are all repeated fields which aren't
+  composite (e.g. they are of simple types like int32, string, etc).
+- Repeated composite fields - Repeated fields which are composite. This
+  includes groups and nested messages.
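+- Map fields - ScalarMap and MessageMap (defined below) play the analogous
+  role for map<key, value> fields, holding scalar and message values
+  respectively.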
+""" + +import collections.abc +import copy +import pickle +from typing import ( + Any, + Iterable, + Iterator, + List, + MutableMapping, + MutableSequence, + NoReturn, + Optional, + Sequence, + TypeVar, + Union, + overload, +) + + +_T = TypeVar('_T') +_K = TypeVar('_K') +_V = TypeVar('_V') + + +class BaseContainer(Sequence[_T]): + """Base container class.""" + + # Minimizes memory usage and disallows assignment to other attributes. + __slots__ = ['_message_listener', '_values'] + + def __init__(self, message_listener: Any) -> None: + """ + Args: + message_listener: A MessageListener implementation. + The RepeatedScalarFieldContainer will call this object's + Modified() method when it is modified. + """ + self._message_listener = message_listener + self._values = [] + + @overload + def __getitem__(self, key: int) -> _T: + ... + + @overload + def __getitem__(self, key: slice) -> List[_T]: + ... + + def __getitem__(self, key): + """Retrieves item by the specified key.""" + return self._values[key] + + def __len__(self) -> int: + """Returns the number of elements in the container.""" + return len(self._values) + + def __ne__(self, other: Any) -> bool: + """Checks if another instance isn't equal to this one.""" + # The concrete classes should define __eq__. + return not self == other + + __hash__ = None + + def __repr__(self) -> str: + return repr(self._values) + + def sort(self, *args, **kwargs) -> None: + # Continue to support the old sort_function keyword argument. + # This is expected to be a rare occurrence, so use LBYL to avoid + # the overhead of actually catching KeyError. + if 'sort_function' in kwargs: + kwargs['cmp'] = kwargs.pop('sort_function') + self._values.sort(*args, **kwargs) + + def reverse(self) -> None: + self._values.reverse() + + +# TODO: Remove this. BaseContainer does *not* conform to +# MutableSequence, only its subclasses do. +collections.abc.MutableSequence.register(BaseContainer) + + +class RepeatedScalarFieldContainer(BaseContainer[_T], MutableSequence[_T]): + """Simple, type-checked, list-like container for holding repeated scalars.""" + + # Disallows assignment to other attributes. + __slots__ = ['_type_checker'] + + def __init__( + self, + message_listener: Any, + type_checker: Any, + ) -> None: + """Args: + + message_listener: A MessageListener implementation. The + RepeatedScalarFieldContainer will call this object's Modified() method + when it is modified. + type_checker: A type_checkers.ValueChecker instance to run on elements + inserted into this container. + """ + super().__init__(message_listener) + self._type_checker = type_checker + + def append(self, value: _T) -> None: + """Appends an item to the list. Similar to list.append().""" + self._values.append(self._type_checker.CheckValue(value)) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def insert(self, key: int, value: _T) -> None: + """Inserts the item at the specified position. Similar to list.insert().""" + self._values.insert(key, self._type_checker.CheckValue(value)) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def extend(self, elem_seq: Iterable[_T]) -> None: + """Extends by appending the given iterable. 
Similar to list.extend().""" + elem_seq_iter = iter(elem_seq) + new_values = [self._type_checker.CheckValue(elem) for elem in elem_seq_iter] + if new_values: + self._values.extend(new_values) + self._message_listener.Modified() + + def MergeFrom( + self, + other: Union['RepeatedScalarFieldContainer[_T]', Iterable[_T]], + ) -> None: + """Appends the contents of another repeated field of the same type to this + one. We do not check the types of the individual fields. + """ + self._values.extend(other) + self._message_listener.Modified() + + def remove(self, elem: _T): + """Removes an item from the list. Similar to list.remove().""" + self._values.remove(elem) + self._message_listener.Modified() + + def pop(self, key: Optional[int] = -1) -> _T: + """Removes and returns an item at a given index. Similar to list.pop().""" + value = self._values[key] + self.__delitem__(key) + return value + + @overload + def __setitem__(self, key: int, value: _T) -> None: + ... + + @overload + def __setitem__(self, key: slice, value: Iterable[_T]) -> None: + ... + + def __setitem__(self, key, value) -> None: + """Sets the item on the specified position.""" + if isinstance(key, slice): + if key.step is not None: + raise ValueError('Extended slices not supported') + self._values[key] = map(self._type_checker.CheckValue, value) + self._message_listener.Modified() + else: + self._values[key] = self._type_checker.CheckValue(value) + self._message_listener.Modified() + + def __delitem__(self, key: Union[int, slice]) -> None: + """Deletes the item at the specified position.""" + del self._values[key] + self._message_listener.Modified() + + def __eq__(self, other: Any) -> bool: + """Compares the current instance with another one.""" + if self is other: + return True + # Special case for the same type which should be common and fast. + if isinstance(other, self.__class__): + return other._values == self._values + # We are presumably comparing against some other sequence type. + return other == self._values + + def __deepcopy__( + self, + unused_memo: Any = None, + ) -> 'RepeatedScalarFieldContainer[_T]': + clone = RepeatedScalarFieldContainer( + copy.deepcopy(self._message_listener), self._type_checker) + clone.MergeFrom(self) + return clone + + def __reduce__(self, **kwargs) -> NoReturn: + raise pickle.PickleError( + "Can't pickle repeated scalar fields, convert to list first") + + +# TODO: Constrain T to be a subtype of Message. +class RepeatedCompositeFieldContainer(BaseContainer[_T], MutableSequence[_T]): + """Simple, list-like container for holding repeated composite fields.""" + + # Disallows assignment to other attributes. + __slots__ = ['_message_descriptor'] + + def __init__(self, message_listener: Any, message_descriptor: Any) -> None: + """ + Note that we pass in a descriptor instead of the generated directly, + since at the time we construct a _RepeatedCompositeFieldContainer we + haven't yet necessarily initialized the type that will be contained in the + container. + + Args: + message_listener: A MessageListener implementation. + The RepeatedCompositeFieldContainer will call this object's + Modified() method when it is modified. + message_descriptor: A Descriptor instance describing the protocol type + that should be present in this container. We'll use the + _concrete_class field of this descriptor when the client calls add(). 
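+
+      For example, with a repeated message field, a call like
+      msg.children.add(name='x') (field and argument names hypothetical)
+      instantiates message_descriptor._concrete_class(name='x'), attaches
+      the message listener, and appends the new element.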
+ """ + super().__init__(message_listener) + self._message_descriptor = message_descriptor + + def add(self, **kwargs: Any) -> _T: + """Adds a new element at the end of the list and returns it. Keyword + arguments may be used to initialize the element. + """ + new_element = self._message_descriptor._concrete_class(**kwargs) + new_element._SetListener(self._message_listener) + self._values.append(new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + return new_element + + def append(self, value: _T) -> None: + """Appends one element by copying the message.""" + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + new_element.CopyFrom(value) + self._values.append(new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def insert(self, key: int, value: _T) -> None: + """Inserts the item at the specified position by copying.""" + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + new_element.CopyFrom(value) + self._values.insert(key, new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def extend(self, elem_seq: Iterable[_T]) -> None: + """Extends by appending the given sequence of elements of the same type + + as this one, copying each individual message. + """ + message_class = self._message_descriptor._concrete_class + listener = self._message_listener + values = self._values + for message in elem_seq: + new_element = message_class() + new_element._SetListener(listener) + new_element.MergeFrom(message) + values.append(new_element) + listener.Modified() + + def MergeFrom( + self, + other: Union['RepeatedCompositeFieldContainer[_T]', Iterable[_T]], + ) -> None: + """Appends the contents of another repeated field of the same type to this + one, copying each individual message. + """ + self.extend(other) + + def remove(self, elem: _T) -> None: + """Removes an item from the list. Similar to list.remove().""" + self._values.remove(elem) + self._message_listener.Modified() + + def pop(self, key: Optional[int] = -1) -> _T: + """Removes and returns an item at a given index. Similar to list.pop().""" + value = self._values[key] + self.__delitem__(key) + return value + + @overload + def __setitem__(self, key: int, value: _T) -> None: + ... + + @overload + def __setitem__(self, key: slice, value: Iterable[_T]) -> None: + ... + + def __setitem__(self, key, value): + # This method is implemented to make RepeatedCompositeFieldContainer + # structurally compatible with typing.MutableSequence. It is + # otherwise unsupported and will always raise an error. + raise TypeError( + f'{self.__class__.__name__} object does not support item assignment') + + def __delitem__(self, key: Union[int, slice]) -> None: + """Deletes the item at the specified position.""" + del self._values[key] + self._message_listener.Modified() + + def __eq__(self, other: Any) -> bool: + """Compares the current instance with another one.""" + if self is other: + return True + if not isinstance(other, self.__class__): + raise TypeError('Can only compare repeated composite fields against ' + 'other repeated composite fields.') + return self._values == other._values + + +class ScalarMap(MutableMapping[_K, _V]): + """Simple, type-checked, dict-like container for holding repeated scalars.""" + + # Disallows assignment to other attributes. 
__slots__ = ['_key_checker', '_value_checker', '_values', '_message_listener',
+               '_entry_descriptor']
+
+  def __init__(
+      self,
+      message_listener: Any,
+      key_checker: Any,
+      value_checker: Any,
+      entry_descriptor: Any,
+  ) -> None:
+    """
+    Args:
+      message_listener: A MessageListener implementation.
+        The ScalarMap will call this object's Modified() method when it
+        is modified.
+      key_checker: A type_checkers.ValueChecker instance to run on keys
+        inserted into this container.
+      value_checker: A type_checkers.ValueChecker instance to run on values
+        inserted into this container.
+      entry_descriptor: The MessageDescriptor of a map entry: key and value.
+    """
+    self._message_listener = message_listener
+    self._key_checker = key_checker
+    self._value_checker = value_checker
+    self._entry_descriptor = entry_descriptor
+    self._values = {}
+
+  def __getitem__(self, key: _K) -> _V:
+    try:
+      return self._values[key]
+    except KeyError:
+      key = self._key_checker.CheckValue(key)
+      val = self._value_checker.DefaultValue()
+      self._values[key] = val
+      return val
+
+  def __contains__(self, item: _K) -> bool:
+    # We check the key's type to match the strong-typing flavor of the API.
+    # Also this makes it easier to match the behavior of the C++ implementation.
+    self._key_checker.CheckValue(item)
+    return item in self._values
+
+  @overload
+  def get(self, key: _K) -> Optional[_V]:
+    ...
+
+  @overload
+  def get(self, key: _K, default: _T) -> Union[_V, _T]:
+    ...
+
+  # We need to override this explicitly, because our defaultdict-like behavior
+  # will make the default implementation (from our base class) always insert
+  # the key.
+  def get(self, key, default=None):
+    if key in self:
+      return self[key]
+    else:
+      return default
+
+  def __setitem__(self, key: _K, value: _V) -> None:
+    checked_key = self._key_checker.CheckValue(key)
+    checked_value = self._value_checker.CheckValue(value)
+    self._values[checked_key] = checked_value
+    self._message_listener.Modified()
+
+  def __delitem__(self, key: _K) -> None:
+    del self._values[key]
+    self._message_listener.Modified()
+
+  def __len__(self) -> int:
+    return len(self._values)
+
+  def __iter__(self) -> Iterator[_K]:
+    return iter(self._values)
+
+  def __repr__(self) -> str:
+    return repr(self._values)
+
+  def MergeFrom(self, other: 'ScalarMap[_K, _V]') -> None:
+    self._values.update(other._values)
+    self._message_listener.Modified()
+
+  def InvalidateIterators(self) -> None:
+    # It appears that the only way to reliably invalidate iterators to
+    # self._values is to ensure that its size changes.
+    original = self._values
+    self._values = original.copy()
+    original[None] = None
+
+  # This is defined in the abstract base, but we can do it much more cheaply.
+  def clear(self) -> None:
+    self._values.clear()
+    self._message_listener.Modified()
+
+  def GetEntryClass(self) -> Any:
+    return self._entry_descriptor._concrete_class
+
+
+class MessageMap(MutableMapping[_K, _V]):
+  """Simple, type-checked, dict-like container for submessage values."""
+
+  # Disallows assignment to other attributes.
+  __slots__ = ['_key_checker', '_values', '_message_listener',
+               '_message_descriptor', '_entry_descriptor']
+
+  def __init__(
+      self,
+      message_listener: Any,
+      message_descriptor: Any,
+      key_checker: Any,
+      entry_descriptor: Any,
+  ) -> None:
+    """
+    Args:
+      message_listener: A MessageListener implementation.
+        The MessageMap will call this object's Modified() method when it
+        is modified.
+      message_descriptor: A Descriptor instance describing the protocol type
+        of the values; map[key] instantiates its _concrete_class on first
+        access.
+      key_checker: A type_checkers.ValueChecker instance to run on keys
+        inserted into this container.
+      entry_descriptor: The MessageDescriptor of a map entry: key and value.
+    """
+    self._message_listener = message_listener
+    self._message_descriptor = message_descriptor
+    self._key_checker = key_checker
+    self._entry_descriptor = entry_descriptor
+    self._values = {}
+
+  def __getitem__(self, key: _K) -> _V:
+    key = self._key_checker.CheckValue(key)
+    try:
+      return self._values[key]
+    except KeyError:
+      new_element = self._message_descriptor._concrete_class()
+      new_element._SetListener(self._message_listener)
+      self._values[key] = new_element
+      self._message_listener.Modified()
+      return new_element
+
+  def get_or_create(self, key: _K) -> _V:
+    """get_or_create() is an alias for getitem (ie. map[key]).
+
+    Args:
+      key: The key to get or create in the map.
+
+    This is useful in cases where you want to be explicit that the call is
+    mutating the map. This can avoid lint errors for statements like this
+    that otherwise would appear to be pointless statements:
+
+      msg.my_map[key]
+    """
+    return self[key]
+
+  @overload
+  def get(self, key: _K) -> Optional[_V]:
+    ...
+
+  @overload
+  def get(self, key: _K, default: _T) -> Union[_V, _T]:
+    ...
+
+  # We need to override this explicitly, because our defaultdict-like behavior
+  # will make the default implementation (from our base class) always insert
+  # the key.
+  def get(self, key, default=None):
+    if key in self:
+      return self[key]
+    else:
+      return default
+
+  def __contains__(self, item: _K) -> bool:
+    item = self._key_checker.CheckValue(item)
+    return item in self._values
+
+  def __setitem__(self, key: _K, value: _V) -> NoReturn:
+    raise ValueError('May not set values directly, call my_map[key].foo = 5')
+
+  def __delitem__(self, key: _K) -> None:
+    key = self._key_checker.CheckValue(key)
+    del self._values[key]
+    self._message_listener.Modified()
+
+  def __len__(self) -> int:
+    return len(self._values)
+
+  def __iter__(self) -> Iterator[_K]:
+    return iter(self._values)
+
+  def __repr__(self) -> str:
+    return repr(self._values)
+
+  def MergeFrom(self, other: 'MessageMap[_K, _V]') -> None:
+    # pylint: disable=protected-access
+    for key in other._values:
+      # According to documentation: "When parsing from the wire or when
+      # merging, if there are duplicate map keys the last key seen is used".
+      if key in self:
+        del self[key]
+      self[key].CopyFrom(other[key])
+    # self._message_listener.Modified() not required here, because
+    # mutations to submessages already propagate.
+
+  def InvalidateIterators(self) -> None:
+    # It appears that the only way to reliably invalidate iterators to
+    # self._values is to ensure that its size changes.
+    original = self._values
+    self._values = original.copy()
+    original[None] = None
+
+  # This is defined in the abstract base, but we can do it much more cheaply.
+  def clear(self) -> None:
+    self._values.clear()
+    self._message_listener.Modified()
+
+  def GetEntryClass(self) -> Any:
+    return self._entry_descriptor._concrete_class
+
+
+class _UnknownField:
+  """A parsed unknown field."""
+
+  # Disallows assignment to other attributes.
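+  # (Behavior sketch, for illustration only: parsing stores _UnknownField
+  # entries in the UnknownFieldSet defined below; iterating over that set
+  # yields read-only UnknownFieldRef views, e.g.
+  #
+  #   for ref in unknown_field_set:
+  #     print(ref.field_number, ref.wire_type, ref.data)
+  # )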
+  __slots__ = ['_field_number', '_wire_type', '_data']
+
+  def __init__(self, field_number, wire_type, data):
+    self._field_number = field_number
+    self._wire_type = wire_type
+    self._data = data
+    return
+
+  def __lt__(self, other):
+    # pylint: disable=protected-access
+    return self._field_number < other._field_number
+
+  def __eq__(self, other):
+    if self is other:
+      return True
+    # pylint: disable=protected-access
+    return (self._field_number == other._field_number and
+            self._wire_type == other._wire_type and
+            self._data == other._data)
+
+
+class UnknownFieldRef:  # pylint: disable=missing-class-docstring
+
+  def __init__(self, parent, index):
+    self._parent = parent
+    self._index = index
+
+  def _check_valid(self):
+    if not self._parent:
+      raise ValueError('UnknownField does not exist. '
+                       'The parent message might be cleared.')
+    if self._index >= len(self._parent):
+      raise ValueError('UnknownField does not exist. '
+                       'The parent message might be cleared.')
+
+  @property
+  def field_number(self):
+    self._check_valid()
+    # pylint: disable=protected-access
+    return self._parent._internal_get(self._index)._field_number
+
+  @property
+  def wire_type(self):
+    self._check_valid()
+    # pylint: disable=protected-access
+    return self._parent._internal_get(self._index)._wire_type
+
+  @property
+  def data(self):
+    self._check_valid()
+    # pylint: disable=protected-access
+    return self._parent._internal_get(self._index)._data
+
+
+class UnknownFieldSet:
+  """UnknownField container."""
+
+  # Disallows assignment to other attributes.
+  __slots__ = ['_values']
+
+  def __init__(self):
+    self._values = []
+
+  def __getitem__(self, index):
+    if self._values is None:
+      raise ValueError('UnknownFields does not exist. '
+                       'The parent message might be cleared.')
+    size = len(self._values)
+    if index < 0:
+      index += size
+    if index < 0 or index >= size:
+      raise IndexError('index %d out of range' % index)
+
+    return UnknownFieldRef(self, index)
+
+  def _internal_get(self, index):
+    return self._values[index]
+
+  def __len__(self):
+    if self._values is None:
+      raise ValueError('UnknownFields does not exist. '
+                       'The parent message might be cleared.')
+    return len(self._values)
+
+  def _add(self, field_number, wire_type, data):
+    unknown_field = _UnknownField(field_number, wire_type, data)
+    self._values.append(unknown_field)
+    return unknown_field
+
+  def __iter__(self):
+    for i in range(len(self)):
+      yield UnknownFieldRef(self, i)
+
+  def _extend(self, other):
+    if other is None:
+      return
+    # pylint: disable=protected-access
+    self._values.extend(other._values)
+
+  def __eq__(self, other):
+    if self is other:
+      return True
+    # Sort unknown fields because their order shouldn't
+    # affect equality test.
+    values = list(self._values)
+    if other is None:
+      return not values
+    values.sort()
+    # pylint: disable=protected-access
+    other_values = sorted(other._values)
+    return values == other_values
+
+  def _clear(self):
+    for value in self._values:
+      # pylint: disable=protected-access
+      if isinstance(value._data, UnknownFieldSet):
+        value._data._clear()  # pylint: disable=protected-access
+    self._values = None
diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/decoder.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/decoder.py
new file mode 100644
index 00000000..dcde1d94
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/decoder.py
@@ -0,0 +1,1036 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc.
All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Code for decoding protocol buffer primitives.
+
+This code is very similar to encoder.py -- read the docs for that module first.
+
+A "decoder" is a function with the signature:
+  Decode(buffer, pos, end, message, field_dict)
+The arguments are:
+  buffer: The string containing the encoded message.
+  pos: The current position in the string.
+  end: The position in the string where the current message ends. May be
+    less than len(buffer) if we're reading a sub-message.
+  message: The message object into which we're parsing.
+  field_dict: message._fields (avoids a hashtable lookup).
+The decoder reads the field and stores it into field_dict, returning the new
+buffer position. A decoder for a repeated field may proactively decode all of
+the elements of that field, if they appear consecutively.
+
+Note that decoders may throw any of the following:
+  IndexError: Indicates a truncated message.
+  struct.error: Unpacking of a fixed-width field failed.
+  message.DecodeError: Other errors.
+
+Decoders are expected to raise an exception if they are called with pos > end.
+This allows callers to be lax about bounds checking: it's fine to read past
+"end" as long as you are sure that someone else will notice and throw an
+exception later on.
+
+Something up the call stack is expected to catch IndexError and struct.error
+and convert them to message.DecodeError.
+
+Decoders are constructed using decoder constructors with the signature:
+  MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
+The arguments are:
+  field_number: The field number of the field we want to decode.
+  is_repeated: Is the field a repeated field? (bool)
+  is_packed: Is the field a packed field? (bool)
+  key: The key to use when looking up the field within field_dict.
+    (This is actually the FieldDescriptor but nothing in this
+    file should depend on that.)
+  new_default: A function which takes a message object as a parameter and
+    returns a new instance of the default value for this field.
+    (This is called for repeated fields and sub-messages, when an
+    instance does not already exist.)
+
+As with encoders, we define a decoder constructor for every type of field.
+Then, for every field of every message class we construct an actual decoder.
+That decoder goes into a dict indexed by tag, so when we decode a message
+we repeatedly read a tag, look up the corresponding decoder, and invoke it.
+"""
+
+__author__ = 'kenton@google.com (Kenton Varda)'
+
+import math
+import struct
+
+from google.protobuf import message
+from google.protobuf.internal import containers
+from google.protobuf.internal import encoder
+from google.protobuf.internal import wire_format
+
+
+# This is not for optimization, but rather to avoid conflicts with local
+# variables named "message".
+_DecodeError = message.DecodeError
+
+
+def _VarintDecoder(mask, result_type):
+  """Return a decoder for a basic varint value (does not include tag).
+
+  Decoded values will be bitwise-anded with the given mask before being
+  returned, e.g. to limit them to 32 bits. The returned decoder does not
+  take the usual "end" parameter -- the caller is expected to do bounds
+  checking after the fact (often the caller can defer such checking until
+  later). The decoder returns a (value, new_pos) pair.
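+
+  Worked example (standard protobuf varint encoding, stated here for
+  illustration): the bytes b'\xac\x02' decode to 300. 0xac contributes its
+  low seven bits (0x2c), and its continuation bit says one more byte
+  follows, contributing 0x02 << 7, so 0x2c | (0x02 << 7) == 300.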
+ """ + + def DecodeVarint(buffer, pos: int=None): + result = 0 + shift = 0 + while 1: + if pos is None: + # Read from BytesIO + try: + b = buffer.read(1)[0] + except IndexError as e: + if shift == 0: + # End of BytesIO. + return None + else: + raise ValueError('Fail to read varint %s' % str(e)) + else: + b = buffer[pos] + pos += 1 + result |= ((b & 0x7f) << shift) + if not (b & 0x80): + result &= mask + result = result_type(result) + return result if pos is None else (result, pos) + shift += 7 + if shift >= 64: + raise _DecodeError('Too many bytes when decoding varint.') + + return DecodeVarint + + +def _SignedVarintDecoder(bits, result_type): + """Like _VarintDecoder() but decodes signed values.""" + + signbit = 1 << (bits - 1) + mask = (1 << bits) - 1 + + def DecodeVarint(buffer, pos): + result = 0 + shift = 0 + while 1: + b = buffer[pos] + result |= ((b & 0x7f) << shift) + pos += 1 + if not (b & 0x80): + result &= mask + result = (result ^ signbit) - signbit + result = result_type(result) + return (result, pos) + shift += 7 + if shift >= 64: + raise _DecodeError('Too many bytes when decoding varint.') + return DecodeVarint + +# All 32-bit and 64-bit values are represented as int. +_DecodeVarint = _VarintDecoder((1 << 64) - 1, int) +_DecodeSignedVarint = _SignedVarintDecoder(64, int) + +# Use these versions for values which must be limited to 32 bits. +_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int) +_DecodeSignedVarint32 = _SignedVarintDecoder(32, int) + + +def ReadTag(buffer, pos): + """Read a tag from the memoryview, and return a (tag_bytes, new_pos) tuple. + + We return the raw bytes of the tag rather than decoding them. The raw + bytes can then be used to look up the proper decoder. This effectively allows + us to trade some work that would be done in pure-python (decoding a varint) + for work that is done in C (searching for a byte string in a hash table). + In a low-level language it would be much cheaper to decode the varint and + use that, but not in Python. + + Args: + buffer: memoryview object of the encoded bytes + pos: int of the current position to start from + + Returns: + Tuple[bytes, int] of the tag data and new position. + """ + start = pos + while buffer[pos] & 0x80: + pos += 1 + pos += 1 + + tag_bytes = buffer[start:pos].tobytes() + return tag_bytes, pos + + +# -------------------------------------------------------------------- + + +def _SimpleDecoder(wire_type, decode_value): + """Return a constructor for a decoder for fields of a particular type. + + Args: + wire_type: The field's wire type. + decode_value: A function which decodes an individual value, e.g. + _DecodeVarint() + """ + + def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default, + clear_if_default=False): + if is_packed: + local_DecodeVarint = _DecodeVarint + def DecodePackedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + (endpoint, pos) = local_DecodeVarint(buffer, pos) + endpoint += pos + if endpoint > end: + raise _DecodeError('Truncated message.') + while pos < endpoint: + (element, pos) = decode_value(buffer, pos) + value.append(element) + if pos > endpoint: + del value[-1] # Discard corrupt value. 
+ raise _DecodeError('Packed element was truncated.') + return pos + return DecodePackedField + elif is_repeated: + tag_bytes = encoder.TagBytes(field_number, wire_type) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (element, new_pos) = decode_value(buffer, pos) + value.append(element) + # Predict that the next tag is another copy of the same repeated + # field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos >= end: + # Prediction failed. Return. + if new_pos > end: + raise _DecodeError('Truncated message.') + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (new_value, pos) = decode_value(buffer, pos) + if pos > end: + raise _DecodeError('Truncated message.') + if clear_if_default and not new_value: + field_dict.pop(key, None) + else: + field_dict[key] = new_value + return pos + return DecodeField + + return SpecificDecoder + + +def _ModifiedDecoder(wire_type, decode_value, modify_value): + """Like SimpleDecoder but additionally invokes modify_value on every value + before storing it. Usually modify_value is ZigZagDecode. + """ + + # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but + # not enough to make a significant difference. + + def InnerDecode(buffer, pos): + (result, new_pos) = decode_value(buffer, pos) + return (modify_value(result), new_pos) + return _SimpleDecoder(wire_type, InnerDecode) + + +def _StructPackDecoder(wire_type, format): + """Return a constructor for a decoder for a fixed-width field. + + Args: + wire_type: The field's wire type. + format: The format string to pass to struct.unpack(). + """ + + value_size = struct.calcsize(format) + local_unpack = struct.unpack + + # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but + # not enough to make a significant difference. + + # Note that we expect someone up-stack to catch struct.error and convert + # it to _DecodeError -- this way we don't have to set up exception- + # handling blocks every time we parse one value. + + def InnerDecode(buffer, pos): + new_pos = pos + value_size + result = local_unpack(format, buffer[pos:new_pos])[0] + return (result, new_pos) + return _SimpleDecoder(wire_type, InnerDecode) + + +def _FloatDecoder(): + """Returns a decoder for a float field. + + This code works around a bug in struct.unpack for non-finite 32-bit + floating-point values. + """ + + local_unpack = struct.unpack + + def InnerDecode(buffer, pos): + """Decode serialized float to a float and new position. + + Args: + buffer: memoryview of the serialized bytes + pos: int, position in the memory view to start at. + + Returns: + Tuple[float, int] of the deserialized float value and new position + in the serialized data. + """ + # We expect a 32-bit value in little-endian byte order. Bit 1 is the sign + # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand. + new_pos = pos + 4 + float_bytes = buffer[pos:new_pos].tobytes() + + # If this value has all its exponent bits set, then it's non-finite. + # In Python 2.4, struct.unpack will convert it to a finite 64-bit value. + # To avoid that, we parse it specially. + if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'): + # If at least one significand bit is set... 
+ if float_bytes[0:3] != b'\x00\x00\x80': + return (math.nan, new_pos) + # If sign bit is set... + if float_bytes[3:4] == b'\xFF': + return (-math.inf, new_pos) + return (math.inf, new_pos) + + # Note that we expect someone up-stack to catch struct.error and convert + # it to _DecodeError -- this way we don't have to set up exception- + # handling blocks every time we parse one value. + result = local_unpack('<f', float_bytes)[0] + return (result, new_pos) + return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode) + + +def _DoubleDecoder(): + """Returns a decoder for a double field. + + This code works around a bug in struct.unpack for not-a-number. + """ + + local_unpack = struct.unpack + + def InnerDecode(buffer, pos): + """Decode serialized double to a double and new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + + Returns: + Tuple[float, int] of the decoded double value and new position + in the serialized data. + """ + # We expect a 64-bit value in little-endian byte order. Bit 1 is the sign + # bit, bits 2-12 represent the exponent, and bits 13-64 are the significand. + new_pos = pos + 8 + double_bytes = buffer[pos:new_pos].tobytes() + + # If this value has all its exponent bits set and at least one significand + # bit set, it's not a number. In Python 2.4, struct.unpack will treat it + # as inf or -inf. To avoid that, we treat it specially. + if ((double_bytes[7:8] in b'\x7F\xFF') + and (double_bytes[6:7] >= b'\xF0') + and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')): + return (math.nan, new_pos) + + # Note that we expect someone up-stack to catch struct.error and convert + # it to _DecodeError -- this way we don't have to set up exception- + # handling blocks every time we parse one value. + result = local_unpack('<d', double_bytes)[0] + return (result, new_pos) + return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode) + + +def EnumDecoder(field_number, is_repeated, is_packed, key, new_default, + clear_if_default=False): + """Returns a decoder for enum field.""" + enum_type = key.enum_type + if is_packed: + local_DecodeVarint = _DecodeVarint + def DecodePackedField(buffer, pos, end, message, field_dict): + """Decode serialized packed enum to its value and a new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + (endpoint, pos) = local_DecodeVarint(buffer, pos) + endpoint += pos + if endpoint > end: + raise _DecodeError('Truncated message.') + while pos < endpoint: + value_start_pos = pos + (element, pos) = _DecodeSignedVarint32(buffer, pos) + # pylint: disable=protected-access + if element in enum_type.values_by_number: + value.append(element) + else: + if not message._unknown_fields: + message._unknown_fields = [] + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_VARINT) + + message._unknown_fields.append( + (tag_bytes, buffer[value_start_pos:pos].tobytes())) + # pylint: enable=protected-access + if pos > endpoint: + if element in enum_type.values_by_number: + del value[-1] # Discard corrupt value. 
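+ # Roll back whichever side recorded the truncated element: the value
+ # list for a known enum number, the unknown-field list otherwise.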
+ else: + del message._unknown_fields[-1] + # pylint: enable=protected-access + raise _DecodeError('Packed element was truncated.') + return pos + return DecodePackedField + elif is_repeated: + tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + """Decode serialized repeated enum to its value and a new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (element, new_pos) = _DecodeSignedVarint32(buffer, pos) + # pylint: disable=protected-access + if element in enum_type.values_by_number: + value.append(element) + else: + if not message._unknown_fields: + message._unknown_fields = [] + message._unknown_fields.append( + (tag_bytes, buffer[pos:new_pos].tobytes())) + # pylint: enable=protected-access + # Predict that the next tag is another copy of the same repeated + # field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos >= end: + # Prediction failed. Return. + if new_pos > end: + raise _DecodeError('Truncated message.') + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + """Decode serialized repeated enum to its value and a new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + value_start_pos = pos + (enum_value, pos) = _DecodeSignedVarint32(buffer, pos) + if pos > end: + raise _DecodeError('Truncated message.') + if clear_if_default and not enum_value: + field_dict.pop(key, None) + return pos + # pylint: disable=protected-access + if enum_value in enum_type.values_by_number: + field_dict[key] = enum_value + else: + if not message._unknown_fields: + message._unknown_fields = [] + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_VARINT) + message._unknown_fields.append( + (tag_bytes, buffer[value_start_pos:pos].tobytes())) + # pylint: enable=protected-access + return pos + return DecodeField + + +# -------------------------------------------------------------------- + + +Int32Decoder = _SimpleDecoder( + wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32) + +Int64Decoder = _SimpleDecoder( + wire_format.WIRETYPE_VARINT, _DecodeSignedVarint) + +UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32) +UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint) + +SInt32Decoder = _ModifiedDecoder( + wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode) +SInt64Decoder = _ModifiedDecoder( + wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode) + +# Note that Python conveniently guarantees that when using the '<' prefix on +# formats, they will also have the same size across all platforms (as opposed +# to without the prefix, where their sizes depend on the C compiler's basic +# type sizes). 
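+# For example, struct.calcsize('<I') is 4 and struct.calcsize('<q') is 8
+# everywhere, while the native-order 'I' and 'q' formats may add padding
+# and vary in size with the platform's C ABI.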
+Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I') +Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q') +SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i') +SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q') +FloatDecoder = _FloatDecoder() +DoubleDecoder = _DoubleDecoder() + +BoolDecoder = _ModifiedDecoder( + wire_format.WIRETYPE_VARINT, _DecodeVarint, bool) + + +def StringDecoder(field_number, is_repeated, is_packed, key, new_default, + clear_if_default=False): + """Returns a decoder for a string field.""" + + local_DecodeVarint = _DecodeVarint + + def _ConvertToUnicode(memview): + """Convert byte to unicode.""" + byte_str = memview.tobytes() + try: + value = str(byte_str, 'utf-8') + except UnicodeDecodeError as e: + # add more information to the error message and re-raise it. + e.reason = '%s in field: %s' % (e, key.full_name) + raise + + return value + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + value.append(_ConvertToUnicode(buffer[pos:new_pos])) + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + if clear_if_default and not size: + field_dict.pop(key, None) + else: + field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos]) + return new_pos + return DecodeField + + +def BytesDecoder(field_number, is_repeated, is_packed, key, new_default, + clear_if_default=False): + """Returns a decoder for a bytes field.""" + + local_DecodeVarint = _DecodeVarint + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + value.append(buffer[pos:new_pos].tobytes()) + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. 
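+ # new_pos (not pos) is returned: the speculatively read tag bytes did
+ # not match, so the caller must re-read the tag starting at new_pos.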
+ return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + if clear_if_default and not size: + field_dict.pop(key, None) + else: + field_dict[key] = buffer[pos:new_pos].tobytes() + return new_pos + return DecodeField + + +def GroupDecoder(field_number, is_repeated, is_packed, key, new_default): + """Returns a decoder for a group field.""" + + end_tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_END_GROUP) + end_tag_len = len(end_tag_bytes) + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_START_GROUP) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read sub-message. + pos = value.add()._InternalParse(buffer, pos, end) + # Read end tag. + new_pos = pos+end_tag_len + if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: + raise _DecodeError('Missing group end tag.') + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read sub-message. + pos = value._InternalParse(buffer, pos, end) + # Read end tag. + new_pos = pos+end_tag_len + if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: + raise _DecodeError('Missing group end tag.') + return new_pos + return DecodeField + + +def MessageDecoder(field_number, is_repeated, is_packed, key, new_default): + """Returns a decoder for a message field.""" + + local_DecodeVarint = _DecodeVarint + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + if value.add()._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. 
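+ # buffer[pos:new_pos] brackets exactly the submessage payload; e.g. a
+ # 3-byte submessage in field 1 arrives on the wire as b'\x0a\x03' plus
+ # the payload, so size == 3 here.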
+ if value._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it encountered + # an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + return new_pos + return DecodeField + + +# -------------------------------------------------------------------- + +MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP) + +def MessageSetItemDecoder(descriptor): + """Returns a decoder for a MessageSet item. + + The parameter is the message Descriptor. + + The message set message looks like this: + message MessageSet { + repeated group Item = 1 { + required int32 type_id = 2; + required string message = 3; + } + } + """ + + type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT) + message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED) + item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP) + + local_ReadTag = ReadTag + local_DecodeVarint = _DecodeVarint + local_SkipField = SkipField + + def DecodeItem(buffer, pos, end, message, field_dict): + """Decode serialized message set to its value and new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + message_set_item_start = pos + type_id = -1 + message_start = -1 + message_end = -1 + + # Technically, type_id and message can appear in any order, so we need + # a little loop here. + while 1: + (tag_bytes, pos) = local_ReadTag(buffer, pos) + if tag_bytes == type_id_tag_bytes: + (type_id, pos) = local_DecodeVarint(buffer, pos) + elif tag_bytes == message_tag_bytes: + (size, message_start) = local_DecodeVarint(buffer, pos) + pos = message_end = message_start + size + elif tag_bytes == item_end_tag_bytes: + break + else: + pos = SkipField(buffer, pos, end, tag_bytes) + if pos == -1: + raise _DecodeError('Missing group end tag.') + + if pos > end: + raise _DecodeError('Truncated message.') + + if type_id == -1: + raise _DecodeError('MessageSet item missing type_id.') + if message_start == -1: + raise _DecodeError('MessageSet item missing message.') + + extension = message.Extensions._FindExtensionByNumber(type_id) + # pylint: disable=protected-access + if extension is not None: + value = field_dict.get(extension) + if value is None: + message_type = extension.message_type + if not hasattr(message_type, '_concrete_class'): + message_factory.GetMessageClass(message_type) + value = field_dict.setdefault( + extension, message_type._concrete_class()) + if value._InternalParse(buffer, message_start,message_end) != message_end: + # The only reason _InternalParse would return early is if it encountered + # an end-group tag. 
+ raise _DecodeError('Unexpected end-group tag.') + else: + if not message._unknown_fields: + message._unknown_fields = [] + message._unknown_fields.append( + (MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos].tobytes())) + # pylint: enable=protected-access + + return pos + + return DecodeItem + + +def UnknownMessageSetItemDecoder(): + """Returns a decoder for a Unknown MessageSet item.""" + + type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT) + message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED) + item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP) + + def DecodeUnknownItem(buffer): + pos = 0 + end = len(buffer) + message_start = -1 + message_end = -1 + while 1: + (tag_bytes, pos) = ReadTag(buffer, pos) + if tag_bytes == type_id_tag_bytes: + (type_id, pos) = _DecodeVarint(buffer, pos) + elif tag_bytes == message_tag_bytes: + (size, message_start) = _DecodeVarint(buffer, pos) + pos = message_end = message_start + size + elif tag_bytes == item_end_tag_bytes: + break + else: + pos = SkipField(buffer, pos, end, tag_bytes) + if pos == -1: + raise _DecodeError('Missing group end tag.') + + if pos > end: + raise _DecodeError('Truncated message.') + + if type_id == -1: + raise _DecodeError('MessageSet item missing type_id.') + if message_start == -1: + raise _DecodeError('MessageSet item missing message.') + + return (type_id, buffer[message_start:message_end].tobytes()) + + return DecodeUnknownItem + +# -------------------------------------------------------------------- + +def MapDecoder(field_descriptor, new_default, is_message_map): + """Returns a decoder for a map field.""" + + key = field_descriptor + tag_bytes = encoder.TagBytes(field_descriptor.number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + local_DecodeVarint = _DecodeVarint + # Can't read _concrete_class yet; might not be initialized. + message_type = field_descriptor.message_type + + def DecodeMap(buffer, pos, end, message, field_dict): + submsg = message_type._concrete_class() + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + submsg.Clear() + if submsg._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + + if is_message_map: + value[submsg.key].CopyFrom(submsg.value) + else: + value[submsg.key] = submsg.value + + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + + return DecodeMap + +# -------------------------------------------------------------------- +# Optimization is not as heavy here because calls to SkipField() are rare, +# except for handling end-group tags. + +def _SkipVarint(buffer, pos, end): + """Skip a varint value. Returns the new position.""" + # Previously ord(buffer[pos]) raised IndexError when pos is out of range. + # With this code, ord(b'') raises TypeError. Both are handled in + # python_message.py to generate a 'Truncated message' error. 
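+ # Example: skipping the varint b'\x96\x01' (value 150) advances pos by
+ # two bytes; only the continuation bits are inspected, never the value.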
+ while ord(buffer[pos:pos+1].tobytes()) & 0x80: + pos += 1 + pos += 1 + if pos > end: + raise _DecodeError('Truncated message.') + return pos + +def _SkipFixed64(buffer, pos, end): + """Skip a fixed64 value. Returns the new position.""" + + pos += 8 + if pos > end: + raise _DecodeError('Truncated message.') + return pos + + +def _DecodeFixed64(buffer, pos): + """Decode a fixed64.""" + new_pos = pos + 8 + return (struct.unpack('<Q', buffer[pos:new_pos])[0], new_pos) + + +def _SkipLengthDelimited(buffer, pos, end): + """Skip a length-delimited value. Returns the new position.""" + + (size, pos) = _DecodeVarint(buffer, pos) + pos += size + if pos > end: + raise _DecodeError('Truncated message.') + return pos + + +def _SkipGroup(buffer, pos, end): + """Skip sub-group. Returns the new position.""" + + while 1: + (tag_bytes, pos) = ReadTag(buffer, pos) + new_pos = SkipField(buffer, pos, end, tag_bytes) + if new_pos == -1: + return pos + pos = new_pos + + +def _DecodeUnknownFieldSet(buffer, pos, end_pos=None): + """Decode UnknownFieldSet. Returns the UnknownFieldSet and new position.""" + + unknown_field_set = containers.UnknownFieldSet() + while end_pos is None or pos < end_pos: + (tag_bytes, pos) = ReadTag(buffer, pos) + (tag, _) = _DecodeVarint(tag_bytes, 0) + field_number, wire_type = wire_format.UnpackTag(tag) + if wire_type == wire_format.WIRETYPE_END_GROUP: + break + (data, pos) = _DecodeUnknownField(buffer, pos, wire_type) + # pylint: disable=protected-access + unknown_field_set._add(field_number, wire_type, data) + + return (unknown_field_set, pos) + + +def _DecodeUnknownField(buffer, pos, wire_type): + """Decode a unknown field. Returns the UnknownField and new position.""" + + if wire_type == wire_format.WIRETYPE_VARINT: + (data, pos) = _DecodeVarint(buffer, pos) + elif wire_type == wire_format.WIRETYPE_FIXED64: + (data, pos) = _DecodeFixed64(buffer, pos) + elif wire_type == wire_format.WIRETYPE_FIXED32: + (data, pos) = _DecodeFixed32(buffer, pos) + elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED: + (size, pos) = _DecodeVarint(buffer, pos) + data = buffer[pos:pos+size].tobytes() + pos += size + elif wire_type == wire_format.WIRETYPE_START_GROUP: + (data, pos) = _DecodeUnknownFieldSet(buffer, pos) + elif wire_type == wire_format.WIRETYPE_END_GROUP: + return (0, -1) + else: + raise _DecodeError('Wrong wire type in tag.') + + return (data, pos) + + +def _EndGroup(buffer, pos, end): + """Skipping an END_GROUP tag returns -1 to tell the parent loop to break.""" + + return -1 + + +def _SkipFixed32(buffer, pos, end): + """Skip a fixed32 value. Returns the new position.""" + + pos += 4 + if pos > end: + raise _DecodeError('Truncated message.') + return pos + + +def _DecodeFixed32(buffer, pos): + """Decode a fixed32.""" + + new_pos = pos + 4 + return (struct.unpack('<I', buffer[pos:new_pos])[0], new_pos) + + +def _RaiseInvalidWireType(buffer, pos, end): + """Skip function for unknown wire types. Raises an exception.""" + + raise _DecodeError('Tag had invalid wire type.') + +def _FieldSkipper(): + """Constructs the SkipField function.""" + + WIRETYPE_TO_SKIPPER = [ + _SkipVarint, + _SkipFixed64, + _SkipLengthDelimited, + _SkipGroup, + _EndGroup, + _SkipFixed32, + _RaiseInvalidWireType, + _RaiseInvalidWireType, + ] + + wiretype_mask = wire_format.TAG_TYPE_MASK + + def SkipField(buffer, pos, end, tag_bytes): + """Skips a field with the specified tag. + + |pos| should point to the byte immediately after the tag. 
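+
+ For example, tag_bytes b'\x08' (field 1, WIRETYPE_VARINT) selects
+ _SkipVarint, while b'\x0c' (field 1, WIRETYPE_END_GROUP) selects
+ _EndGroup, which returns -1.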
+ + Returns: + The new position (after the tag value), or -1 if the tag is an end-group + tag (in which case the calling loop should break). + """ + + # The wire type is always in the first byte since varints are little-endian. + wire_type = ord(tag_bytes[0:1]) & wiretype_mask + return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end) + + return SkipField + +SkipField = _FieldSkipper() diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/encoder.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/encoder.py new file mode 100644 index 00000000..df7bf1b5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/encoder.py @@ -0,0 +1,806 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Code for encoding protocol message primitives. + +Contains the logic for encoding every logical protocol field type +into one of the 5 physical wire types. + +This code is designed to push the Python interpreter's performance to the +limits. + +The basic idea is that at startup time, for every field (i.e. every +FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The +sizer takes a value of this field's type and computes its byte size. The +encoder takes a writer function and a value. It encodes the value into byte +strings and invokes the writer function to write those strings. Typically the +writer function is the write() method of a BytesIO. + +We try to do as much work as possible when constructing the writer and the +sizer rather than when calling them. In particular: +* We copy any needed global functions to local variables, so that we do not need + to do costly global table lookups at runtime. +* Similarly, we try to do any attribute lookups at startup time if possible. +* Every field's tag is encoded to bytes at startup, since it can't change at + runtime. +* Whatever component of the field size we can compute at startup, we do. +* We *avoid* sharing code if doing so would make the code slower and not sharing + does not burden us too much. For example, encoders for repeated fields do + not just call the encoders for singular fields in a loop because this would + add an extra function call overhead for every loop iteration; instead, we + manually inline the single-value encoder into the loop. +* If a Python function lacks a return statement, Python actually generates + instructions to pop the result of the last statement off the stack, push + None onto the stack, and then return that. If we really don't care what + value is returned, then we can save two instructions by returning the + result of the last statement. It looks funny but it helps. +* We assume that type and bounds checking has happened at a higher level. +""" + +__author__ = 'kenton@google.com (Kenton Varda)' + +import struct + +from google.protobuf.internal import wire_format + + +# This will overflow and thus become IEEE-754 "infinity". We would use +# "float('inf')" but it doesn't work on Windows pre-Python-2.6. 
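+# The literal 1e10000 exceeds the double range, so CPython evaluates it to
+# the IEEE-754 infinity at parse time.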
+_POS_INF = 1e10000 +_NEG_INF = -_POS_INF + + +def _VarintSize(value): + """Compute the size of a varint value.""" + if value <= 0x7f: return 1 + if value <= 0x3fff: return 2 + if value <= 0x1fffff: return 3 + if value <= 0xfffffff: return 4 + if value <= 0x7ffffffff: return 5 + if value <= 0x3ffffffffff: return 6 + if value <= 0x1ffffffffffff: return 7 + if value <= 0xffffffffffffff: return 8 + if value <= 0x7fffffffffffffff: return 9 + return 10 + + +def _SignedVarintSize(value): + """Compute the size of a signed varint value.""" + if value < 0: return 10 + if value <= 0x7f: return 1 + if value <= 0x3fff: return 2 + if value <= 0x1fffff: return 3 + if value <= 0xfffffff: return 4 + if value <= 0x7ffffffff: return 5 + if value <= 0x3ffffffffff: return 6 + if value <= 0x1ffffffffffff: return 7 + if value <= 0xffffffffffffff: return 8 + if value <= 0x7fffffffffffffff: return 9 + return 10 + + +def _TagSize(field_number): + """Returns the number of bytes required to serialize a tag with this field + number.""" + # Just pass in type 0, since the type won't affect the tag+type size. + return _VarintSize(wire_format.PackTag(field_number, 0)) + + +# -------------------------------------------------------------------- +# In this section we define some generic sizers. Each of these functions +# takes parameters specific to a particular field type, e.g. int32 or fixed64. +# It returns another function which in turn takes parameters specific to a +# particular field, e.g. the field number and whether it is repeated or packed. +# Look at the next section to see how these are used. + + +def _SimpleSizer(compute_value_size): + """A sizer which uses the function compute_value_size to compute the size of + each value. Typically compute_value_size is _VarintSize.""" + + def SpecificSizer(field_number, is_repeated, is_packed): + tag_size = _TagSize(field_number) + if is_packed: + local_VarintSize = _VarintSize + def PackedFieldSize(value): + result = 0 + for element in value: + result += compute_value_size(element) + return result + local_VarintSize(result) + tag_size + return PackedFieldSize + elif is_repeated: + def RepeatedFieldSize(value): + result = tag_size * len(value) + for element in value: + result += compute_value_size(element) + return result + return RepeatedFieldSize + else: + def FieldSize(value): + return tag_size + compute_value_size(value) + return FieldSize + + return SpecificSizer + + +def _ModifiedSizer(compute_value_size, modify_value): + """Like SimpleSizer, but modify_value is invoked on each value before it is + passed to compute_value_size. modify_value is typically ZigZagEncode.""" + + def SpecificSizer(field_number, is_repeated, is_packed): + tag_size = _TagSize(field_number) + if is_packed: + local_VarintSize = _VarintSize + def PackedFieldSize(value): + result = 0 + for element in value: + result += compute_value_size(modify_value(element)) + return result + local_VarintSize(result) + tag_size + return PackedFieldSize + elif is_repeated: + def RepeatedFieldSize(value): + result = tag_size * len(value) + for element in value: + result += compute_value_size(modify_value(element)) + return result + return RepeatedFieldSize + else: + def FieldSize(value): + return tag_size + compute_value_size(modify_value(value)) + return FieldSize + + return SpecificSizer + + +def _FixedSizer(value_size): + """Like _SimpleSizer except for a fixed-size field. 
The input is the size + of one value.""" + + def SpecificSizer(field_number, is_repeated, is_packed): + tag_size = _TagSize(field_number) + if is_packed: + local_VarintSize = _VarintSize + def PackedFieldSize(value): + result = len(value) * value_size + return result + local_VarintSize(result) + tag_size + return PackedFieldSize + elif is_repeated: + element_size = value_size + tag_size + def RepeatedFieldSize(value): + return len(value) * element_size + return RepeatedFieldSize + else: + field_size = value_size + tag_size + def FieldSize(value): + return field_size + return FieldSize + + return SpecificSizer + + +# ==================================================================== +# Here we declare a sizer constructor for each field type. Each "sizer +# constructor" is a function that takes (field_number, is_repeated, is_packed) +# as parameters and returns a sizer, which in turn takes a field value as +# a parameter and returns its encoded size. + + +Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize) + +UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize) + +SInt32Sizer = SInt64Sizer = _ModifiedSizer( + _SignedVarintSize, wire_format.ZigZagEncode) + +Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4) +Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8) + +BoolSizer = _FixedSizer(1) + + +def StringSizer(field_number, is_repeated, is_packed): + """Returns a sizer for a string field.""" + + tag_size = _TagSize(field_number) + local_VarintSize = _VarintSize + local_len = len + assert not is_packed + if is_repeated: + def RepeatedFieldSize(value): + result = tag_size * len(value) + for element in value: + l = local_len(element.encode('utf-8')) + result += local_VarintSize(l) + l + return result + return RepeatedFieldSize + else: + def FieldSize(value): + l = local_len(value.encode('utf-8')) + return tag_size + local_VarintSize(l) + l + return FieldSize + + +def BytesSizer(field_number, is_repeated, is_packed): + """Returns a sizer for a bytes field.""" + + tag_size = _TagSize(field_number) + local_VarintSize = _VarintSize + local_len = len + assert not is_packed + if is_repeated: + def RepeatedFieldSize(value): + result = tag_size * len(value) + for element in value: + l = local_len(element) + result += local_VarintSize(l) + l + return result + return RepeatedFieldSize + else: + def FieldSize(value): + l = local_len(value) + return tag_size + local_VarintSize(l) + l + return FieldSize + + +def GroupSizer(field_number, is_repeated, is_packed): + """Returns a sizer for a group field.""" + + tag_size = _TagSize(field_number) * 2 + assert not is_packed + if is_repeated: + def RepeatedFieldSize(value): + result = tag_size * len(value) + for element in value: + result += element.ByteSize() + return result + return RepeatedFieldSize + else: + def FieldSize(value): + return tag_size + value.ByteSize() + return FieldSize + + +def MessageSizer(field_number, is_repeated, is_packed): + """Returns a sizer for a message field.""" + + tag_size = _TagSize(field_number) + local_VarintSize = _VarintSize + assert not is_packed + if is_repeated: + def RepeatedFieldSize(value): + result = tag_size * len(value) + for element in value: + l = element.ByteSize() + result += local_VarintSize(l) + l + return result + return RepeatedFieldSize + else: + def FieldSize(value): + l = value.ByteSize() + return tag_size + local_VarintSize(l) + l + return FieldSize + + +# -------------------------------------------------------------------- +# MessageSet is special: it needs custom logic 
to compute its size properly. + + +def MessageSetItemSizer(field_number): + """Returns a sizer for extensions of MessageSet. + + The message set message looks like this: + message MessageSet { + repeated group Item = 1 { + required int32 type_id = 2; + required string message = 3; + } + } + """ + static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) + + _TagSize(3)) + local_VarintSize = _VarintSize + + def FieldSize(value): + l = value.ByteSize() + return static_size + local_VarintSize(l) + l + + return FieldSize + + +# -------------------------------------------------------------------- +# Map is special: it needs custom logic to compute its size properly. + + +def MapSizer(field_descriptor, is_message_map): + """Returns a sizer for a map field.""" + + # Can't look at field_descriptor.message_type._concrete_class because it may + # not have been initialized yet. + message_type = field_descriptor.message_type + message_sizer = MessageSizer(field_descriptor.number, False, False) + + def FieldSize(map_value): + total = 0 + for key in map_value: + value = map_value[key] + # It's wasteful to create the messages and throw them away one second + # later since we'll do the same for the actual encode. But there's not an + # obvious way to avoid this within the current design without tons of code + # duplication. For message map, value.ByteSize() should be called to + # update the status. + entry_msg = message_type._concrete_class(key=key, value=value) + total += message_sizer(entry_msg) + if is_message_map: + value.ByteSize() + return total + + return FieldSize + +# ==================================================================== +# Encoders! + + +def _VarintEncoder(): + """Return an encoder for a basic varint value (does not include tag).""" + + local_int2byte = struct.Struct('>B').pack + + def EncodeVarint(write, value, unused_deterministic=None): + bits = value & 0x7f + value >>= 7 + while value: + write(local_int2byte(0x80|bits)) + bits = value & 0x7f + value >>= 7 + return write(local_int2byte(bits)) + + return EncodeVarint + + +def _SignedVarintEncoder(): + """Return an encoder for a basic signed varint value (does not include + tag).""" + + local_int2byte = struct.Struct('>B').pack + + def EncodeSignedVarint(write, value, unused_deterministic=None): + if value < 0: + value += (1 << 64) + bits = value & 0x7f + value >>= 7 + while value: + write(local_int2byte(0x80|bits)) + bits = value & 0x7f + value >>= 7 + return write(local_int2byte(bits)) + + return EncodeSignedVarint + + +_EncodeVarint = _VarintEncoder() +_EncodeSignedVarint = _SignedVarintEncoder() + + +def _VarintBytes(value): + """Encode the given integer as a varint and return the bytes. This is only + called at startup time so it doesn't need to be fast.""" + + pieces = [] + _EncodeVarint(pieces.append, value, True) + return b"".join(pieces) + + +def TagBytes(field_number, wire_type): + """Encode the given tag and return the bytes. Only called at startup.""" + + return bytes(_VarintBytes(wire_format.PackTag(field_number, wire_type))) + +# -------------------------------------------------------------------- +# As with sizers (see above), we have a number of common encoder +# implementations. + + +def _SimpleEncoder(wire_type, encode_value, compute_value_size): + """Return a constructor for an encoder for fields of a particular type. + + Args: + wire_type: The field's wire type, for encoding tags. + encode_value: A function which encodes an individual value, e.g. + _EncodeVarint(). 
+ compute_value_size: A function which computes the size of an individual + value, e.g. _VarintSize(). + """ + + def SpecificEncoder(field_number, is_repeated, is_packed): + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + size = 0 + for element in value: + size += compute_value_size(element) + local_EncodeVarint(write, size, deterministic) + for element in value: + encode_value(write, element, deterministic) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(tag_bytes) + encode_value(write, element, deterministic) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, deterministic): + write(tag_bytes) + return encode_value(write, value, deterministic) + return EncodeField + + return SpecificEncoder + + +def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value): + """Like SimpleEncoder but additionally invokes modify_value on every value + before passing it to encode_value. Usually modify_value is ZigZagEncode.""" + + def SpecificEncoder(field_number, is_repeated, is_packed): + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + size = 0 + for element in value: + size += compute_value_size(modify_value(element)) + local_EncodeVarint(write, size, deterministic) + for element in value: + encode_value(write, modify_value(element), deterministic) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(tag_bytes) + encode_value(write, modify_value(element), deterministic) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, deterministic): + write(tag_bytes) + return encode_value(write, modify_value(value), deterministic) + return EncodeField + + return SpecificEncoder + + +def _StructPackEncoder(wire_type, format): + """Return a constructor for an encoder for a fixed-width field. + + Args: + wire_type: The field's wire type, for encoding tags. + format: The format string to pass to struct.pack(). 
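+
+ For example, _SimpleEncoder(wire_format.WIRETYPE_VARINT, _EncodeVarint,
+ _VarintSize) builds the uint encoder used below: for field 1 and value
+ 150 it writes the tag byte b'\x08' followed by the varint b'\x96\x01'.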
+ """ + + value_size = struct.calcsize(format) + + def SpecificEncoder(field_number, is_repeated, is_packed): + local_struct_pack = struct.pack + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + local_EncodeVarint(write, len(value) * value_size, deterministic) + for element in value: + write(local_struct_pack(format, element)) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, unused_deterministic=None): + for element in value: + write(tag_bytes) + write(local_struct_pack(format, element)) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, unused_deterministic=None): + write(tag_bytes) + return write(local_struct_pack(format, value)) + return EncodeField + + return SpecificEncoder + + +def _FloatingPointEncoder(wire_type, format): + """Return a constructor for an encoder for float fields. + + This is like StructPackEncoder, but catches errors that may be due to + passing non-finite floating-point values to struct.pack, and makes a + second attempt to encode those values. + + Args: + wire_type: The field's wire type, for encoding tags. + format: The format string to pass to struct.pack(). + """ + + value_size = struct.calcsize(format) + if value_size == 4: + def EncodeNonFiniteOrRaise(write, value): + # Remember that the serialized form uses little-endian byte order. + if value == _POS_INF: + write(b'\x00\x00\x80\x7F') + elif value == _NEG_INF: + write(b'\x00\x00\x80\xFF') + elif value != value: # NaN + write(b'\x00\x00\xC0\x7F') + else: + raise + elif value_size == 8: + def EncodeNonFiniteOrRaise(write, value): + if value == _POS_INF: + write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F') + elif value == _NEG_INF: + write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF') + elif value != value: # NaN + write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F') + else: + raise + else: + raise ValueError('Can\'t encode floating-point values that are ' + '%d bytes long (only 4 or 8)' % value_size) + + def SpecificEncoder(field_number, is_repeated, is_packed): + local_struct_pack = struct.pack + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + local_EncodeVarint(write, len(value) * value_size, deterministic) + for element in value: + # This try/except block is going to be faster than any code that + # we could write to check whether element is finite. 
+ try: + write(local_struct_pack(format, element)) + except SystemError: + EncodeNonFiniteOrRaise(write, element) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, unused_deterministic=None): + for element in value: + write(tag_bytes) + try: + write(local_struct_pack(format, element)) + except SystemError: + EncodeNonFiniteOrRaise(write, element) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, unused_deterministic=None): + write(tag_bytes) + try: + write(local_struct_pack(format, value)) + except SystemError: + EncodeNonFiniteOrRaise(write, value) + return EncodeField + + return SpecificEncoder + + +# ==================================================================== +# Here we declare an encoder constructor for each field type. These work +# very similarly to sizer constructors, described earlier. + + +Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder( + wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize) + +UInt32Encoder = UInt64Encoder = _SimpleEncoder( + wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize) + +SInt32Encoder = SInt64Encoder = _ModifiedEncoder( + wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize, + wire_format.ZigZagEncode) + +# Note that Python conveniently guarantees that when using the '<' prefix on +# formats, they will also have the same size across all platforms (as opposed +# to without the prefix, where their sizes depend on the C compiler's basic +# type sizes). +Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I') +Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q') +SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i') +SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q') +FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f') +DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d') + + +def BoolEncoder(field_number, is_repeated, is_packed): + """Returns an encoder for a boolean field.""" + + false_byte = b'\x00' + true_byte = b'\x01' + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + local_EncodeVarint(write, len(value), deterministic) + for element in value: + if element: + write(true_byte) + else: + write(false_byte) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT) + def EncodeRepeatedField(write, value, unused_deterministic=None): + for element in value: + write(tag_bytes) + if element: + write(true_byte) + else: + write(false_byte) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT) + def EncodeField(write, value, unused_deterministic=None): + write(tag_bytes) + if value: + return write(true_byte) + return write(false_byte) + return EncodeField + + +def StringEncoder(field_number, is_repeated, is_packed): + """Returns an encoder for a string field.""" + + tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + local_len = len + assert not is_packed + if is_repeated: + def EncodeRepeatedField(write, value, deterministic): + for element in value: + encoded = element.encode('utf-8') + write(tag) + local_EncodeVarint(write, local_len(encoded), 
deterministic) + write(encoded) + return EncodeRepeatedField + else: + def EncodeField(write, value, deterministic): + encoded = value.encode('utf-8') + write(tag) + local_EncodeVarint(write, local_len(encoded), deterministic) + return write(encoded) + return EncodeField + + +def BytesEncoder(field_number, is_repeated, is_packed): + """Returns an encoder for a bytes field.""" + + tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + local_len = len + assert not is_packed + if is_repeated: + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(tag) + local_EncodeVarint(write, local_len(element), deterministic) + write(element) + return EncodeRepeatedField + else: + def EncodeField(write, value, deterministic): + write(tag) + local_EncodeVarint(write, local_len(value), deterministic) + return write(value) + return EncodeField + + +def GroupEncoder(field_number, is_repeated, is_packed): + """Returns an encoder for a group field.""" + + start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP) + end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP) + assert not is_packed + if is_repeated: + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(start_tag) + element._InternalSerialize(write, deterministic) + write(end_tag) + return EncodeRepeatedField + else: + def EncodeField(write, value, deterministic): + write(start_tag) + value._InternalSerialize(write, deterministic) + return write(end_tag) + return EncodeField + + +def MessageEncoder(field_number, is_repeated, is_packed): + """Returns an encoder for a message field.""" + + tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + assert not is_packed + if is_repeated: + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(tag) + local_EncodeVarint(write, element.ByteSize(), deterministic) + element._InternalSerialize(write, deterministic) + return EncodeRepeatedField + else: + def EncodeField(write, value, deterministic): + write(tag) + local_EncodeVarint(write, value.ByteSize(), deterministic) + return value._InternalSerialize(write, deterministic) + return EncodeField + + +# -------------------------------------------------------------------- +# As before, MessageSet is special. + + +def MessageSetItemEncoder(field_number): + """Encoder for extensions of MessageSet. + + The message set message looks like this: + message MessageSet { + repeated group Item = 1 { + required int32 type_id = 2; + required string message = 3; + } + } + """ + start_bytes = b"".join([ + TagBytes(1, wire_format.WIRETYPE_START_GROUP), + TagBytes(2, wire_format.WIRETYPE_VARINT), + _VarintBytes(field_number), + TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)]) + end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP) + local_EncodeVarint = _EncodeVarint + + def EncodeField(write, value, deterministic): + write(start_bytes) + local_EncodeVarint(write, value.ByteSize(), deterministic) + value._InternalSerialize(write, deterministic) + return write(end_bytes) + + return EncodeField + + +# -------------------------------------------------------------------- +# As before, Map is special. + + +def MapEncoder(field_descriptor): + """Encoder for extensions of MessageSet. 
+ + Maps always have a wire format like this: + message MapEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapEntry map = N; + """ + # Can't look at field_descriptor.message_type._concrete_class because it may + # not have been initialized yet. + message_type = field_descriptor.message_type + encode_message = MessageEncoder(field_descriptor.number, False, False) + + def EncodeField(write, value, deterministic): + value_keys = sorted(value.keys()) if deterministic else value + for key in value_keys: + entry_msg = message_type._concrete_class(key=key, value=value[key]) + encode_message(write, entry_msg, deterministic) + + return EncodeField diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/enum_type_wrapper.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/enum_type_wrapper.py new file mode 100644 index 00000000..1fa6ab9f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/enum_type_wrapper.py @@ -0,0 +1,112 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""A simple wrapper around enum types to expose utility functions. + +Instances are created as properties with the same name as the enum they wrap +on proto classes. For usage, see: + reflection_test.py +""" + +import sys + +__author__ = 'rabsatt@google.com (Kevin Rabsatt)' + + +class EnumTypeWrapper(object): + """A utility for finding the names of enum values.""" + + DESCRIPTOR = None + + # This is a type alias, which mypy typing stubs can type as + # a genericized parameter constrained to an int, allowing subclasses + # to be typed with more constraint in .pyi stubs + # Eg. + # def MyGeneratedEnum(Message): + # ValueType = NewType('ValueType', int) + # def Name(self, number: MyGeneratedEnum.ValueType) -> str + ValueType = int + + def __init__(self, enum_type): + """Inits EnumTypeWrapper with an EnumDescriptor.""" + self._enum_type = enum_type + self.DESCRIPTOR = enum_type # pylint: disable=invalid-name + + def Name(self, number): # pylint: disable=invalid-name + """Returns a string containing the name of an enum value.""" + try: + return self._enum_type.values_by_number[number].name + except KeyError: + pass # fall out to break exception chaining + + if not isinstance(number, int): + raise TypeError( + 'Enum value for {} must be an int, but got {} {!r}.'.format( + self._enum_type.name, type(number), number)) + else: + # repr here to handle the odd case when you pass in a boolean. + raise ValueError('Enum {} has no name defined for value {!r}'.format( + self._enum_type.name, number)) + + def Value(self, name): # pylint: disable=invalid-name + """Returns the value corresponding to the given enum name.""" + try: + return self._enum_type.values_by_name[name].number + except KeyError: + pass # fall out to break exception chaining + raise ValueError('Enum {} has no value defined for name {!r}'.format( + self._enum_type.name, name)) + + def keys(self): + """Return a list of the string names in the enum. + + Returns: + A list of strs, in the order they were defined in the .proto file. + """ + + return [value_descriptor.name + for value_descriptor in self._enum_type.values] + + def values(self): + """Return a list of the integer values in the enum. + + Returns: + A list of ints, in the order they were defined in the .proto file. 
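+
+ For example, an enum declared with values FOO = 0 and BAR = 5 yields
+ [0, 5] here, mirroring keys() and items().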
+ """ + + return [value_descriptor.number + for value_descriptor in self._enum_type.values] + + def items(self): + """Return a list of the (name, value) pairs of the enum. + + Returns: + A list of (str, int) pairs, in the order they were defined + in the .proto file. + """ + return [(value_descriptor.name, value_descriptor.number) + for value_descriptor in self._enum_type.values] + + def __getattr__(self, name): + """Returns the value corresponding to the given enum name.""" + try: + return super( + EnumTypeWrapper, + self).__getattribute__('_enum_type').values_by_name[name].number + except KeyError: + pass # fall out to break exception chaining + raise AttributeError('Enum {} has no value defined for name {!r}'.format( + self._enum_type.name, name)) + + def __or__(self, other): + """Returns the union type of self and other.""" + if sys.version_info >= (3, 10): + return type(self) | other + else: + raise NotImplementedError( + 'You may not use | on EnumTypes (or classes) below python 3.10' + ) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/extension_dict.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/extension_dict.py new file mode 100644 index 00000000..89e64d32 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/extension_dict.py @@ -0,0 +1,194 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Contains _ExtensionDict class to represent extensions. +""" + +from google.protobuf.internal import type_checkers +from google.protobuf.descriptor import FieldDescriptor + + +def _VerifyExtensionHandle(message, extension_handle): + """Verify that the given extension handle is valid.""" + + if not isinstance(extension_handle, FieldDescriptor): + raise KeyError('HasExtension() expects an extension handle, got: %s' % + extension_handle) + + if not extension_handle.is_extension: + raise KeyError('"%s" is not an extension.' % extension_handle.full_name) + + if not extension_handle.containing_type: + raise KeyError('"%s" is missing a containing_type.' + % extension_handle.full_name) + + if extension_handle.containing_type is not message.DESCRIPTOR: + raise KeyError('Extension "%s" extends message type "%s", but this ' + 'message is of type "%s".' % + (extension_handle.full_name, + extension_handle.containing_type.full_name, + message.DESCRIPTOR.full_name)) + + +# TODO: Unify error handling of "unknown extension" crap. +# TODO: Support iteritems()-style iteration over all +# extensions with the "has" bits turned on? +class _ExtensionDict(object): + + """Dict-like container for Extension fields on proto instances. + + Note that in all cases we expect extension handles to be + FieldDescriptors. + """ + + def __init__(self, extended_message): + """ + Args: + extended_message: Message instance for which we are the Extensions dict. 
+ """ + self._extended_message = extended_message + + def __getitem__(self, extension_handle): + """Returns the current value of the given extension handle.""" + + _VerifyExtensionHandle(self._extended_message, extension_handle) + + result = self._extended_message._fields.get(extension_handle) + if result is not None: + return result + + if extension_handle.label == FieldDescriptor.LABEL_REPEATED: + result = extension_handle._default_constructor(self._extended_message) + elif extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + message_type = extension_handle.message_type + if not hasattr(message_type, '_concrete_class'): + # pylint: disable=g-import-not-at-top + from google.protobuf import message_factory + message_factory.GetMessageClass(message_type) + if not hasattr(extension_handle.message_type, '_concrete_class'): + from google.protobuf import message_factory + message_factory.GetMessageClass(extension_handle.message_type) + result = extension_handle.message_type._concrete_class() + try: + result._SetListener(self._extended_message._listener_for_children) + except ReferenceError: + pass + else: + # Singular scalar -- just return the default without inserting into the + # dict. + return extension_handle.default_value + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + result = self._extended_message._fields.setdefault( + extension_handle, result) + + return result + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + + my_fields = self._extended_message.ListFields() + other_fields = other._extended_message.ListFields() + + # Get rid of non-extension fields. + my_fields = [field for field in my_fields if field.is_extension] + other_fields = [field for field in other_fields if field.is_extension] + + return my_fields == other_fields + + def __ne__(self, other): + return not self == other + + def __len__(self): + fields = self._extended_message.ListFields() + # Get rid of non-extension fields. + extension_fields = [field for field in fields if field[0].is_extension] + return len(extension_fields) + + def __hash__(self): + raise TypeError('unhashable object') + + # Note that this is only meaningful for non-repeated, scalar extension + # fields. Note also that we may have to call _Modified() when we do + # successfully set a field this way, to set any necessary "has" bits in the + # ancestors of the extended message. + def __setitem__(self, extension_handle, value): + """If extension_handle specifies a non-repeated, scalar extension + field, sets the value of that field. + """ + + _VerifyExtensionHandle(self._extended_message, extension_handle) + + if (extension_handle.label == FieldDescriptor.LABEL_REPEATED or + extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE): + raise TypeError( + 'Cannot assign to extension "%s" because it is a repeated or ' + 'composite type.' % extension_handle.full_name) + + # It's slightly wasteful to lookup the type checker each time, + # but we expect this to be a vanishingly uncommon case anyway. 
+ type_checker = type_checkers.GetTypeChecker(extension_handle) + # pylint: disable=protected-access + self._extended_message._fields[extension_handle] = ( + type_checker.CheckValue(value)) + self._extended_message._Modified() + + def __delitem__(self, extension_handle): + self._extended_message.ClearExtension(extension_handle) + + def _FindExtensionByName(self, name): + """Tries to find a known extension with the specified name. + + Args: + name: Extension full name. + + Returns: + Extension field descriptor. + """ + descriptor = self._extended_message.DESCRIPTOR + extensions = descriptor.file.pool._extensions_by_name[descriptor] + return extensions.get(name, None) + + def _FindExtensionByNumber(self, number): + """Tries to find a known extension with the field number. + + Args: + number: Extension field number. + + Returns: + Extension field descriptor. + """ + descriptor = self._extended_message.DESCRIPTOR + extensions = descriptor.file.pool._extensions_by_number[descriptor] + return extensions.get(number, None) + + def __iter__(self): + # Return a generator over the populated extension fields + return (f[0] for f in self._extended_message.ListFields() + if f[0].is_extension) + + def __contains__(self, extension_handle): + _VerifyExtensionHandle(self._extended_message, extension_handle) + + if extension_handle not in self._extended_message._fields: + return False + + if extension_handle.label == FieldDescriptor.LABEL_REPEATED: + return bool(self._extended_message._fields.get(extension_handle)) + + if extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + value = self._extended_message._fields.get(extension_handle) + # pylint: disable=protected-access + return value is not None and value._is_present_in_parent + + return True diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/field_mask.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/field_mask.py new file mode 100644 index 00000000..ae34f08a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/field_mask.py @@ -0,0 +1,310 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
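Presence checking in __contains__ above is not a plain key test. The toy classes below, invented for illustration, replay the three cases: repeated extensions must be non-empty, message extensions must have their presence bit set, and stored scalars always count.

class _MsgValue:
  """Stand-in for a sub-message value carrying a presence bit."""
  def __init__(self, present):
    self._is_present_in_parent = present

class ExtensionPresenceSketch:
  """Sketch of the __contains__ semantics: a stored value alone is not
  enough for repeated and message-typed extensions."""

  def __init__(self):
    self._fields = {}

  def contains(self, handle, *, repeated=False, is_message=False):
    if handle not in self._fields:
      return False
    value = self._fields[handle]
    if repeated:
      return bool(value)            # empty container means absent
    if is_message:
      return value._is_present_in_parent
    return True                     # scalar: stored means present

d = ExtensionPresenceSketch()
d._fields['reps'] = []
d._fields['msg'] = _MsgValue(present=False)
d._fields['num'] = 0
assert not d.contains('reps', repeated=True)
assert not d.contains('msg', is_message=True)
assert d.contains('num')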
+# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Contains FieldMask class.""" + +from google.protobuf.descriptor import FieldDescriptor + + +class FieldMask(object): + """Class for FieldMask message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts FieldMask to string according to proto3 JSON spec.""" + camelcase_paths = [] + for path in self.paths: + camelcase_paths.append(_SnakeCaseToCamelCase(path)) + return ','.join(camelcase_paths) + + def FromJsonString(self, value): + """Converts string to FieldMask according to proto3 JSON spec.""" + if not isinstance(value, str): + raise ValueError('FieldMask JSON value not a string: {!r}'.format(value)) + self.Clear() + if value: + for path in value.split(','): + self.paths.append(_CamelCaseToSnakeCase(path)) + + def IsValidForDescriptor(self, message_descriptor): + """Checks whether the FieldMask is valid for Message Descriptor.""" + for path in self.paths: + if not _IsValidPath(message_descriptor, path): + return False + return True + + def AllFieldsFromDescriptor(self, message_descriptor): + """Gets all direct fields of Message Descriptor to FieldMask.""" + self.Clear() + for field in message_descriptor.fields: + self.paths.append(field.name) + + def CanonicalFormFromMask(self, mask): + """Converts a FieldMask to the canonical form. + + Removes paths that are covered by another path. For example, + "foo.bar" is covered by "foo" and will be removed if "foo" + is also in the FieldMask. Then sorts all paths in alphabetical order. + + Args: + mask: The original FieldMask to be converted. + """ + tree = _FieldMaskTree(mask) + tree.ToFieldMask(self) + + def Union(self, mask1, mask2): + """Merges mask1 and mask2 into this FieldMask.""" + _CheckFieldMaskMessage(mask1) + _CheckFieldMaskMessage(mask2) + tree = _FieldMaskTree(mask1) + tree.MergeFromFieldMask(mask2) + tree.ToFieldMask(self) + + def Intersect(self, mask1, mask2): + """Intersects mask1 and mask2 into this FieldMask.""" + _CheckFieldMaskMessage(mask1) + _CheckFieldMaskMessage(mask2) + tree = _FieldMaskTree(mask1) + intersection = _FieldMaskTree() + for path in mask2.paths: + tree.IntersectPath(path, intersection) + intersection.ToFieldMask(self) + + def MergeMessage( + self, source, destination, + replace_message_field=False, replace_repeated_field=False): + """Merges fields specified in FieldMask from source to destination. + + Args: + source: Source message. + destination: The destination message to be merged into. + replace_message_field: Replace message field if True. Merge message + field if False. + replace_repeated_field: Replace repeated field if True. Append + elements of repeated field if False. 
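A short usage sketch of the FieldMask methods defined above, reached through the generated well-known type; this assumes the protobuf runtime vendored in this diff is importable. The printed values follow from the camelCase/snake_case JSON rules and the covering-path canonicalization described here.

from google.protobuf import field_mask_pb2

mask = field_mask_pb2.FieldMask()
mask.FromJsonString('userId,displayName')   # camelCase on the wire
print(mask.paths)                           # ['user_id', 'display_name']
print(mask.ToJsonString())                  # 'userId,displayName'

a = field_mask_pb2.FieldMask(paths=['foo.bar', 'baz'])
b = field_mask_pb2.FieldMask(paths=['foo', 'quz'])
out = field_mask_pb2.FieldMask()
out.Union(a, b)
# 'foo.bar' is covered by 'foo', so the canonical union keeps only 'foo'.
print(sorted(out.paths))                    # ['baz', 'foo', 'quz']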
+ """ + tree = _FieldMaskTree(self) + tree.MergeMessage( + source, destination, replace_message_field, replace_repeated_field) + + +def _IsValidPath(message_descriptor, path): + """Checks whether the path is valid for Message Descriptor.""" + parts = path.split('.') + last = parts.pop() + for name in parts: + field = message_descriptor.fields_by_name.get(name) + if (field is None or + field.label == FieldDescriptor.LABEL_REPEATED or + field.type != FieldDescriptor.TYPE_MESSAGE): + return False + message_descriptor = field.message_type + return last in message_descriptor.fields_by_name + + +def _CheckFieldMaskMessage(message): + """Raises ValueError if message is not a FieldMask.""" + message_descriptor = message.DESCRIPTOR + if (message_descriptor.name != 'FieldMask' or + message_descriptor.file.name != 'google/protobuf/field_mask.proto'): + raise ValueError('Message {0} is not a FieldMask.'.format( + message_descriptor.full_name)) + + +def _SnakeCaseToCamelCase(path_name): + """Converts a path name from snake_case to camelCase.""" + result = [] + after_underscore = False + for c in path_name: + if c.isupper(): + raise ValueError( + 'Fail to print FieldMask to Json string: Path name ' + '{0} must not contain uppercase letters.'.format(path_name)) + if after_underscore: + if c.islower(): + result.append(c.upper()) + after_underscore = False + else: + raise ValueError( + 'Fail to print FieldMask to Json string: The ' + 'character after a "_" must be a lowercase letter ' + 'in path name {0}.'.format(path_name)) + elif c == '_': + after_underscore = True + else: + result += c + + if after_underscore: + raise ValueError('Fail to print FieldMask to Json string: Trailing "_" ' + 'in path name {0}.'.format(path_name)) + return ''.join(result) + + +def _CamelCaseToSnakeCase(path_name): + """Converts a field name from camelCase to snake_case.""" + result = [] + for c in path_name: + if c == '_': + raise ValueError('Fail to parse FieldMask: Path name ' + '{0} must not contain "_"s.'.format(path_name)) + if c.isupper(): + result += '_' + result += c.lower() + else: + result += c + return ''.join(result) + + +class _FieldMaskTree(object): + """Represents a FieldMask in a tree structure. + + For example, given a FieldMask "foo.bar,foo.baz,bar.baz", + the FieldMaskTree will be: + [_root] -+- foo -+- bar + | | + | +- baz + | + +- bar --- baz + In the tree, each leaf node represents a field path. + """ + + __slots__ = ('_root',) + + def __init__(self, field_mask=None): + """Initializes the tree by FieldMask.""" + self._root = {} + if field_mask: + self.MergeFromFieldMask(field_mask) + + def MergeFromFieldMask(self, field_mask): + """Merges a FieldMask to the tree.""" + for path in field_mask.paths: + self.AddPath(path) + + def AddPath(self, path): + """Adds a field path into the tree. + + If the field path to add is a sub-path of an existing field path + in the tree (i.e., a leaf node), it means the tree already matches + the given path so nothing will be added to the tree. If the path + matches an existing non-leaf node in the tree, that non-leaf node + will be turned into a leaf node with all its children removed because + the path matches all the node's children. Otherwise, a new path will + be added. + + Args: + path: The field path to add. + """ + node = self._root + for name in path.split('.'): + if name not in node: + node[name] = {} + elif not node[name]: + # Pre-existing empty node implies we already have this entire tree. + return + node = node[name] + # Remove any sub-trees we might have had. 
+ node.clear() + + def ToFieldMask(self, field_mask): + """Converts the tree to a FieldMask.""" + field_mask.Clear() + _AddFieldPaths(self._root, '', field_mask) + + def IntersectPath(self, path, intersection): + """Calculates the intersection part of a field path with this tree. + + Args: + path: The field path to calculates. + intersection: The out tree to record the intersection part. + """ + node = self._root + for name in path.split('.'): + if name not in node: + return + elif not node[name]: + intersection.AddPath(path) + return + node = node[name] + intersection.AddLeafNodes(path, node) + + def AddLeafNodes(self, prefix, node): + """Adds leaf nodes begin with prefix to this tree.""" + if not node: + self.AddPath(prefix) + for name in node: + child_path = prefix + '.' + name + self.AddLeafNodes(child_path, node[name]) + + def MergeMessage( + self, source, destination, + replace_message, replace_repeated): + """Merge all fields specified by this tree from source to destination.""" + _MergeMessage( + self._root, source, destination, replace_message, replace_repeated) + + +def _StrConvert(value): + """Converts value to str if it is not.""" + # This file is imported by c extension and some methods like ClearField + # requires string for the field name. py2/py3 has different text + # type and may use unicode. + if not isinstance(value, str): + return value.encode('utf-8') + return value + + +def _MergeMessage( + node, source, destination, replace_message, replace_repeated): + """Merge all fields specified by a sub-tree from source to destination.""" + source_descriptor = source.DESCRIPTOR + for name in node: + child = node[name] + field = source_descriptor.fields_by_name[name] + if field is None: + raise ValueError('Error: Can\'t find field {0} in message {1}.'.format( + name, source_descriptor.full_name)) + if child: + # Sub-paths are only allowed for singular message fields. + if (field.label == FieldDescriptor.LABEL_REPEATED or + field.cpp_type != FieldDescriptor.CPPTYPE_MESSAGE): + raise ValueError('Error: Field {0} in message {1} is not a singular ' + 'message field and cannot have sub-fields.'.format( + name, source_descriptor.full_name)) + if source.HasField(name): + _MergeMessage( + child, getattr(source, name), getattr(destination, name), + replace_message, replace_repeated) + continue + if field.label == FieldDescriptor.LABEL_REPEATED: + if replace_repeated: + destination.ClearField(_StrConvert(name)) + repeated_source = getattr(source, name) + repeated_destination = getattr(destination, name) + repeated_destination.MergeFrom(repeated_source) + else: + if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + if replace_message: + destination.ClearField(_StrConvert(name)) + if source.HasField(name): + getattr(destination, name).MergeFrom(getattr(source, name)) + else: + setattr(destination, name, getattr(source, name)) + + +def _AddFieldPaths(node, prefix, field_mask): + """Adds the field paths descended from node to field_mask.""" + if not node and prefix: + field_mask.paths.append(prefix) + return + for name in sorted(node): + if prefix: + child_path = prefix + '.' 
+ name
+    else:
+      child_path = name
+    _AddFieldPaths(node[name], child_path, field_mask)
diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/message_listener.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/message_listener.py
new file mode 100644
index 00000000..ff1c127d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/message_listener.py
@@ -0,0 +1,55 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Defines a listener interface for observing certain
+state transitions on Message objects.
+
+Also defines a null implementation of this interface.
+"""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+
+class MessageListener(object):
+
+  """Listens for modifications made to a message.  Meant to be registered via
+  Message._SetListener().
+
+  Attributes:
+    dirty: If True, then calling Modified() would be a no-op.  This can be
+      used to avoid these calls entirely in the common case.
+  """
+
+  def Modified(self):
+    """Called every time the message is modified in such a way that the parent
+    message may need to be updated.  This currently means either:
+    (a) The message was modified for the first time, so the parent message
+        should henceforth mark the message as present.
+    (b) The message's cached byte size became dirty -- i.e. the message was
+        modified for the first time after a previous call to ByteSize().
+        Therefore the parent should also mark its byte size as dirty.
+    Note that (a) implies (b), since new objects start out with a clean cached
+    size (zero).  However, we document (a) explicitly because it is important.
+
+    Modified() will *only* be called in response to one of these two events --
+    not every time the sub-message is modified.
+
+    Note that if the listener's |dirty| attribute is true, then calling
+    Modified at the moment would be a no-op, so it can be skipped.  Performance-
+    sensitive callers should check this attribute directly before calling since
+    it will be true most of the time.
+    """
+
+    raise NotImplementedError
+
+
+class NullMessageListener(object):
+
+  """No-op MessageListener implementation."""
+
+  def Modified(self):
+    pass
diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/python_edition_defaults.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/python_edition_defaults.py
new file mode 100644
index 00000000..cf1feb4a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/python_edition_defaults.py
@@ -0,0 +1,5 @@
+"""
+This file contains the serialized FeatureSetDefaults object corresponding to
+the Pure Python runtime. This is used for feature resolution under Editions.
+""" +_PROTOBUF_INTERNAL_PYTHON_EDITION_DEFAULTS = b"\n\023\030\204\007\"\000*\014\010\001\020\002\030\002 \003(\0010\002\n\023\030\347\007\"\000*\014\010\002\020\001\030\001 \002(\0010\001\n\023\030\350\007\"\014\010\001\020\001\030\001 \002(\0010\001*\000 \346\007(\350\007" diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/python_message.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/python_message.py new file mode 100644 index 00000000..1f901688 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/python_message.py @@ -0,0 +1,1583 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# This code is meant to work on Python 2.4 and above only. +# +# TODO: Helpers for verbose, common checks like seeing if a +# descriptor's cpp_type is CPPTYPE_MESSAGE. + +"""Contains a metaclass and helper functions used to create +protocol message classes from Descriptor objects at runtime. + +Recall that a metaclass is the "type" of a class. +(A class is to a metaclass what an instance is to a class.) + +In this case, we use the GeneratedProtocolMessageType metaclass +to inject all the useful functionality into the classes +output by the protocol compiler at compile-time. + +The upshot of all this is that the real implementation +details for ALL pure-Python protocol buffers are *here in +this file*. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import datetime +from io import BytesIO +import struct +import sys +import warnings +import weakref + +from google.protobuf import descriptor as descriptor_mod +from google.protobuf import message as message_mod +from google.protobuf import text_format +# We use "as" to avoid name collisions with variables. +from google.protobuf.internal import api_implementation +from google.protobuf.internal import containers +from google.protobuf.internal import decoder +from google.protobuf.internal import encoder +from google.protobuf.internal import enum_type_wrapper +from google.protobuf.internal import extension_dict +from google.protobuf.internal import message_listener as message_listener_mod +from google.protobuf.internal import type_checkers +from google.protobuf.internal import well_known_types +from google.protobuf.internal import wire_format + +_FieldDescriptor = descriptor_mod.FieldDescriptor +_AnyFullTypeName = 'google.protobuf.Any' +_StructFullTypeName = 'google.protobuf.Struct' +_ListValueFullTypeName = 'google.protobuf.ListValue' +_ExtensionDict = extension_dict._ExtensionDict + +class GeneratedProtocolMessageType(type): + + """Metaclass for protocol message classes created at runtime from Descriptors. + + We add implementations for all methods described in the Message class. We + also create properties to allow getting/setting all fields in the protocol + message. Finally, we create slots to prevent users from accidentally + "setting" nonexistent fields in the protocol message, which then wouldn't get + serialized / deserialized properly. + + The protocol compiler currently uses this metaclass to create protocol + message classes at runtime. Clients can also manually create their own + classes at runtime, as in this example: + + mydescriptor = Descriptor(.....) 
+ factory = symbol_database.Default() + factory.pool.AddDescriptor(mydescriptor) + MyProtoClass = message_factory.GetMessageClass(mydescriptor) + myproto_instance = MyProtoClass() + myproto.foo_field = 23 + ... + """ + + # Must be consistent with the protocol-compiler code in + # proto2/compiler/internal/generator.*. + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __new__(cls, name, bases, dictionary): + """Custom allocation for runtime-generated class types. + + We override __new__ because this is apparently the only place + where we can meaningfully set __slots__ on the class we're creating(?). + (The interplay between metaclasses and slots is not very well-documented). + + Args: + name: Name of the class (ignored, but required by the + metaclass protocol). + bases: Base classes of the class we're constructing. + (Should be message.Message). We ignore this field, but + it's required by the metaclass protocol + dictionary: The class dictionary of the class we're + constructing. dictionary[_DESCRIPTOR_KEY] must contain + a Descriptor object describing this protocol message + type. + + Returns: + Newly-allocated class. + + Raises: + RuntimeError: Generated code only work with python cpp extension. + """ + descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] + + if isinstance(descriptor, str): + raise RuntimeError('The generated code only work with python cpp ' + 'extension, but it is using pure python runtime.') + + # If a concrete class already exists for this descriptor, don't try to + # create another. Doing so will break any messages that already exist with + # the existing class. + # + # The C++ implementation appears to have its own internal `PyMessageFactory` + # to achieve similar results. + # + # This most commonly happens in `text_format.py` when using descriptors from + # a custom pool; it calls message_factory.GetMessageClass() on a + # descriptor which already has an existing concrete class. + new_class = getattr(descriptor, '_concrete_class', None) + if new_class: + return new_class + + if descriptor.full_name in well_known_types.WKTBASES: + bases += (well_known_types.WKTBASES[descriptor.full_name],) + _AddClassAttributesForNestedExtensions(descriptor, dictionary) + _AddSlots(descriptor, dictionary) + + superclass = super(GeneratedProtocolMessageType, cls) + new_class = superclass.__new__(cls, name, bases, dictionary) + return new_class + + def __init__(cls, name, bases, dictionary): + """Here we perform the majority of our work on the class. + We add enum getters, an __init__ method, implementations + of all Message methods, and properties for all fields + in the protocol type. + + Args: + name: Name of the class (ignored, but required by the + metaclass protocol). + bases: Base classes of the class we're constructing. + (Should be message.Message). We ignore this field, but + it's required by the metaclass protocol + dictionary: The class dictionary of the class we're + constructing. dictionary[_DESCRIPTOR_KEY] must contain + a Descriptor object describing this protocol message + type. + """ + descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] + + # If this is an _existing_ class looked up via `_concrete_class` in the + # __new__ method above, then we don't need to re-initialize anything. 
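The mechanics of GeneratedProtocolMessageType can be hard to follow at full size. Below is a toy metaclass, invented for illustration, showing the same division of labor: __new__ edits the class dictionary (the only place __slots__ can still be set), while __init__ attaches methods to the finished class object.

class InjectingMeta(type):
  """Toy version of the metaclass pattern used above."""

  def __new__(mcs, name, bases, dictionary):
    # __slots__ must exist before type.__new__ builds the class.
    dictionary.setdefault('__slots__', ('_fields',))
    return super().__new__(mcs, name, bases, dictionary)

  def __init__(cls, name, bases, dictionary):
    super().__init__(name, bases, dictionary)
    # Attach a "generated" method once the class object exists.
    def describe(self):
      return 'instance of generated class %s' % type(self).__name__
    cls.describe = describe

class Toy(metaclass=InjectingMeta):
  def __init__(self):
    self._fields = {}

t = Toy()
assert t.describe() == 'instance of generated class Toy'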
+ existing_class = getattr(descriptor, '_concrete_class', None) + if existing_class: + assert existing_class is cls, ( + 'Duplicate `GeneratedProtocolMessageType` created for descriptor %r' + % (descriptor.full_name)) + return + + cls._message_set_decoders_by_tag = {} + cls._fields_by_tag = {} + if (descriptor.has_options and + descriptor.GetOptions().message_set_wire_format): + cls._message_set_decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = ( + decoder.MessageSetItemDecoder(descriptor), + None, + ) + + # Attach stuff to each FieldDescriptor for quick lookup later on. + for field in descriptor.fields: + _AttachFieldHelpers(cls, field) + + if descriptor.is_extendable and hasattr(descriptor.file, 'pool'): + extensions = descriptor.file.pool.FindAllExtensions(descriptor) + for ext in extensions: + _AttachFieldHelpers(cls, ext) + + descriptor._concrete_class = cls # pylint: disable=protected-access + _AddEnumValues(descriptor, cls) + _AddInitMethod(descriptor, cls) + _AddPropertiesForFields(descriptor, cls) + _AddPropertiesForExtensions(descriptor, cls) + _AddStaticMethods(cls) + _AddMessageMethods(descriptor, cls) + _AddPrivateHelperMethods(descriptor, cls) + + superclass = super(GeneratedProtocolMessageType, cls) + superclass.__init__(name, bases, dictionary) + + +# Stateless helpers for GeneratedProtocolMessageType below. +# Outside clients should not access these directly. +# +# I opted not to make any of these methods on the metaclass, to make it more +# clear that I'm not really using any state there and to keep clients from +# thinking that they have direct access to these construction helpers. + + +def _PropertyName(proto_field_name): + """Returns the name of the public property attribute which + clients can use to get and (in some cases) set the value + of a protocol message field. + + Args: + proto_field_name: The protocol message field name, exactly + as it appears (or would appear) in a .proto file. + """ + # TODO: Escape Python keywords (e.g., yield), and test this support. + # nnorwitz makes my day by writing: + # """ + # FYI. See the keyword module in the stdlib. This could be as simple as: + # + # if keyword.iskeyword(proto_field_name): + # return proto_field_name + "_" + # return proto_field_name + # """ + # Kenton says: The above is a BAD IDEA. People rely on being able to use + # getattr() and setattr() to reflectively manipulate field values. If we + # rename the properties, then every such user has to also make sure to apply + # the same transformation. Note that currently if you name a field "yield", + # you can still access it just fine using getattr/setattr -- it's not even + # that cumbersome to do so. + # TODO: Remove this method entirely if/when everyone agrees with my + # position. + return proto_field_name + + +def _AddSlots(message_descriptor, dictionary): + """Adds a __slots__ entry to dictionary, containing the names of all valid + attributes for this message type. + + Args: + message_descriptor: A Descriptor instance describing this message type. + dictionary: Class dictionary to which we'll add a '__slots__' entry. 
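Why _AddSlots matters in practice: with __slots__ declared, assigning to a misspelled field raises immediately instead of silently creating an attribute that would never be serialized. A two-line demonstration:

class SlottedPoint:
  """Stand-in for a generated message class with __slots__."""
  __slots__ = ('x', 'y')

p = SlottedPoint()
p.x = 1
try:
  p.xx = 2          # typo: AttributeError, not silent data loss
except AttributeError as e:
  print('rejected:', e)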
+ """ + dictionary['__slots__'] = ['_cached_byte_size', + '_cached_byte_size_dirty', + '_fields', + '_unknown_fields', + '_is_present_in_parent', + '_listener', + '_listener_for_children', + '__weakref__', + '_oneofs'] + + +def _IsMessageSetExtension(field): + return (field.is_extension and + field.containing_type.has_options and + field.containing_type.GetOptions().message_set_wire_format and + field.type == _FieldDescriptor.TYPE_MESSAGE and + field.label == _FieldDescriptor.LABEL_OPTIONAL) + + +def _IsMapField(field): + return (field.type == _FieldDescriptor.TYPE_MESSAGE and + field.message_type._is_map_entry) + + +def _IsMessageMapField(field): + value_type = field.message_type.fields_by_name['value'] + return value_type.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE + +def _AttachFieldHelpers(cls, field_descriptor): + is_repeated = field_descriptor.label == _FieldDescriptor.LABEL_REPEATED + field_descriptor._default_constructor = _DefaultValueConstructorForField( + field_descriptor + ) + + def AddFieldByTag(wiretype, is_packed): + tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype) + cls._fields_by_tag[tag_bytes] = (field_descriptor, is_packed) + + AddFieldByTag( + type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type], False + ) + + if is_repeated and wire_format.IsTypePackable(field_descriptor.type): + # To support wire compatibility of adding packed = true, add a decoder for + # packed values regardless of the field's options. + AddFieldByTag(wire_format.WIRETYPE_LENGTH_DELIMITED, True) + + +def _MaybeAddEncoder(cls, field_descriptor): + if hasattr(field_descriptor, '_encoder'): + return + is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED) + is_map_entry = _IsMapField(field_descriptor) + is_packed = field_descriptor.is_packed + + if is_map_entry: + field_encoder = encoder.MapEncoder(field_descriptor) + sizer = encoder.MapSizer(field_descriptor, + _IsMessageMapField(field_descriptor)) + elif _IsMessageSetExtension(field_descriptor): + field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number) + sizer = encoder.MessageSetItemSizer(field_descriptor.number) + else: + field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type]( + field_descriptor.number, is_repeated, is_packed) + sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type]( + field_descriptor.number, is_repeated, is_packed) + + field_descriptor._sizer = sizer + field_descriptor._encoder = field_encoder + + +def _MaybeAddDecoder(cls, field_descriptor): + if hasattr(field_descriptor, '_decoders'): + return + + is_repeated = field_descriptor.label == _FieldDescriptor.LABEL_REPEATED + is_map_entry = _IsMapField(field_descriptor) + helper_decoders = {} + + def AddDecoder(is_packed): + decode_type = field_descriptor.type + if (decode_type == _FieldDescriptor.TYPE_ENUM and + not field_descriptor.enum_type.is_closed): + decode_type = _FieldDescriptor.TYPE_INT32 + + oneof_descriptor = None + if field_descriptor.containing_oneof is not None: + oneof_descriptor = field_descriptor + + if is_map_entry: + is_message_map = _IsMessageMapField(field_descriptor) + + field_decoder = decoder.MapDecoder( + field_descriptor, _GetInitializeDefaultForMap(field_descriptor), + is_message_map) + elif decode_type == _FieldDescriptor.TYPE_STRING: + field_decoder = decoder.StringDecoder( + field_descriptor.number, is_repeated, is_packed, + field_descriptor, field_descriptor._default_constructor, + not field_descriptor.has_presence) + elif field_descriptor.cpp_type == 
_FieldDescriptor.CPPTYPE_MESSAGE: + field_decoder = type_checkers.TYPE_TO_DECODER[decode_type]( + field_descriptor.number, is_repeated, is_packed, + field_descriptor, field_descriptor._default_constructor) + else: + field_decoder = type_checkers.TYPE_TO_DECODER[decode_type]( + field_descriptor.number, is_repeated, is_packed, + # pylint: disable=protected-access + field_descriptor, field_descriptor._default_constructor, + not field_descriptor.has_presence) + + helper_decoders[is_packed] = field_decoder + + AddDecoder(False) + + if is_repeated and wire_format.IsTypePackable(field_descriptor.type): + # To support wire compatibility of adding packed = true, add a decoder for + # packed values regardless of the field's options. + AddDecoder(True) + + field_descriptor._decoders = helper_decoders + + +def _AddClassAttributesForNestedExtensions(descriptor, dictionary): + extensions = descriptor.extensions_by_name + for extension_name, extension_field in extensions.items(): + assert extension_name not in dictionary + dictionary[extension_name] = extension_field + + +def _AddEnumValues(descriptor, cls): + """Sets class-level attributes for all enum fields defined in this message. + + Also exporting a class-level object that can name enum values. + + Args: + descriptor: Descriptor object for this message type. + cls: Class we're constructing for this message type. + """ + for enum_type in descriptor.enum_types: + setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type)) + for enum_value in enum_type.values: + setattr(cls, enum_value.name, enum_value.number) + + +def _GetInitializeDefaultForMap(field): + if field.label != _FieldDescriptor.LABEL_REPEATED: + raise ValueError('map_entry set on non-repeated field %s' % ( + field.name)) + fields_by_name = field.message_type.fields_by_name + key_checker = type_checkers.GetTypeChecker(fields_by_name['key']) + + value_field = fields_by_name['value'] + if _IsMessageMapField(field): + def MakeMessageMapDefault(message): + return containers.MessageMap( + message._listener_for_children, value_field.message_type, key_checker, + field.message_type) + return MakeMessageMapDefault + else: + value_checker = type_checkers.GetTypeChecker(value_field) + def MakePrimitiveMapDefault(message): + return containers.ScalarMap( + message._listener_for_children, key_checker, value_checker, + field.message_type) + return MakePrimitiveMapDefault + +def _DefaultValueConstructorForField(field): + """Returns a function which returns a default value for a field. + + Args: + field: FieldDescriptor object for this field. + + The returned function has one argument: + message: Message instance containing this field, or a weakref proxy + of same. + + That function in turn returns a default value for this field. The default + value may refer back to |message| via a weak reference. + """ + + if _IsMapField(field): + return _GetInitializeDefaultForMap(field) + + if field.label == _FieldDescriptor.LABEL_REPEATED: + if field.has_default_value and field.default_value != []: + raise ValueError('Repeated field default value not empty list: %s' % ( + field.default_value)) + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + # We can't look at _concrete_class yet since it might not have + # been set. (Depends on order in which we initialize the classes). 
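The decoders above are dispatched on the encoded tag bytes of each field. The sketch below rebuilds that keying by hand; make_tag is a local helper written for this example, not the protobuf encoder module. Note how a packable repeated field registers under two tags, which is what keeps adding [packed = true] wire-compatible.

def make_tag(field_number, wire_type):
  """Encode (field_number << 3 | wire_type) as an unsigned varint."""
  tag = (field_number << 3) | wire_type
  out = bytearray()
  while True:
    bits = tag & 0x7F
    tag >>= 7
    out.append(bits | (0x80 if tag else 0))
    if not tag:
      return bytes(out)

WIRETYPE_VARINT, WIRETYPE_LENGTH_DELIMITED = 0, 2

fields_by_tag = {
    make_tag(1, WIRETYPE_VARINT): ('my_int32_field', False),
    # Packed repeated scalars get a second, length-delimited entry so the
    # parser accepts both packed and unpacked encodings on the wire.
    make_tag(1, WIRETYPE_LENGTH_DELIMITED): ('my_int32_field', True),
}
assert make_tag(1, 0) == b'\x08'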
+ message_type = field.message_type + def MakeRepeatedMessageDefault(message): + return containers.RepeatedCompositeFieldContainer( + message._listener_for_children, field.message_type) + return MakeRepeatedMessageDefault + else: + type_checker = type_checkers.GetTypeChecker(field) + def MakeRepeatedScalarDefault(message): + return containers.RepeatedScalarFieldContainer( + message._listener_for_children, type_checker) + return MakeRepeatedScalarDefault + + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + message_type = field.message_type + def MakeSubMessageDefault(message): + # _concrete_class may not yet be initialized. + if not hasattr(message_type, '_concrete_class'): + from google.protobuf import message_factory + message_factory.GetMessageClass(message_type) + result = message_type._concrete_class() + result._SetListener( + _OneofListener(message, field) + if field.containing_oneof is not None + else message._listener_for_children) + return result + return MakeSubMessageDefault + + def MakeScalarDefault(message): + # TODO: This may be broken since there may not be + # default_value. Combine with has_default_value somehow. + return field.default_value + return MakeScalarDefault + + +def _ReraiseTypeErrorWithFieldName(message_name, field_name): + """Re-raise the currently-handled TypeError with the field name added.""" + exc = sys.exc_info()[1] + if len(exc.args) == 1 and type(exc) is TypeError: + # simple TypeError; add field name to exception message + exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name)) + + # re-raise possibly-amended exception with original traceback: + raise exc.with_traceback(sys.exc_info()[2]) + + +def _AddInitMethod(message_descriptor, cls): + """Adds an __init__ method to cls.""" + + def _GetIntegerEnumValue(enum_type, value): + """Convert a string or integer enum value to an integer. + + If the value is a string, it is converted to the enum value in + enum_type with the same name. If the value is not a string, it's + returned as-is. (No conversion or bounds-checking is done.) + """ + if isinstance(value, str): + try: + return enum_type.values_by_name[value].number + except KeyError: + raise ValueError('Enum type %s: unknown label "%s"' % ( + enum_type.full_name, value)) + return value + + def init(self, **kwargs): + self._cached_byte_size = 0 + self._cached_byte_size_dirty = len(kwargs) > 0 + self._fields = {} + # Contains a mapping from oneof field descriptors to the descriptor + # of the currently set field in that oneof field. + self._oneofs = {} + + # _unknown_fields is () when empty for efficiency, and will be turned into + # a list if fields are added. + self._unknown_fields = () + self._is_present_in_parent = False + self._listener = message_listener_mod.NullMessageListener() + self._listener_for_children = _Listener(self) + for field_name, field_value in kwargs.items(): + field = _GetFieldByName(message_descriptor, field_name) + if field is None: + raise TypeError('%s() got an unexpected keyword argument "%s"' % + (message_descriptor.name, field_name)) + if field_value is None: + # field=None is the same as no field at all. 
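A condensed sketch of the closure pattern that _DefaultValueConstructorForField returns above; the string field_kind switch and the stand-in container types are inventions of this example. The point is that each field gets a one-argument closure that builds its default lazily, so classes can be wired up before all referenced message classes exist.

def default_constructor_for(field_kind):
  """Return a closure that, given the owning message, builds the default."""
  if field_kind == 'repeated':
    def make_repeated(message):
      return []          # stands in for RepeatedScalarFieldContainer
    return make_repeated
  if field_kind == 'message':
    def make_submessage(message):
      return {}          # stands in for a concrete sub-message instance
    return make_submessage
  def make_scalar(message):
    return 0             # stands in for field.default_value
  return make_scalar

ctor = default_constructor_for('repeated')
assert ctor(object()) == []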
+ continue + if field.label == _FieldDescriptor.LABEL_REPEATED: + field_copy = field._default_constructor(self) + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite + if _IsMapField(field): + if _IsMessageMapField(field): + for key in field_value: + field_copy[key].MergeFrom(field_value[key]) + else: + field_copy.update(field_value) + else: + for val in field_value: + if isinstance(val, dict): + field_copy.add(**val) + else: + field_copy.add().MergeFrom(val) + else: # Scalar + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + field_value = [_GetIntegerEnumValue(field.enum_type, val) + for val in field_value] + field_copy.extend(field_value) + self._fields[field] = field_copy + elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + field_copy = field._default_constructor(self) + new_val = None + if isinstance(field_value, message_mod.Message): + new_val = field_value + elif isinstance(field_value, dict): + if field.message_type.full_name == _StructFullTypeName: + field_copy.Clear() + if len(field_value) == 1 and 'fields' in field_value: + try: + field_copy.update(field_value) + except: + # Fall back to init normal message field + field_copy.Clear() + new_val = field.message_type._concrete_class(**field_value) + else: + field_copy.update(field_value) + else: + new_val = field.message_type._concrete_class(**field_value) + elif hasattr(field_copy, '_internal_assign'): + field_copy._internal_assign(field_value) + else: + raise TypeError( + 'Message field {0}.{1} must be initialized with a ' + 'dict or instance of same class, got {2}.'.format( + message_descriptor.name, + field_name, + type(field_value).__name__, + ) + ) + + if new_val != None: + try: + field_copy.MergeFrom(new_val) + except TypeError: + _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) + self._fields[field] = field_copy + else: + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + field_value = _GetIntegerEnumValue(field.enum_type, field_value) + try: + setattr(self, field_name, field_value) + except TypeError: + _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) + + init.__module__ = None + init.__doc__ = None + cls.__init__ = init + + +def _GetFieldByName(message_descriptor, field_name): + """Returns a field descriptor by field name. + + Args: + message_descriptor: A Descriptor describing all fields in message. + field_name: The name of the field to retrieve. + Returns: + The field descriptor associated with the field name. + """ + try: + return message_descriptor.fields_by_name[field_name] + except KeyError: + raise ValueError('Protocol message %s has no "%s" field.' % + (message_descriptor.name, field_name)) + + +def _AddPropertiesForFields(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + for field in descriptor.fields: + _AddPropertiesForField(field, cls) + + if descriptor.is_extendable: + # _ExtensionDict is just an adaptor with no state so we allocate a new one + # every time it is accessed. + cls.Extensions = property(lambda self: _ExtensionDict(self)) + + +def _AddPropertiesForField(field, cls): + """Adds a public property for a protocol message field. + Clients can use this property to get and (in the case + of non-repeated scalar fields) directly set the value + of a protocol message field. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + # Catch it if we add other types that we should + # handle specially here. 
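The enum handling in init above funnels through _GetIntegerEnumValue, whose contract is easy to state in isolation: strings are mapped through the enum's name table, and integers pass through without bounds checking. A standalone sketch:

def coerce_enum_value(values_by_name, value):
  """String labels map to numbers; integers are returned unchecked."""
  if isinstance(value, str):
    try:
      return values_by_name[value]
    except KeyError:
      raise ValueError('unknown enum label %r' % value)
  return value

COLOR = {'RED': 0, 'GREEN': 1, 'BLUE': 2}
assert coerce_enum_value(COLOR, 'BLUE') == 2
assert coerce_enum_value(COLOR, 7) == 7   # no bounds checking, as above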
+ assert _FieldDescriptor.MAX_CPPTYPE == 10 + + constant_name = field.name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, field.number) + + if field.label == _FieldDescriptor.LABEL_REPEATED: + _AddPropertiesForRepeatedField(field, cls) + elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + _AddPropertiesForNonRepeatedCompositeField(field, cls) + else: + _AddPropertiesForNonRepeatedScalarField(field, cls) + + +class _FieldProperty(property): + __slots__ = ('DESCRIPTOR',) + + def __init__(self, descriptor, getter, setter, doc): + property.__init__(self, getter, setter, doc=doc) + self.DESCRIPTOR = descriptor + + +def _AddPropertiesForRepeatedField(field, cls): + """Adds a public property for a "repeated" protocol message field. Clients + can use this property to get the value of the field, which will be either a + RepeatedScalarFieldContainer or RepeatedCompositeFieldContainer (see + below). + + Note that when clients add values to these containers, we perform + type-checking in the case of repeated scalar fields, and we also set any + necessary "has" bits as a side-effect. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + + def getter(self): + field_value = self._fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + field_value = self._fields.setdefault(field, field_value) + return field_value + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + # We define a setter just so we can throw an exception with a more + # helpful error message. + def setter(self, new_value): + raise AttributeError('Assignment not allowed to repeated field ' + '"%s" in protocol message object.' % proto_field_name) + + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForNonRepeatedScalarField(field, cls): + """Adds a public property for a nonrepeated, scalar protocol message field. + Clients can use this property to get and directly set the value of the field. + Note that when the client sets the value of a field by using this property, + all necessary "has" bits are set as a side-effect, and we also perform + type-checking. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + type_checker = type_checkers.GetTypeChecker(field) + default_value = field.default_value + + def getter(self): + # TODO: This may be broken since there may not be + # default_value. Combine with has_default_value somehow. + return self._fields.get(field, default_value) + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + def field_setter(self, new_value): + # pylint: disable=protected-access + # Testing the value for truthiness captures all of the proto3 defaults + # (0, 0.0, enum 0, and False). 
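The scalar setter above encodes proto3 implicit presence: for fields without presence, storing a falsy value is the same as clearing the field, so the backing dict only ever holds non-default values. A stand-in class (names invented for this sketch) showing just that rule:

class ImplicitPresenceSketch:
  """Mirrors the pop-on-falsy behavior of the scalar field setter."""

  def __init__(self):
    self._fields = {}

  def set_scalar(self, name, value, has_presence=False):
    if not has_presence and not value:
      self._fields.pop(name, None)   # proto3 default: field absent
    else:
      self._fields[name] = value

  def get_scalar(self, name, default=0):
    return self._fields.get(name, default)

m = ImplicitPresenceSketch()
m.set_scalar('count', 5)
m.set_scalar('count', 0)             # back to default: removed
assert 'count' not in m._fields and m.get_scalar('count') == 0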
+ try: + new_value = type_checker.CheckValue(new_value) + except TypeError as e: + raise TypeError( + 'Cannot set %s to %.1024r: %s' % (field.full_name, new_value, e)) + if not field.has_presence and not new_value: + self._fields.pop(field, None) + else: + self._fields[field] = new_value + # Check _cached_byte_size_dirty inline to improve performance, since scalar + # setters are called frequently. + if not self._cached_byte_size_dirty: + self._Modified() + + if field.containing_oneof: + def setter(self, new_value): + field_setter(self, new_value) + self._UpdateOneofState(field) + else: + setter = field_setter + + setter.__module__ = None + setter.__doc__ = 'Setter for %s.' % proto_field_name + + # Add a property to encapsulate the getter/setter. + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForNonRepeatedCompositeField(field, cls): + """Adds a public property for a nonrepeated, composite protocol message field. + A composite field is a "group" or "message" field. + + Clients can use this property to get the value of the field, but cannot + assign to the property directly. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + # TODO: Remove duplication with similar method + # for non-repeated scalars. + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + + def getter(self): + field_value = self._fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + field_value = self._fields.setdefault(field, field_value) + return field_value + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + # We define a setter just so we can throw an exception with a more + # helpful error message. + def setter(self, new_value): + if field.message_type.full_name == 'google.protobuf.Timestamp': + getter(self) + self._fields[field].FromDatetime(new_value) + elif field.message_type.full_name == 'google.protobuf.Duration': + getter(self) + self._fields[field].FromTimedelta(new_value) + elif field.message_type.full_name == _StructFullTypeName: + getter(self) + self._fields[field].Clear() + self._fields[field].update(new_value) + elif field.message_type.full_name == _ListValueFullTypeName: + getter(self) + self._fields[field].Clear() + self._fields[field].extend(new_value) + else: + raise AttributeError( + 'Assignment not allowed to composite field ' + '"%s" in protocol message object.' % proto_field_name + ) + + # Add a property to encapsulate the getter. + doc = 'Magic attribute generated for "%s" proto field.' 
% proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForExtensions(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + extensions = descriptor.extensions_by_name + for extension_name, extension_field in extensions.items(): + constant_name = extension_name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, extension_field.number) + + # TODO: Migrate all users of these attributes to functions like + # pool.FindExtensionByNumber(descriptor). + if descriptor.file is not None: + # TODO: Use cls.MESSAGE_FACTORY.pool when available. + pool = descriptor.file.pool + +def _AddStaticMethods(cls): + def FromString(s): + message = cls() + message.MergeFromString(s) + return message + cls.FromString = staticmethod(FromString) + + +def _IsPresent(item): + """Given a (FieldDescriptor, value) tuple from _fields, return true if the + value should be included in the list returned by ListFields().""" + + if item[0].label == _FieldDescriptor.LABEL_REPEATED: + return bool(item[1]) + elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + return item[1]._is_present_in_parent + else: + return True + + +def _AddListFieldsMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def ListFields(self): + all_fields = [item for item in self._fields.items() if _IsPresent(item)] + all_fields.sort(key = lambda item: item[0].number) + return all_fields + + cls.ListFields = ListFields + + +def _AddHasFieldMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + hassable_fields = {} + for field in message_descriptor.fields: + if field.label == _FieldDescriptor.LABEL_REPEATED: + continue + # For proto3, only submessages and fields inside a oneof have presence. + if not field.has_presence: + continue + hassable_fields[field.name] = field + + # Has methods are supported for oneof descriptors. + for oneof in message_descriptor.oneofs: + hassable_fields[oneof.name] = oneof + + def HasField(self, field_name): + try: + field = hassable_fields[field_name] + except KeyError as exc: + raise ValueError('Protocol message %s has no non-repeated field "%s" ' + 'nor has presence is not available for this field.' % ( + message_descriptor.full_name, field_name)) from exc + + if isinstance(field, descriptor_mod.OneofDescriptor): + try: + return HasField(self, self._oneofs[field].name) + except KeyError: + return False + else: + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + value = self._fields.get(field) + return value is not None and value._is_present_in_parent + else: + return field in self._fields + + cls.HasField = HasField + + +def _AddClearFieldMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def ClearField(self, field_name): + try: + field = message_descriptor.fields_by_name[field_name] + except KeyError: + try: + field = message_descriptor.oneofs_by_name[field_name] + if field in self._oneofs: + field = self._oneofs[field] + else: + return + except KeyError: + raise ValueError('Protocol message %s has no "%s" field.' % + (message_descriptor.name, field_name)) + + if field in self._fields: + # To match the C++ implementation, we need to invalidate iterators + # for map fields when ClearField() happens. + if hasattr(self._fields[field], 'InvalidateIterators'): + self._fields[field].InvalidateIterators() + + # Note: If the field is a sub-message, its listener will still point + # at us. 
at us.  That's fine, because the worst that can happen is that it
+      #   will call _Modified() and invalidate our byte size.  Big deal.
+      del self._fields[field]
+
+      if self._oneofs.get(field.containing_oneof, None) is field:
+        del self._oneofs[field.containing_oneof]
+
+    # Always call _Modified() -- even if nothing was changed, this is
+    # a mutating method, and thus calling it should cause the field to become
+    # present in the parent message.
+    self._Modified()
+
+  cls.ClearField = ClearField
+
+
+def _AddClearExtensionMethod(cls):
+  """Helper for _AddMessageMethods()."""
+  def ClearExtension(self, field_descriptor):
+    extension_dict._VerifyExtensionHandle(self, field_descriptor)
+
+    # Similar to ClearField(), above.
+    if field_descriptor in self._fields:
+      del self._fields[field_descriptor]
+    self._Modified()
+  cls.ClearExtension = ClearExtension
+
+
+def _AddHasExtensionMethod(cls):
+  """Helper for _AddMessageMethods()."""
+  def HasExtension(self, field_descriptor):
+    extension_dict._VerifyExtensionHandle(self, field_descriptor)
+    if field_descriptor.label == _FieldDescriptor.LABEL_REPEATED:
+      raise KeyError('"%s" is repeated.' % field_descriptor.full_name)
+
+    if field_descriptor.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
+      value = self._fields.get(field_descriptor)
+      return value is not None and value._is_present_in_parent
+    else:
+      return field_descriptor in self._fields
+  cls.HasExtension = HasExtension
+
+def _InternalUnpackAny(msg):
+  """Unpacks Any message and returns the unpacked message.
+
+  This internal method is different from the public Any Unpack method, which
+  takes the target message as an argument.  _InternalUnpackAny does not have
+  the target message type and needs to look it up in the descriptor pool.
+
+  Args:
+    msg: An Any message to be unpacked.
+
+  Returns:
+    The unpacked message.
+  """
+  # TODO: Don't use the factory of generated messages.
+  # To make Any work with custom factories, use the message factory of the
+  # parent message.
+  # pylint: disable=g-import-not-at-top
+  from google.protobuf import symbol_database
+  factory = symbol_database.Default()
+
+  type_url = msg.type_url
+
+  if not type_url:
+    return None
+
+  # TODO: For now we just strip the hostname.  Better logic will be
+  # required.
+  type_name = type_url.split('/')[-1]
+  descriptor = factory.pool.FindMessageTypeByName(type_name)
+
+  if descriptor is None:
+    return None
+
+  # Unable to import message_factory at top because of circular import.
+ # pylint: disable=g-import-not-at-top + from google.protobuf import message_factory + message_class = message_factory.GetMessageClass(descriptor) + message = message_class() + + message.ParseFromString(msg.value) + return message + + +def _AddEqualsMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __eq__(self, other): + if self.DESCRIPTOR.full_name == _ListValueFullTypeName and isinstance( + other, list + ): + return self._internal_compare(other) + if self.DESCRIPTOR.full_name == _StructFullTypeName and isinstance( + other, dict + ): + return self._internal_compare(other) + + if (not isinstance(other, message_mod.Message) or + other.DESCRIPTOR != self.DESCRIPTOR): + return NotImplemented + + if self is other: + return True + + if self.DESCRIPTOR.full_name == _AnyFullTypeName: + any_a = _InternalUnpackAny(self) + any_b = _InternalUnpackAny(other) + if any_a and any_b: + return any_a == any_b + + if not self.ListFields() == other.ListFields(): + return False + + # TODO: Fix UnknownFieldSet to consider MessageSet extensions, + # then use it for the comparison. + unknown_fields = list(self._unknown_fields) + unknown_fields.sort() + other_unknown_fields = list(other._unknown_fields) + other_unknown_fields.sort() + return unknown_fields == other_unknown_fields + + cls.__eq__ = __eq__ + + +def _AddStrMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __str__(self): + return text_format.MessageToString(self) + cls.__str__ = __str__ + + +def _AddReprMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __repr__(self): + return text_format.MessageToString(self) + cls.__repr__ = __repr__ + + +def _AddUnicodeMethod(unused_message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def __unicode__(self): + return text_format.MessageToString(self, as_utf8=True).decode('utf-8') + cls.__unicode__ = __unicode__ + + +def _AddContainsMethod(message_descriptor, cls): + + if message_descriptor.full_name == 'google.protobuf.Struct': + def __contains__(self, key): + return key in self.fields + elif message_descriptor.full_name == 'google.protobuf.ListValue': + def __contains__(self, value): + return value in self.items() + else: + def __contains__(self, field): + return self.HasField(field) + + cls.__contains__ = __contains__ + + +def _BytesForNonRepeatedElement(value, field_number, field_type): + """Returns the number of bytes needed to serialize a non-repeated element. + The returned byte count includes space for tag information and any + other additional space associated with serializing value. + + Args: + value: Value we're serializing. + field_number: Field number of this value. (Since the field number + is stored as part of a varint-encoded tag, this has an impact + on the total bytes required to serialize the value). + field_type: The type of the field. One of the TYPE_* constants + within FieldDescriptor. + """ + try: + fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type] + return fn(field_number, value) + except KeyError: + raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) + + +def _AddByteSizeMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def ByteSize(self): + if not self._cached_byte_size_dirty: + return self._cached_byte_size + + size = 0 + descriptor = self.DESCRIPTOR + if descriptor._is_map_entry: + # Fields of map entry should always be serialized. 
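_InternalUnpackAny above serves the public Any API, whose documented well-known-type helpers are Pack, Unpack, and Is. A usage sketch, assuming the protobuf runtime vendored in this diff is importable:

from google.protobuf import any_pb2, timestamp_pb2

ts = timestamp_pb2.Timestamp(seconds=1)
anymsg = any_pb2.Any()
anymsg.Pack(ts)
print(anymsg.type_url)  # 'type.googleapis.com/google.protobuf.Timestamp'

out = timestamp_pb2.Timestamp()
assert anymsg.Is(timestamp_pb2.Timestamp.DESCRIPTOR)
anymsg.Unpack(out)
assert out.seconds == 1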
+ key_field = descriptor.fields_by_name['key'] + _MaybeAddEncoder(cls, key_field) + size = key_field._sizer(self.key) + value_field = descriptor.fields_by_name['value'] + _MaybeAddEncoder(cls, value_field) + size += value_field._sizer(self.value) + else: + for field_descriptor, field_value in self.ListFields(): + _MaybeAddEncoder(cls, field_descriptor) + size += field_descriptor._sizer(field_value) + for tag_bytes, value_bytes in self._unknown_fields: + size += len(tag_bytes) + len(value_bytes) + + self._cached_byte_size = size + self._cached_byte_size_dirty = False + self._listener_for_children.dirty = False + return size + + cls.ByteSize = ByteSize + + +def _AddSerializeToStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def SerializeToString(self, **kwargs): + # Check if the message has all of its required fields set. + if not self.IsInitialized(): + raise message_mod.EncodeError( + 'Message %s is missing required fields: %s' % ( + self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors()))) + return self.SerializePartialToString(**kwargs) + cls.SerializeToString = SerializeToString + + +def _AddSerializePartialToStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def SerializePartialToString(self, **kwargs): + out = BytesIO() + self._InternalSerialize(out.write, **kwargs) + return out.getvalue() + cls.SerializePartialToString = SerializePartialToString + + def InternalSerialize(self, write_bytes, deterministic=None): + if deterministic is None: + deterministic = ( + api_implementation.IsPythonDefaultSerializationDeterministic()) + else: + deterministic = bool(deterministic) + + descriptor = self.DESCRIPTOR + if descriptor._is_map_entry: + # Fields of map entry should always be serialized. + key_field = descriptor.fields_by_name['key'] + _MaybeAddEncoder(cls, key_field) + key_field._encoder(write_bytes, self.key, deterministic) + value_field = descriptor.fields_by_name['value'] + _MaybeAddEncoder(cls, value_field) + value_field._encoder(write_bytes, self.value, deterministic) + else: + for field_descriptor, field_value in self.ListFields(): + _MaybeAddEncoder(cls, field_descriptor) + field_descriptor._encoder(write_bytes, field_value, deterministic) + for tag_bytes, value_bytes in self._unknown_fields: + write_bytes(tag_bytes) + write_bytes(value_bytes) + cls._InternalSerialize = InternalSerialize + + +def _AddMergeFromStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def MergeFromString(self, serialized): + serialized = memoryview(serialized) + length = len(serialized) + try: + if self._InternalParse(serialized, 0, length) != length: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise message_mod.DecodeError('Unexpected end-group tag.') + except (IndexError, TypeError): + # Now ord(buf[p:p+1]) == ord('') gets TypeError. + raise message_mod.DecodeError('Truncated message.') + except struct.error as e: + raise message_mod.DecodeError(e) + return length # Return this for legacy reasons. + cls.MergeFromString = MergeFromString + + local_ReadTag = decoder.ReadTag + local_SkipField = decoder.SkipField + fields_by_tag = cls._fields_by_tag + message_set_decoders_by_tag = cls._message_set_decoders_by_tag + + def InternalParse(self, buffer, pos, end): + """Create a message from serialized bytes. + + Args: + self: Message, instance of the proto message object. + buffer: memoryview of the serialized data. 
+ pos: int, position to start in the serialized data. + end: int, end position of the serialized data. + + Returns: + Message object. + """ + # Guard against internal misuse, since this function is called internally + # quite extensively, and its easy to accidentally pass bytes. + assert isinstance(buffer, memoryview) + self._Modified() + field_dict = self._fields + while pos != end: + (tag_bytes, new_pos) = local_ReadTag(buffer, pos) + field_decoder, field_des = message_set_decoders_by_tag.get( + tag_bytes, (None, None) + ) + if field_decoder: + pos = field_decoder(buffer, new_pos, end, self, field_dict) + continue + field_des, is_packed = fields_by_tag.get(tag_bytes, (None, None)) + if field_des is None: + if not self._unknown_fields: # pylint: disable=protected-access + self._unknown_fields = [] # pylint: disable=protected-access + # pylint: disable=protected-access + (tag, _) = decoder._DecodeVarint(tag_bytes, 0) + field_number, wire_type = wire_format.UnpackTag(tag) + if field_number == 0: + raise message_mod.DecodeError('Field number 0 is illegal.') + # TODO: remove old_pos. + old_pos = new_pos + (data, new_pos) = decoder._DecodeUnknownField( + buffer, new_pos, wire_type) # pylint: disable=protected-access + if new_pos == -1: + return pos + # TODO: remove _unknown_fields. + new_pos = local_SkipField(buffer, old_pos, end, tag_bytes) + if new_pos == -1: + return pos + self._unknown_fields.append( + (tag_bytes, buffer[old_pos:new_pos].tobytes())) + pos = new_pos + else: + _MaybeAddDecoder(cls, field_des) + field_decoder = field_des._decoders[is_packed] + pos = field_decoder(buffer, new_pos, end, self, field_dict) + if field_des.containing_oneof: + self._UpdateOneofState(field_des) + return pos + cls._InternalParse = InternalParse + + +def _AddIsInitializedMethod(message_descriptor, cls): + """Adds the IsInitialized and FindInitializationError methods to the + protocol message class.""" + + required_fields = [field for field in message_descriptor.fields + if field.label == _FieldDescriptor.LABEL_REQUIRED] + + def IsInitialized(self, errors=None): + """Checks if all required fields of a message are set. + + Args: + errors: A list which, if provided, will be populated with the field + paths of all missing required fields. + + Returns: + True iff the specified message has all required fields set. + """ + + # Performance is critical so we avoid HasField() and ListFields(). + + for field in required_fields: + if (field not in self._fields or + (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and + not self._fields[field]._is_present_in_parent)): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + + for field, value in list(self._fields.items()): # dict can change size! + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if field.label == _FieldDescriptor.LABEL_REPEATED: + if (field.message_type._is_map_entry): + continue + for element in value: + if not element.IsInitialized(): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + elif value._is_present_in_parent and not value.IsInitialized(): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + + return True + + cls.IsInitialized = IsInitialized + + def FindInitializationErrors(self): + """Finds required fields which are not initialized. + + Returns: + A list of strings. Each string is a path to an uninitialized field from + the top-level message, e.g. "foo.bar[5].baz". 
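+
+ Extension fields are reported with their full name in parentheses,
+ e.g. "(my.package.my_extension).baz".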
+ """ + + errors = [] # simplify things + + for field in required_fields: + if not self.HasField(field.name): + errors.append(field.name) + + for field, value in self.ListFields(): + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if field.is_extension: + name = '(%s)' % field.full_name + else: + name = field.name + + if _IsMapField(field): + if _IsMessageMapField(field): + for key in value: + element = value[key] + prefix = '%s[%s].' % (name, key) + sub_errors = element.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + else: + # ScalarMaps can't have any initialization errors. + pass + elif field.label == _FieldDescriptor.LABEL_REPEATED: + for i in range(len(value)): + element = value[i] + prefix = '%s[%d].' % (name, i) + sub_errors = element.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + else: + prefix = name + '.' + sub_errors = value.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + + return errors + + cls.FindInitializationErrors = FindInitializationErrors + + +def _FullyQualifiedClassName(klass): + module = klass.__module__ + name = getattr(klass, '__qualname__', klass.__name__) + if module in (None, 'builtins', '__builtin__'): + return name + return module + '.' + name + + +def _AddMergeFromMethod(cls): + LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED + CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE + + def MergeFrom(self, msg): + if not isinstance(msg, cls): + raise TypeError( + 'Parameter to MergeFrom() must be instance of same class: ' + 'expected %s got %s.' % (_FullyQualifiedClassName(cls), + _FullyQualifiedClassName(msg.__class__))) + + assert msg is not self + self._Modified() + + fields = self._fields + + for field, value in msg._fields.items(): + if field.label == LABEL_REPEATED: + field_value = fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + fields[field] = field_value + field_value.MergeFrom(value) + elif field.cpp_type == CPPTYPE_MESSAGE: + if value._is_present_in_parent: + field_value = fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + fields[field] = field_value + field_value.MergeFrom(value) + else: + self._fields[field] = value + if field.containing_oneof: + self._UpdateOneofState(field) + + if msg._unknown_fields: + if not self._unknown_fields: + self._unknown_fields = [] + self._unknown_fields.extend(msg._unknown_fields) + + cls.MergeFrom = MergeFrom + + +def _AddWhichOneofMethod(message_descriptor, cls): + def WhichOneof(self, oneof_name): + """Returns the name of the currently set field inside a oneof, or None.""" + try: + field = message_descriptor.oneofs_by_name[oneof_name] + except KeyError: + raise ValueError( + 'Protocol message has no oneof "%s" field.' % oneof_name) + + nested_field = self._oneofs.get(field, None) + if nested_field is not None and self.HasField(nested_field.name): + return nested_field.name + else: + return None + + cls.WhichOneof = WhichOneof + + +def _Clear(self): + # Clear fields. 
+ self._fields = {}
+ self._unknown_fields = ()
+
+ self._oneofs = {}
+ self._Modified()
+
+
+def _UnknownFields(self):
+ raise NotImplementedError('Please use the add-on feature '
+ 'unknown_fields.UnknownFieldSet(message) in '
+ 'unknown_fields.py instead.')
+
+
+def _DiscardUnknownFields(self):
+ self._unknown_fields = []
+ for field, value in self.ListFields():
+ if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
+ if _IsMapField(field):
+ if _IsMessageMapField(field):
+ for key in value:
+ value[key].DiscardUnknownFields()
+ elif field.label == _FieldDescriptor.LABEL_REPEATED:
+ for sub_message in value:
+ sub_message.DiscardUnknownFields()
+ else:
+ value.DiscardUnknownFields()
+
+
+def _SetListener(self, listener):
+ if listener is None:
+ self._listener = message_listener_mod.NullMessageListener()
+ else:
+ self._listener = listener
+
+
+def _AddMessageMethods(message_descriptor, cls):
+ """Adds implementations of all Message methods to cls."""
+ _AddListFieldsMethod(message_descriptor, cls)
+ _AddHasFieldMethod(message_descriptor, cls)
+ _AddClearFieldMethod(message_descriptor, cls)
+ if message_descriptor.is_extendable:
+ _AddClearExtensionMethod(cls)
+ _AddHasExtensionMethod(cls)
+ _AddEqualsMethod(message_descriptor, cls)
+ _AddStrMethod(message_descriptor, cls)
+ _AddReprMethod(message_descriptor, cls)
+ _AddUnicodeMethod(message_descriptor, cls)
+ _AddContainsMethod(message_descriptor, cls)
+ _AddByteSizeMethod(message_descriptor, cls)
+ _AddSerializeToStringMethod(message_descriptor, cls)
+ _AddSerializePartialToStringMethod(message_descriptor, cls)
+ _AddMergeFromStringMethod(message_descriptor, cls)
+ _AddIsInitializedMethod(message_descriptor, cls)
+ _AddMergeFromMethod(cls)
+ _AddWhichOneofMethod(message_descriptor, cls)
+ # Adds methods which do not depend on cls.
+ cls.Clear = _Clear
+ cls.DiscardUnknownFields = _DiscardUnknownFields
+ cls._SetListener = _SetListener
+
+
+def _AddPrivateHelperMethods(message_descriptor, cls):
+ """Adds implementations of private helper methods to cls."""
+
+ def Modified(self):
+ """Sets the _cached_byte_size_dirty bit to true,
+ and propagates this to our listener iff this was a state change.
+ """
+
+ # Note: Some callers check _cached_byte_size_dirty before calling
+ # _Modified() as an extra optimization. So, if this method is ever
+ # changed such that it does stuff even when _cached_byte_size_dirty is
+ # already true, the callers need to be updated.
+ if not self._cached_byte_size_dirty:
+ self._cached_byte_size_dirty = True
+ self._listener_for_children.dirty = True
+ self._is_present_in_parent = True
+ self._listener.Modified()
+
+ def _UpdateOneofState(self, field):
+ """Sets field as the active field in its containing oneof.
+
+ Will also delete the currently active field in the oneof, if it is
+ different from the argument. Does not mark the message as modified.
+ """
+ other_field = self._oneofs.setdefault(field.containing_oneof, field)
+ if other_field is not field:
+ del self._fields[other_field]
+ self._oneofs[field.containing_oneof] = field
+
+ cls._Modified = Modified
+ cls.SetInParent = Modified
+ cls._UpdateOneofState = _UpdateOneofState
+
+
+class _Listener(object):
+
+ """MessageListener implementation that a parent message registers with its
+ child message.
+
+ In order to support semantics like:
+
+ foo.bar.baz.moo = 23
+ assert foo.HasField('bar')
+
+ ...child objects must have back references to their parents.
+ This helper class is at the heart of this support.
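+
+ Dirty-state propagation is also what keeps ByteSize() caching sound:
+ setting a field on a child must invalidate the cached byte size of every
+ ancestor message, and the listener chain is how that signal travels up.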
+ """ + + def __init__(self, parent_message): + """Args: + parent_message: The message whose _Modified() method we should call when + we receive Modified() messages. + """ + # This listener establishes a back reference from a child (contained) object + # to its parent (containing) object. We make this a weak reference to avoid + # creating cyclic garbage when the client finishes with the 'parent' object + # in the tree. + if isinstance(parent_message, weakref.ProxyType): + self._parent_message_weakref = parent_message + else: + self._parent_message_weakref = weakref.proxy(parent_message) + + # As an optimization, we also indicate directly on the listener whether + # or not the parent message is dirty. This way we can avoid traversing + # up the tree in the common case. + self.dirty = False + + def Modified(self): + if self.dirty: + return + try: + # Propagate the signal to our parents iff this is the first field set. + self._parent_message_weakref._Modified() + except ReferenceError: + # We can get here if a client has kept a reference to a child object, + # and is now setting a field on it, but the child's parent has been + # garbage-collected. This is not an error. + pass + + +class _OneofListener(_Listener): + """Special listener implementation for setting composite oneof fields.""" + + def __init__(self, parent_message, field): + """Args: + parent_message: The message whose _Modified() method we should call when + we receive Modified() messages. + field: The descriptor of the field being set in the parent message. + """ + super(_OneofListener, self).__init__(parent_message) + self._field = field + + def Modified(self): + """Also updates the state of the containing oneof in the parent message.""" + try: + self._parent_message_weakref._UpdateOneofState(self._field) + super(_OneofListener, self).Modified() + except ReferenceError: + pass diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/testing_refleaks.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/testing_refleaks.py new file mode 100644 index 00000000..ca0f0b96 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/testing_refleaks.py @@ -0,0 +1,119 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""A subclass of unittest.TestCase which checks for reference leaks. + +To use: +- Use testing_refleak.BaseTestCase instead of unittest.TestCase +- Configure and compile Python with --with-pydebug + +If sys.gettotalrefcount() is not available (because Python was built without +the Py_DEBUG option), then this module is a no-op and tests will run normally. 
+""" + +import copyreg +import gc +import sys +import unittest + + +class LocalTestResult(unittest.TestResult): + """A TestResult which forwards events to a parent object, except for Skips.""" + + def __init__(self, parent_result): + unittest.TestResult.__init__(self) + self.parent_result = parent_result + + def addError(self, test, error): + self.parent_result.addError(test, error) + + def addFailure(self, test, error): + self.parent_result.addFailure(test, error) + + def addSkip(self, test, reason): + pass + + +class ReferenceLeakCheckerMixin(object): + """A mixin class for TestCase, which checks reference counts.""" + + NB_RUNS = 3 + + def run(self, result=None): + testMethod = getattr(self, self._testMethodName) + expecting_failure_method = getattr(testMethod, "__unittest_expecting_failure__", False) + expecting_failure_class = getattr(self, "__unittest_expecting_failure__", False) + if expecting_failure_class or expecting_failure_method: + return + + # python_message.py registers all Message classes to some pickle global + # registry, which makes the classes immortal. + # We save a copy of this registry, and reset it before we could references. + self._saved_pickle_registry = copyreg.dispatch_table.copy() + + # Run the test twice, to warm up the instance attributes. + super(ReferenceLeakCheckerMixin, self).run(result=result) + super(ReferenceLeakCheckerMixin, self).run(result=result) + + oldrefcount = 0 + local_result = LocalTestResult(result) + num_flakes = 0 + + refcount_deltas = [] + while len(refcount_deltas) < self.NB_RUNS: + oldrefcount = self._getRefcounts() + super(ReferenceLeakCheckerMixin, self).run(result=local_result) + newrefcount = self._getRefcounts() + # If the GC was able to collect some objects after the call to run() that + # it could not collect before the call, then the counts won't match. + if newrefcount < oldrefcount and num_flakes < 2: + # This result is (probably) a flake -- garbage collectors aren't very + # predictable, but a lower ending refcount is the opposite of the + # failure we are testing for. If the result is repeatable, then we will + # eventually report it, but not after trying to eliminate it. + num_flakes += 1 + continue + num_flakes = 0 + refcount_deltas.append(newrefcount - oldrefcount) + print(refcount_deltas, self) + + try: + self.assertEqual(refcount_deltas, [0] * self.NB_RUNS) + except Exception: # pylint: disable=broad-except + result.addError(self, sys.exc_info()) + + def _getRefcounts(self): + copyreg.dispatch_table.clear() + copyreg.dispatch_table.update(self._saved_pickle_registry) + # It is sometimes necessary to gc.collect() multiple times, to ensure + # that all objects can be collected. + gc.collect() + gc.collect() + gc.collect() + return sys.gettotalrefcount() + + +if hasattr(sys, 'gettotalrefcount'): + + def TestCase(test_class): + new_bases = (ReferenceLeakCheckerMixin,) + test_class.__bases__ + new_class = type(test_class)( + test_class.__name__, new_bases, dict(test_class.__dict__)) + return new_class + SkipReferenceLeakChecker = unittest.skip + +else: + # When PyDEBUG is not enabled, run the tests normally. + + def TestCase(test_class): + return test_class + + def SkipReferenceLeakChecker(reason): + del reason # Don't skip, so don't need a reason. 
+ def Same(func): + return func + return Same diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/type_checkers.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/type_checkers.py new file mode 100644 index 00000000..04ccc985 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/type_checkers.py @@ -0,0 +1,408 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Provides type checking routines. + +This module defines type checking utilities in the forms of dictionaries: + +VALUE_CHECKERS: A dictionary of field types and a value validation object. +TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing + function. +TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization + function. +FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field typed and their + corresponding wire types. +TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization + function. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import struct +import numbers + +from google.protobuf.internal import decoder +from google.protobuf.internal import encoder +from google.protobuf.internal import wire_format +from google.protobuf import descriptor + +_FieldDescriptor = descriptor.FieldDescriptor + + +def TruncateToFourByteFloat(original): + return struct.unpack('<f', struct.pack('<f', original))[0] + + +def ToShortestFloat(original): + """Returns the shortest float that has same value in wire.""" + # All 4 byte floats have between 6 and 9 significant digits, so we + # start with 6 as the lower bound. + # It has to be iterative because use '.9g' directly can not get rid + # of the noises for most values. For example if set a float_field=0.9 + # use '.9g' will print 0.899999976. + precision = 6 + rounded = float('{0:.{1}g}'.format(original, precision)) + while TruncateToFourByteFloat(rounded) != original: + precision += 1 + rounded = float('{0:.{1}g}'.format(original, precision)) + return rounded + + +def GetTypeChecker(field): + """Returns a type checker for a message field of the specified types. + + Args: + field: FieldDescriptor object for this field. + + Returns: + An instance of TypeChecker which can be used to verify the types + of values assigned to a field of the specified type. + """ + if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and + field.type == _FieldDescriptor.TYPE_STRING): + return UnicodeValueChecker() + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + if field.enum_type.is_closed: + return EnumValueChecker(field.enum_type) + else: + # When open enums are supported, any int32 can be assigned. + return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32] + return _VALUE_CHECKERS[field.cpp_type] + + +# None of the typecheckers below make any attempt to guard against people +# subclassing builtin types and doing weird things. We're not trying to +# protect against malicious clients here, just people accidentally shooting +# themselves in the foot in obvious ways. +class TypeChecker(object): + + """Type checker used to catch type errors as early as possible + when the client is setting scalar fields in protocol messages. 
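+
+ For example, assigning a str to an int32 field fails with TypeError at
+ assignment time instead of surfacing later during serialization.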
+ """ + + def __init__(self, *acceptable_types): + self._acceptable_types = acceptable_types + + def CheckValue(self, proposed_value): + """Type check the provided value and return it. + + The returned value might have been normalized to another type. + """ + if not isinstance(proposed_value, self._acceptable_types): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), self._acceptable_types)) + raise TypeError(message) + return proposed_value + + +class TypeCheckerWithDefault(TypeChecker): + + def __init__(self, default_value, *acceptable_types): + TypeChecker.__init__(self, *acceptable_types) + self._default_value = default_value + + def DefaultValue(self): + return self._default_value + + +class BoolValueChecker(object): + """Type checker used for bool fields.""" + + def CheckValue(self, proposed_value): + if not hasattr(proposed_value, '__index__') or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (bool, int))) + raise TypeError(message) + return bool(proposed_value) + + def DefaultValue(self): + return False + + +# IntValueChecker and its subclasses perform integer type-checks +# and bounds-checks. +class IntValueChecker(object): + + """Checker used for integer fields. Performs type-check and range check.""" + + def CheckValue(self, proposed_value): + if not hasattr(proposed_value, '__index__') or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (int,))) + raise TypeError(message) + + if not self._MIN <= int(proposed_value) <= self._MAX: + raise ValueError('Value out of range: %d' % proposed_value) + # We force all values to int to make alternate implementations where the + # distinction is more significant (e.g. the C++ implementation) simpler. + proposed_value = int(proposed_value) + return proposed_value + + def DefaultValue(self): + return 0 + + +class EnumValueChecker(object): + + """Checker used for enum fields. Performs type-check and range check.""" + + def __init__(self, enum_type): + self._enum_type = enum_type + + def CheckValue(self, proposed_value): + if not isinstance(proposed_value, numbers.Integral): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (int,))) + raise TypeError(message) + if int(proposed_value) not in self._enum_type.values_by_number: + raise ValueError('Unknown enum value: %d' % proposed_value) + return proposed_value + + def DefaultValue(self): + return self._enum_type.values[0].number + + +class UnicodeValueChecker(object): + + """Checker used for string fields. + + Always returns a unicode value, even if the input is of type str. + """ + + def CheckValue(self, proposed_value): + if not isinstance(proposed_value, (bytes, str)): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (bytes, str))) + raise TypeError(message) + + # If the value is of type 'bytes' make sure that it is valid UTF-8 data. + if isinstance(proposed_value, bytes): + try: + proposed_value = proposed_value.decode('utf-8') + except UnicodeDecodeError: + raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 ' + 'encoding. Non-UTF-8 strings must be converted to ' + 'unicode objects before being added.' 
% + (proposed_value)) + else: + try: + proposed_value.encode('utf8') + except UnicodeEncodeError: + raise ValueError('%.1024r isn\'t a valid unicode string and ' + 'can\'t be encoded in UTF-8.'% + (proposed_value)) + + return proposed_value + + def DefaultValue(self): + return u"" + + +class Int32ValueChecker(IntValueChecker): + # We're sure to use ints instead of longs here since comparison may be more + # efficient. + _MIN = -2147483648 + _MAX = 2147483647 + + +class Uint32ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 32) - 1 + + +class Int64ValueChecker(IntValueChecker): + _MIN = -(1 << 63) + _MAX = (1 << 63) - 1 + + +class Uint64ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 64) - 1 + + +# The max 4 bytes float is about 3.4028234663852886e+38 +_FLOAT_MAX = float.fromhex('0x1.fffffep+127') +_FLOAT_MIN = -_FLOAT_MAX +_INF = float('inf') +_NEG_INF = float('-inf') + + +class DoubleValueChecker(object): + """Checker used for double fields. + + Performs type-check and range check. + """ + + def CheckValue(self, proposed_value): + """Check and convert proposed_value to float.""" + if (not hasattr(proposed_value, '__float__') and + not hasattr(proposed_value, '__index__')) or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: int, float' % + (proposed_value, type(proposed_value))) + raise TypeError(message) + return float(proposed_value) + + def DefaultValue(self): + return 0.0 + + +class FloatValueChecker(DoubleValueChecker): + """Checker used for float fields. + + Performs type-check and range check. + + Values exceeding a 32-bit float will be converted to inf/-inf. + """ + + def CheckValue(self, proposed_value): + """Check and convert proposed_value to float.""" + converted_value = super().CheckValue(proposed_value) + # This inf rounding matches the C++ proto SafeDoubleToFloat logic. + if converted_value > _FLOAT_MAX: + return _INF + if converted_value < _FLOAT_MIN: + return _NEG_INF + + return TruncateToFourByteFloat(converted_value) + +# Type-checkers for all scalar CPPTYPEs. +_VALUE_CHECKERS = { + _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(), + _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(), + _FieldDescriptor.CPPTYPE_DOUBLE: DoubleValueChecker(), + _FieldDescriptor.CPPTYPE_FLOAT: FloatValueChecker(), + _FieldDescriptor.CPPTYPE_BOOL: BoolValueChecker(), + _FieldDescriptor.CPPTYPE_STRING: TypeCheckerWithDefault(b'', bytes), +} + + +# Map from field type to a function F, such that F(field_num, value) +# gives the total byte size for a value of the given type. This +# byte size includes tag information and any other additional space +# associated with serializing "value". 
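+# For example, TYPE_TO_BYTE_SIZE_FN[_FieldDescriptor.TYPE_INT32](1, 150)
+# returns the full encoded size of field 1 holding the value 150: one tag
+# byte plus two varint bytes, i.e. 3.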
+TYPE_TO_BYTE_SIZE_FN = { + _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize, + _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize, + _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize, + _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize, + _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize, + _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize, + _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize, + _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize, + _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize, + _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize, + _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize, + _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize, + _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize, + _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize, + _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize, + _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize, + _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize, + _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize + } + + +# Maps from field types to encoder constructors. +TYPE_TO_ENCODER = { + _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder, + _FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder, + _FieldDescriptor.TYPE_INT64: encoder.Int64Encoder, + _FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder, + _FieldDescriptor.TYPE_INT32: encoder.Int32Encoder, + _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder, + _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder, + _FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder, + _FieldDescriptor.TYPE_STRING: encoder.StringEncoder, + _FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder, + _FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder, + _FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder, + _FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder, + _FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder, + _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder, + _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder, + _FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder, + _FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder, + } + + +# Maps from field types to sizer constructors. +TYPE_TO_SIZER = { + _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer, + _FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer, + _FieldDescriptor.TYPE_INT64: encoder.Int64Sizer, + _FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer, + _FieldDescriptor.TYPE_INT32: encoder.Int32Sizer, + _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer, + _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer, + _FieldDescriptor.TYPE_BOOL: encoder.BoolSizer, + _FieldDescriptor.TYPE_STRING: encoder.StringSizer, + _FieldDescriptor.TYPE_GROUP: encoder.GroupSizer, + _FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer, + _FieldDescriptor.TYPE_BYTES: encoder.BytesSizer, + _FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer, + _FieldDescriptor.TYPE_ENUM: encoder.EnumSizer, + _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer, + _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer, + _FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer, + _FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer, + } + + +# Maps from field type to a decoder constructor. 
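+# Each entry is a decoder *constructor*, not a decode function: python_message
+# calls it with per-field details (field number, repeated/packed flags, and so
+# on) to build the actual decode callable; see decoder.py.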
+TYPE_TO_DECODER = { + _FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder, + _FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder, + _FieldDescriptor.TYPE_INT64: decoder.Int64Decoder, + _FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder, + _FieldDescriptor.TYPE_INT32: decoder.Int32Decoder, + _FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder, + _FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder, + _FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder, + _FieldDescriptor.TYPE_STRING: decoder.StringDecoder, + _FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder, + _FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder, + _FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder, + _FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder, + _FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder, + _FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder, + _FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder, + _FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder, + _FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder, + } + +# Maps from field type to expected wiretype. +FIELD_TYPE_TO_WIRE_TYPE = { + _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_STRING: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP, + _FieldDescriptor.TYPE_MESSAGE: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_BYTES: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT, + } diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/well_known_types.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/well_known_types.py new file mode 100644 index 00000000..bbc63f49 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/well_known_types.py @@ -0,0 +1,678 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Contains well known classes. 
+ +This files defines well known classes which need extra maintenance including: + - Any + - Duration + - FieldMask + - Struct + - Timestamp +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + +import calendar +import collections.abc +import datetime +import warnings +from google.protobuf.internal import field_mask +from typing import Union + +FieldMask = field_mask.FieldMask + +_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S' +_NANOS_PER_SECOND = 1000000000 +_NANOS_PER_MILLISECOND = 1000000 +_NANOS_PER_MICROSECOND = 1000 +_MILLIS_PER_SECOND = 1000 +_MICROS_PER_SECOND = 1000000 +_SECONDS_PER_DAY = 24 * 3600 +_DURATION_SECONDS_MAX = 315576000000 +_TIMESTAMP_SECONDS_MIN = -62135596800 +_TIMESTAMP_SECONDS_MAX = 253402300799 + +_EPOCH_DATETIME_NAIVE = datetime.datetime(1970, 1, 1, tzinfo=None) +_EPOCH_DATETIME_AWARE = _EPOCH_DATETIME_NAIVE.replace( + tzinfo=datetime.timezone.utc +) + + +class Any(object): + """Class for Any Message type.""" + + __slots__ = () + + def Pack(self, msg, type_url_prefix='type.googleapis.com/', + deterministic=None): + """Packs the specified message into current Any message.""" + if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/': + self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) + else: + self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) + self.value = msg.SerializeToString(deterministic=deterministic) + + def Unpack(self, msg): + """Unpacks the current Any message into specified message.""" + descriptor = msg.DESCRIPTOR + if not self.Is(descriptor): + return False + msg.ParseFromString(self.value) + return True + + def TypeName(self): + """Returns the protobuf type name of the inner message.""" + # Only last part is to be used: b/25630112 + return self.type_url.split('/')[-1] + + def Is(self, descriptor): + """Checks if this Any represents the given protobuf type.""" + return '/' in self.type_url and self.TypeName() == descriptor.full_name + + +class Timestamp(object): + """Class for Timestamp message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts Timestamp to RFC 3339 date string format. + + Returns: + A string converted from timestamp. The string is always Z-normalized + and uses 3, 6 or 9 fractional digits as required to represent the + exact time. Example of the return format: '1972-01-01T10:00:20.021Z' + """ + _CheckTimestampValid(self.seconds, self.nanos) + nanos = self.nanos + seconds = self.seconds % _SECONDS_PER_DAY + days = (self.seconds - seconds) // _SECONDS_PER_DAY + dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(days, seconds) + + result = dt.isoformat() + if (nanos % 1e9) == 0: + # If there are 0 fractional digits, the fractional + # point '.' should be omitted when serializing. + return result + 'Z' + if (nanos % 1e6) == 0: + # Serialize 3 fractional digits. + return result + '.%03dZ' % (nanos / 1e6) + if (nanos % 1e3) == 0: + # Serialize 6 fractional digits. + return result + '.%06dZ' % (nanos / 1e3) + # Serialize 9 fractional digits. + return result + '.%09dZ' % nanos + + def FromJsonString(self, value): + """Parse a RFC 3339 date string format to Timestamp. + + Args: + value: A date string. Any fractional digits (or none) and any offset are + accepted as long as they fit into nano-seconds precision. + Example of accepted format: '1972-01-01T10:00:20.021-05:00' + + Raises: + ValueError: On parsing problems. 
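+
+ Note: any UTC offset in the input is folded into the stored seconds
+ value, so the resulting Timestamp is always UTC-normalized.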
+ """ + if not isinstance(value, str): + raise ValueError('Timestamp JSON value not a string: {!r}'.format(value)) + timezone_offset = value.find('Z') + if timezone_offset == -1: + timezone_offset = value.find('+') + if timezone_offset == -1: + timezone_offset = value.rfind('-') + if timezone_offset == -1: + raise ValueError( + 'Failed to parse timestamp: missing valid timezone offset.') + time_value = value[0:timezone_offset] + # Parse datetime and nanos. + point_position = time_value.find('.') + if point_position == -1: + second_value = time_value + nano_value = '' + else: + second_value = time_value[:point_position] + nano_value = time_value[point_position + 1:] + if 't' in second_value: + raise ValueError( + 'time data \'{0}\' does not match format \'%Y-%m-%dT%H:%M:%S\', ' + 'lowercase \'t\' is not accepted'.format(second_value)) + date_object = datetime.datetime.strptime(second_value, _TIMESTAMPFOMAT) + td = date_object - datetime.datetime(1970, 1, 1) + seconds = td.seconds + td.days * _SECONDS_PER_DAY + if len(nano_value) > 9: + raise ValueError( + 'Failed to parse Timestamp: nanos {0} more than ' + '9 fractional digits.'.format(nano_value)) + if nano_value: + nanos = round(float('0.' + nano_value) * 1e9) + else: + nanos = 0 + # Parse timezone offsets. + if value[timezone_offset] == 'Z': + if len(value) != timezone_offset + 1: + raise ValueError('Failed to parse timestamp: invalid trailing' + ' data {0}.'.format(value)) + else: + timezone = value[timezone_offset:] + pos = timezone.find(':') + if pos == -1: + raise ValueError( + 'Invalid timezone offset value: {0}.'.format(timezone)) + if timezone[0] == '+': + seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 + else: + seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 + # Set seconds and nanos + _CheckTimestampValid(seconds, nanos) + self.seconds = int(seconds) + self.nanos = int(nanos) + + def GetCurrentTime(self): + """Get the current UTC into Timestamp.""" + self.FromDatetime(datetime.datetime.utcnow()) + + def ToNanoseconds(self): + """Converts Timestamp to nanoseconds since epoch.""" + _CheckTimestampValid(self.seconds, self.nanos) + return self.seconds * _NANOS_PER_SECOND + self.nanos + + def ToMicroseconds(self): + """Converts Timestamp to microseconds since epoch.""" + _CheckTimestampValid(self.seconds, self.nanos) + return (self.seconds * _MICROS_PER_SECOND + + self.nanos // _NANOS_PER_MICROSECOND) + + def ToMilliseconds(self): + """Converts Timestamp to milliseconds since epoch.""" + _CheckTimestampValid(self.seconds, self.nanos) + return (self.seconds * _MILLIS_PER_SECOND + + self.nanos // _NANOS_PER_MILLISECOND) + + def ToSeconds(self): + """Converts Timestamp to seconds since epoch.""" + _CheckTimestampValid(self.seconds, self.nanos) + return self.seconds + + def FromNanoseconds(self, nanos): + """Converts nanoseconds since epoch to Timestamp.""" + seconds = nanos // _NANOS_PER_SECOND + nanos = nanos % _NANOS_PER_SECOND + _CheckTimestampValid(seconds, nanos) + self.seconds = seconds + self.nanos = nanos + + def FromMicroseconds(self, micros): + """Converts microseconds since epoch to Timestamp.""" + seconds = micros // _MICROS_PER_SECOND + nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND + _CheckTimestampValid(seconds, nanos) + self.seconds = seconds + self.nanos = nanos + + def FromMilliseconds(self, millis): + """Converts milliseconds since epoch to Timestamp.""" + seconds = millis // _MILLIS_PER_SECOND + nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND + 
_CheckTimestampValid(seconds, nanos)
+ self.seconds = seconds
+ self.nanos = nanos
+
+ def FromSeconds(self, seconds):
+ """Converts seconds since epoch to Timestamp."""
+ _CheckTimestampValid(seconds, 0)
+ self.seconds = seconds
+ self.nanos = 0
+
+ def ToDatetime(self, tzinfo=None):
+ """Converts Timestamp to a datetime.
+
+ Args:
+ tzinfo: A datetime.tzinfo subclass; defaults to None.
+
+ Returns:
+ If tzinfo is None, returns a timezone-naive UTC datetime (with no timezone
+ information, i.e. not aware that it's UTC).
+
+ Otherwise, returns a timezone-aware datetime in the input timezone.
+ """
+ # Using datetime.fromtimestamp for this would avoid constructing an extra
+ # timedelta object and possibly an extra datetime. Unfortunately, that has
+ # the disadvantage of not handling the full precision (on all platforms, see
+ # https://github.com/python/cpython/issues/109849) or full range (on some
+ # platforms, see https://github.com/python/cpython/issues/110042) of
+ # datetime.
+ _CheckTimestampValid(self.seconds, self.nanos)
+ delta = datetime.timedelta(
+ seconds=self.seconds,
+ microseconds=_RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND),
+ )
+ if tzinfo is None:
+ return _EPOCH_DATETIME_NAIVE + delta
+ else:
+ # Note the tz conversion has to come after the timedelta arithmetic.
+ return (_EPOCH_DATETIME_AWARE + delta).astimezone(tzinfo)
+
+ def FromDatetime(self, dt):
+ """Converts datetime to Timestamp.
+
+ Args:
+ dt: A datetime. If it's timezone-naive, it's assumed to be in UTC.
+ """
+ # Using this guide: http://wiki.python.org/moin/WorkingWithTime
+ # And this conversion guide: http://docs.python.org/library/time.html
+
+ # Turn the date parameter into a tuple (struct_time) that can then be
+ # manipulated into a long value of seconds. During the conversion from
+ # struct_time to long, the source date is in UTC, and so it follows that
+ # the correct transformation is calendar.timegm()
+ try:
+ seconds = calendar.timegm(dt.utctimetuple())
+ nanos = dt.microsecond * _NANOS_PER_MICROSECOND
+ except AttributeError as e:
+ raise AttributeError(
+ 'Failed to convert to Timestamp. Expected a datetime-like '
+ 'object, got {0}: {1}'.format(type(dt).__name__, e)
+ ) from e
+ _CheckTimestampValid(seconds, nanos)
+ self.seconds = seconds
+ self.nanos = nanos
+
+ def _internal_assign(self, dt):
+ self.FromDatetime(dt)
+
+ def __add__(self, value) -> datetime.datetime:
+ if isinstance(value, Duration):
+ return self.ToDatetime() + value.ToTimedelta()
+ return self.ToDatetime() + value
+
+ __radd__ = __add__
+
+ def __sub__(self, value) -> Union[datetime.datetime, datetime.timedelta]:
+ if isinstance(value, Timestamp):
+ return self.ToDatetime() - value.ToDatetime()
+ elif isinstance(value, Duration):
+ return self.ToDatetime() - value.ToTimedelta()
+ return self.ToDatetime() - value
+
+ def __rsub__(self, dt) -> datetime.timedelta:
+ return dt - self.ToDatetime()
+
+
+def _CheckTimestampValid(seconds, nanos):
+ if seconds < _TIMESTAMP_SECONDS_MIN or seconds > _TIMESTAMP_SECONDS_MAX:
+ raise ValueError(
+ 'Timestamp is not valid: Seconds {0} must be in range '
+ '[-62135596800, 253402300799].'.format(seconds))
+ if nanos < 0 or nanos >= _NANOS_PER_SECOND:
+ raise ValueError(
+ 'Timestamp is not valid: Nanos {0} must be in range '
+ '[0, 999999999].'.format(nanos))
+
+
+class Duration(object):
+ """Class for Duration message type."""
+
+ __slots__ = ()
+
+ def ToJsonString(self):
+ """Converts Duration to string format.
+
+ Returns:
+ A string converted from self.
The string format will contains + 3, 6, or 9 fractional digits depending on the precision required to + represent the exact Duration value. For example: "1s", "1.010s", + "1.000000100s", "-3.100s" + """ + _CheckDurationValid(self.seconds, self.nanos) + if self.seconds < 0 or self.nanos < 0: + result = '-' + seconds = - self.seconds + int((0 - self.nanos) // 1e9) + nanos = (0 - self.nanos) % 1e9 + else: + result = '' + seconds = self.seconds + int(self.nanos // 1e9) + nanos = self.nanos % 1e9 + result += '%d' % seconds + if (nanos % 1e9) == 0: + # If there are 0 fractional digits, the fractional + # point '.' should be omitted when serializing. + return result + 's' + if (nanos % 1e6) == 0: + # Serialize 3 fractional digits. + return result + '.%03ds' % (nanos / 1e6) + if (nanos % 1e3) == 0: + # Serialize 6 fractional digits. + return result + '.%06ds' % (nanos / 1e3) + # Serialize 9 fractional digits. + return result + '.%09ds' % nanos + + def FromJsonString(self, value): + """Converts a string to Duration. + + Args: + value: A string to be converted. The string must end with 's'. Any + fractional digits (or none) are accepted as long as they fit into + precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s + + Raises: + ValueError: On parsing problems. + """ + if not isinstance(value, str): + raise ValueError('Duration JSON value not a string: {!r}'.format(value)) + if len(value) < 1 or value[-1] != 's': + raise ValueError( + 'Duration must end with letter "s": {0}.'.format(value)) + try: + pos = value.find('.') + if pos == -1: + seconds = int(value[:-1]) + nanos = 0 + else: + seconds = int(value[:pos]) + if value[0] == '-': + nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9)) + else: + nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9)) + _CheckDurationValid(seconds, nanos) + self.seconds = seconds + self.nanos = nanos + except ValueError as e: + raise ValueError( + 'Couldn\'t parse duration: {0} : {1}.'.format(value, e)) + + def ToNanoseconds(self): + """Converts a Duration to nanoseconds.""" + return self.seconds * _NANOS_PER_SECOND + self.nanos + + def ToMicroseconds(self): + """Converts a Duration to microseconds.""" + micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND) + return self.seconds * _MICROS_PER_SECOND + micros + + def ToMilliseconds(self): + """Converts a Duration to milliseconds.""" + millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND) + return self.seconds * _MILLIS_PER_SECOND + millis + + def ToSeconds(self): + """Converts a Duration to seconds.""" + return self.seconds + + def FromNanoseconds(self, nanos): + """Converts nanoseconds to Duration.""" + self._NormalizeDuration(nanos // _NANOS_PER_SECOND, + nanos % _NANOS_PER_SECOND) + + def FromMicroseconds(self, micros): + """Converts microseconds to Duration.""" + self._NormalizeDuration( + micros // _MICROS_PER_SECOND, + (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND) + + def FromMilliseconds(self, millis): + """Converts milliseconds to Duration.""" + self._NormalizeDuration( + millis // _MILLIS_PER_SECOND, + (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND) + + def FromSeconds(self, seconds): + """Converts seconds to Duration.""" + self.seconds = seconds + self.nanos = 0 + + def ToTimedelta(self) -> datetime.timedelta: + """Converts Duration to timedelta.""" + return datetime.timedelta( + seconds=self.seconds, microseconds=_RoundTowardZero( + self.nanos, _NANOS_PER_MICROSECOND)) + + def FromTimedelta(self, td): + """Converts timedelta to Duration.""" + try: 
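+ # datetime.timedelta keeps its fields normalized (0 <= seconds < 86400,
+ # 0 <= microseconds < 1e6, with any sign carried by days), so the
+ # arithmetic below is correct for negative durations as well.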
+ self._NormalizeDuration( + td.seconds + td.days * _SECONDS_PER_DAY, + td.microseconds * _NANOS_PER_MICROSECOND, + ) + except AttributeError as e: + raise AttributeError( + 'Fail to convert to Duration. Expected a timedelta like ' + 'object got {0}: {1}'.format(type(td).__name__, e) + ) from e + + def _internal_assign(self, td): + self.FromTimedelta(td) + + def _NormalizeDuration(self, seconds, nanos): + """Set Duration by seconds and nanos.""" + # Force nanos to be negative if the duration is negative. + if seconds < 0 and nanos > 0: + seconds += 1 + nanos -= _NANOS_PER_SECOND + self.seconds = seconds + self.nanos = nanos + + def __add__(self, value) -> Union[datetime.datetime, datetime.timedelta]: + if isinstance(value, Timestamp): + return self.ToTimedelta() + value.ToDatetime() + return self.ToTimedelta() + value + + __radd__ = __add__ + + def __rsub__(self, dt) -> Union[datetime.datetime, datetime.timedelta]: + return dt - self.ToTimedelta() + + +def _CheckDurationValid(seconds, nanos): + if seconds < -_DURATION_SECONDS_MAX or seconds > _DURATION_SECONDS_MAX: + raise ValueError( + 'Duration is not valid: Seconds {0} must be in range ' + '[-315576000000, 315576000000].'.format(seconds)) + if nanos <= -_NANOS_PER_SECOND or nanos >= _NANOS_PER_SECOND: + raise ValueError( + 'Duration is not valid: Nanos {0} must be in range ' + '[-999999999, 999999999].'.format(nanos)) + if (nanos < 0 and seconds > 0) or (nanos > 0 and seconds < 0): + raise ValueError( + 'Duration is not valid: Sign mismatch.') + + +def _RoundTowardZero(value, divider): + """Truncates the remainder part after division.""" + # For some languages, the sign of the remainder is implementation + # dependent if any of the operands is negative. Here we enforce + # "rounded toward zero" semantics. For example, for (-5) / 2 an + # implementation may give -3 as the result with the remainder being + # 1. This function ensures we always return -2 (closer to zero). + result = value // divider + remainder = value % divider + if result < 0 and remainder > 0: + return result + 1 + else: + return result + + +def _SetStructValue(struct_value, value): + if value is None: + struct_value.null_value = 0 + elif isinstance(value, bool): + # Note: this check must come before the number check because in Python + # True and False are also considered numbers. 
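+ # (isinstance(True, int) is True in Python, so the (int, float) branch
+ # below would otherwise swallow booleans.)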
+ struct_value.bool_value = value + elif isinstance(value, str): + struct_value.string_value = value + elif isinstance(value, (int, float)): + struct_value.number_value = value + elif isinstance(value, (dict, Struct)): + struct_value.struct_value.Clear() + struct_value.struct_value.update(value) + elif isinstance(value, (list, tuple, ListValue)): + struct_value.list_value.Clear() + struct_value.list_value.extend(value) + else: + raise ValueError('Unexpected type') + + +def _GetStructValue(struct_value): + which = struct_value.WhichOneof('kind') + if which == 'struct_value': + return struct_value.struct_value + elif which == 'null_value': + return None + elif which == 'number_value': + return struct_value.number_value + elif which == 'string_value': + return struct_value.string_value + elif which == 'bool_value': + return struct_value.bool_value + elif which == 'list_value': + return struct_value.list_value + elif which is None: + raise ValueError('Value not set') + + +class Struct(object): + """Class for Struct message type.""" + + __slots__ = () + + def __getitem__(self, key): + return _GetStructValue(self.fields[key]) + + def __setitem__(self, key, value): + _SetStructValue(self.fields[key], value) + + def __delitem__(self, key): + del self.fields[key] + + def __len__(self): + return len(self.fields) + + def __iter__(self): + return iter(self.fields) + + def _internal_assign(self, dictionary): + self.Clear() + self.update(dictionary) + + def _internal_compare(self, other): + size = len(self) + if size != len(other): + return False + for key, value in self.items(): + if key not in other: + return False + if isinstance(other[key], (dict, list)): + if not value._internal_compare(other[key]): + return False + elif value != other[key]: + return False + return True + + def keys(self): # pylint: disable=invalid-name + return self.fields.keys() + + def values(self): # pylint: disable=invalid-name + return [self[key] for key in self] + + def items(self): # pylint: disable=invalid-name + return [(key, self[key]) for key in self] + + def get_or_create_list(self, key): + """Returns a list for this key, creating if it didn't exist already.""" + if not self.fields[key].HasField('list_value'): + # Clear will mark list_value modified which will indeed create a list. + self.fields[key].list_value.Clear() + return self.fields[key].list_value + + def get_or_create_struct(self, key): + """Returns a struct for this key, creating if it didn't exist already.""" + if not self.fields[key].HasField('struct_value'): + # Clear will mark struct_value modified which will indeed create a struct. 
+ self.fields[key].struct_value.Clear() + return self.fields[key].struct_value + + def update(self, dictionary): # pylint: disable=invalid-name + for key, value in dictionary.items(): + _SetStructValue(self.fields[key], value) + +collections.abc.MutableMapping.register(Struct) + + +class ListValue(object): + """Class for ListValue message type.""" + + __slots__ = () + + def __len__(self): + return len(self.values) + + def append(self, value): + _SetStructValue(self.values.add(), value) + + def extend(self, elem_seq): + for value in elem_seq: + self.append(value) + + def __getitem__(self, index): + """Retrieves item by the specified index.""" + return _GetStructValue(self.values.__getitem__(index)) + + def __setitem__(self, index, value): + _SetStructValue(self.values.__getitem__(index), value) + + def __delitem__(self, key): + del self.values[key] + + def _internal_assign(self, elem_seq): + self.Clear() + self.extend(elem_seq) + + def _internal_compare(self, other): + size = len(self) + if size != len(other): + return False + for i in range(size): + if isinstance(other[i], (dict, list)): + if not self[i]._internal_compare(other[i]): + return False + elif self[i] != other[i]: + return False + return True + + def items(self): + for i in range(len(self)): + yield self[i] + + def add_struct(self): + """Appends and returns a struct value as the next value in the list.""" + struct_value = self.values.add().struct_value + # Clear will mark struct_value modified which will indeed create a struct. + struct_value.Clear() + return struct_value + + def add_list(self): + """Appends and returns a list value as the next value in the list.""" + list_value = self.values.add().list_value + # Clear will mark list_value modified which will indeed create a list. + list_value.Clear() + return list_value + +collections.abc.MutableSequence.register(ListValue) + + +# LINT.IfChange(wktbases) +WKTBASES = { + 'google.protobuf.Any': Any, + 'google.protobuf.Duration': Duration, + 'google.protobuf.FieldMask': FieldMask, + 'google.protobuf.ListValue': ListValue, + 'google.protobuf.Struct': Struct, + 'google.protobuf.Timestamp': Timestamp, +} +# LINT.ThenChange(//depot/google.protobuf/compiler/python/pyi_generator.cc:wktbases) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/internal/wire_format.py b/.venv/lib/python3.12/site-packages/google/protobuf/internal/wire_format.py new file mode 100644 index 00000000..6237dabf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/internal/wire_format.py @@ -0,0 +1,245 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Constants and static functions to support protocol buffer wire format.""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import struct +from google.protobuf import descriptor +from google.protobuf import message + + +TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag. +TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7 + +# These numbers identify the wire type of a protocol buffer value. +# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded +# tag-and-type to store one of these WIRETYPE_* constants. +# These values must match WireType enum in //google/protobuf/wire_format.h. 
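+# A field's tag packs its number and wire type together; for example, field
+# number 1 with WIRETYPE_VARINT is encoded as (1 << TAG_TYPE_BITS) | 0, i.e.
+# the single byte 0x08. See PackTag()/UnpackTag() below.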
+WIRETYPE_VARINT = 0 +WIRETYPE_FIXED64 = 1 +WIRETYPE_LENGTH_DELIMITED = 2 +WIRETYPE_START_GROUP = 3 +WIRETYPE_END_GROUP = 4 +WIRETYPE_FIXED32 = 5 +_WIRETYPE_MAX = 5 + + +# Bounds for various integer types. +INT32_MAX = int((1 << 31) - 1) +INT32_MIN = int(-(1 << 31)) +UINT32_MAX = (1 << 32) - 1 + +INT64_MAX = (1 << 63) - 1 +INT64_MIN = -(1 << 63) +UINT64_MAX = (1 << 64) - 1 + +# "struct" format strings that will encode/decode the specified formats. +FORMAT_UINT32_LITTLE_ENDIAN = '<I' +FORMAT_UINT64_LITTLE_ENDIAN = '<Q' +FORMAT_FLOAT_LITTLE_ENDIAN = '<f' +FORMAT_DOUBLE_LITTLE_ENDIAN = '<d' + + +# We'll have to provide alternate implementations of AppendLittleEndian*() on +# any architectures where these checks fail. +if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4: + raise AssertionError('Format "I" is not a 32-bit number.') +if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8: + raise AssertionError('Format "Q" is not a 64-bit number.') + + +def PackTag(field_number, wire_type): + """Returns an unsigned 32-bit integer that encodes the field number and + wire type information in standard protocol message wire format. + + Args: + field_number: Expected to be an integer in the range [1, 1 << 29) + wire_type: One of the WIRETYPE_* constants. + """ + if not 0 <= wire_type <= _WIRETYPE_MAX: + raise message.EncodeError('Unknown wire type: %d' % wire_type) + return (field_number << TAG_TYPE_BITS) | wire_type + + +def UnpackTag(tag): + """The inverse of PackTag(). Given an unsigned 32-bit number, + returns a (field_number, wire_type) tuple. + """ + return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK) + + +def ZigZagEncode(value): + """ZigZag Transform: Encodes signed integers so that they can be + effectively used with varint encoding. See wire_format.h for + more details. + """ + if value >= 0: + return value << 1 + return (value << 1) ^ (~0) + + +def ZigZagDecode(value): + """Inverse of ZigZagEncode().""" + if not value & 0x1: + return value >> 1 + return (value >> 1) ^ (~0) + + + +# The *ByteSize() functions below return the number of bytes required to +# serialize "field number + type" information and then serialize the value. + + +def Int32ByteSize(field_number, int32): + return Int64ByteSize(field_number, int32) + + +def Int32ByteSizeNoTag(int32): + return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32) + + +def Int64ByteSize(field_number, int64): + # Have to convert to uint before calling UInt64ByteSize(). 
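+ # Masking with 2**64 - 1 maps a negative int64 to its two's-complement
+ # unsigned equivalent, which is exactly how negative varints appear on
+ # the wire (always 10 bytes long).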
+ return UInt64ByteSize(field_number, 0xffffffffffffffff & int64) + + +def UInt32ByteSize(field_number, uint32): + return UInt64ByteSize(field_number, uint32) + + +def UInt64ByteSize(field_number, uint64): + return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64) + + +def SInt32ByteSize(field_number, int32): + return UInt32ByteSize(field_number, ZigZagEncode(int32)) + + +def SInt64ByteSize(field_number, int64): + return UInt64ByteSize(field_number, ZigZagEncode(int64)) + + +def Fixed32ByteSize(field_number, fixed32): + return TagByteSize(field_number) + 4 + + +def Fixed64ByteSize(field_number, fixed64): + return TagByteSize(field_number) + 8 + + +def SFixed32ByteSize(field_number, sfixed32): + return TagByteSize(field_number) + 4 + + +def SFixed64ByteSize(field_number, sfixed64): + return TagByteSize(field_number) + 8 + + +def FloatByteSize(field_number, flt): + return TagByteSize(field_number) + 4 + + +def DoubleByteSize(field_number, double): + return TagByteSize(field_number) + 8 + + +def BoolByteSize(field_number, b): + return TagByteSize(field_number) + 1 + + +def EnumByteSize(field_number, enum): + return UInt32ByteSize(field_number, enum) + + +def StringByteSize(field_number, string): + return BytesByteSize(field_number, string.encode('utf-8')) + + +def BytesByteSize(field_number, b): + return (TagByteSize(field_number) + + _VarUInt64ByteSizeNoTag(len(b)) + + len(b)) + + +def GroupByteSize(field_number, message): + return (2 * TagByteSize(field_number) # START and END group. + + message.ByteSize()) + + +def MessageByteSize(field_number, message): + return (TagByteSize(field_number) + + _VarUInt64ByteSizeNoTag(message.ByteSize()) + + message.ByteSize()) + + +def MessageSetItemByteSize(field_number, msg): + # First compute the sizes of the tags. + # There are 2 tags for the beginning and ending of the repeated group, that + # is field number 1, one with field number 2 (type_id) and one with field + # number 3 (message). + total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3)) + + # Add the number of bytes for type_id. + total_size += _VarUInt64ByteSizeNoTag(field_number) + + message_size = msg.ByteSize() + + # The number of bytes for encoding the length of the message. + total_size += _VarUInt64ByteSizeNoTag(message_size) + + # The size of the message. + total_size += message_size + return total_size + + +def TagByteSize(field_number): + """Returns the bytes required to serialize a tag with this field number.""" + # Just pass in type 0, since the type won't affect the tag+type size. + return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0)) + + +# Private helper function for the *ByteSize() functions above. + +def _VarUInt64ByteSizeNoTag(uint64): + """Returns the number of bytes required to serialize a single varint + using boundary value comparisons. (unrolled loop optimization -WPierce) + uint64 must be unsigned. 
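+
+  Illustrative values (added for clarity; a varint stores 7 payload bits
+  per byte):
+    _VarUInt64ByteSizeNoTag(0x7f) == 1
+    _VarUInt64ByteSizeNoTag(0x80) == 2
+    _VarUInt64ByteSizeNoTag(1 << 63) == 10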
+ """ + if uint64 <= 0x7f: return 1 + if uint64 <= 0x3fff: return 2 + if uint64 <= 0x1fffff: return 3 + if uint64 <= 0xfffffff: return 4 + if uint64 <= 0x7ffffffff: return 5 + if uint64 <= 0x3ffffffffff: return 6 + if uint64 <= 0x1ffffffffffff: return 7 + if uint64 <= 0xffffffffffffff: return 8 + if uint64 <= 0x7fffffffffffffff: return 9 + if uint64 > UINT64_MAX: + raise message.EncodeError('Value out of range: %d' % uint64) + return 10 + + +NON_PACKABLE_TYPES = ( + descriptor.FieldDescriptor.TYPE_STRING, + descriptor.FieldDescriptor.TYPE_GROUP, + descriptor.FieldDescriptor.TYPE_MESSAGE, + descriptor.FieldDescriptor.TYPE_BYTES +) + + +def IsTypePackable(field_type): + """Return true iff packable = true is valid for fields of this type. + + Args: + field_type: a FieldDescriptor::Type value. + + Returns: + True iff fields of this type are packable. + """ + return field_type not in NON_PACKABLE_TYPES diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/json_format.py b/.venv/lib/python3.12/site-packages/google/protobuf/json_format.py new file mode 100644 index 00000000..2a6bba93 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/json_format.py @@ -0,0 +1,1069 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Contains routines for printing protocol messages in JSON format. + +Simple usage example: + + # Create a proto object and serialize it to a json format string. + message = my_proto_pb2.MyMessage(foo='bar') + json_string = json_format.MessageToJson(message) + + # Parse a json format string to proto object. + message = json_format.Parse(json_string, my_proto_pb2.MyMessage()) +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + + +import base64 +from collections import OrderedDict +import json +import math +from operator import methodcaller +import re + +from google.protobuf import descriptor +from google.protobuf import message_factory +from google.protobuf import symbol_database +from google.protobuf.internal import type_checkers + + +_INT_TYPES = frozenset([ + descriptor.FieldDescriptor.CPPTYPE_INT32, + descriptor.FieldDescriptor.CPPTYPE_UINT32, + descriptor.FieldDescriptor.CPPTYPE_INT64, + descriptor.FieldDescriptor.CPPTYPE_UINT64, +]) +_INT64_TYPES = frozenset([ + descriptor.FieldDescriptor.CPPTYPE_INT64, + descriptor.FieldDescriptor.CPPTYPE_UINT64, +]) +_FLOAT_TYPES = frozenset([ + descriptor.FieldDescriptor.CPPTYPE_FLOAT, + descriptor.FieldDescriptor.CPPTYPE_DOUBLE, +]) +_INFINITY = 'Infinity' +_NEG_INFINITY = '-Infinity' +_NAN = 'NaN' + +_UNPAIRED_SURROGATE_PATTERN = re.compile( + '[\ud800-\udbff](?![\udc00-\udfff])|(?<![\ud800-\udbff])[\udc00-\udfff]' +) + +_VALID_EXTENSION_NAME = re.compile(r'\[[a-zA-Z0-9\._]*\]$') + + +class Error(Exception): + """Top-level module error for json_format.""" + + +class SerializeToJsonError(Error): + """Thrown if serialization to JSON fails.""" + + +class ParseError(Error): + """Thrown in case of parsing error.""" + + +class EnumStringValueParseError(ParseError): + """Thrown if unknown string enum value is encountered. + This exception is suppressed if ignore_unknown_fields is set. 
+ """ + + +def MessageToJson( + message, + preserving_proto_field_name=False, + indent=2, + sort_keys=False, + use_integers_for_enums=False, + descriptor_pool=None, + float_precision=None, + ensure_ascii=True, + always_print_fields_with_no_presence=False, +): + """Converts protobuf message to JSON format. + + Args: + message: The protocol buffers message instance to serialize. + always_print_fields_with_no_presence: If True, fields without + presence (implicit presence scalars, repeated fields, and map fields) will + always be serialized. Any field that supports presence is not affected by + this option (including singular message fields and oneof fields). + preserving_proto_field_name: If True, use the original proto field names as + defined in the .proto file. If False, convert the field names to + lowerCamelCase. + indent: The JSON object will be pretty-printed with this indent level. An + indent level of 0 or negative will only insert newlines. If the indent + level is None, no newlines will be inserted. + sort_keys: If True, then the output will be sorted by field names. + use_integers_for_enums: If true, print integers instead of enum names. + descriptor_pool: A Descriptor Pool for resolving types. If None use the + default. + float_precision: If set, use this to specify float field valid digits. + ensure_ascii: If True, strings with non-ASCII characters are escaped. If + False, Unicode strings are returned unchanged. + + Returns: + A string containing the JSON formatted protocol buffer message. + """ + printer = _Printer( + preserving_proto_field_name, + use_integers_for_enums, + descriptor_pool, + float_precision, + always_print_fields_with_no_presence + ) + return printer.ToJsonString(message, indent, sort_keys, ensure_ascii) + + +def MessageToDict( + message, + always_print_fields_with_no_presence=False, + preserving_proto_field_name=False, + use_integers_for_enums=False, + descriptor_pool=None, + float_precision=None, +): + """Converts protobuf message to a dictionary. + + When the dictionary is encoded to JSON, it conforms to proto3 JSON spec. + + Args: + message: The protocol buffers message instance to serialize. + always_print_fields_with_no_presence: If True, fields without + presence (implicit presence scalars, repeated fields, and map fields) will + always be serialized. Any field that supports presence is not affected by + this option (including singular message fields and oneof fields). + preserving_proto_field_name: If True, use the original proto field names as + defined in the .proto file. If False, convert the field names to + lowerCamelCase. + use_integers_for_enums: If true, print integers instead of enum names. + descriptor_pool: A Descriptor Pool for resolving types. If None use the + default. + float_precision: If set, use this to specify float field valid digits. + + Returns: + A dict representation of the protocol buffer message. 
+ """ + printer = _Printer( + preserving_proto_field_name, + use_integers_for_enums, + descriptor_pool, + float_precision, + always_print_fields_with_no_presence, + ) + # pylint: disable=protected-access + return printer._MessageToJsonObject(message) + + +def _IsMapEntry(field): + return ( + field.type == descriptor.FieldDescriptor.TYPE_MESSAGE + and field.message_type.has_options + and field.message_type.GetOptions().map_entry + ) + + +class _Printer(object): + """JSON format printer for protocol message.""" + + def __init__( + self, + preserving_proto_field_name=False, + use_integers_for_enums=False, + descriptor_pool=None, + float_precision=None, + always_print_fields_with_no_presence=False, + ): + self.always_print_fields_with_no_presence = ( + always_print_fields_with_no_presence + ) + self.preserving_proto_field_name = preserving_proto_field_name + self.use_integers_for_enums = use_integers_for_enums + self.descriptor_pool = descriptor_pool + if float_precision: + self.float_format = '.{}g'.format(float_precision) + else: + self.float_format = None + + def ToJsonString(self, message, indent, sort_keys, ensure_ascii): + js = self._MessageToJsonObject(message) + return json.dumps( + js, indent=indent, sort_keys=sort_keys, ensure_ascii=ensure_ascii + ) + + def _MessageToJsonObject(self, message): + """Converts message to an object according to Proto3 JSON Specification.""" + message_descriptor = message.DESCRIPTOR + full_name = message_descriptor.full_name + if _IsWrapperMessage(message_descriptor): + return self._WrapperMessageToJsonObject(message) + if full_name in _WKTJSONMETHODS: + return methodcaller(_WKTJSONMETHODS[full_name][0], message)(self) + js = {} + return self._RegularMessageToJsonObject(message, js) + + def _RegularMessageToJsonObject(self, message, js): + """Converts normal message according to Proto3 JSON Specification.""" + fields = message.ListFields() + + try: + for field, value in fields: + if self.preserving_proto_field_name: + name = field.name + else: + name = field.json_name + if _IsMapEntry(field): + # Convert a map field. + v_field = field.message_type.fields_by_name['value'] + js_map = {} + for key in value: + if isinstance(key, bool): + if key: + recorded_key = 'true' + else: + recorded_key = 'false' + else: + recorded_key = str(key) + js_map[recorded_key] = self._FieldToJsonObject(v_field, value[key]) + js[name] = js_map + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + # Convert a repeated field. + js[name] = [self._FieldToJsonObject(field, k) for k in value] + elif field.is_extension: + name = '[%s]' % field.full_name + js[name] = self._FieldToJsonObject(field, value) + else: + js[name] = self._FieldToJsonObject(field, value) + + # Serialize default value if including_default_value_fields is True. + if ( + self.always_print_fields_with_no_presence + ): + message_descriptor = message.DESCRIPTOR + for field in message_descriptor.fields: + + # always_print_fields_with_no_presence doesn't apply to + # any field which supports presence. + if ( + self.always_print_fields_with_no_presence + and field.has_presence + ): + continue + + if self.preserving_proto_field_name: + name = field.name + else: + name = field.json_name + if name in js: + # Skip the field which has been serialized already. 
+ continue + if _IsMapEntry(field): + js[name] = {} + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + js[name] = [] + else: + js[name] = self._FieldToJsonObject(field, field.default_value) + + except ValueError as e: + raise SerializeToJsonError( + 'Failed to serialize {0} field: {1}.'.format(field.name, e) + ) from e + + return js + + def _FieldToJsonObject(self, field, value): + """Converts field value according to Proto3 JSON Specification.""" + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + return self._MessageToJsonObject(value) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: + if self.use_integers_for_enums: + return value + if field.enum_type.full_name == 'google.protobuf.NullValue': + return None + enum_value = field.enum_type.values_by_number.get(value, None) + if enum_value is not None: + return enum_value.name + else: + if field.enum_type.is_closed: + raise SerializeToJsonError( + 'Enum field contains an integer value ' + 'which can not mapped to an enum value.' + ) + else: + return value + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + if field.type == descriptor.FieldDescriptor.TYPE_BYTES: + # Use base64 Data encoding for bytes + return base64.b64encode(value).decode('utf-8') + else: + return str(value) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: + return bool(value) + elif field.cpp_type in _INT64_TYPES: + return str(value) + elif field.cpp_type in _FLOAT_TYPES: + if math.isinf(value): + if value < 0.0: + return _NEG_INFINITY + else: + return _INFINITY + if math.isnan(value): + return _NAN + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: + if self.float_format: + return float(format(value, self.float_format)) + else: + return type_checkers.ToShortestFloat(value) + + return value + + def _AnyMessageToJsonObject(self, message): + """Converts Any message according to Proto3 JSON Specification.""" + if not message.ListFields(): + return {} + # Must print @type first, use OrderedDict instead of {} + js = OrderedDict() + type_url = message.type_url + js['@type'] = type_url + sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool) + sub_message.ParseFromString(message.value) + message_descriptor = sub_message.DESCRIPTOR + full_name = message_descriptor.full_name + if _IsWrapperMessage(message_descriptor): + js['value'] = self._WrapperMessageToJsonObject(sub_message) + return js + if full_name in _WKTJSONMETHODS: + js['value'] = methodcaller(_WKTJSONMETHODS[full_name][0], sub_message)( + self + ) + return js + return self._RegularMessageToJsonObject(sub_message, js) + + def _GenericMessageToJsonObject(self, message): + """Converts message according to Proto3 JSON Specification.""" + # Duration, Timestamp and FieldMask have ToJsonString method to do the + # convert. Users can also call the method directly. + return message.ToJsonString() + + def _ValueMessageToJsonObject(self, message): + """Converts Value message according to Proto3 JSON Specification.""" + which = message.WhichOneof('kind') + # If the Value message is not set treat as null_value when serialize + # to JSON. The parse back result will be different from original message. 
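+    # (Editor's illustration, not in the original source: an unset Value and
+    # a Value with null_value set both serialize to JSON null, so the unset
+    # case does not round-trip back to an identical message.)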
+ if which is None or which == 'null_value': + return None + if which == 'list_value': + return self._ListValueMessageToJsonObject(message.list_value) + if which == 'number_value': + value = message.number_value + if math.isinf(value): + raise ValueError( + 'Fail to serialize Infinity for Value.number_value, ' + 'which would parse as string_value' + ) + if math.isnan(value): + raise ValueError( + 'Fail to serialize NaN for Value.number_value, ' + 'which would parse as string_value' + ) + else: + value = getattr(message, which) + oneof_descriptor = message.DESCRIPTOR.fields_by_name[which] + return self._FieldToJsonObject(oneof_descriptor, value) + + def _ListValueMessageToJsonObject(self, message): + """Converts ListValue message according to Proto3 JSON Specification.""" + return [self._ValueMessageToJsonObject(value) for value in message.values] + + def _StructMessageToJsonObject(self, message): + """Converts Struct message according to Proto3 JSON Specification.""" + fields = message.fields + ret = {} + for key in fields: + ret[key] = self._ValueMessageToJsonObject(fields[key]) + return ret + + def _WrapperMessageToJsonObject(self, message): + return self._FieldToJsonObject( + message.DESCRIPTOR.fields_by_name['value'], message.value + ) + + +def _IsWrapperMessage(message_descriptor): + return message_descriptor.file.name == 'google/protobuf/wrappers.proto' + + +def _DuplicateChecker(js): + result = {} + for name, value in js: + if name in result: + raise ParseError('Failed to load JSON: duplicate key {0}.'.format(name)) + result[name] = value + return result + + +def _CreateMessageFromTypeUrl(type_url, descriptor_pool): + """Creates a message from a type URL.""" + db = symbol_database.Default() + pool = db.pool if descriptor_pool is None else descriptor_pool + type_name = type_url.split('/')[-1] + try: + message_descriptor = pool.FindMessageTypeByName(type_name) + except KeyError as e: + raise TypeError( + 'Can not find message descriptor by type_url: {0}'.format(type_url) + ) from e + message_class = message_factory.GetMessageClass(message_descriptor) + return message_class() + + +def Parse( + text, + message, + ignore_unknown_fields=False, + descriptor_pool=None, + max_recursion_depth=100, +): + """Parses a JSON representation of a protocol message into a message. + + Args: + text: Message JSON representation. + message: A protocol buffer message to merge into. + ignore_unknown_fields: If True, do not raise errors for unknown fields. + descriptor_pool: A Descriptor Pool for resolving types. If None use the + default. + max_recursion_depth: max recursion depth of JSON message to be deserialized. + JSON messages over this depth will fail to be deserialized. Default value + is 100. + + Returns: + The same message passed as argument. + + Raises:: + ParseError: On JSON parsing problems. + """ + if not isinstance(text, str): + text = text.decode('utf-8') + + try: + js = json.loads(text, object_pairs_hook=_DuplicateChecker) + except Exception as e: + raise ParseError('Failed to load JSON: {0}.'.format(str(e))) from e + + try: + return ParseDict( + js, message, ignore_unknown_fields, descriptor_pool, max_recursion_depth + ) + except ParseError as e: + raise e + except Exception as e: + raise ParseError( + 'Failed to parse JSON: {0}: {1}.'.format(type(e).__name__, str(e)) + ) from e + + +def ParseDict( + js_dict, + message, + ignore_unknown_fields=False, + descriptor_pool=None, + max_recursion_depth=100, +): + """Parses a JSON dictionary representation into a message. 
+ + Args: + js_dict: Dict representation of a JSON message. + message: A protocol buffer message to merge into. + ignore_unknown_fields: If True, do not raise errors for unknown fields. + descriptor_pool: A Descriptor Pool for resolving types. If None use the + default. + max_recursion_depth: max recursion depth of JSON message to be deserialized. + JSON messages over this depth will fail to be deserialized. Default value + is 100. + + Returns: + The same message passed as argument. + """ + parser = _Parser(ignore_unknown_fields, descriptor_pool, max_recursion_depth) + parser.ConvertMessage(js_dict, message, '') + return message + + +_INT_OR_FLOAT = (int, float) + + +class _Parser(object): + """JSON format parser for protocol message.""" + + def __init__( + self, ignore_unknown_fields, descriptor_pool, max_recursion_depth + ): + self.ignore_unknown_fields = ignore_unknown_fields + self.descriptor_pool = descriptor_pool + self.max_recursion_depth = max_recursion_depth + self.recursion_depth = 0 + + def ConvertMessage(self, value, message, path): + """Convert a JSON object into a message. + + Args: + value: A JSON object. + message: A WKT or regular protocol message to record the data. + path: parent path to log parse error info. + + Raises: + ParseError: In case of convert problems. + """ + self.recursion_depth += 1 + if self.recursion_depth > self.max_recursion_depth: + raise ParseError( + 'Message too deep. Max recursion depth is {0}'.format( + self.max_recursion_depth + ) + ) + message_descriptor = message.DESCRIPTOR + full_name = message_descriptor.full_name + if not path: + path = message_descriptor.name + if _IsWrapperMessage(message_descriptor): + self._ConvertWrapperMessage(value, message, path) + elif full_name in _WKTJSONMETHODS: + methodcaller(_WKTJSONMETHODS[full_name][1], value, message, path)(self) + else: + self._ConvertFieldValuePair(value, message, path) + self.recursion_depth -= 1 + + def _ConvertFieldValuePair(self, js, message, path): + """Convert field value pairs into regular message. + + Args: + js: A JSON object to convert the field value pairs. + message: A regular protocol message to record the data. + path: parent path to log parse error info. + + Raises: + ParseError: In case of problems converting. + """ + names = [] + message_descriptor = message.DESCRIPTOR + fields_by_json_name = dict( + (f.json_name, f) for f in message_descriptor.fields + ) + for name in js: + try: + field = fields_by_json_name.get(name, None) + if not field: + field = message_descriptor.fields_by_name.get(name, None) + if not field and _VALID_EXTENSION_NAME.match(name): + if not message_descriptor.is_extendable: + raise ParseError( + 'Message type {0} does not have extensions at {1}'.format( + message_descriptor.full_name, path + ) + ) + identifier = name[1:-1] # strip [] brackets + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(identifier) + # pylint: enable=protected-access + if not field: + # Try looking for extension by the message type name, dropping the + # field name following the final . separator in full_name. 
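+            # (Illustrative, editor's addition: for a key like
+            # "[my.pkg.MyExtension.ext]" the first lookup tried
+            # "my.pkg.MyExtension.ext"; this fallback retries with
+            # "my.pkg.MyExtension", the MessageSet-style type name.)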
+ identifier = '.'.join(identifier.split('.')[:-1]) + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(identifier) + # pylint: enable=protected-access + if not field: + if self.ignore_unknown_fields: + continue + raise ParseError( + ( + 'Message type "{0}" has no field named "{1}" at "{2}".\n' + ' Available Fields(except extensions): "{3}"' + ).format( + message_descriptor.full_name, + name, + path, + [f.json_name for f in message_descriptor.fields], + ) + ) + if name in names: + raise ParseError( + 'Message type "{0}" should not have multiple ' + '"{1}" fields at "{2}".'.format( + message.DESCRIPTOR.full_name, name, path + ) + ) + names.append(name) + value = js[name] + # Check no other oneof field is parsed. + if field.containing_oneof is not None and value is not None: + oneof_name = field.containing_oneof.name + if oneof_name in names: + raise ParseError( + 'Message type "{0}" should not have multiple ' + '"{1}" oneof fields at "{2}".'.format( + message.DESCRIPTOR.full_name, oneof_name, path + ) + ) + names.append(oneof_name) + + if value is None: + if ( + field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE + and field.message_type.full_name == 'google.protobuf.Value' + ): + sub_message = getattr(message, field.name) + sub_message.null_value = 0 + elif ( + field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM + and field.enum_type.full_name == 'google.protobuf.NullValue' + ): + setattr(message, field.name, 0) + else: + message.ClearField(field.name) + continue + + # Parse field value. + if _IsMapEntry(field): + message.ClearField(field.name) + self._ConvertMapFieldValue( + value, message, field, '{0}.{1}'.format(path, name) + ) + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + message.ClearField(field.name) + if not isinstance(value, list): + raise ParseError( + 'repeated field {0} must be in [] which is {1} at {2}'.format( + name, value, path + ) + ) + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + # Repeated message field. + for index, item in enumerate(value): + sub_message = getattr(message, field.name).add() + # None is a null_value in Value. + if ( + item is None + and sub_message.DESCRIPTOR.full_name + != 'google.protobuf.Value' + ): + raise ParseError( + 'null is not allowed to be used as an element' + ' in a repeated field at {0}.{1}[{2}]'.format( + path, name, index + ) + ) + self.ConvertMessage( + item, sub_message, '{0}.{1}[{2}]'.format(path, name, index) + ) + else: + # Repeated scalar field. 
+ for index, item in enumerate(value): + if item is None: + raise ParseError( + 'null is not allowed to be used as an element' + ' in a repeated field at {0}.{1}[{2}]'.format( + path, name, index + ) + ) + self._ConvertAndAppendScalar( + message, field, item, '{0}.{1}[{2}]'.format(path, name, index)) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + if field.is_extension: + sub_message = message.Extensions[field] + else: + sub_message = getattr(message, field.name) + sub_message.SetInParent() + self.ConvertMessage(value, sub_message, '{0}.{1}'.format(path, name)) + else: + if field.is_extension: + self._ConvertAndSetScalarExtension(message, field, value, '{0}.{1}'.format(path, name)) + else: + self._ConvertAndSetScalar(message, field, value, '{0}.{1}'.format(path, name)) + except ParseError as e: + if field and field.containing_oneof is None: + raise ParseError( + 'Failed to parse {0} field: {1}.'.format(name, e) + ) from e + else: + raise ParseError(str(e)) from e + except ValueError as e: + raise ParseError( + 'Failed to parse {0} field: {1}.'.format(name, e) + ) from e + except TypeError as e: + raise ParseError( + 'Failed to parse {0} field: {1}.'.format(name, e) + ) from e + + def _ConvertAnyMessage(self, value, message, path): + """Convert a JSON representation into Any message.""" + if isinstance(value, dict) and not value: + return + try: + type_url = value['@type'] + except KeyError as e: + raise ParseError( + '@type is missing when parsing any message at {0}'.format(path) + ) from e + + try: + sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool) + except TypeError as e: + raise ParseError('{0} at {1}'.format(e, path)) from e + message_descriptor = sub_message.DESCRIPTOR + full_name = message_descriptor.full_name + if _IsWrapperMessage(message_descriptor): + self._ConvertWrapperMessage( + value['value'], sub_message, '{0}.value'.format(path) + ) + elif full_name in _WKTJSONMETHODS: + methodcaller( + _WKTJSONMETHODS[full_name][1], + value['value'], + sub_message, + '{0}.value'.format(path), + )(self) + else: + del value['@type'] + self._ConvertFieldValuePair(value, sub_message, path) + value['@type'] = type_url + # Sets Any message + message.value = sub_message.SerializeToString() + message.type_url = type_url + + def _ConvertGenericMessage(self, value, message, path): + """Convert a JSON representation into message with FromJsonString.""" + # Duration, Timestamp, FieldMask have a FromJsonString method to do the + # conversion. Users can also call the method directly. 
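+    # (Illustrative, editor's addition: e.g. Timestamp parses from
+    # "1970-01-01T00:00:01Z" and Duration from "1.5s", per the proto3
+    # JSON mapping.)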
+ try: + message.FromJsonString(value) + except ValueError as e: + raise ParseError('{0} at {1}'.format(e, path)) from e + + def _ConvertValueMessage(self, value, message, path): + """Convert a JSON representation into Value message.""" + if isinstance(value, dict): + self._ConvertStructMessage(value, message.struct_value, path) + elif isinstance(value, list): + self._ConvertListValueMessage(value, message.list_value, path) + elif value is None: + message.null_value = 0 + elif isinstance(value, bool): + message.bool_value = value + elif isinstance(value, str): + message.string_value = value + elif isinstance(value, _INT_OR_FLOAT): + message.number_value = value + else: + raise ParseError( + 'Value {0} has unexpected type {1} at {2}'.format( + value, type(value), path + ) + ) + + def _ConvertListValueMessage(self, value, message, path): + """Convert a JSON representation into ListValue message.""" + if not isinstance(value, list): + raise ParseError( + 'ListValue must be in [] which is {0} at {1}'.format(value, path) + ) + message.ClearField('values') + for index, item in enumerate(value): + self._ConvertValueMessage( + item, message.values.add(), '{0}[{1}]'.format(path, index) + ) + + def _ConvertStructMessage(self, value, message, path): + """Convert a JSON representation into Struct message.""" + if not isinstance(value, dict): + raise ParseError( + 'Struct must be in a dict which is {0} at {1}'.format(value, path) + ) + # Clear will mark the struct as modified so it will be created even if + # there are no values. + message.Clear() + for key in value: + self._ConvertValueMessage( + value[key], message.fields[key], '{0}.{1}'.format(path, key) + ) + return + + def _ConvertWrapperMessage(self, value, message, path): + """Convert a JSON representation into Wrapper message.""" + field = message.DESCRIPTOR.fields_by_name['value'] + self._ConvertAndSetScalar(message, field, value, path='{0}.value'.format(path)) + + def _ConvertMapFieldValue(self, value, message, field, path): + """Convert map field value for a message map field. + + Args: + value: A JSON object to convert the map field value. + message: A protocol message to record the converted data. + field: The descriptor of the map field to be converted. + path: parent path to log parse error info. + + Raises: + ParseError: In case of convert problems. 
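+
+    Example (illustrative): for a map<int32, string> field, the JSON object
+    {"1": "a"} is converted entry by entry, with the key "1" parsed back to
+    the int32 key 1 via _ConvertScalarFieldValue.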
+ """ + if not isinstance(value, dict): + raise ParseError( + 'Map field {0} must be in a dict which is {1} at {2}'.format( + field.name, value, path + ) + ) + key_field = field.message_type.fields_by_name['key'] + value_field = field.message_type.fields_by_name['value'] + for key in value: + key_value = _ConvertScalarFieldValue( + key, key_field, '{0}.key'.format(path), True + ) + if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + self.ConvertMessage( + value[key], + getattr(message, field.name)[key_value], + '{0}[{1}]'.format(path, key_value), + ) + else: + self._ConvertAndSetScalarToMapKey( + message, + field, + key_value, + value[key], + path='{0}[{1}]'.format(path, key_value)) + + def _ConvertAndSetScalarExtension(self, message, extension_field, js_value, path): + """Convert scalar from js_value and assign it to message.Extensions[extension_field].""" + try: + message.Extensions[extension_field] = _ConvertScalarFieldValue( + js_value, extension_field, path) + except EnumStringValueParseError: + if not self.ignore_unknown_fields: + raise + + def _ConvertAndSetScalar(self, message, field, js_value, path): + """Convert scalar from js_value and assign it to message.field.""" + try: + setattr( + message, + field.name, + _ConvertScalarFieldValue(js_value, field, path)) + except EnumStringValueParseError: + if not self.ignore_unknown_fields: + raise + + def _ConvertAndAppendScalar(self, message, repeated_field, js_value, path): + """Convert scalar from js_value and append it to message.repeated_field.""" + try: + getattr(message, repeated_field.name).append( + _ConvertScalarFieldValue(js_value, repeated_field, path)) + except EnumStringValueParseError: + if not self.ignore_unknown_fields: + raise + + def _ConvertAndSetScalarToMapKey(self, message, map_field, converted_key, js_value, path): + """Convert scalar from 'js_value' and add it to message.map_field[converted_key].""" + try: + getattr(message, map_field.name)[converted_key] = _ConvertScalarFieldValue( + js_value, map_field.message_type.fields_by_name['value'], path, + ) + except EnumStringValueParseError: + if not self.ignore_unknown_fields: + raise + + +def _ConvertScalarFieldValue(value, field, path, require_str=False): + """Convert a single scalar field value. + + Args: + value: A scalar value to convert the scalar field value. + field: The descriptor of the field to convert. + path: parent path to log parse error info. + require_str: If True, the field value must be a str. + + Returns: + The converted scalar field value + + Raises: + ParseError: In case of convert problems. + EnumStringValueParseError: In case of unknown enum string value. + """ + try: + if field.cpp_type in _INT_TYPES: + return _ConvertInteger(value) + elif field.cpp_type in _FLOAT_TYPES: + return _ConvertFloat(value, field) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: + return _ConvertBool(value, require_str) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + if field.type == descriptor.FieldDescriptor.TYPE_BYTES: + if isinstance(value, str): + encoded = value.encode('utf-8') + else: + encoded = value + # Add extra padding '=' + padded_value = encoded + b'=' * (4 - len(encoded) % 4) + return base64.urlsafe_b64decode(padded_value) + else: + # Checking for unpaired surrogates appears to be unreliable, + # depending on the specific Python version, so we check manually. 
+ if _UNPAIRED_SURROGATE_PATTERN.search(value): + raise ParseError('Unpaired surrogate') + return value + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: + # Convert an enum value. + enum_value = field.enum_type.values_by_name.get(value, None) + if enum_value is None: + try: + number = int(value) + enum_value = field.enum_type.values_by_number.get(number, None) + except ValueError as e: + # Since parsing to integer failed and lookup in values_by_name didn't + # find this name, we have an enum string value which is unknown. + raise EnumStringValueParseError( + 'Invalid enum value {0} for enum type {1}'.format( + value, field.enum_type.full_name + ) + ) from e + if enum_value is None: + if field.enum_type.is_closed: + raise ParseError( + 'Invalid enum value {0} for enum type {1}'.format( + value, field.enum_type.full_name + ) + ) + else: + return number + return enum_value.number + except EnumStringValueParseError as e: + raise EnumStringValueParseError('{0} at {1}'.format(e, path)) from e + except ParseError as e: + raise ParseError('{0} at {1}'.format(e, path)) from e + + +def _ConvertInteger(value): + """Convert an integer. + + Args: + value: A scalar value to convert. + + Returns: + The integer value. + + Raises: + ParseError: If an integer couldn't be consumed. + """ + if isinstance(value, float) and not value.is_integer(): + raise ParseError("Couldn't parse integer: {0}".format(value)) + + if isinstance(value, str) and value.find(' ') != -1: + raise ParseError('Couldn\'t parse integer: "{0}"'.format(value)) + + if isinstance(value, bool): + raise ParseError( + 'Bool value {0} is not acceptable for integer field'.format(value) + ) + + return int(value) + + +def _ConvertFloat(value, field): + """Convert an floating point number.""" + if isinstance(value, float): + if math.isnan(value): + raise ParseError('Couldn\'t parse NaN, use quoted "NaN" instead') + if math.isinf(value): + if value > 0: + raise ParseError( + "Couldn't parse Infinity or value too large, " + 'use quoted "Infinity" instead' + ) + else: + raise ParseError( + "Couldn't parse -Infinity or value too small, " + 'use quoted "-Infinity" instead' + ) + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: + # pylint: disable=protected-access + if value > type_checkers._FLOAT_MAX: + raise ParseError('Float value too large') + # pylint: disable=protected-access + if value < type_checkers._FLOAT_MIN: + raise ParseError('Float value too small') + if value == 'nan': + raise ParseError('Couldn\'t parse float "nan", use "NaN" instead') + try: + # Assume Python compatible syntax. + return float(value) + except ValueError as e: + # Check alternative spellings. + if value == _NEG_INFINITY: + return float('-inf') + elif value == _INFINITY: + return float('inf') + elif value == _NAN: + return float('nan') + else: + raise ParseError("Couldn't parse float: {0}".format(value)) from e + + +def _ConvertBool(value, require_str): + """Convert a boolean value. + + Args: + value: A scalar value to convert. + require_str: If True, value must be a str. + + Returns: + The bool parsed. + + Raises: + ParseError: If a boolean value couldn't be consumed. 
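+
+  Illustrative behavior (added for clarity): map keys arrive as JSON strings,
+  so _ConvertBool('true', require_str=True) returns True, while
+  _ConvertBool('true', require_str=False) raises ParseError because normal
+  bool fields must be unquoted JSON true/false.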
+ """ + if require_str: + if value == 'true': + return True + elif value == 'false': + return False + else: + raise ParseError('Expected "true" or "false", not {0}'.format(value)) + + if not isinstance(value, bool): + raise ParseError('Expected true or false without quotes') + return value + + +_WKTJSONMETHODS = { + 'google.protobuf.Any': ['_AnyMessageToJsonObject', '_ConvertAnyMessage'], + 'google.protobuf.Duration': [ + '_GenericMessageToJsonObject', + '_ConvertGenericMessage', + ], + 'google.protobuf.FieldMask': [ + '_GenericMessageToJsonObject', + '_ConvertGenericMessage', + ], + 'google.protobuf.ListValue': [ + '_ListValueMessageToJsonObject', + '_ConvertListValueMessage', + ], + 'google.protobuf.Struct': [ + '_StructMessageToJsonObject', + '_ConvertStructMessage', + ], + 'google.protobuf.Timestamp': [ + '_GenericMessageToJsonObject', + '_ConvertGenericMessage', + ], + 'google.protobuf.Value': [ + '_ValueMessageToJsonObject', + '_ConvertValueMessage', + ], +} diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/message.py b/.venv/lib/python3.12/site-packages/google/protobuf/message.py new file mode 100644 index 00000000..ae9cb147 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/message.py @@ -0,0 +1,422 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# TODO: We should just make these methods all "pure-virtual" and move +# all implementation out, into reflection.py for now. + + +"""Contains an abstract base class for protocol messages.""" + +__author__ = 'robinson@google.com (Will Robinson)' + +class Error(Exception): + """Base error type for this module.""" + pass + + +class DecodeError(Error): + """Exception raised when deserializing messages.""" + pass + + +class EncodeError(Error): + """Exception raised when serializing messages.""" + pass + + +class Message(object): + + """Abstract base class for protocol messages. + + Protocol message classes are almost always generated by the protocol + compiler. These generated types subclass Message and implement the methods + shown below. + """ + + # TODO: Link to an HTML document here. + + # TODO: Document that instances of this class will also + # have an Extensions attribute with __getitem__ and __setitem__. + # Again, not sure how to best convey this. + + # TODO: Document these fields and methods. + + __slots__ = [] + + #: The :class:`google.protobuf.Descriptor` + # for this message type. + DESCRIPTOR = None + + def __deepcopy__(self, memo=None): + clone = type(self)() + clone.MergeFrom(self) + return clone + + def __eq__(self, other_msg): + """Recursively compares two messages by value and structure.""" + raise NotImplementedError + + def __ne__(self, other_msg): + # Can't just say self != other_msg, since that would infinitely recurse. :) + return not self == other_msg + + def __hash__(self): + raise TypeError('unhashable object') + + def __str__(self): + """Outputs a human-readable representation of the message.""" + raise NotImplementedError + + def __unicode__(self): + """Outputs a human-readable representation of the message.""" + raise NotImplementedError + + def __contains__(self, field_name_or_key): + """Checks if a certain field is set for the message. + + Has presence fields return true if the field is set, false if the field is + not set. 
Fields without presence do raise `ValueError` (this includes + repeated fields, map fields, and implicit presence fields). + + If field_name is not defined in the message descriptor, `ValueError` will + be raised. + Note: WKT Struct checks if the key is contained in fields. ListValue checks + if the item is contained in the list. + + Args: + field_name_or_key: For Struct, the key (str) of the fields map. For + ListValue, any type that may be contained in the list. For other + messages, name of the field (str) to check for presence. + + Returns: + bool: For Struct, whether the item is contained in fields. For ListValue, + whether the item is contained in the list. For other message, + whether a value has been set for the named field. + + Raises: + ValueError: For normal messages, if the `field_name_or_key` is not a + member of this message or `field_name_or_key` is not a string. + """ + raise NotImplementedError + + def MergeFrom(self, other_msg): + """Merges the contents of the specified message into current message. + + This method merges the contents of the specified message into the current + message. Singular fields that are set in the specified message overwrite + the corresponding fields in the current message. Repeated fields are + appended. Singular sub-messages and groups are recursively merged. + + Args: + other_msg (Message): A message to merge into the current message. + """ + raise NotImplementedError + + def CopyFrom(self, other_msg): + """Copies the content of the specified message into the current message. + + The method clears the current message and then merges the specified + message using MergeFrom. + + Args: + other_msg (Message): A message to copy into the current one. + """ + if self is other_msg: + return + self.Clear() + self.MergeFrom(other_msg) + + def Clear(self): + """Clears all data that was set in the message.""" + raise NotImplementedError + + def SetInParent(self): + """Mark this as present in the parent. + + This normally happens automatically when you assign a field of a + sub-message, but sometimes you want to make the sub-message + present while keeping it empty. If you find yourself using this, + you may want to reconsider your design. + """ + raise NotImplementedError + + def IsInitialized(self): + """Checks if the message is initialized. + + Returns: + bool: The method returns True if the message is initialized (i.e. all of + its required fields are set). + """ + raise NotImplementedError + + # TODO: MergeFromString() should probably return None and be + # implemented in terms of a helper that returns the # of bytes read. Our + # deserialization routines would use the helper when recursively + # deserializing, but the end user would almost always just want the no-return + # MergeFromString(). + + def MergeFromString(self, serialized): + """Merges serialized protocol buffer data into this message. + + When we find a field in `serialized` that is already present + in this message: + + - If it's a "repeated" field, we append to the end of our list. + - Else, if it's a scalar, we overwrite our field. + - Else, (it's a nonrepeated composite), we recursively merge + into the existing composite. + + Args: + serialized (bytes): Any object that allows us to call + ``memoryview(serialized)`` to access a string of bytes using the + buffer interface. + + Returns: + int: The number of bytes read from `serialized`. 
+ For non-group messages, this will always be `len(serialized)`, + but for messages which are actually groups, this will + generally be less than `len(serialized)`, since we must + stop when we reach an ``END_GROUP`` tag. Note that if + we *do* stop because of an ``END_GROUP`` tag, the number + of bytes returned does not include the bytes + for the ``END_GROUP`` tag information. + + Raises: + DecodeError: if the input cannot be parsed. + """ + # TODO: Document handling of unknown fields. + # TODO: When we switch to a helper, this will return None. + raise NotImplementedError + + def ParseFromString(self, serialized): + """Parse serialized protocol buffer data in binary form into this message. + + Like :func:`MergeFromString()`, except we clear the object first. + + Raises: + message.DecodeError if the input cannot be parsed. + """ + self.Clear() + return self.MergeFromString(serialized) + + def SerializeToString(self, **kwargs): + """Serializes the protocol message to a binary string. + + Keyword Args: + deterministic (bool): If true, requests deterministic serialization + of the protobuf, with predictable ordering of map keys. + + Returns: + A binary string representation of the message if all of the required + fields in the message are set (i.e. the message is initialized). + + Raises: + EncodeError: if the message isn't initialized (see :func:`IsInitialized`). + """ + raise NotImplementedError + + def SerializePartialToString(self, **kwargs): + """Serializes the protocol message to a binary string. + + This method is similar to SerializeToString but doesn't check if the + message is initialized. + + Keyword Args: + deterministic (bool): If true, requests deterministic serialization + of the protobuf, with predictable ordering of map keys. + + Returns: + bytes: A serialized representation of the partial message. + """ + raise NotImplementedError + + # TODO: Decide whether we like these better + # than auto-generated has_foo() and clear_foo() methods + # on the instances themselves. This way is less consistent + # with C++, but it makes reflection-type access easier and + # reduces the number of magically autogenerated things. + # + # TODO: Be sure to document (and test) exactly + # which field names are accepted here. Are we case-sensitive? + # What do we do with fields that share names with Python keywords + # like 'lambda' and 'yield'? + # + # nnorwitz says: + # """ + # Typically (in python), an underscore is appended to names that are + # keywords. So they would become lambda_ or yield_. + # """ + def ListFields(self): + """Returns a list of (FieldDescriptor, value) tuples for present fields. + + A message field is non-empty if HasField() would return true. A singular + primitive field is non-empty if HasField() would return true in proto2 or it + is non zero in proto3. A repeated field is non-empty if it contains at least + one element. The fields are ordered by field number. + + Returns: + list[tuple(FieldDescriptor, value)]: field descriptors and values + for all fields in the message which are not empty. The values vary by + field type. + """ + raise NotImplementedError + + def HasField(self, field_name): + """Checks if a certain field is set for the message. + + For a oneof group, checks if any field inside is set. Note that if the + field_name is not defined in the message descriptor, :exc:`ValueError` will + be raised. + + Args: + field_name (str): The name of the field to check for presence. + + Returns: + bool: Whether a value has been set for the named field. 
+ + Raises: + ValueError: if the `field_name` is not a member of this message. + """ + raise NotImplementedError + + def ClearField(self, field_name): + """Clears the contents of a given field. + + Inside a oneof group, clears the field set. If the name neither refers to a + defined field or oneof group, :exc:`ValueError` is raised. + + Args: + field_name (str): The name of the field to check for presence. + + Raises: + ValueError: if the `field_name` is not a member of this message. + """ + raise NotImplementedError + + def WhichOneof(self, oneof_group): + """Returns the name of the field that is set inside a oneof group. + + If no field is set, returns None. + + Args: + oneof_group (str): the name of the oneof group to check. + + Returns: + str or None: The name of the group that is set, or None. + + Raises: + ValueError: no group with the given name exists + """ + raise NotImplementedError + + def HasExtension(self, field_descriptor): + """Checks if a certain extension is present for this message. + + Extensions are retrieved using the :attr:`Extensions` mapping (if present). + + Args: + field_descriptor: The field descriptor for the extension to check. + + Returns: + bool: Whether the extension is present for this message. + + Raises: + KeyError: if the extension is repeated. Similar to repeated fields, + there is no separate notion of presence: a "not present" repeated + extension is an empty list. + """ + raise NotImplementedError + + def ClearExtension(self, field_descriptor): + """Clears the contents of a given extension. + + Args: + field_descriptor: The field descriptor for the extension to clear. + """ + raise NotImplementedError + + def UnknownFields(self): + """Returns the UnknownFieldSet. + + Returns: + UnknownFieldSet: The unknown fields stored in this message. + """ + raise NotImplementedError + + def DiscardUnknownFields(self): + """Clears all fields in the :class:`UnknownFieldSet`. + + This operation is recursive for nested message. + """ + raise NotImplementedError + + def ByteSize(self): + """Returns the serialized size of this message. + + Recursively calls ByteSize() on all contained messages. + + Returns: + int: The number of bytes required to serialize this message. + """ + raise NotImplementedError + + @classmethod + def FromString(cls, s): + raise NotImplementedError + + def _SetListener(self, message_listener): + """Internal method used by the protocol message implementation. + Clients should not call this directly. + + Sets a listener that this message will call on certain state transitions. + + The purpose of this method is to register back-edges from children to + parents at runtime, for the purpose of setting "has" bits and + byte-size-dirty bits in the parent and ancestor objects whenever a child or + descendant object is modified. + + If the client wants to disconnect this Message from the object tree, she + explicitly sets callback to None. + + If message_listener is None, unregisters any existing listener. Otherwise, + message_listener must implement the MessageListener interface in + internal/message_listener.py, and we discard any listener registered + via a previous _SetListener() call. 
+ """ + raise NotImplementedError + + def __getstate__(self): + """Support the pickle protocol.""" + return dict(serialized=self.SerializePartialToString()) + + def __setstate__(self, state): + """Support the pickle protocol.""" + self.__init__() + serialized = state['serialized'] + # On Python 3, using encoding='latin1' is required for unpickling + # protos pickled by Python 2. + if not isinstance(serialized, bytes): + serialized = serialized.encode('latin1') + self.ParseFromString(serialized) + + def __reduce__(self): + message_descriptor = self.DESCRIPTOR + if message_descriptor.containing_type is None: + return type(self), (), self.__getstate__() + # the message type must be nested. + # Python does not pickle nested classes; use the symbol_database on the + # receiving end. + container = message_descriptor + return (_InternalConstructMessage, (container.full_name,), + self.__getstate__()) + + +def _InternalConstructMessage(full_name): + """Constructs a nested message.""" + from google.protobuf import symbol_database # pylint:disable=g-import-not-at-top + + return symbol_database.Default().GetSymbol(full_name)() diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/message_factory.py b/.venv/lib/python3.12/site-packages/google/protobuf/message_factory.py new file mode 100644 index 00000000..05e330d0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/message_factory.py @@ -0,0 +1,237 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Provides a factory class for generating dynamic messages. + +The easiest way to use this class is if you have access to the FileDescriptor +protos containing the messages you want to create you can just do the following: + +message_classes = message_factory.GetMessages(iterable_of_file_descriptors) +my_proto_instance = message_classes['some.proto.package.MessageName']() +""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +import warnings + +from google.protobuf import descriptor_pool +from google.protobuf import message +from google.protobuf.internal import api_implementation + +if api_implementation.Type() == 'python': + from google.protobuf.internal import python_message as message_impl +else: + from google.protobuf.pyext import cpp_message as message_impl # pylint: disable=g-import-not-at-top + + +# The type of all Message classes. +_GENERATED_PROTOCOL_MESSAGE_TYPE = message_impl.GeneratedProtocolMessageType + + +def GetMessageClass(descriptor): + """Obtains a proto2 message class based on the passed in descriptor. + + Passing a descriptor with a fully qualified name matching a previous + invocation will cause the same class to be returned. + + Args: + descriptor: The descriptor to build from. + + Returns: + A class describing the passed in descriptor. + """ + concrete_class = getattr(descriptor, '_concrete_class', None) + if concrete_class: + return concrete_class + return _InternalCreateMessageClass(descriptor) + + +def GetMessageClassesForFiles(files, pool): + """Gets all the messages from specified files. + + This will find and resolve dependencies, failing if the descriptor + pool cannot satisfy them. + + Args: + files: The file names to extract messages from. + pool: The descriptor pool to find the files including the dependent files. 
+ + Returns: + A dictionary mapping proto names to the message classes. + """ + result = {} + for file_name in files: + file_desc = pool.FindFileByName(file_name) + for desc in file_desc.message_types_by_name.values(): + result[desc.full_name] = GetMessageClass(desc) + + # While the extension FieldDescriptors are created by the descriptor pool, + # the python classes created in the factory need them to be registered + # explicitly, which is done below. + # + # The call to RegisterExtension will specifically check if the + # extension was already registered on the object and either + # ignore the registration if the original was the same, or raise + # an error if they were different. + + for extension in file_desc.extensions_by_name.values(): + _ = GetMessageClass(extension.containing_type) + if api_implementation.Type() != 'python': + # TODO: Remove this check here. Duplicate extension + # register check should be in descriptor_pool. + if extension is not pool.FindExtensionByNumber( + extension.containing_type, extension.number + ): + raise ValueError('Double registration of Extensions') + # Recursively load protos for extension field, in order to be able to + # fully represent the extension. This matches the behavior for regular + # fields too. + if extension.message_type: + GetMessageClass(extension.message_type) + return result + + +def _InternalCreateMessageClass(descriptor): + """Builds a proto2 message class based on the passed in descriptor. + + Args: + descriptor: The descriptor to build from. + + Returns: + A class describing the passed in descriptor. + """ + descriptor_name = descriptor.name + result_class = _GENERATED_PROTOCOL_MESSAGE_TYPE( + descriptor_name, + (message.Message,), + { + 'DESCRIPTOR': descriptor, + # If module not set, it wrongly points to message_factory module. + '__module__': None, + }, + ) + for field in descriptor.fields: + if field.message_type: + GetMessageClass(field.message_type) + + for extension in result_class.DESCRIPTOR.extensions: + extended_class = GetMessageClass(extension.containing_type) + if api_implementation.Type() != 'python': + # TODO: Remove this check here. Duplicate extension + # register check should be in descriptor_pool. + pool = extension.containing_type.file.pool + if extension is not pool.FindExtensionByNumber( + extension.containing_type, extension.number + ): + raise ValueError('Double registration of Extensions') + if extension.message_type: + GetMessageClass(extension.message_type) + return result_class + + +# Deprecated. Please use GetMessageClass() or GetMessageClassesForFiles() +# method above instead. +class MessageFactory(object): + """Factory for creating Proto2 messages from descriptors in a pool.""" + + def __init__(self, pool=None): + """Initializes a new factory.""" + self.pool = pool or descriptor_pool.DescriptorPool() + + def GetPrototype(self, descriptor): + """Obtains a proto2 message class based on the passed in descriptor. + + Passing a descriptor with a fully qualified name matching a previous + invocation will cause the same class to be returned. + + Args: + descriptor: The descriptor to build from. + + Returns: + A class describing the passed in descriptor. + """ + warnings.warn( + 'MessageFactory class is deprecated. Please use ' + 'GetMessageClass() instead of MessageFactory.GetPrototype. 
' + 'MessageFactory class will be removed after 2024.', + stacklevel=2, + ) + return GetMessageClass(descriptor) + + def CreatePrototype(self, descriptor): + """Builds a proto2 message class based on the passed in descriptor. + + Don't call this function directly, it always creates a new class. Call + GetMessageClass() instead. + + Args: + descriptor: The descriptor to build from. + + Returns: + A class describing the passed in descriptor. + """ + warnings.warn( + 'Directly call CreatePrototype is wrong. Please use ' + 'GetMessageClass() method instead. Directly use ' + 'CreatePrototype will raise error after July 2023.', + stacklevel=2, + ) + return _InternalCreateMessageClass(descriptor) + + def GetMessages(self, files): + """Gets all the messages from a specified file. + + This will find and resolve dependencies, failing if the descriptor + pool cannot satisfy them. + + Args: + files: The file names to extract messages from. + + Returns: + A dictionary mapping proto names to the message classes. This will include + any dependent messages as well as any messages defined in the same file as + a specified message. + """ + warnings.warn( + 'MessageFactory class is deprecated. Please use ' + 'GetMessageClassesForFiles() instead of ' + 'MessageFactory.GetMessages(). MessageFactory class ' + 'will be removed after 2024.', + stacklevel=2, + ) + return GetMessageClassesForFiles(files, self.pool) + + +def GetMessages(file_protos, pool=None): + """Builds a dictionary of all the messages available in a set of files. + + Args: + file_protos: Iterable of FileDescriptorProto to build messages out of. + pool: The descriptor pool to add the file protos. + + Returns: + A dictionary mapping proto names to the message classes. This will include + any dependent messages as well as any messages defined in the same file as + a specified message. + """ + # The cpp implementation of the protocol buffer library requires to add the + # message in topological order of the dependency graph. + des_pool = pool or descriptor_pool.DescriptorPool() + file_by_name = {file_proto.name: file_proto for file_proto in file_protos} + + def _AddFile(file_proto): + for dependency in file_proto.dependency: + if dependency in file_by_name: + # Remove from elements to be visited, in order to cut cycles. + _AddFile(file_by_name.pop(dependency)) + des_pool.Add(file_proto) + + while file_by_name: + _AddFile(file_by_name.popitem()[1]) + return GetMessageClassesForFiles( + [file_proto.name for file_proto in file_protos], des_pool + ) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/proto.py b/.venv/lib/python3.12/site-packages/google/protobuf/proto.py new file mode 100644 index 00000000..df0a0d02 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/proto.py @@ -0,0 +1,116 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Contains the Nextgen Pythonic protobuf APIs.""" + +import io +from typing import Type, TypeVar + +from google.protobuf.internal import decoder +from google.protobuf.internal import encoder +from google.protobuf.message import Message + +_MESSAGE = TypeVar('_MESSAGE', bound='Message') + + +def serialize(message: _MESSAGE, deterministic: bool = None) -> bytes: + """Return the serialized proto. + + Args: + message: The proto message to be serialized. 
+ deterministic: If true, requests deterministic serialization + of the protobuf, with predictable ordering of map keys. + + Returns: + A binary bytes representation of the message. + """ + return message.SerializeToString(deterministic=deterministic) + + +def parse(message_class: Type[_MESSAGE], payload: bytes) -> _MESSAGE: + """Given a serialized data in binary form, deserialize it into a Message. + + Args: + message_class: The message meta class. + payload: A serialized bytes in binary form. + + Returns: + A new message deserialized from payload. + """ + new_message = message_class() + new_message.ParseFromString(payload) + return new_message + + +def serialize_length_prefixed(message: _MESSAGE, output: io.BytesIO) -> None: + """Writes the size of the message as a varint and the serialized message. + + Writes the size of the message as a varint and then the serialized message. + This allows more data to be written to the output after the message. Use + parse_length_prefixed to parse messages written by this method. + + The output stream must be buffered, e.g. using + https://docs.python.org/3/library/io.html#buffered-streams. + + Example usage: + out = io.BytesIO() + for msg in message_list: + proto.serialize_length_prefixed(msg, out) + + Args: + message: The protocol buffer message that should be serialized. + output: BytesIO or custom buffered IO that data should be written to. + """ + size = message.ByteSize() + encoder._VarintEncoder()(output.write, size) + out_size = output.write(serialize(message)) + + if out_size != size: + raise TypeError( + 'Failed to write complete message (wrote: %d, expected: %d)' + '. Ensure output is using buffered IO.' % (out_size, size) + ) + + +def parse_length_prefixed( + message_class: Type[_MESSAGE], input_bytes: io.BytesIO +) -> _MESSAGE: + """Parse a message from input_bytes. + + Args: + message_class: The protocol buffer message class that parser should parse. + input_bytes: A buffered input. + + Example usage: + while True: + msg = proto.parse_length_prefixed(message_class, input_bytes) + if msg is None: + break + ... + + Returns: + A parsed message if successful. None if input_bytes is at EOF. + """ + size = decoder._DecodeVarint(input_bytes) + if size is None: + # It is the end of buffered input. See example usage in the + # API description. + return None + + message = message_class() + + if size == 0: + return message + + parsed_size = message.ParseFromString(input_bytes.read(size)) + if parsed_size != size: + raise ValueError( + 'Truncated message or non-buffered input_bytes: ' + 'Expected {0} bytes but only {1} bytes parsed for ' + '{2}.'.format(size, parsed_size, message.DESCRIPTOR.name) + ) + return message diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/proto_builder.py b/.venv/lib/python3.12/site-packages/google/protobuf/proto_builder.py new file mode 100644 index 00000000..803d0040 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/proto_builder.py @@ -0,0 +1,111 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
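+#
+# Editorial note (not part of the library): a minimal round-trip sketch for
+# the length-prefixed API defined in proto.py above. `my_pb2.MyMessage` is a
+# hypothetical generated type used only for illustration:
+#
+#   import io
+#   from google.protobuf import proto
+#
+#   out = io.BytesIO()
+#   for msg in (my_pb2.MyMessage(), my_pb2.MyMessage()):
+#     proto.serialize_length_prefixed(msg, out)
+#
+#   buf = io.BytesIO(out.getvalue())
+#   while (msg := proto.parse_length_prefixed(my_pb2.MyMessage, buf)) is not None:
+#     pass  # each msg is one parsed MyMessage
+#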
+# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Dynamic Protobuf class creator.""" + +from collections import OrderedDict +import hashlib +import os + +from google.protobuf import descriptor_pb2 +from google.protobuf import descriptor +from google.protobuf import descriptor_pool +from google.protobuf import message_factory + + +def _GetMessageFromFactory(pool, full_name): + """Get a proto class from the MessageFactory by name. + + Args: + pool: a descriptor pool. + full_name: str, the fully qualified name of the proto type. + Returns: + A class, for the type identified by full_name. + Raises: + KeyError, if the proto is not found in the factory's descriptor pool. + """ + proto_descriptor = pool.FindMessageTypeByName(full_name) + proto_cls = message_factory.GetMessageClass(proto_descriptor) + return proto_cls + + +def MakeSimpleProtoClass(fields, full_name=None, pool=None): + """Create a Protobuf class whose fields are basic types. + + Note: this doesn't validate field names! + + Args: + fields: dict of {name: field_type} mappings for each field in the proto. If + this is an OrderedDict the order will be maintained, otherwise the + fields will be sorted by name. + full_name: optional str, the fully-qualified name of the proto type. + pool: optional DescriptorPool instance. + Returns: + a class, the new protobuf class with a FileDescriptor. + """ + pool_instance = pool or descriptor_pool.DescriptorPool() + if full_name is not None: + try: + proto_cls = _GetMessageFromFactory(pool_instance, full_name) + return proto_cls + except KeyError: + # The factory's DescriptorPool doesn't know about this class yet. + pass + + # Get a list of (name, field_type) tuples from the fields dict. If fields was + # an OrderedDict we keep the order, but otherwise we sort the field to ensure + # consistent ordering. + field_items = fields.items() + if not isinstance(fields, OrderedDict): + field_items = sorted(field_items) + + # Use a consistent file name that is unlikely to conflict with any imported + # proto files. + fields_hash = hashlib.sha1() + for f_name, f_type in field_items: + fields_hash.update(f_name.encode('utf-8')) + fields_hash.update(str(f_type).encode('utf-8')) + proto_file_name = fields_hash.hexdigest() + '.proto' + + # If the proto is anonymous, use the same hash to name it. + if full_name is None: + full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' + + fields_hash.hexdigest()) + try: + proto_cls = _GetMessageFromFactory(pool_instance, full_name) + return proto_cls + except KeyError: + # The factory's DescriptorPool doesn't know about this class yet. + pass + + # This is the first time we see this proto: add a new descriptor to the pool. 
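+  # Editorial usage sketch for this function (hypothetical field names;
+  # kept as a comment so the code path below is unchanged):
+  #
+  #   from google.protobuf import descriptor_pb2, proto_builder
+  #   ProtoCls = proto_builder.MakeSimpleProtoClass(
+  #       {'foo': descriptor_pb2.FieldDescriptorProto.TYPE_INT64,
+  #        'bar': descriptor_pb2.FieldDescriptorProto.TYPE_STRING},
+  #       full_name='test.FooBar')
+  #   msg = ProtoCls(foo=1, bar='x')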
+ pool_instance.Add( + _MakeFileDescriptorProto(proto_file_name, full_name, field_items)) + return _GetMessageFromFactory(pool_instance, full_name) + + +def _MakeFileDescriptorProto(proto_file_name, full_name, field_items): + """Populate FileDescriptorProto for MessageFactory's DescriptorPool.""" + package, name = full_name.rsplit('.', 1) + file_proto = descriptor_pb2.FileDescriptorProto() + file_proto.name = os.path.join(package.replace('.', '/'), proto_file_name) + file_proto.package = package + desc_proto = file_proto.message_type.add() + desc_proto.name = name + for f_number, (f_name, f_type) in enumerate(field_items, 1): + field_proto = desc_proto.field.add() + field_proto.name = f_name + # # If the number falls in the reserved range, reassign it to the correct + # # number after the range. + if f_number >= descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER: + f_number += ( + descriptor.FieldDescriptor.LAST_RESERVED_FIELD_NUMBER - + descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER + 1) + field_proto.number = f_number + field_proto.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL + field_proto.type = f_type + return file_proto diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/proto_json.py b/.venv/lib/python3.12/site-packages/google/protobuf/proto_json.py new file mode 100644 index 00000000..ea670ab0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/proto_json.py @@ -0,0 +1,83 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Contains the Nextgen Pythonic Protobuf JSON APIs.""" + +from typing import Optional, Type + +from google.protobuf.message import Message +from google.protobuf.descriptor_pool import DescriptorPool +from google.protobuf import json_format + +def serialize( + message: Message, + always_print_fields_with_no_presence: bool=False, + preserving_proto_field_name: bool=False, + use_integers_for_enums: bool=False, + descriptor_pool: Optional[DescriptorPool]=None, + float_precision: int=None, +) -> dict: + """Converts protobuf message to a dictionary. + + When the dictionary is encoded to JSON, it conforms to proto3 JSON spec. + + Args: + message: The protocol buffers message instance to serialize. + always_print_fields_with_no_presence: If True, fields without + presence (implicit presence scalars, repeated fields, and map fields) will + always be serialized. Any field that supports presence is not affected by + this option (including singular message fields and oneof fields). + preserving_proto_field_name: If True, use the original proto field names as + defined in the .proto file. If False, convert the field names to + lowerCamelCase. + use_integers_for_enums: If true, print integers instead of enum names. + descriptor_pool: A Descriptor Pool for resolving types. If None use the + default. + float_precision: If set, use this to specify float field valid digits. + + Returns: + A dict representation of the protocol buffer message. 
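+
+  Example (editorial sketch; ``my_pb2.MyMessage`` is a hypothetical
+  generated type)::
+
+    from google.protobuf import proto_json
+    js_dict = proto_json.serialize(my_pb2.MyMessage(name='hello'))
+    # e.g. {'name': 'hello'}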
+  """
+  return json_format.MessageToDict(
+      message,
+      always_print_fields_with_no_presence=always_print_fields_with_no_presence,
+      preserving_proto_field_name=preserving_proto_field_name,
+      use_integers_for_enums=use_integers_for_enums,
+      descriptor_pool=descriptor_pool,
+      float_precision=float_precision,
+  )
+
+def parse(
+    message_class: Type[Message],
+    js_dict: dict,
+    ignore_unknown_fields: bool=False,
+    descriptor_pool: Optional[DescriptorPool]=None,
+    max_recursion_depth: int=100
+) -> Message:
+  """Parses a JSON dictionary representation into a message.
+
+  Args:
+    message_class: The message class to instantiate and parse into.
+    js_dict: Dict representation of a JSON message.
+    ignore_unknown_fields: If True, do not raise errors for unknown fields.
+    descriptor_pool: A Descriptor Pool for resolving types. If None use the
+      default.
+    max_recursion_depth: Max recursion depth of the JSON message to be
+      deserialized. JSON messages over this depth will fail to be
+      deserialized. Default value is 100.
+
+  Returns:
+    A new message parsed from js_dict.
+  """
+  new_message = message_class()
+  json_format.ParseDict(
+      js_dict=js_dict,
+      message=new_message,
+      ignore_unknown_fields=ignore_unknown_fields,
+      descriptor_pool=descriptor_pool,
+      max_recursion_depth=max_recursion_depth,
+  )
+  return new_message
diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/pyext/__init__.py b/.venv/lib/python3.12/site-packages/google/protobuf/pyext/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/protobuf/pyext/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/pyext/cpp_message.py b/.venv/lib/python3.12/site-packages/google/protobuf/pyext/cpp_message.py
new file mode 100644
index 00000000..54cfe306
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/protobuf/pyext/cpp_message.py
@@ -0,0 +1,49 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Protocol message implementation hooks for C++ implementation.
+
+Contains helper functions used to create protocol message classes from
+Descriptor objects at runtime backed by the protocol buffer C++ API.
+"""
+
+__author__ = 'tibell@google.com (Johan Tibell)'
+
+from google.protobuf.internal import api_implementation
+
+
+# pylint: disable=protected-access
+_message = api_implementation._c_module
+# TODO: Remove this import after fixing api_implementation.
+if _message is None:
+  from google.protobuf.pyext import _message
+
+
+class GeneratedProtocolMessageType(_message.MessageMeta):
+
+  """Metaclass for protocol message classes created at runtime from Descriptors.
+
+  The protocol compiler currently uses this metaclass to create protocol
+  message classes at runtime. Clients can also manually create their own
+  classes at runtime, as in this example:
+
+    mydescriptor = Descriptor(.....)
+    factory = symbol_database.Default()
+    factory.pool.AddDescriptor(mydescriptor)
+    MyProtoClass = message_factory.GetMessageClass(mydescriptor)
+    myproto_instance = MyProtoClass()
+    myproto_instance.foo_field = 23
+    ...
+
+  The above example will not work for nested types. If you wish to include them,
+  use reflection.MakeClass() instead of manually instantiating the class in
+  order to create the appropriate class structure.
+ """ + + # Must be consistent with the protocol-compiler code in + # proto2/compiler/internal/generator.*. + _DESCRIPTOR_KEY = 'DESCRIPTOR' diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/reflection.py b/.venv/lib/python3.12/site-packages/google/protobuf/reflection.py new file mode 100644 index 00000000..0943c2fe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/reflection.py @@ -0,0 +1,85 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# This code is meant to work on Python 2.4 and above only. + +"""Contains a metaclass and helper functions used to create +protocol message classes from Descriptor objects at runtime. + +Recall that a metaclass is the "type" of a class. +(A class is to a metaclass what an instance is to a class.) + +In this case, we use the GeneratedProtocolMessageType metaclass +to inject all the useful functionality into the classes +output by the protocol compiler at compile-time. + +The upshot of all this is that the real implementation +details for ALL pure-Python protocol buffers are *here in +this file*. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import warnings + +from google.protobuf import message_factory +from google.protobuf import symbol_database + +# The type of all Message classes. +# Part of the public interface, but normally only used by message factories. +GeneratedProtocolMessageType = message_factory._GENERATED_PROTOCOL_MESSAGE_TYPE + +MESSAGE_CLASS_CACHE = {} + + +# Deprecated. Please NEVER use reflection.ParseMessage(). +def ParseMessage(descriptor, byte_str): + """Generate a new Message instance from this Descriptor and a byte string. + + DEPRECATED: ParseMessage is deprecated because it is using MakeClass(). + Please use MessageFactory.GetMessageClass() instead. + + Args: + descriptor: Protobuf Descriptor object + byte_str: Serialized protocol buffer byte string + + Returns: + Newly created protobuf Message object. + """ + warnings.warn( + 'reflection.ParseMessage() is deprecated. Please use ' + 'MessageFactory.GetMessageClass() and message.ParseFromString() instead. ' + 'reflection.ParseMessage() will be removed in Jan 2025.', + stacklevel=2, + ) + result_class = MakeClass(descriptor) + new_msg = result_class() + new_msg.ParseFromString(byte_str) + return new_msg + + +# Deprecated. Please NEVER use reflection.MakeClass(). +def MakeClass(descriptor): + """Construct a class object for a protobuf described by descriptor. + + DEPRECATED: use MessageFactory.GetMessageClass() instead. + + Args: + descriptor: A descriptor.Descriptor object describing the protobuf. + Returns: + The Message class object described by the descriptor. + """ + warnings.warn( + 'reflection.MakeClass() is deprecated. Please use ' + 'MessageFactory.GetMessageClass() instead. ' + 'reflection.MakeClass() will be removed in Jan 2025.', + stacklevel=2, + ) + # Original implementation leads to duplicate message classes, which won't play + # well with extensions. Message factory info is also missing. + # Redirect to message_factory. 
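+  # Editorial migration sketch (hypothetical names), equivalent to the
+  # deprecated ParseMessage() above:
+  #
+  #   cls = message_factory.GetMessageClass(descriptor)
+  #   msg = cls()
+  #   msg.ParseFromString(byte_str)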
+ return message_factory.GetMessageClass(descriptor) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/runtime_version.py b/.venv/lib/python3.12/site-packages/google/protobuf/runtime_version.py new file mode 100644 index 00000000..e20e4653 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/runtime_version.py @@ -0,0 +1,124 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Protobuf Runtime versions and validators. + +It should only be accessed by Protobuf gencodes and tests. DO NOT USE it +elsewhere. +""" + +__author__ = 'shaod@google.com (Dennis Shao)' + +from enum import Enum +import os +import warnings + + +class Domain(Enum): + GOOGLE_INTERNAL = 1 + PUBLIC = 2 + + +# The versions of this Python Protobuf runtime to be changed automatically by +# the Protobuf release process. Do not edit them manually. +# These OSS versions are not stripped to avoid merging conflicts. +OSS_DOMAIN = Domain.PUBLIC +OSS_MAJOR = 5 +OSS_MINOR = 29 +OSS_PATCH = 4 +OSS_SUFFIX = '' + +DOMAIN = OSS_DOMAIN +MAJOR = OSS_MAJOR +MINOR = OSS_MINOR +PATCH = OSS_PATCH +SUFFIX = OSS_SUFFIX + +# Avoid flooding of warnings. +_MAX_WARNING_COUNT = 20 +_warning_count = 0 + +class VersionError(Exception): + """Exception class for version violation.""" + + +def _ReportVersionError(msg): + raise VersionError(msg) + + +def ValidateProtobufRuntimeVersion( + gen_domain, gen_major, gen_minor, gen_patch, gen_suffix, location +): + """Function to validate versions. + + Args: + gen_domain: The domain where the code was generated from. + gen_major: The major version number of the gencode. + gen_minor: The minor version number of the gencode. + gen_patch: The patch version number of the gencode. + gen_suffix: The version suffix e.g. '-dev', '-rc1' of the gencode. + location: The proto location that causes the version violation. + + Raises: + VersionError: if gencode version is invalid or incompatible with the + runtime. + """ + + disable_flag = os.getenv('TEMPORARILY_DISABLE_PROTOBUF_VERSION_CHECK') + if disable_flag is not None and disable_flag.lower() == 'true': + return + + global _warning_count + + version = f'{MAJOR}.{MINOR}.{PATCH}{SUFFIX}' + gen_version = f'{gen_major}.{gen_minor}.{gen_patch}{gen_suffix}' + + if gen_major < 0 or gen_minor < 0 or gen_patch < 0: + raise VersionError(f'Invalid gencode version: {gen_version}') + + error_prompt = ( + 'See Protobuf version guarantees at' + ' https://protobuf.dev/support/cross-version-runtime-guarantee.' + ) + + if gen_domain != DOMAIN: + _ReportVersionError( + 'Detected mismatched Protobuf Gencode/Runtime domains when loading' + f' {location}: gencode {gen_domain.name} runtime {DOMAIN.name}.' + ' Cross-domain usage of Protobuf is not supported.' + ) + + if gen_major != MAJOR: + if gen_major == MAJOR - 1: + if _warning_count < _MAX_WARNING_COUNT: + warnings.warn( + 'Protobuf gencode version %s is exactly one major version older' + ' than the runtime version %s at %s. Please update the gencode to' + ' avoid compatibility violations in the next runtime release.' + % (gen_version, version, location) + ) + _warning_count += 1 + else: + _ReportVersionError( + 'Detected mismatched Protobuf Gencode/Runtime major versions when' + f' loading {location}: gencode {gen_version} runtime {version}.' + f' Same major version is required. 
{error_prompt}' + ) + + if MINOR < gen_minor or (MINOR == gen_minor and PATCH < gen_patch): + _ReportVersionError( + 'Detected incompatible Protobuf Gencode/Runtime versions when loading' + f' {location}: gencode {gen_version} runtime {version}. Runtime version' + f' cannot be older than the linked gencode version. {error_prompt}' + ) + + if gen_suffix != SUFFIX: + _ReportVersionError( + 'Detected mismatched Protobuf Gencode/Runtime version suffixes when' + f' loading {location}: gencode {gen_version} runtime {version}.' + f' Version suffixes must be the same. {error_prompt}' + ) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/service.py b/.venv/lib/python3.12/site-packages/google/protobuf/service.py new file mode 100644 index 00000000..38b82179 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/service.py @@ -0,0 +1,213 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""DEPRECATED: Declares the RPC service interfaces. + +This module declares the abstract interfaces underlying proto2 RPC +services. These are intended to be independent of any particular RPC +implementation, so that proto2 services can be used on top of a variety +of implementations. Starting with version 2.3.0, RPC implementations should +not try to build on these, but should instead provide code generator plugins +which generate code specific to the particular RPC implementation. This way +the generated code can be more appropriate for the implementation in use +and can avoid unnecessary layers of indirection. +""" + +__author__ = 'petar@google.com (Petar Petrov)' + +import warnings + +warnings.warn( + 'google.protobuf.service module is deprecated. RPC implementations ' + 'should provide code generator plugins which generate code specific to ' + 'the RPC implementation. service.py will be removed in Jan 2025', + stacklevel=2, +) + +class RpcException(Exception): + """Exception raised on failed blocking RPC method call.""" + pass + + +class Service(object): + + """Abstract base interface for protocol-buffer-based RPC services. + + Services themselves are abstract classes (implemented either by servers or as + stubs), but they subclass this base interface. The methods of this + interface can be used to call the methods of the service without knowing + its exact type at compile time (analogous to the Message interface). + """ + + def GetDescriptor(): + """Retrieves this service's descriptor.""" + raise NotImplementedError + + def CallMethod(self, method_descriptor, rpc_controller, + request, done): + """Calls a method of the service specified by method_descriptor. + + If "done" is None then the call is blocking and the response + message will be returned directly. Otherwise the call is asynchronous + and "done" will later be called with the response value. + + In the blocking case, RpcException will be raised on error. + + Preconditions: + + * method_descriptor.service == GetDescriptor + * request is of the exact same classes as returned by + GetRequestClass(method). + * After the call has started, the request must not be modified. + * "rpc_controller" is of the correct type for the RPC implementation being + used by this Service. For stubs, the "correct type" depends on the + RpcChannel which the stub is using. 
+ + Postconditions: + + * "done" will be called when the method is complete. This may be + before CallMethod() returns or it may be at some point in the future. + * If the RPC failed, the response value passed to "done" will be None. + Further details about the failure can be found by querying the + RpcController. + """ + raise NotImplementedError + + def GetRequestClass(self, method_descriptor): + """Returns the class of the request message for the specified method. + + CallMethod() requires that the request is of a particular subclass of + Message. GetRequestClass() gets the default instance of this required + type. + + Example: + method = service.GetDescriptor().FindMethodByName("Foo") + request = stub.GetRequestClass(method)() + request.ParseFromString(input) + service.CallMethod(method, request, callback) + """ + raise NotImplementedError + + def GetResponseClass(self, method_descriptor): + """Returns the class of the response message for the specified method. + + This method isn't really needed, as the RpcChannel's CallMethod constructs + the response protocol message. It's provided anyway in case it is useful + for the caller to know the response type in advance. + """ + raise NotImplementedError + + +class RpcController(object): + + """An RpcController mediates a single method call. + + The primary purpose of the controller is to provide a way to manipulate + settings specific to the RPC implementation and to find out about RPC-level + errors. The methods provided by the RpcController interface are intended + to be a "least common denominator" set of features which we expect all + implementations to support. Specific implementations may provide more + advanced features (e.g. deadline propagation). + """ + + # Client-side methods below + + def Reset(self): + """Resets the RpcController to its initial state. + + After the RpcController has been reset, it may be reused in + a new call. Must not be called while an RPC is in progress. + """ + raise NotImplementedError + + def Failed(self): + """Returns true if the call failed. + + After a call has finished, returns true if the call failed. The possible + reasons for failure depend on the RPC implementation. Failed() must not + be called before a call has finished. If Failed() returns true, the + contents of the response message are undefined. + """ + raise NotImplementedError + + def ErrorText(self): + """If Failed is true, returns a human-readable description of the error.""" + raise NotImplementedError + + def StartCancel(self): + """Initiate cancellation. + + Advises the RPC system that the caller desires that the RPC call be + canceled. The RPC system may cancel it immediately, may wait awhile and + then cancel it, or may not even cancel the call at all. If the call is + canceled, the "done" callback will still be called and the RpcController + will indicate that the call failed at that time. + """ + raise NotImplementedError + + # Server-side methods below + + def SetFailed(self, reason): + """Sets a failure reason. + + Causes Failed() to return true on the client side. "reason" will be + incorporated into the message returned by ErrorText(). If you find + you need to return machine-readable information about failures, you + should incorporate it into your response protocol buffer and should + NOT call SetFailed(). + """ + raise NotImplementedError + + def IsCanceled(self): + """Checks if the client cancelled the RPC. + + If true, indicates that the client canceled the RPC, so the server may + as well give up on replying to it. 
The server should still call the + final "done" callback. + """ + raise NotImplementedError + + def NotifyOnCancel(self, callback): + """Sets a callback to invoke on cancel. + + Asks that the given callback be called when the RPC is canceled. The + callback will always be called exactly once. If the RPC completes without + being canceled, the callback will be called after completion. If the RPC + has already been canceled when NotifyOnCancel() is called, the callback + will be called immediately. + + NotifyOnCancel() must be called no more than once per request. + """ + raise NotImplementedError + + +class RpcChannel(object): + + """Abstract interface for an RPC channel. + + An RpcChannel represents a communication line to a service which can be used + to call that service's methods. The service may be running on another + machine. Normally, you should not use an RpcChannel directly, but instead + construct a stub {@link Service} wrapping it. Example: + + Example: + RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234") + RpcController controller = rpcImpl.Controller() + MyService service = MyService_Stub(channel) + service.MyMethod(controller, request, callback) + """ + + def CallMethod(self, method_descriptor, rpc_controller, + request, response_class, done): + """Calls the method identified by the descriptor. + + Call the given method of the remote service. The signature of this + procedure looks the same as Service.CallMethod(), but the requirements + are less strict in one important way: the request object doesn't have to + be of any specific class as long as its descriptor is method.input_type. + """ + raise NotImplementedError diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/service_reflection.py b/.venv/lib/python3.12/site-packages/google/protobuf/service_reflection.py new file mode 100644 index 00000000..7ba3d0b2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/service_reflection.py @@ -0,0 +1,272 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Contains metaclasses used to create protocol service and service stub +classes from ServiceDescriptor objects at runtime. + +The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to +inject all useful functionality into the classes output by the protocol +compiler at compile-time. +""" + +__author__ = 'petar@google.com (Petar Petrov)' + + +class GeneratedServiceType(type): + + """Metaclass for service classes created at runtime from ServiceDescriptors. + + Implementations for all methods described in the Service class are added here + by this class. We also create properties to allow getting/setting all fields + in the protocol message. + + The protocol compiler currently uses this metaclass to create protocol service + classes at runtime. Clients can also manually create their own classes at + runtime, as in this example:: + + mydescriptor = ServiceDescriptor(.....) + class MyProtoService(service.Service): + __metaclass__ = GeneratedServiceType + DESCRIPTOR = mydescriptor + myservice_instance = MyProtoService() + # ... + """ + + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __init__(cls, name, bases, dictionary): + """Creates a message service class. + + Args: + name: Name of the class (ignored, but required by the metaclass + protocol). 
+ bases: Base classes of the class being constructed. + dictionary: The class dictionary of the class being constructed. + dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object + describing this protocol service type. + """ + # Don't do anything if this class doesn't have a descriptor. This happens + # when a service class is subclassed. + if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary: + return + + descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY] + service_builder = _ServiceBuilder(descriptor) + service_builder.BuildService(cls) + cls.DESCRIPTOR = descriptor + + +class GeneratedServiceStubType(GeneratedServiceType): + + """Metaclass for service stubs created at runtime from ServiceDescriptors. + + This class has similar responsibilities as GeneratedServiceType, except that + it creates the service stub classes. + """ + + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __init__(cls, name, bases, dictionary): + """Creates a message service stub class. + + Args: + name: Name of the class (ignored, here). + bases: Base classes of the class being constructed. + dictionary: The class dictionary of the class being constructed. + dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object + describing this protocol service type. + """ + super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary) + # Don't do anything if this class doesn't have a descriptor. This happens + # when a service stub is subclassed. + if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary: + return + + descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY] + service_stub_builder = _ServiceStubBuilder(descriptor) + service_stub_builder.BuildServiceStub(cls) + + +class _ServiceBuilder(object): + + """This class constructs a protocol service class using a service descriptor. + + Given a service descriptor, this class constructs a class that represents + the specified service descriptor. One service builder instance constructs + exactly one service class. That means all instances of that class share the + same builder. + """ + + def __init__(self, service_descriptor): + """Initializes an instance of the service class builder. + + Args: + service_descriptor: ServiceDescriptor to use when constructing the + service class. + """ + self.descriptor = service_descriptor + + def BuildService(builder, cls): + """Constructs the service class. + + Args: + cls: The class that will be constructed. + """ + + # CallMethod needs to operate with an instance of the Service class. This + # internal wrapper function exists only to be able to pass the service + # instance to the method that does the real CallMethod work. + # Making sure to use exact argument names from the abstract interface in + # service.py to match the type signature + def _WrapCallMethod(self, method_descriptor, rpc_controller, request, done): + return builder._CallMethod(self, method_descriptor, rpc_controller, + request, done) + + def _WrapGetRequestClass(self, method_descriptor): + return builder._GetRequestClass(method_descriptor) + + def _WrapGetResponseClass(self, method_descriptor): + return builder._GetResponseClass(method_descriptor) + + builder.cls = cls + cls.CallMethod = _WrapCallMethod + cls.GetDescriptor = staticmethod(lambda: builder.descriptor) + cls.GetDescriptor.__doc__ = 'Returns the service descriptor.' 
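+    # Editorial sketch (hypothetical generated service `MyService` with a
+    # method `Foo`): concrete servers subclass the generated class and
+    # override the per-method stubs installed below:
+    #
+    #   class MyServiceImpl(my_service_pb2.MyService):
+    #     def Foo(self, rpc_controller, request, done):
+    #       done(my_service_pb2.FooResponse())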
+ cls.GetRequestClass = _WrapGetRequestClass + cls.GetResponseClass = _WrapGetResponseClass + for method in builder.descriptor.methods: + setattr(cls, method.name, builder._GenerateNonImplementedMethod(method)) + + def _CallMethod(self, srvc, method_descriptor, + rpc_controller, request, callback): + """Calls the method described by a given method descriptor. + + Args: + srvc: Instance of the service for which this method is called. + method_descriptor: Descriptor that represent the method to call. + rpc_controller: RPC controller to use for this method's execution. + request: Request protocol message. + callback: A callback to invoke after the method has completed. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'CallMethod() given method descriptor for wrong service type.') + method = getattr(srvc, method_descriptor.name) + return method(rpc_controller, request, callback) + + def _GetRequestClass(self, method_descriptor): + """Returns the class of the request protocol message. + + Args: + method_descriptor: Descriptor of the method for which to return the + request protocol message class. + + Returns: + A class that represents the input protocol message of the specified + method. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'GetRequestClass() given method descriptor for wrong service type.') + return method_descriptor.input_type._concrete_class + + def _GetResponseClass(self, method_descriptor): + """Returns the class of the response protocol message. + + Args: + method_descriptor: Descriptor of the method for which to return the + response protocol message class. + + Returns: + A class that represents the output protocol message of the specified + method. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'GetResponseClass() given method descriptor for wrong service type.') + return method_descriptor.output_type._concrete_class + + def _GenerateNonImplementedMethod(self, method): + """Generates and returns a method that can be set for a service methods. + + Args: + method: Descriptor of the service method for which a method is to be + generated. + + Returns: + A method that can be added to the service class. + """ + return lambda inst, rpc_controller, request, callback: ( + self._NonImplementedMethod(method.name, rpc_controller, callback)) + + def _NonImplementedMethod(self, method_name, rpc_controller, callback): + """The body of all methods in the generated service class. + + Args: + method_name: Name of the method being executed. + rpc_controller: RPC controller used to execute this method. + callback: A callback which will be invoked when the method finishes. + """ + rpc_controller.SetFailed('Method %s not implemented.' % method_name) + callback(None) + + +class _ServiceStubBuilder(object): + + """Constructs a protocol service stub class using a service descriptor. + + Given a service descriptor, this class constructs a suitable stub class. + A stub is just a type-safe wrapper around an RpcChannel which emulates a + local implementation of the service. + + One service stub builder instance constructs exactly one class. It means all + instances of that class share the same service stub builder. + """ + + def __init__(self, service_descriptor): + """Initializes an instance of the service stub class builder. + + Args: + service_descriptor: ServiceDescriptor to use when constructing the + stub class. 
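+
+    Example (editorial sketch; the ``MyService_Stub`` naming follows the
+    usual generated-code convention and is hypothetical here)::
+
+      stub = my_service_pb2.MyService_Stub(rpc_channel)
+      stub.Foo(rpc_controller, request, callback)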
+ """ + self.descriptor = service_descriptor + + def BuildServiceStub(self, cls): + """Constructs the stub class. + + Args: + cls: The class that will be constructed. + """ + + def _ServiceStubInit(stub, rpc_channel): + stub.rpc_channel = rpc_channel + self.cls = cls + cls.__init__ = _ServiceStubInit + for method in self.descriptor.methods: + setattr(cls, method.name, self._GenerateStubMethod(method)) + + def _GenerateStubMethod(self, method): + return (lambda inst, rpc_controller, request, callback=None: + self._StubMethod(inst, method, rpc_controller, request, callback)) + + def _StubMethod(self, stub, method_descriptor, + rpc_controller, request, callback): + """The body of all service methods in the generated stub class. + + Args: + stub: Stub instance. + method_descriptor: Descriptor of the invoked method. + rpc_controller: Rpc controller to execute the method. + request: Request protocol message. + callback: A callback to execute when the method finishes. + Returns: + Response message (in case of blocking call). + """ + return stub.rpc_channel.CallMethod( + method_descriptor, rpc_controller, request, + method_descriptor.output_type._concrete_class, callback) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/source_context_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/source_context_pb2.py new file mode 100644 index 00000000..65491f08 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/source_context_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: google/protobuf/source_context.proto +# Protobuf Python Version: 5.29.4 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 4, + '', + 'google/protobuf/source_context.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$google/protobuf/source_context.proto\x12\x0fgoogle.protobuf\",\n\rSourceContext\x12\x1b\n\tfile_name\x18\x01 \x01(\tR\x08\x66ileNameB\x8a\x01\n\x13\x63om.google.protobufB\x12SourceContextProtoP\x01Z6google.golang.org/protobuf/types/known/sourcecontextpb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.source_context_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\022SourceContextProtoP\001Z6google.golang.org/protobuf/types/known/sourcecontextpb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _globals['_SOURCECONTEXT']._serialized_start=57 + _globals['_SOURCECONTEXT']._serialized_end=101 +# @@protoc_insertion_point(module_scope) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/struct_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/struct_pb2.py new file mode 100644 index 00000000..cf50eb55 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/google/protobuf/struct_pb2.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: google/protobuf/struct.proto +# Protobuf Python Version: 5.29.4 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 4, + '', + 'google/protobuf/struct.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n\x06Struct\x12;\n\x06\x66ields\x18\x01 \x03(\x0b\x32#.google.protobuf.Struct.FieldsEntryR\x06\x66ields\x1aQ\n\x0b\x46ieldsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12,\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.ValueR\x05value:\x02\x38\x01\"\xb2\x02\n\x05Value\x12;\n\nnull_value\x18\x01 \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n\x0cnumber_value\x18\x02 \x01(\x01H\x00R\x0bnumberValue\x12#\n\x0cstring_value\x18\x03 \x01(\tH\x00R\x0bstringValue\x12\x1f\n\nbool_value\x18\x04 \x01(\x08H\x00R\tboolValue\x12<\n\x0cstruct_value\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructH\x00R\x0bstructValue\x12;\n\nlist_value\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n\x04kind\";\n\tListValue\x12.\n\x06values\x18\x01 \x03(\x0b\x32\x16.google.protobuf.ValueR\x06values*\x1b\n\tNullValue\x12\x0e\n\nNULL_VALUE\x10\x00\x42\x7f\n\x13\x63om.google.protobufB\x0bStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.struct_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\013StructProtoP\001Z/google.golang.org/protobuf/types/known/structpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _globals['_STRUCT_FIELDSENTRY']._loaded_options = None + _globals['_STRUCT_FIELDSENTRY']._serialized_options = b'8\001' + _globals['_NULLVALUE']._serialized_start=574 + _globals['_NULLVALUE']._serialized_end=601 + _globals['_STRUCT']._serialized_start=50 + _globals['_STRUCT']._serialized_end=202 + _globals['_STRUCT_FIELDSENTRY']._serialized_start=121 + _globals['_STRUCT_FIELDSENTRY']._serialized_end=202 + _globals['_VALUE']._serialized_start=205 + _globals['_VALUE']._serialized_end=511 + _globals['_LISTVALUE']._serialized_start=513 + _globals['_LISTVALUE']._serialized_end=572 +# @@protoc_insertion_point(module_scope) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/symbol_database.py b/.venv/lib/python3.12/site-packages/google/protobuf/symbol_database.py new file mode 100644 index 00000000..1941e811 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/symbol_database.py @@ -0,0 +1,197 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. 
All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""A database of Python protocol buffer generated symbols.
+
+SymbolDatabase is the MessageFactory for messages generated at compile time,
+and makes it easy to create new instances of a registered type, given only the
+type's protocol buffer symbol name.
+
+Example usage::
+
+  db = symbol_database.SymbolDatabase()
+
+  # Register symbols of interest, from one or multiple files.
+  db.RegisterFileDescriptor(my_proto_pb2.DESCRIPTOR)
+  db.RegisterMessage(my_proto_pb2.MyMessage)
+  db.RegisterEnumDescriptor(my_proto_pb2.MyEnum.DESCRIPTOR)
+
+  # The database can be used as a MessageFactory, to generate types based on
+  # their name:
+  types = db.GetMessages(['my_proto.proto'])
+  my_message_instance = types['MyMessage']()
+
+  # The database's underlying descriptor pool can be queried, so it's not
+  # necessary to know a type's filename to be able to generate it:
+  filename = db.pool.FindFileContainingSymbol('MyMessage')
+  my_message_instance = db.GetMessages([filename])['MyMessage']()
+
+  # This functionality is also provided directly via a convenience method:
+  my_message_instance = db.GetSymbol('MyMessage')()
+"""
+
+import warnings
+
+from google.protobuf.internal import api_implementation
+from google.protobuf import descriptor_pool
+from google.protobuf import message_factory
+
+
+class SymbolDatabase():
+  """A database of Python generated symbols."""
+
+  # Local cache of registered classes.
+  _classes = {}
+
+  def __init__(self, pool=None):
+    """Initializes a new SymbolDatabase."""
+    self.pool = pool or descriptor_pool.DescriptorPool()
+
+  def GetPrototype(self, descriptor):
+    warnings.warn('SymbolDatabase.GetPrototype() is deprecated. Please '
+                  'use message_factory.GetMessageClass() instead. '
+                  'SymbolDatabase.GetPrototype() will be removed soon.')
+    return message_factory.GetMessageClass(descriptor)
+
+  def CreatePrototype(self, descriptor):
+    warnings.warn('Calling CreatePrototype() directly is wrong. Please use '
+                  'message_factory.GetMessageClass() instead. '
+                  'SymbolDatabase.CreatePrototype() will be removed soon.')
+    return message_factory._InternalCreateMessageClass(descriptor)
+
+  def GetMessages(self, files):
+    warnings.warn('SymbolDatabase.GetMessages() is deprecated. Please use '
+                  'message_factory.GetMessageClassesForFiles() instead. '
+                  'SymbolDatabase.GetMessages() will be removed soon.')
+    return message_factory.GetMessageClassesForFiles(files, self.pool)
+
+  def RegisterMessage(self, message):
+    """Registers the given message type in the local database.
+
+    Calls to GetSymbol() and GetMessages() will return messages registered here.
+
+    Args:
+      message: A :class:`google.protobuf.message.Message` subclass (or
+        instance); its descriptor will be registered.
+
+    Returns:
+      The provided message.
+    """
+
+    desc = message.DESCRIPTOR
+    self._classes[desc] = message
+    self.RegisterMessageDescriptor(desc)
+    return message
+
+  def RegisterMessageDescriptor(self, message_descriptor):
+    """Registers the given message descriptor in the local database.
+
+    Args:
+      message_descriptor (Descriptor): the message descriptor to add.
+    """
+    if api_implementation.Type() == 'python':
+      # pylint: disable=protected-access
+      self.pool._AddDescriptor(message_descriptor)
+
+  def RegisterEnumDescriptor(self, enum_descriptor):
+    """Registers the given enum descriptor in the local database.
+
+    Args:
+      enum_descriptor (EnumDescriptor): The enum descriptor to register.
+
+    Returns:
+      EnumDescriptor: The provided descriptor.
+    """
+    if api_implementation.Type() == 'python':
+      # pylint: disable=protected-access
+      self.pool._AddEnumDescriptor(enum_descriptor)
+    return enum_descriptor
+
+  def RegisterServiceDescriptor(self, service_descriptor):
+    """Registers the given service descriptor in the local database.
+
+    Args:
+      service_descriptor (ServiceDescriptor): the service descriptor to
+        register.
+    """
+    if api_implementation.Type() == 'python':
+      # pylint: disable=protected-access
+      self.pool._AddServiceDescriptor(service_descriptor)
+
+  def RegisterFileDescriptor(self, file_descriptor):
+    """Registers the given file descriptor in the local database.
+
+    Args:
+      file_descriptor (FileDescriptor): The file descriptor to register.
+    """
+    if api_implementation.Type() == 'python':
+      # pylint: disable=protected-access
+      self.pool._InternalAddFileDescriptor(file_descriptor)
+
+  def GetSymbol(self, symbol):
+    """Tries to find a symbol in the local database.
+
+    Currently, this method only returns message.Message instances; however, it
+    may be extended in the future to support other symbol types.
+
+    Args:
+      symbol (str): a protocol buffer symbol.
+
+    Returns:
+      A Python class corresponding to the symbol.
+
+    Raises:
+      KeyError: if the symbol could not be found.
+    """
+
+    return self._classes[self.pool.FindMessageTypeByName(symbol)]
+
+  def GetMessages(self, files):
+    # TODO: Fix the differences with MessageFactory.
+    """Gets all registered messages from a specified file.
+
+    Only messages already created and registered will be returned (this is the
+    case for imported _pb2 modules). Unlike MessageFactory, this version also
+    returns already defined nested messages, but it does not register any
+    message extensions.
+
+    Args:
+      files (list[str]): The file names to extract messages from.
+
+    Returns:
+      A dictionary mapping proto names to the message classes.
+
+    Raises:
+      KeyError: if a file could not be found.
+    """
+
+    def _GetAllMessages(desc):
+      """Walk a message Descriptor and recursively yield all message names."""
+      yield desc
+      for msg_desc in desc.nested_types:
+        for nested_desc in _GetAllMessages(msg_desc):
+          yield nested_desc
+
+    result = {}
+    for file_name in files:
+      file_desc = self.pool.FindFileByName(file_name)
+      for msg_desc in file_desc.message_types_by_name.values():
+        for desc in _GetAllMessages(msg_desc):
+          try:
+            result[desc.full_name] = self._classes[desc]
+          except KeyError:
+            # This descriptor has no registered class, skip it.
+            pass
+    return result
+
+
+_DEFAULT = SymbolDatabase(pool=descriptor_pool.Default())
+
+
+def Default():
+  """Returns the default SymbolDatabase."""
+  return _DEFAULT
diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/testdata/__init__.py b/.venv/lib/python3.12/site-packages/google/protobuf/testdata/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/protobuf/testdata/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/text_encoding.py b/.venv/lib/python3.12/site-packages/google/protobuf/text_encoding.py
new file mode 100644
index 00000000..03c27dc1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/protobuf/text_encoding.py
@@ -0,0 +1,106 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
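+#
+# Editorial note (not part of the library): a minimal sketch for the
+# symbol_database module defined above, assuming a hypothetical generated
+# module `my_pb2` has already been imported (importing a *_pb2 module
+# registers its symbols with the default database):
+#
+#   from google.protobuf import symbol_database
+#   db = symbol_database.Default()
+#   MyMessage = db.GetSymbol('my.package.MyMessage')
+#   msg = MyMessage()
+#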
+# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Encoding related utilities.""" +import re + +def _AsciiIsPrint(i): + return i >= 32 and i < 127 + +def _MakeStrEscapes(): + ret = {} + for i in range(0, 128): + if not _AsciiIsPrint(i): + ret[i] = r'\%03o' % i + ret[ord('\t')] = r'\t' # optional escape + ret[ord('\n')] = r'\n' # optional escape + ret[ord('\r')] = r'\r' # optional escape + ret[ord('"')] = r'\"' # necessary escape + ret[ord('\'')] = r"\'" # optional escape + ret[ord('\\')] = r'\\' # necessary escape + return ret + +# Maps int -> char, performing string escapes. +_str_escapes = _MakeStrEscapes() + +# Maps int -> char, performing byte escaping and string escapes +_byte_escapes = {i: chr(i) for i in range(0, 256)} +_byte_escapes.update(_str_escapes) +_byte_escapes.update({i: r'\%03o' % i for i in range(128, 256)}) + + +def _DecodeUtf8EscapeErrors(text_bytes): + ret = '' + while text_bytes: + try: + ret += text_bytes.decode('utf-8').translate(_str_escapes) + text_bytes = '' + except UnicodeDecodeError as e: + ret += text_bytes[:e.start].decode('utf-8').translate(_str_escapes) + ret += _byte_escapes[text_bytes[e.start]] + text_bytes = text_bytes[e.start+1:] + return ret + + +def CEscape(text, as_utf8) -> str: + """Escape a bytes string for use in an text protocol buffer. + + Args: + text: A byte string to be escaped. + as_utf8: Specifies if result may contain non-ASCII characters. + In Python 3 this allows unescaped non-ASCII Unicode characters. + In Python 2 the return value will be valid UTF-8 rather than only ASCII. + Returns: + Escaped string (str). + """ + # Python's text.encode() 'string_escape' or 'unicode_escape' codecs do not + # satisfy our needs; they encodes unprintable characters using two-digit hex + # escapes whereas our C++ unescaping function allows hex escapes to be any + # length. So, "\0011".encode('string_escape') ends up being "\\x011", which + # will be decoded in C++ as a single-character string with char code 0x11. + text_is_unicode = isinstance(text, str) + if as_utf8: + if text_is_unicode: + return text.translate(_str_escapes) + else: + return _DecodeUtf8EscapeErrors(text) + else: + if text_is_unicode: + text = text.encode('utf-8') + return ''.join([_byte_escapes[c] for c in text]) + + +_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])') + + +def CUnescape(text: str) -> bytes: + """Unescape a text string with C-style escape sequences to UTF-8 bytes. + + Args: + text: The data to parse in a str. + Returns: + A byte string. + """ + + def ReplaceHex(m): + # Only replace the match if the number of leading back slashes is odd. i.e. + # the slash itself is not escaped. + if len(m.group(1)) & 1: + return m.group(1) + 'x0' + m.group(2) + return m.group(0) + + # This is required because the 'string_escape' encoding doesn't + # allow single-digit hex escapes (like '\xf'). + result = _CUNESCAPE_HEX.sub(ReplaceHex, text) + + # Replaces Unicode escape sequences with their character equivalents. + result = result.encode('raw_unicode_escape').decode('raw_unicode_escape') + # Encode Unicode characters as UTF-8, then decode to Latin-1 escaping + # unprintable characters. + result = result.encode('utf-8').decode('unicode_escape') + # Convert Latin-1 text back to a byte string (latin-1 codec also works here). 
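+  # Editorial round-trip sketch for CEscape/CUnescape (comments only; the
+  # function body is unchanged):
+  #
+  #   raw = b'\x00hi\xff"'
+  #   text = CEscape(raw, as_utf8=False)   # -> '\\000hi\\377\\"'
+  #   assert CUnescape(text) == raw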
+ return result.encode('latin-1') diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/text_format.py b/.venv/lib/python3.12/site-packages/google/protobuf/text_format.py new file mode 100644 index 00000000..7e17b43e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/text_format.py @@ -0,0 +1,1864 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Contains routines for printing protocol messages in text format. + +Simple usage example:: + + # Create a proto object and serialize it to a text proto string. + message = my_proto_pb2.MyMessage(foo='bar') + text_proto = text_format.MessageToString(message) + + # Parse a text proto string. + message = text_format.Parse(text_proto, my_proto_pb2.MyMessage()) +""" + +__author__ = 'kenton@google.com (Kenton Varda)' + +# TODO Import thread contention leads to test failures. +import encodings.raw_unicode_escape # pylint: disable=unused-import +import encodings.unicode_escape # pylint: disable=unused-import +import io +import math +import re + +from google.protobuf.internal import decoder +from google.protobuf.internal import type_checkers +from google.protobuf import descriptor +from google.protobuf import text_encoding +from google.protobuf import unknown_fields + +# pylint: disable=g-import-not-at-top +__all__ = ['MessageToString', 'Parse', 'PrintMessage', 'PrintField', + 'PrintFieldValue', 'Merge', 'MessageToBytes'] + +_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(), + type_checkers.Int32ValueChecker(), + type_checkers.Uint64ValueChecker(), + type_checkers.Int64ValueChecker()) +_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?$', re.IGNORECASE) +_FLOAT_NAN = re.compile('nanf?$', re.IGNORECASE) +_QUOTES = frozenset(("'", '"')) +_ANY_FULL_TYPE_NAME = 'google.protobuf.Any' +_DEBUG_STRING_SILENT_MARKER = '\t ' + +_as_utf8_default = True + + +class Error(Exception): + """Top-level module error for text_format.""" + + +class ParseError(Error): + """Thrown in case of text parsing or tokenizing error.""" + + def __init__(self, message=None, line=None, column=None): + if message is not None and line is not None: + loc = str(line) + if column is not None: + loc += ':{0}'.format(column) + message = '{0} : {1}'.format(loc, message) + if message is not None: + super(ParseError, self).__init__(message) + else: + super(ParseError, self).__init__() + self._line = line + self._column = column + + def GetLine(self): + return self._line + + def GetColumn(self): + return self._column + + +class TextWriter(object): + + def __init__(self, as_utf8): + self._writer = io.StringIO() + + def write(self, val): + return self._writer.write(val) + + def close(self): + return self._writer.close() + + def getvalue(self): + return self._writer.getvalue() + + +def MessageToString( + message, + as_utf8=_as_utf8_default, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + indent=0, + message_formatter=None, + print_unknown_fields=False, + force_colon=False) -> str: + """Convert protobuf message to text format. + + Double values can be formatted compactly with 15 digits of + precision (which is the most that IEEE 754 "double" can guarantee) + using double_format='.15g'. 
To ensure that converting to text and back to a + proto will result in an identical value, double_format='.17g' should be used. + + Args: + message: The protocol buffers message. + as_utf8: Return unescaped Unicode for non-ASCII characters. + as_one_line: Don't introduce newlines between fields. + use_short_repeated_primitives: Use short repeated format for primitives. + pointy_brackets: If True, use angle brackets instead of curly braces for + nesting. + use_index_order: If True, fields of a proto message will be printed using + the order defined in source code instead of the field number, extensions + will be printed at the end of the message and their relative order is + determined by the extension number. By default, use the field number + order. + float_format (str): If set, use this to specify float field formatting + (per the "Format Specification Mini-Language"); otherwise, shortest float + that has same value in wire will be printed. Also affect double field + if double_format is not set but float_format is set. + double_format (str): If set, use this to specify double field formatting + (per the "Format Specification Mini-Language"); if it is not set but + float_format is set, use float_format. Otherwise, use ``str()`` + use_field_number: If True, print field numbers instead of names. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + indent (int): The initial indent level, in terms of spaces, for pretty + print. + message_formatter (function(message, indent, as_one_line) -> unicode|None): + Custom formatter for selected sub-messages (usually based on message + type). Use to pretty print parts of the protobuf for easier diffing. + print_unknown_fields: If True, unknown fields will be printed. + force_colon: If set, a colon will be added after the field name even if the + field is a proto message. + + Returns: + str: A string of the text formatted protocol buffer message. + """ + out = TextWriter(as_utf8) + printer = _Printer( + out, + indent, + as_utf8, + as_one_line, + use_short_repeated_primitives, + pointy_brackets, + use_index_order, + float_format, + double_format, + use_field_number, + descriptor_pool, + message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintMessage(message) + result = out.getvalue() + out.close() + if as_one_line: + return result.rstrip() + return result + + +def MessageToBytes(message, **kwargs) -> bytes: + """Convert protobuf message to encoded text format. See MessageToString.""" + text = MessageToString(message, **kwargs) + if isinstance(text, bytes): + return text + codec = 'utf-8' if kwargs.get('as_utf8') else 'ascii' + return text.encode(codec) + + +def _IsMapEntry(field): + return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.message_type.has_options and + field.message_type.GetOptions().map_entry) + + +def _IsGroupLike(field): + """Determines if a field is consistent with a proto2 group. + + Args: + field: The field descriptor. + + Returns: + True if this field is group-like, false otherwise. + """ + # Groups are always tag-delimited. + if field.type != descriptor.FieldDescriptor.TYPE_GROUP: + return False + + # Group fields always are always the lowercase type name. + if field.name != field.message_type.name.lower(): + return False + + if field.message_type.file != field.file: + return False + + # Group messages are always defined in the same scope as the field. 
File + # level extensions will compare NULL == NULL here, which is why the file + # comparison above is necessary to ensure both come from the same file. + return ( + field.message_type.containing_type == field.extension_scope + if field.is_extension + else field.message_type.containing_type == field.containing_type + ) + + +def PrintMessage(message, + out, + indent=0, + as_utf8=_as_utf8_default, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Convert the message to text format and write it to the out stream. + + Args: + message: The Message object to convert to text format. + out: A file handle to write the message to. + indent: The initial indent level for pretty print. + as_utf8: Return unescaped Unicode for non-ASCII characters. + as_one_line: Don't introduce newlines between fields. + use_short_repeated_primitives: Use short repeated format for primitives. + pointy_brackets: If True, use angle brackets instead of curly braces for + nesting. + use_index_order: If True, print fields of a proto message using the order + defined in source code instead of the field number. By default, use the + field number order. + float_format: If set, use this to specify float field formatting + (per the "Format Specification Mini-Language"); otherwise, shortest + float that has same value in wire will be printed. Also affect double + field if double_format is not set but float_format is set. + double_format: If set, use this to specify double field formatting + (per the "Format Specification Mini-Language"); if it is not set but + float_format is set, use float_format. Otherwise, str() is used. + use_field_number: If True, print field numbers instead of names. + descriptor_pool: A DescriptorPool used to resolve Any types. + message_formatter: A function(message, indent, as_one_line): unicode|None + to custom format selected sub-messages (usually based on message type). + Use to pretty print parts of the protobuf for easier diffing. + print_unknown_fields: If True, unknown fields will be printed. + force_colon: If set, a colon will be added after the field name even if + the field is a proto message. 
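+
+  Example (a minimal sketch; ``my_proto_pb2.MyMessage`` stands in for any
+  generated message type with a string field ``foo``)::
+
+    import io
+    out = io.StringIO()
+    PrintMessage(my_proto_pb2.MyMessage(foo='bar'), out, as_one_line=True)
+    out.getvalue()  # -> 'foo: "bar" '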
+ """ + printer = _Printer( + out=out, indent=indent, as_utf8=as_utf8, + as_one_line=as_one_line, + use_short_repeated_primitives=use_short_repeated_primitives, + pointy_brackets=pointy_brackets, + use_index_order=use_index_order, + float_format=float_format, + double_format=double_format, + use_field_number=use_field_number, + descriptor_pool=descriptor_pool, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintMessage(message) + + +def PrintField(field, + value, + out, + indent=0, + as_utf8=_as_utf8_default, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Print a single field name/value pair.""" + printer = _Printer(out, indent, as_utf8, as_one_line, + use_short_repeated_primitives, pointy_brackets, + use_index_order, float_format, double_format, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintField(field, value) + + +def PrintFieldValue(field, + value, + out, + indent=0, + as_utf8=_as_utf8_default, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Print a single field value (not including name).""" + printer = _Printer(out, indent, as_utf8, as_one_line, + use_short_repeated_primitives, pointy_brackets, + use_index_order, float_format, double_format, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintFieldValue(field, value) + + +def _BuildMessageFromTypeName(type_name, descriptor_pool): + """Returns a protobuf message instance. + + Args: + type_name: Fully-qualified protobuf message type name string. + descriptor_pool: DescriptorPool instance. + + Returns: + A Message instance of type matching type_name, or None if the a Descriptor + wasn't found matching type_name. + """ + # pylint: disable=g-import-not-at-top + if descriptor_pool is None: + from google.protobuf import descriptor_pool as pool_mod + descriptor_pool = pool_mod.Default() + from google.protobuf import message_factory + try: + message_descriptor = descriptor_pool.FindMessageTypeByName(type_name) + except KeyError: + return None + message_type = message_factory.GetMessageClass(message_descriptor) + return message_type() + + +# These values must match WireType enum in //google/protobuf/wire_format.h. +WIRETYPE_LENGTH_DELIMITED = 2 +WIRETYPE_START_GROUP = 3 + + +class _Printer(object): + """Text format printer for protocol message.""" + + def __init__( + self, + out, + indent=0, + as_utf8=_as_utf8_default, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Initialize the Printer. + + Double values can be formatted compactly with 15 digits of precision + (which is the most that IEEE 754 "double" can guarantee) using + double_format='.15g'. To ensure that converting to text and back to a proto + will result in an identical value, double_format='.17g' should be used. 
+ + Args: + out: To record the text format result. + indent: The initial indent level for pretty print. + as_utf8: Return unescaped Unicode for non-ASCII characters. + as_one_line: Don't introduce newlines between fields. + use_short_repeated_primitives: Use short repeated format for primitives. + pointy_brackets: If True, use angle brackets instead of curly braces for + nesting. + use_index_order: If True, print fields of a proto message using the order + defined in source code instead of the field number. By default, use the + field number order. + float_format: If set, use this to specify float field formatting + (per the "Format Specification Mini-Language"); otherwise, shortest + float that has same value in wire will be printed. Also affect double + field if double_format is not set but float_format is set. + double_format: If set, use this to specify double field formatting + (per the "Format Specification Mini-Language"); if it is not set but + float_format is set, use float_format. Otherwise, str() is used. + use_field_number: If True, print field numbers instead of names. + descriptor_pool: A DescriptorPool used to resolve Any types. + message_formatter: A function(message, indent, as_one_line): unicode|None + to custom format selected sub-messages (usually based on message type). + Use to pretty print parts of the protobuf for easier diffing. + print_unknown_fields: If True, unknown fields will be printed. + force_colon: If set, a colon will be added after the field name even if + the field is a proto message. + """ + self.out = out + self.indent = indent + self.as_utf8 = as_utf8 + self.as_one_line = as_one_line + self.use_short_repeated_primitives = use_short_repeated_primitives + self.pointy_brackets = pointy_brackets + self.use_index_order = use_index_order + self.float_format = float_format + if double_format is not None: + self.double_format = double_format + else: + self.double_format = float_format + self.use_field_number = use_field_number + self.descriptor_pool = descriptor_pool + self.message_formatter = message_formatter + self.print_unknown_fields = print_unknown_fields + self.force_colon = force_colon + + def _TryPrintAsAnyMessage(self, message): + """Serializes if message is a google.protobuf.Any field.""" + if '/' not in message.type_url: + return False + packed_message = _BuildMessageFromTypeName(message.TypeName(), + self.descriptor_pool) + if packed_message: + packed_message.MergeFromString(message.value) + colon = ':' if self.force_colon else '' + self.out.write('%s[%s]%s ' % (self.indent * ' ', message.type_url, colon)) + self._PrintMessageFieldValue(packed_message) + self.out.write(' ' if self.as_one_line else '\n') + return True + else: + return False + + def _TryCustomFormatMessage(self, message): + formatted = self.message_formatter(message, self.indent, self.as_one_line) + if formatted is None: + return False + + out = self.out + out.write(' ' * self.indent) + out.write(formatted) + out.write(' ' if self.as_one_line else '\n') + return True + + def PrintMessage(self, message): + """Convert protobuf message to text format. + + Args: + message: The protocol buffers message. 
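+
+    When the message is a google.protobuf.Any whose packed type can be
+    resolved from the descriptor pool, it is printed in expanded form, e.g.
+    (a sketch using the well-known Duration type)::
+
+      [type.googleapis.com/google.protobuf.Duration] {
+        seconds: 1
+      }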
+ """ + if self.message_formatter and self._TryCustomFormatMessage(message): + return + if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and + self._TryPrintAsAnyMessage(message)): + return + fields = message.ListFields() + if self.use_index_order: + fields.sort( + key=lambda x: x[0].number if x[0].is_extension else x[0].index) + for field, value in fields: + if _IsMapEntry(field): + for key in sorted(value): + # This is slow for maps with submessage entries because it copies the + # entire tree. Unfortunately this would take significant refactoring + # of this file to work around. + # + # TODO: refactor and optimize if this becomes an issue. + entry_submsg = value.GetEntryClass()(key=key, value=value[key]) + self.PrintField(field, entry_submsg) + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if (self.use_short_repeated_primitives + and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE + and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_STRING): + self._PrintShortRepeatedPrimitivesValue(field, value) + else: + for element in value: + self.PrintField(field, element) + else: + self.PrintField(field, value) + + if self.print_unknown_fields: + self._PrintUnknownFields(unknown_fields.UnknownFieldSet(message)) + + def _PrintUnknownFields(self, unknown_field_set): + """Print unknown fields.""" + out = self.out + for field in unknown_field_set: + out.write(' ' * self.indent) + out.write(str(field.field_number)) + if field.wire_type == WIRETYPE_START_GROUP: + if self.as_one_line: + out.write(' { ') + else: + out.write(' {\n') + self.indent += 2 + + self._PrintUnknownFields(field.data) + + if self.as_one_line: + out.write('} ') + else: + self.indent -= 2 + out.write(' ' * self.indent + '}\n') + elif field.wire_type == WIRETYPE_LENGTH_DELIMITED: + try: + # If this field is parseable as a Message, it is probably + # an embedded message. + # pylint: disable=protected-access + (embedded_unknown_message, pos) = decoder._DecodeUnknownFieldSet( + memoryview(field.data), 0, len(field.data)) + except Exception: # pylint: disable=broad-except + pos = 0 + + if pos == len(field.data): + if self.as_one_line: + out.write(' { ') + else: + out.write(' {\n') + self.indent += 2 + + self._PrintUnknownFields(embedded_unknown_message) + + if self.as_one_line: + out.write('} ') + else: + self.indent -= 2 + out.write(' ' * self.indent + '}\n') + else: + # A string or bytes field. self.as_utf8 may not work. + out.write(': \"') + out.write(text_encoding.CEscape(field.data, False)) + out.write('\" ' if self.as_one_line else '\"\n') + else: + # varint, fixed32, fixed64 + out.write(': ') + out.write(str(field.data)) + out.write(' ' if self.as_one_line else '\n') + + def _PrintFieldName(self, field): + """Print field name.""" + out = self.out + out.write(' ' * self.indent) + if self.use_field_number: + out.write(str(field.number)) + else: + if field.is_extension: + out.write('[') + if (field.containing_type.GetOptions().message_set_wire_format and + field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL): + out.write(field.message_type.full_name) + else: + out.write(field.full_name) + out.write(']') + elif _IsGroupLike(field): + # For groups, use the capitalized name. + out.write(field.message_type.name) + else: + out.write(field.name) + + if (self.force_colon or + field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE): + # The colon is optional in this case, but our cross-language golden files + # don't include it. 
Here, the colon is only included if force_colon is + # set to True + out.write(':') + + def PrintField(self, field, value): + """Print a single field name/value pair.""" + self._PrintFieldName(field) + self.out.write(' ') + self.PrintFieldValue(field, value) + self.out.write(' ' if self.as_one_line else '\n') + + def _PrintShortRepeatedPrimitivesValue(self, field, value): + """"Prints short repeated primitives value.""" + # Note: this is called only when value has at least one element. + self._PrintFieldName(field) + self.out.write(' [') + for i in range(len(value) - 1): + self.PrintFieldValue(field, value[i]) + self.out.write(', ') + self.PrintFieldValue(field, value[-1]) + self.out.write(']') + self.out.write(' ' if self.as_one_line else '\n') + + def _PrintMessageFieldValue(self, value): + if self.pointy_brackets: + openb = '<' + closeb = '>' + else: + openb = '{' + closeb = '}' + + if self.as_one_line: + self.out.write('%s ' % openb) + self.PrintMessage(value) + self.out.write(closeb) + else: + self.out.write('%s\n' % openb) + self.indent += 2 + self.PrintMessage(value) + self.indent -= 2 + self.out.write(' ' * self.indent + closeb) + + def PrintFieldValue(self, field, value): + """Print a single field value (not including name). + + For repeated fields, the value should be a single element. + + Args: + field: The descriptor of the field to be printed. + value: The value of the field. + """ + out = self.out + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + self._PrintMessageFieldValue(value) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: + enum_value = field.enum_type.values_by_number.get(value, None) + if enum_value is not None: + out.write(enum_value.name) + else: + out.write(str(value)) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + out.write('\"') + if isinstance(value, str) and not self.as_utf8: + out_value = value.encode('utf-8') + else: + out_value = value + if field.type == descriptor.FieldDescriptor.TYPE_BYTES: + # We always need to escape all binary data in TYPE_BYTES fields. + out_as_utf8 = False + else: + out_as_utf8 = self.as_utf8 + out.write(text_encoding.CEscape(out_value, out_as_utf8)) + out.write('\"') + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: + if value: + out.write('true') + else: + out.write('false') + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: + if self.float_format is not None: + out.write('{1:{0}}'.format(self.float_format, value)) + else: + if math.isnan(value): + out.write(str(value)) + else: + out.write(str(type_checkers.ToShortestFloat(value))) + elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_DOUBLE and + self.double_format is not None): + out.write('{1:{0}}'.format(self.double_format, value)) + else: + out.write(str(value)) + + +def Parse(text, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + NOTE: for historical reasons this function does not clear the input + message. This is different from what the binary msg.ParseFrom(...) does. + If text contains a field already set in message, the value is appended if the + field is repeated. Otherwise, an error is raised. 
+ + Example:: + + a = MyProto() + a.repeated_field.append('test') + b = MyProto() + + # Repeated fields are combined + text_format.Parse(repr(a), b) + text_format.Parse(repr(a), b) # repeated_field contains ["test", "test"] + + # Non-repeated fields cannot be overwritten + a.singular_field = 1 + b.singular_field = 2 + text_format.Parse(repr(a), b) # ParseError + + # Binary version: + b.ParseFromString(a.SerializeToString()) # repeated_field is now "test" + + Caller is responsible for clearing the message as needed. + + Args: + text (str): Message text representation. + message (Message): A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + Message: The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + return ParseLines(text.split(b'\n' if isinstance(text, bytes) else u'\n'), + message, + allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + + +def Merge(text, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + Like Parse(), but allows repeated values for a non-repeated field, and uses + the last one. This means any non-repeated, top-level fields specified in text + replace those in the message. + + Args: + text (str): Message text representation. + message (Message): A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + Message: The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + return MergeLines( + text.split(b'\n' if isinstance(text, bytes) else u'\n'), + message, + allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + + +def ParseLines(lines, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + See Parse() for caveats. + + Args: + lines: An iterable of lines of a message's text representation. + message: A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool: A DescriptorPool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + The same message passed as argument. + + Raises: + ParseError: On text parsing problems. 
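+
+  Example (a sketch; ``MyProto`` stands in for any generated message class
+  with a string field ``foo``)::
+
+    message = ParseLines(['foo: "bar"'], MyProto())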
+ """ + parser = _Parser(allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + return parser.ParseLines(lines, message) + + +def MergeLines(lines, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + See Merge() for more details. + + Args: + lines: An iterable of lines of a message's text representation. + message: A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool: A DescriptorPool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + parser = _Parser(allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + return parser.MergeLines(lines, message) + + +class _Parser(object): + """Text format parser for protocol message.""" + + def __init__(self, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + self.allow_unknown_extension = allow_unknown_extension + self.allow_field_number = allow_field_number + self.descriptor_pool = descriptor_pool + self.allow_unknown_field = allow_unknown_field + + def ParseLines(self, lines, message): + """Parses a text representation of a protocol message into a message.""" + self._allow_multiple_scalars = False + self._ParseOrMerge(lines, message) + return message + + def MergeLines(self, lines, message): + """Merges a text representation of a protocol message into a message.""" + self._allow_multiple_scalars = True + self._ParseOrMerge(lines, message) + return message + + def _ParseOrMerge(self, lines, message): + """Converts a text representation of a protocol message into a message. + + Args: + lines: Lines of a message's text representation. + message: A protocol buffer message to merge into. + + Raises: + ParseError: On text parsing problems. + """ + # Tokenize expects native str lines. + try: + str_lines = ( + line if isinstance(line, str) else line.decode('utf-8') + for line in lines) + tokenizer = Tokenizer(str_lines) + except UnicodeDecodeError as e: + raise ParseError from e + if message: + self.root_type = message.DESCRIPTOR.full_name + while not tokenizer.AtEnd(): + self._MergeField(tokenizer, message) + + def _MergeField(self, tokenizer, message): + """Merges a single protocol message field into a message. + + Args: + tokenizer: A tokenizer to parse the field name and values. + message: A protocol message to record the data. + + Raises: + ParseError: In case of text parsing problems. 
+ """ + message_descriptor = message.DESCRIPTOR + if (message_descriptor.full_name == _ANY_FULL_TYPE_NAME and + tokenizer.TryConsume('[')): + type_url_prefix, packed_type_name = self._ConsumeAnyTypeUrl(tokenizer) + tokenizer.Consume(']') + tokenizer.TryConsume(':') + self._DetectSilentMarker(tokenizer, message_descriptor.full_name, + type_url_prefix + '/' + packed_type_name) + if tokenizer.TryConsume('<'): + expanded_any_end_token = '>' + else: + tokenizer.Consume('{') + expanded_any_end_token = '}' + expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name, + self.descriptor_pool) + # Direct comparison with None is used instead of implicit bool conversion + # to avoid false positives with falsy initial values, e.g. for + # google.protobuf.ListValue. + if expanded_any_sub_message is None: + raise ParseError('Type %s not found in descriptor pool' % + packed_type_name) + while not tokenizer.TryConsume(expanded_any_end_token): + if tokenizer.AtEnd(): + raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % + (expanded_any_end_token,)) + self._MergeField(tokenizer, expanded_any_sub_message) + deterministic = False + + message.Pack(expanded_any_sub_message, + type_url_prefix=type_url_prefix, + deterministic=deterministic) + return + + if tokenizer.TryConsume('['): + name = [tokenizer.ConsumeIdentifier()] + while tokenizer.TryConsume('.'): + name.append(tokenizer.ConsumeIdentifier()) + name = '.'.join(name) + + if not message_descriptor.is_extendable: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" does not have extensions.' % + message_descriptor.full_name) + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(name) + # pylint: enable=protected-access + if not field: + if self.allow_unknown_extension: + field = None + else: + raise tokenizer.ParseErrorPreviousToken( + 'Extension "%s" not registered. ' + 'Did you import the _pb2 module which defines it? ' + 'If you are trying to place the extension in the MessageSet ' + 'field of another message that is in an Any or MessageSet field, ' + 'that message\'s _pb2 module must be imported as well' % name) + elif message_descriptor != field.containing_type: + raise tokenizer.ParseErrorPreviousToken( + 'Extension "%s" does not extend message type "%s".' % + (name, message_descriptor.full_name)) + + tokenizer.Consume(']') + + else: + name = tokenizer.ConsumeIdentifierOrNumber() + if self.allow_field_number and name.isdigit(): + number = ParseInteger(name, True, True) + field = message_descriptor.fields_by_number.get(number, None) + if not field and message_descriptor.is_extendable: + field = message.Extensions._FindExtensionByNumber(number) + else: + field = message_descriptor.fields_by_name.get(name, None) + + # Group names are expected to be capitalized as they appear in the + # .proto file, which actually matches their type names, not their field + # names. + if not field: + field = message_descriptor.fields_by_name.get(name.lower(), None) + if field and not _IsGroupLike(field): + field = None + if field and field.message_type.name != name: + field = None + + if not field and not self.allow_unknown_field: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" has no field named "%s".' % + (message_descriptor.full_name, name)) + + if field: + if not self._allow_multiple_scalars and field.containing_oneof: + # Check if there's a different field set in this oneof. 
+ # Note that we ignore the case if the same field was set before, and we + # apply _allow_multiple_scalars to non-scalar fields as well. + which_oneof = message.WhichOneof(field.containing_oneof.name) + if which_oneof is not None and which_oneof != field.name: + raise tokenizer.ParseErrorPreviousToken( + 'Field "%s" is specified along with field "%s", another member ' + 'of oneof "%s" for message type "%s".' % + (field.name, which_oneof, field.containing_oneof.name, + message_descriptor.full_name)) + + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + tokenizer.TryConsume(':') + self._DetectSilentMarker(tokenizer, message_descriptor.full_name, + field.full_name) + merger = self._MergeMessageField + else: + tokenizer.Consume(':') + self._DetectSilentMarker(tokenizer, message_descriptor.full_name, + field.full_name) + merger = self._MergeScalarField + + if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and + tokenizer.TryConsume('[')): + # Short repeated format, e.g. "foo: [1, 2, 3]" + if not tokenizer.TryConsume(']'): + while True: + merger(tokenizer, message, field) + if tokenizer.TryConsume(']'): + break + tokenizer.Consume(',') + + else: + merger(tokenizer, message, field) + + else: # Proto field is unknown. + assert (self.allow_unknown_extension or self.allow_unknown_field) + self._SkipFieldContents(tokenizer, name, message_descriptor.full_name) + + # For historical reasons, fields may optionally be separated by commas or + # semicolons. + if not tokenizer.TryConsume(','): + tokenizer.TryConsume(';') + + def _LogSilentMarker(self, immediate_message_type, field_name): + pass + + def _DetectSilentMarker(self, tokenizer, immediate_message_type, field_name): + if tokenizer.contains_silent_marker_before_current_token: + self._LogSilentMarker(immediate_message_type, field_name) + + def _ConsumeAnyTypeUrl(self, tokenizer): + """Consumes a google.protobuf.Any type URL and returns the type name.""" + # Consume "type.googleapis.com/". + prefix = [tokenizer.ConsumeIdentifier()] + tokenizer.Consume('.') + prefix.append(tokenizer.ConsumeIdentifier()) + tokenizer.Consume('.') + prefix.append(tokenizer.ConsumeIdentifier()) + tokenizer.Consume('/') + # Consume the fully-qualified type name. + name = [tokenizer.ConsumeIdentifier()] + while tokenizer.TryConsume('.'): + name.append(tokenizer.ConsumeIdentifier()) + return '.'.join(prefix), '.'.join(name) + + def _MergeMessageField(self, tokenizer, message, field): + """Merges a single scalar field into a message. + + Args: + tokenizer: A tokenizer to parse the field value. + message: The message of which field is a member. + field: The descriptor of the field to be merged. + + Raises: + ParseError: In case of text parsing problems. + """ + is_map_entry = _IsMapEntry(field) + + if tokenizer.TryConsume('<'): + end_token = '>' + else: + tokenizer.Consume('{') + end_token = '}' + + if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if field.is_extension: + sub_message = message.Extensions[field].add() + elif is_map_entry: + sub_message = getattr(message, field.name).GetEntryClass()() + else: + sub_message = getattr(message, field.name).add() + else: + if field.is_extension: + if (not self._allow_multiple_scalars and + message.HasExtension(field)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" extensions.' % + (message.DESCRIPTOR.full_name, field.full_name)) + sub_message = message.Extensions[field] + else: + # Also apply _allow_multiple_scalars to message field. 
+ # TODO: Change to _allow_singular_overwrites. + if (not self._allow_multiple_scalars and + message.HasField(field.name)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" fields.' % + (message.DESCRIPTOR.full_name, field.name)) + sub_message = getattr(message, field.name) + sub_message.SetInParent() + + while not tokenizer.TryConsume(end_token): + if tokenizer.AtEnd(): + raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,)) + self._MergeField(tokenizer, sub_message) + + if is_map_entry: + value_cpptype = field.message_type.fields_by_name['value'].cpp_type + if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + value = getattr(message, field.name)[sub_message.key] + value.CopyFrom(sub_message.value) + else: + getattr(message, field.name)[sub_message.key] = sub_message.value + + def _MergeScalarField(self, tokenizer, message, field): + """Merges a single scalar field into a message. + + Args: + tokenizer: A tokenizer to parse the field value. + message: A protocol message to record the data. + field: The descriptor of the field to be merged. + + Raises: + ParseError: In case of text parsing problems. + RuntimeError: On runtime errors. + """ + _ = self.allow_unknown_extension + value = None + + if field.type in (descriptor.FieldDescriptor.TYPE_INT32, + descriptor.FieldDescriptor.TYPE_SINT32, + descriptor.FieldDescriptor.TYPE_SFIXED32): + value = _ConsumeInt32(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_INT64, + descriptor.FieldDescriptor.TYPE_SINT64, + descriptor.FieldDescriptor.TYPE_SFIXED64): + value = _ConsumeInt64(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32, + descriptor.FieldDescriptor.TYPE_FIXED32): + value = _ConsumeUint32(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64, + descriptor.FieldDescriptor.TYPE_FIXED64): + value = _ConsumeUint64(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT, + descriptor.FieldDescriptor.TYPE_DOUBLE): + value = tokenizer.ConsumeFloat() + elif field.type == descriptor.FieldDescriptor.TYPE_BOOL: + value = tokenizer.ConsumeBool() + elif field.type == descriptor.FieldDescriptor.TYPE_STRING: + value = tokenizer.ConsumeString() + elif field.type == descriptor.FieldDescriptor.TYPE_BYTES: + value = tokenizer.ConsumeByteString() + elif field.type == descriptor.FieldDescriptor.TYPE_ENUM: + value = tokenizer.ConsumeEnum(field) + else: + raise RuntimeError('Unknown field type %d' % field.type) + + if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if field.is_extension: + message.Extensions[field].append(value) + else: + getattr(message, field.name).append(value) + else: + if field.is_extension: + if (not self._allow_multiple_scalars and + field.has_presence and + message.HasExtension(field)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" extensions.' % + (message.DESCRIPTOR.full_name, field.full_name)) + else: + message.Extensions[field] = value + else: + duplicate_error = False + if not self._allow_multiple_scalars: + if field.has_presence: + duplicate_error = message.HasField(field.name) + else: + # For field that doesn't represent presence, try best effort to + # check multiple scalars by compare to default values. + duplicate_error = bool(getattr(message, field.name)) + + if duplicate_error: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" fields.' 
% + (message.DESCRIPTOR.full_name, field.name)) + else: + setattr(message, field.name, value) + + def _SkipFieldContents(self, tokenizer, field_name, immediate_message_type): + """Skips over contents (value or message) of a field. + + Args: + tokenizer: A tokenizer to parse the field name and values. + field_name: The field name currently being parsed. + immediate_message_type: The type of the message immediately containing + the silent marker. + """ + # Try to guess the type of this field. + # If this field is not a message, there should be a ":" between the + # field name and the field value and also the field value should not + # start with "{" or "<" which indicates the beginning of a message body. + # If there is no ":" or there is a "{" or "<" after ":", this field has + # to be a message or the input is ill-formed. + if tokenizer.TryConsume( + ':') and not tokenizer.LookingAt('{') and not tokenizer.LookingAt('<'): + self._DetectSilentMarker(tokenizer, immediate_message_type, field_name) + if tokenizer.LookingAt('['): + self._SkipRepeatedFieldValue(tokenizer) + else: + self._SkipFieldValue(tokenizer) + else: + self._DetectSilentMarker(tokenizer, immediate_message_type, field_name) + self._SkipFieldMessage(tokenizer, immediate_message_type) + + def _SkipField(self, tokenizer, immediate_message_type): + """Skips over a complete field (name and value/message). + + Args: + tokenizer: A tokenizer to parse the field name and values. + immediate_message_type: The type of the message immediately containing + the silent marker. + """ + field_name = '' + if tokenizer.TryConsume('['): + # Consume extension or google.protobuf.Any type URL + field_name += '[' + tokenizer.ConsumeIdentifier() + num_identifiers = 1 + while tokenizer.TryConsume('.'): + field_name += '.' + tokenizer.ConsumeIdentifier() + num_identifiers += 1 + # This is possibly a type URL for an Any message. + if num_identifiers == 3 and tokenizer.TryConsume('/'): + field_name += '/' + tokenizer.ConsumeIdentifier() + while tokenizer.TryConsume('.'): + field_name += '.' + tokenizer.ConsumeIdentifier() + tokenizer.Consume(']') + field_name += ']' + else: + field_name += tokenizer.ConsumeIdentifierOrNumber() + + self._SkipFieldContents(tokenizer, field_name, immediate_message_type) + + # For historical reasons, fields may optionally be separated by commas or + # semicolons. + if not tokenizer.TryConsume(','): + tokenizer.TryConsume(';') + + def _SkipFieldMessage(self, tokenizer, immediate_message_type): + """Skips over a field message. + + Args: + tokenizer: A tokenizer to parse the field name and values. + immediate_message_type: The type of the message immediately containing + the silent marker + """ + if tokenizer.TryConsume('<'): + delimiter = '>' + else: + tokenizer.Consume('{') + delimiter = '}' + + while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'): + self._SkipField(tokenizer, immediate_message_type) + + tokenizer.Consume(delimiter) + + def _SkipFieldValue(self, tokenizer): + """Skips over a field value. + + Args: + tokenizer: A tokenizer to parse the field name and values. + + Raises: + ParseError: In case an invalid field value is found. + """ + if (not tokenizer.TryConsumeByteString()and + not tokenizer.TryConsumeIdentifier() and + not _TryConsumeInt64(tokenizer) and + not _TryConsumeUint64(tokenizer) and + not tokenizer.TryConsumeFloat()): + raise ParseError('Invalid field value: ' + tokenizer.token) + + def _SkipRepeatedFieldValue(self, tokenizer): + """Skips over a repeated field value. 
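+
+    For example, given the hypothetical unknown-field input `foo: [1, 2, 3]`,
+    this consumes the `[1, 2, 3]` portion after the colon.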
+ + Args: + tokenizer: A tokenizer to parse the field value. + """ + tokenizer.Consume('[') + if not tokenizer.LookingAt(']'): + self._SkipFieldValue(tokenizer) + while tokenizer.TryConsume(','): + self._SkipFieldValue(tokenizer) + tokenizer.Consume(']') + + +class Tokenizer(object): + """Protocol buffer text representation tokenizer. + + This class handles the lower level string parsing by splitting it into + meaningful tokens. + + It was directly ported from the Java protocol buffer API. + """ + + _WHITESPACE = re.compile(r'\s+') + _COMMENT = re.compile(r'(\s*#.*$)', re.MULTILINE) + _WHITESPACE_OR_COMMENT = re.compile(r'(\s|(#.*$))+', re.MULTILINE) + _TOKEN = re.compile('|'.join([ + r'[a-zA-Z_][0-9a-zA-Z_+-]*', # an identifier + r'([0-9+-]|(\.[0-9]))[0-9a-zA-Z_.+-]*', # a number + ] + [ # quoted str for each quote mark + # Avoid backtracking! https://stackoverflow.com/a/844267 + r'{qt}[^{qt}\n\\]*((\\.)+[^{qt}\n\\]*)*({qt}|\\?$)'.format(qt=mark) + for mark in _QUOTES + ])) + + _IDENTIFIER = re.compile(r'[^\d\W]\w*') + _IDENTIFIER_OR_NUMBER = re.compile(r'\w+') + + def __init__(self, lines, skip_comments=True): + self._position = 0 + self._line = -1 + self._column = 0 + self._token_start = None + self.token = '' + self._lines = iter(lines) + self._current_line = '' + self._previous_line = 0 + self._previous_column = 0 + self._more_lines = True + self._skip_comments = skip_comments + self._whitespace_pattern = (skip_comments and self._WHITESPACE_OR_COMMENT + or self._WHITESPACE) + self.contains_silent_marker_before_current_token = False + + self._SkipWhitespace() + self.NextToken() + + def LookingAt(self, token): + return self.token == token + + def AtEnd(self): + """Checks the end of the text was reached. + + Returns: + True iff the end was reached. + """ + return not self.token + + def _PopLine(self): + while len(self._current_line) <= self._column: + try: + self._current_line = next(self._lines) + except StopIteration: + self._current_line = '' + self._more_lines = False + return + else: + self._line += 1 + self._column = 0 + + def _SkipWhitespace(self): + while True: + self._PopLine() + match = self._whitespace_pattern.match(self._current_line, self._column) + if not match: + break + self.contains_silent_marker_before_current_token = match.group(0) == ( + ' ' + _DEBUG_STRING_SILENT_MARKER) + length = len(match.group(0)) + self._column += length + + def TryConsume(self, token): + """Tries to consume a given piece of text. + + Args: + token: Text to consume. + + Returns: + True iff the text was consumed. + """ + if self.token == token: + self.NextToken() + return True + return False + + def Consume(self, token): + """Consumes a piece of text. + + Args: + token: Text to consume. + + Raises: + ParseError: If the text couldn't be consumed. + """ + if not self.TryConsume(token): + raise self.ParseError('Expected "%s".' % token) + + def ConsumeComment(self): + result = self.token + if not self._COMMENT.match(result): + raise self.ParseError('Expected comment.') + self.NextToken() + return result + + def ConsumeCommentOrTrailingComment(self): + """Consumes a comment, returns a 2-tuple (trailing bool, comment str).""" + + # Tokenizer initializes _previous_line and _previous_column to 0. As the + # tokenizer starts, it looks like there is a previous token on the line. + just_started = self._line == 0 and self._column == 0 + + before_parsing = self._previous_line + comment = self.ConsumeComment() + + # A trailing comment is a comment on the same line than the previous token. 
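+    # For example (hypothetical input):
+    #   foo: 1  # this comment is trailing
+    #   # this comment is leading, not trailing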
+ trailing = (self._previous_line == before_parsing + and not just_started) + + return trailing, comment + + def TryConsumeIdentifier(self): + try: + self.ConsumeIdentifier() + return True + except ParseError: + return False + + def ConsumeIdentifier(self): + """Consumes protocol message field identifier. + + Returns: + Identifier string. + + Raises: + ParseError: If an identifier couldn't be consumed. + """ + result = self.token + if not self._IDENTIFIER.match(result): + raise self.ParseError('Expected identifier.') + self.NextToken() + return result + + def TryConsumeIdentifierOrNumber(self): + try: + self.ConsumeIdentifierOrNumber() + return True + except ParseError: + return False + + def ConsumeIdentifierOrNumber(self): + """Consumes protocol message field identifier. + + Returns: + Identifier string. + + Raises: + ParseError: If an identifier couldn't be consumed. + """ + result = self.token + if not self._IDENTIFIER_OR_NUMBER.match(result): + raise self.ParseError('Expected identifier or number, got %s.' % result) + self.NextToken() + return result + + def TryConsumeInteger(self): + try: + self.ConsumeInteger() + return True + except ParseError: + return False + + def ConsumeInteger(self): + """Consumes an integer number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an integer couldn't be consumed. + """ + try: + result = _ParseAbstractInteger(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def TryConsumeFloat(self): + try: + self.ConsumeFloat() + return True + except ParseError: + return False + + def ConsumeFloat(self): + """Consumes an floating point number. + + Returns: + The number parsed. + + Raises: + ParseError: If a floating point number couldn't be consumed. + """ + try: + result = ParseFloat(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ConsumeBool(self): + """Consumes a boolean value. + + Returns: + The bool parsed. + + Raises: + ParseError: If a boolean value couldn't be consumed. + """ + try: + result = ParseBool(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def TryConsumeByteString(self): + try: + self.ConsumeByteString() + return True + except ParseError: + return False + + def ConsumeString(self): + """Consumes a string value. + + Returns: + The string parsed. + + Raises: + ParseError: If a string value couldn't be consumed. + """ + the_bytes = self.ConsumeByteString() + try: + return str(the_bytes, 'utf-8') + except UnicodeDecodeError as e: + raise self._StringParseError(e) + + def ConsumeByteString(self): + """Consumes a byte array value. + + Returns: + The array parsed (as a string). + + Raises: + ParseError: If a byte array value couldn't be consumed. + """ + the_list = [self._ConsumeSingleByteString()] + while self.token and self.token[0] in _QUOTES: + the_list.append(self._ConsumeSingleByteString()) + return b''.join(the_list) + + def _ConsumeSingleByteString(self): + """Consume one token of a string literal. + + String literals (whether bytes or text) can come in multiple adjacent + tokens which are automatically concatenated, like in C or Python. This + method only consumes one token. + + Returns: + The token parsed. + Raises: + ParseError: When the wrong format data is found. 
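+
+    For example (hypothetical input), `"ab" "cd"` tokenizes as two string
+    tokens; ConsumeByteString() concatenates them into b'abcd', whereas this
+    method consumes only the first token and returns b'ab'.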
+ """ + text = self.token + if len(text) < 1 or text[0] not in _QUOTES: + raise self.ParseError('Expected string but found: %r' % (text,)) + + if len(text) < 2 or text[-1] != text[0]: + raise self.ParseError('String missing ending quote: %r' % (text,)) + + try: + result = text_encoding.CUnescape(text[1:-1]) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ConsumeEnum(self, field): + try: + result = ParseEnum(field, self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ParseErrorPreviousToken(self, message): + """Creates and *returns* a ParseError for the previously read token. + + Args: + message: A message to set for the exception. + + Returns: + A ParseError instance. + """ + return ParseError(message, self._previous_line + 1, + self._previous_column + 1) + + def ParseError(self, message): + """Creates and *returns* a ParseError for the current token.""" + return ParseError('\'' + self._current_line + '\': ' + message, + self._line + 1, self._column + 1) + + def _StringParseError(self, e): + return self.ParseError('Couldn\'t parse string: ' + str(e)) + + def NextToken(self): + """Reads the next meaningful token.""" + self._previous_line = self._line + self._previous_column = self._column + self.contains_silent_marker_before_current_token = False + + self._column += len(self.token) + self._SkipWhitespace() + + if not self._more_lines: + self.token = '' + return + + match = self._TOKEN.match(self._current_line, self._column) + if not match and not self._skip_comments: + match = self._COMMENT.match(self._current_line, self._column) + if match: + token = match.group(0) + self.token = token + else: + self.token = self._current_line[self._column] + +# Aliased so it can still be accessed by current visibility violators. +# TODO: Migrate violators to textformat_tokenizer. +_Tokenizer = Tokenizer # pylint: disable=invalid-name + + +def _ConsumeInt32(tokenizer): + """Consumes a signed 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If a signed 32bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=True, is_long=False) + + +def _ConsumeUint32(tokenizer): + """Consumes an unsigned 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an unsigned 32bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=False, is_long=False) + + +def _TryConsumeInt64(tokenizer): + try: + _ConsumeInt64(tokenizer) + return True + except ParseError: + return False + + +def _ConsumeInt64(tokenizer): + """Consumes a signed 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If a signed 32bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=True, is_long=True) + + +def _TryConsumeUint64(tokenizer): + try: + _ConsumeUint64(tokenizer) + return True + except ParseError: + return False + + +def _ConsumeUint64(tokenizer): + """Consumes an unsigned 64bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an unsigned 64bit integer couldn't be consumed. 
+ """ + return _ConsumeInteger(tokenizer, is_signed=False, is_long=True) + + +def _ConsumeInteger(tokenizer, is_signed=False, is_long=False): + """Consumes an integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + is_signed: True if a signed integer must be parsed. + is_long: True if a long integer must be parsed. + + Returns: + The integer parsed. + + Raises: + ParseError: If an integer with given characteristics couldn't be consumed. + """ + try: + result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long) + except ValueError as e: + raise tokenizer.ParseError(str(e)) + tokenizer.NextToken() + return result + + +def ParseInteger(text, is_signed=False, is_long=False): + """Parses an integer. + + Args: + text: The text to parse. + is_signed: True if a signed integer must be parsed. + is_long: True if a long integer must be parsed. + + Returns: + The integer value. + + Raises: + ValueError: Thrown Iff the text is not a valid integer. + """ + # Do the actual parsing. Exception handling is propagated to caller. + result = _ParseAbstractInteger(text) + + # Check if the integer is sane. Exceptions handled by callers. + checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] + checker.CheckValue(result) + return result + + +def _ParseAbstractInteger(text): + """Parses an integer without checking size/signedness. + + Args: + text: The text to parse. + + Returns: + The integer value. + + Raises: + ValueError: Thrown Iff the text is not a valid integer. + """ + # Do the actual parsing. Exception handling is propagated to caller. + orig_text = text + c_octal_match = re.match(r'(-?)0(\d+)$', text) + if c_octal_match: + # Python 3 no longer supports 0755 octal syntax without the 'o', so + # we always use the '0o' prefix for multi-digit numbers starting with 0. + text = c_octal_match.group(1) + '0o' + c_octal_match.group(2) + try: + return int(text, 0) + except ValueError: + raise ValueError('Couldn\'t parse integer: %s' % orig_text) + + +def ParseFloat(text): + """Parse a floating point number. + + Args: + text: Text to parse. + + Returns: + The number parsed. + + Raises: + ValueError: If a floating point number couldn't be parsed. + """ + try: + # Assume Python compatible syntax. + return float(text) + except ValueError: + # Check alternative spellings. + if _FLOAT_INFINITY.match(text): + if text[0] == '-': + return float('-inf') + else: + return float('inf') + elif _FLOAT_NAN.match(text): + return float('nan') + else: + # assume '1.0f' format + try: + return float(text.rstrip('f')) + except ValueError: + raise ValueError("Couldn't parse float: %s" % text) + + +def ParseBool(text): + """Parse a boolean value. + + Args: + text: Text to parse. + + Returns: + Boolean values parsed + + Raises: + ValueError: If text is not a valid boolean. + """ + if text in ('true', 't', '1', 'True'): + return True + elif text in ('false', 'f', '0', 'False'): + return False + else: + raise ValueError('Expected "true" or "false".') + + +def ParseEnum(field, value): + """Parse an enum value. + + The value can be specified by a number (the enum value), or by + a string literal (the enum name). + + Args: + field: Enum field descriptor. + value: String value. + + Returns: + Enum value number. + + Raises: + ValueError: If the enum value could not be parsed. + """ + enum_descriptor = field.enum_type + try: + number = int(value, 0) + except ValueError: + # Identifier. 
+ enum_value = enum_descriptor.values_by_name.get(value, None) + if enum_value is None: + raise ValueError('Enum type "%s" has no value named %s.' % + (enum_descriptor.full_name, value)) + else: + if not field.enum_type.is_closed: + return number + enum_value = enum_descriptor.values_by_number.get(number, None) + if enum_value is None: + raise ValueError('Enum type "%s" has no value with number %d.' % + (enum_descriptor.full_name, number)) + return enum_value.number diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/timestamp.py b/.venv/lib/python3.12/site-packages/google/protobuf/timestamp.py new file mode 100644 index 00000000..b23b9f11 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/timestamp.py @@ -0,0 +1,112 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Contains the Timestamp helper APIs.""" + +import datetime +from typing import Optional + +from google.protobuf.timestamp_pb2 import Timestamp + + +def from_json_string(value: str) -> Timestamp: + """Parse a RFC 3339 date string format to Timestamp. + + Args: + value: A date string. Any fractional digits (or none) and any offset are + accepted as long as they fit into nano-seconds precision. Example of + accepted format: '1972-01-01T10:00:20.021-05:00' + + Raises: + ValueError: On parsing problems. + """ + timestamp = Timestamp() + timestamp.FromJsonString(value) + return timestamp + + +def from_microseconds(micros: float) -> Timestamp: + """Converts microseconds since epoch to Timestamp.""" + timestamp = Timestamp() + timestamp.FromMicroseconds(micros) + return timestamp + + +def from_milliseconds(millis: float) -> Timestamp: + """Converts milliseconds since epoch to Timestamp.""" + timestamp = Timestamp() + timestamp.FromMilliseconds(millis) + return timestamp + + +def from_nanoseconds(nanos: float) -> Timestamp: + """Converts nanoseconds since epoch to Timestamp.""" + timestamp = Timestamp() + timestamp.FromNanoseconds(nanos) + return timestamp + + +def from_seconds(seconds: float) -> Timestamp: + """Converts seconds since epoch to Timestamp.""" + timestamp = Timestamp() + timestamp.FromSeconds(seconds) + return timestamp + + +def from_current_time() -> Timestamp: + """Converts the current UTC to Timestamp.""" + timestamp = Timestamp() + timestamp.FromDatetime(datetime.datetime.now(tz=datetime.timezone.utc)) + return timestamp + + +def to_json_string(ts: Timestamp) -> str: + """Converts Timestamp to RFC 3339 date string format. + + Returns: + A string converted from timestamp. The string is always Z-normalized + and uses 3, 6 or 9 fractional digits as required to represent the + exact time. 
Example of the return format: '1972-01-01T10:00:20.021Z' + """ + return ts.ToJsonString() + + +def to_microseconds(ts: Timestamp) -> int: + """Converts Timestamp to microseconds since epoch.""" + return ts.ToMicroseconds() + + +def to_milliseconds(ts: Timestamp) -> int: + """Converts Timestamp to milliseconds since epoch.""" + return ts.ToMilliseconds() + + +def to_nanoseconds(ts: Timestamp) -> int: + """Converts Timestamp to nanoseconds since epoch.""" + return ts.ToNanoseconds() + + +def to_seconds(ts: Timestamp) -> int: + """Converts Timestamp to seconds since epoch.""" + return ts.ToSeconds() + + +def to_datetime( + ts: Timestamp, tz: Optional[datetime.tzinfo] = None +) -> datetime.datetime: + """Converts Timestamp to a datetime. + + Args: + tz: A datetime.tzinfo subclass; defaults to None. + + Returns: + If tzinfo is None, returns a timezone-naive UTC datetime (with no timezone + information, i.e. not aware that it's UTC). + + Otherwise, returns a timezone-aware datetime in the input timezone. + """ + return ts.ToDatetime(tzinfo=tz) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/timestamp_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/timestamp_pb2.py new file mode 100644 index 00000000..2bf7cef3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/timestamp_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: google/protobuf/timestamp.proto +# Protobuf Python Version: 5.29.4 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 4, + '', + 'google/protobuf/timestamp.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n\tTimestamp\x12\x18\n\x07seconds\x18\x01 \x01(\x03R\x07seconds\x12\x14\n\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n\x13\x63om.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.timestamp_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\016TimestampProtoP\001Z2google.golang.org/protobuf/types/known/timestamppb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _globals['_TIMESTAMP']._serialized_start=52 + _globals['_TIMESTAMP']._serialized_end=111 +# @@protoc_insertion_point(module_scope) diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/type_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/type_pb2.py new file mode 100644 index 00000000..4c63c33c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/google/protobuf/type_pb2.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. 
diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/type_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/type_pb2.py
new file mode 100644
index 00000000..4c63c33c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/protobuf/type_pb2.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: google/protobuf/type.proto
+# Protobuf Python Version: 5.29.4
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+    _runtime_version.Domain.PUBLIC,
+    5,
+    29,
+    4,
+    '',
+    'google/protobuf/type.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1agoogle/protobuf/type.proto\x12\x0fgoogle.protobuf\x1a\x19google/protobuf/any.proto\x1a$google/protobuf/source_context.proto\"\xa7\x02\n\x04Type\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12.\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x16.google.protobuf.FieldR\x06\x66ields\x12\x16\n\x06oneofs\x18\x03 \x03(\tR\x06oneofs\x12\x31\n\x07options\x18\x04 \x03(\x0b\x32\x17.google.protobuf.OptionR\x07options\x12\x45\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContextR\rsourceContext\x12/\n\x06syntax\x18\x06 \x01(\x0e\x32\x17.google.protobuf.SyntaxR\x06syntax\x12\x18\n\x07\x65\x64ition\x18\x07 \x01(\tR\x07\x65\x64ition\"\xb4\x06\n\x05\x46ield\x12/\n\x04kind\x18\x01 \x01(\x0e\x32\x1b.google.protobuf.Field.KindR\x04kind\x12\x44\n\x0b\x63\x61rdinality\x18\x02 \x01(\x0e\x32\".google.protobuf.Field.CardinalityR\x0b\x63\x61rdinality\x12\x16\n\x06number\x18\x03 \x01(\x05R\x06number\x12\x12\n\x04name\x18\x04 \x01(\tR\x04name\x12\x19\n\x08type_url\x18\x06 \x01(\tR\x07typeUrl\x12\x1f\n\x0boneof_index\x18\x07 \x01(\x05R\noneofIndex\x12\x16\n\x06packed\x18\x08 \x01(\x08R\x06packed\x12\x31\n\x07options\x18\t \x03(\x0b\x32\x17.google.protobuf.OptionR\x07options\x12\x1b\n\tjson_name\x18\n \x01(\tR\x08jsonName\x12#\n\rdefault_value\x18\x0b \x01(\tR\x0c\x64\x65\x66\x61ultValue\"\xc8\x02\n\x04Kind\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"t\n\x0b\x43\x61rdinality\x12\x17\n\x13\x43\x41RDINALITY_UNKNOWN\x10\x00\x12\x18\n\x14\x43\x41RDINALITY_OPTIONAL\x10\x01\x12\x18\n\x14\x43\x41RDINALITY_REQUIRED\x10\x02\x12\x18\n\x14\x43\x41RDINALITY_REPEATED\x10\x03\"\x99\x02\n\x04\x45num\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x38\n\tenumvalue\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.EnumValueR\tenumvalue\x12\x31\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.OptionR\x07options\x12\x45\n\x0esource_context\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.SourceContextR\rsourceContext\x12/\n\x06syntax\x18\x05 \x01(\x0e\x32\x17.google.protobuf.SyntaxR\x06syntax\x12\x18\n\x07\x65\x64ition\x18\x06 \x01(\tR\x07\x65\x64ition\"j\n\tEnumValue\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n\x06number\x18\x02 \x01(\x05R\x06number\x12\x31\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.OptionR\x07options\"H\n\x06Option\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.AnyR\x05value*C\n\x06Syntax\x12\x11\n\rSYNTAX_PROTO2\x10\x00\x12\x11\n\rSYNTAX_PROTO3\x10\x01\x12\x13\n\x0fSYNTAX_EDITIONS\x10\x02\x42{\n\x13\x63om.google.protobufB\tTypeProtoP\x01Z-google.golang.org/protobuf/types/known/typepb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.type_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+  _globals['DESCRIPTOR']._loaded_options = None
+  _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\tTypeProtoP\001Z-google.golang.org/protobuf/types/known/typepb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'
+  _globals['_SYNTAX']._serialized_start=1699
+  _globals['_SYNTAX']._serialized_end=1766
+  _globals['_TYPE']._serialized_start=113
+  _globals['_TYPE']._serialized_end=408
+  _globals['_FIELD']._serialized_start=411
+  _globals['_FIELD']._serialized_end=1231
+  _globals['_FIELD_KIND']._serialized_start=785
+  _globals['_FIELD_KIND']._serialized_end=1113
+  _globals['_FIELD_CARDINALITY']._serialized_start=1115
+  _globals['_FIELD_CARDINALITY']._serialized_end=1231
+  _globals['_ENUM']._serialized_start=1234
+  _globals['_ENUM']._serialized_end=1515
+  _globals['_ENUMVALUE']._serialized_start=1517
+  _globals['_ENUMVALUE']._serialized_end=1623
+  _globals['_OPTION']._serialized_start=1625
+  _globals['_OPTION']._serialized_end=1697
+# @@protoc_insertion_point(module_scope)
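A minimal sketch of what this generated module exposes (assuming the standard protobuf Python runtime; the field values are made up for illustration):

    from google.protobuf import type_pb2

    # Top-level enums become EnumTypeWrapper objects; nested enums hang off
    # their message class.
    print(type_pb2.Syntax.Name(type_pb2.SYNTAX_PROTO3))  # 'SYNTAX_PROTO3'

    # google.protobuf.Field describes one field of a dynamically known type.
    field = type_pb2.Field(
        name='id',
        number=1,
        kind=type_pb2.Field.TYPE_INT64,
        cardinality=type_pb2.Field.CARDINALITY_OPTIONAL,
    )
    print(field.kind == type_pb2.Field.TYPE_INT64)  # True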
+    __slots__ = ['_field_number', '_wire_type', '_data']
+
+    def __init__(self, field_number, wire_type, data):
+      self._field_number = field_number
+      self._wire_type = wire_type
+      self._data = data
+      return
+
+    @property
+    def field_number(self):
+      return self._field_number
+
+    @property
+    def wire_type(self):
+      return self._wire_type
+
+    @property
+    def data(self):
+      return self._data
+
+  class UnknownFieldSet:
+    """UnknownField container."""
+
+    # Disallows assignment to other attributes.
+    __slots__ = ['_values']
+
+    def __init__(self, msg):
+
+      def InternalAdd(field_number, wire_type, data):
+        unknown_field = UnknownField(field_number, wire_type, data)
+        self._values.append(unknown_field)
+
+      self._values = []
+      msg_des = msg.DESCRIPTOR
+      # pylint: disable=protected-access
+      unknown_fields = msg._unknown_fields
+      if (msg_des.has_options and
+          msg_des.GetOptions().message_set_wire_format):
+        local_decoder = decoder.UnknownMessageSetItemDecoder()
+        for _, buffer in unknown_fields:
+          (field_number, data) = local_decoder(memoryview(buffer))
+          InternalAdd(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED, data)
+      else:
+        for tag_bytes, buffer in unknown_fields:
+          # pylint: disable=protected-access
+          (tag, _) = decoder._DecodeVarint(tag_bytes, 0)
+          field_number, wire_type = wire_format.UnpackTag(tag)
+          if field_number == 0:
+            raise RuntimeError('Field number 0 is illegal.')
+          (data, _) = decoder._DecodeUnknownField(
+              memoryview(buffer), 0, wire_type)
+          InternalAdd(field_number, wire_type, data)
+
+    def __getitem__(self, index):
+      size = len(self._values)
+      if index < 0:
+        index += size
+      if index < 0 or index >= size:
+        raise IndexError('index %d out of range' % index)
+
+      return self._values[index]
+
+    def __len__(self):
+      return len(self._values)
+
+    def __iter__(self):
+      return iter(self._values)
diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/util/__init__.py b/.venv/lib/python3.12/site-packages/google/protobuf/util/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/protobuf/util/__init__.py
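A short sketch of the API above in action (the hand-encoded bytes are illustrative): Timestamp defines only fields 1 and 2, so field 3 below is retained as an unknown field.

    from google.protobuf import timestamp_pb2
    from google.protobuf import unknown_fields

    msg = timestamp_pb2.Timestamp()
    # 0x18 = tag for field number 3, wire type 0 (varint); 0x01 = value 1.
    msg.ParseFromString(b'\x18\x01')

    for f in unknown_fields.UnknownFieldSet(msg):
      print(f.field_number, f.wire_type, f.data)  # 3 0 1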
diff --git a/.venv/lib/python3.12/site-packages/google/protobuf/wrappers_pb2.py b/.venv/lib/python3.12/site-packages/google/protobuf/wrappers_pb2.py
new file mode 100644
index 00000000..d2f98b68
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google/protobuf/wrappers_pb2.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: google/protobuf/wrappers.proto
+# Protobuf Python Version: 5.29.4
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+    _runtime_version.Domain.PUBLIC,
+    5,
+    29,
+    4,
+    '',
+    'google/protobuf/wrappers.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n\x0b\x44oubleValue\x12\x14\n\x05value\x18\x01 \x01(\x01R\x05value\"\"\n\nFloatValue\x12\x14\n\x05value\x18\x01 \x01(\x02R\x05value\"\"\n\nInt64Value\x12\x14\n\x05value\x18\x01 \x01(\x03R\x05value\"#\n\x0bUInt64Value\x12\x14\n\x05value\x18\x01 \x01(\x04R\x05value\"\"\n\nInt32Value\x12\x14\n\x05value\x18\x01 \x01(\x05R\x05value\"#\n\x0bUInt32Value\x12\x14\n\x05value\x18\x01 \x01(\rR\x05value\"!\n\tBoolValue\x12\x14\n\x05value\x18\x01 \x01(\x08R\x05value\"#\n\x0bStringValue\x12\x14\n\x05value\x18\x01 \x01(\tR\x05value\"\"\n\nBytesValue\x12\x14\n\x05value\x18\x01 \x01(\x0cR\x05valueB\x83\x01\n\x13\x63om.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.wrappers_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+  _globals['DESCRIPTOR']._loaded_options = None
+  _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\rWrappersProtoP\001Z1google.golang.org/protobuf/types/known/wrapperspb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'
+  _globals['_DOUBLEVALUE']._serialized_start=51
+  _globals['_DOUBLEVALUE']._serialized_end=86
+  _globals['_FLOATVALUE']._serialized_start=88
+  _globals['_FLOATVALUE']._serialized_end=122
+  _globals['_INT64VALUE']._serialized_start=124
+  _globals['_INT64VALUE']._serialized_end=158
+  _globals['_UINT64VALUE']._serialized_start=160
+  _globals['_UINT64VALUE']._serialized_end=195
+  _globals['_INT32VALUE']._serialized_start=197
+  _globals['_INT32VALUE']._serialized_end=231
+  _globals['_UINT32VALUE']._serialized_start=233
+  _globals['_UINT32VALUE']._serialized_end=268
+  _globals['_BOOLVALUE']._serialized_start=270
+  _globals['_BOOLVALUE']._serialized_end=303
+  _globals['_STRINGVALUE']._serialized_start=305
+  _globals['_STRINGVALUE']._serialized_end=340
+  _globals['_BYTESVALUE']._serialized_start=342
+  _globals['_BYTESVALUE']._serialized_end=376
+# @@protoc_insertion_point(module_scope)
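A closing sketch of why these wrapper messages exist (illustrative only): wrapping a scalar in a message gives proto3 field presence, so an unset wrapper is distinguishable from one carrying the scalar default.

    from google.protobuf import wrappers_pb2

    v = wrappers_pb2.Int32Value(value=0)
    print(v.SerializeToString())  # b'' -- 0 is the proto3 default, omitted on the wire

    s = wrappers_pb2.StringValue(value='hello')
    print(s.SerializeToString())  # b'\n\x05hello' -- field 1, length-delimited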