path: root/.venv/lib/python3.12/site-packages/azure/ai/inference/models
author     S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/azure/ai/inference/models
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download   gn-ai-master.tar.gz
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/azure/ai/inference/models')
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/inference/models/__init__.py    96
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/inference/models/_enums.py     146
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/inference/models/_models.py   1458
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/inference/models/_patch.py     576
4 files changed, 2276 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/inference/models/__init__.py b/.venv/lib/python3.12/site-packages/azure/ai/inference/models/__init__.py
new file mode 100644
index 00000000..66e62570
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/inference/models/__init__.py
@@ -0,0 +1,96 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+
+from ._models import (  # type: ignore
+    AudioContentItem,
+    ChatChoice,
+    ChatCompletions,
+    ChatCompletionsNamedToolChoice,
+    ChatCompletionsNamedToolChoiceFunction,
+    ChatCompletionsToolCall,
+    ChatCompletionsToolDefinition,
+    ChatResponseMessage,
+    CompletionsUsage,
+    ContentItem,
+    EmbeddingItem,
+    EmbeddingsResult,
+    EmbeddingsUsage,
+    FunctionCall,
+    FunctionDefinition,
+    ImageContentItem,
+    ImageEmbeddingInput,
+    ImageUrl,
+    InputAudio,
+    JsonSchemaFormat,
+    ModelInfo,
+    StreamingChatChoiceUpdate,
+    StreamingChatCompletionsUpdate,
+    StreamingChatResponseMessageUpdate,
+    StreamingChatResponseToolCallUpdate,
+    TextContentItem,
+)
+
+from ._enums import (  # type: ignore
+    AudioContentFormat,
+    ChatCompletionsToolChoicePreset,
+    ChatRole,
+    CompletionsFinishReason,
+    EmbeddingEncodingFormat,
+    EmbeddingInputType,
+    ImageDetailLevel,
+    ModelType,
+)
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AudioContentItem",
+    "ChatChoice",
+    "ChatCompletions",
+    "ChatCompletionsNamedToolChoice",
+    "ChatCompletionsNamedToolChoiceFunction",
+    "ChatCompletionsToolCall",
+    "ChatCompletionsToolDefinition",
+    "ChatResponseMessage",
+    "CompletionsUsage",
+    "ContentItem",
+    "EmbeddingItem",
+    "EmbeddingsResult",
+    "EmbeddingsUsage",
+    "FunctionCall",
+    "FunctionDefinition",
+    "ImageContentItem",
+    "ImageEmbeddingInput",
+    "ImageUrl",
+    "InputAudio",
+    "JsonSchemaFormat",
+    "ModelInfo",
+    "StreamingChatChoiceUpdate",
+    "StreamingChatCompletionsUpdate",
+    "StreamingChatResponseMessageUpdate",
+    "StreamingChatResponseToolCallUpdate",
+    "TextContentItem",
+    "AudioContentFormat",
+    "ChatCompletionsToolChoicePreset",
+    "ChatRole",
+    "CompletionsFinishReason",
+    "EmbeddingEncodingFormat",
+    "EmbeddingInputType",
+    "ImageDetailLevel",
+    "ModelType",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
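This `__init__.py` re-exports the generated models and enums, then layers on any hand-written customizations from `_patch.py`. A minimal consumption sketch (assumes the `azure-ai-inference` package is installed; always import through this public surface rather than the private `_models`/`_enums` modules, since `_patch.py` may customize it):

```python
from azure.ai.inference.models import ChatRole, FunctionDefinition

assert ChatRole.ASSISTANT == "assistant"     # enums are str-valued
fn = FunctionDefinition(name="get_weather")  # models are keyword-constructed
assert fn.name == "get_weather"
```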
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/inference/models/_enums.py b/.venv/lib/python3.12/site-packages/azure/ai/inference/models/_enums.py
new file mode 100644
index 00000000..6214f668
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/inference/models/_enums.py
@@ -0,0 +1,146 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+from azure.core import CaseInsensitiveEnumMeta
+
+
+class AudioContentFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """A representation of the possible audio formats for audio."""
+
+    WAV = "wav"
+    """Specifies audio in WAV format."""
+    MP3 = "mp3"
+    """Specifies audio in MP3 format."""
+
+
+class ChatCompletionsToolChoicePreset(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Represents a generic policy for how a chat completions tool may be selected."""
+
+    AUTO = "auto"
+    """Specifies that the model may either use any of the tools provided in this chat completions
+    request or
+    instead return a standard chat completions response as if no tools were provided."""
+    NONE = "none"
+    """Specifies that the model should not respond with a tool call and should instead provide a
+    standard chat
+    completions response. Response content may still be influenced by the provided tool
+    definitions."""
+    REQUIRED = "required"
+    """Specifies that the model should respond with a call to one or more tools."""
+
+
+class ChatRole(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """A description of the intended purpose of a message within a chat completions interaction."""
+
+    SYSTEM = "system"
+    """The role that instructs or sets the behavior of the assistant."""
+    USER = "user"
+    """The role that provides input for chat completions."""
+    ASSISTANT = "assistant"
+    """The role that provides responses to system-instructed, user-prompted input."""
+    TOOL = "tool"
+    """The role that represents extension tool activity within a chat completions operation."""
+    DEVELOPER = "developer"
+    """The role that instructs or sets the behavior of the assistant. Some AI models support this role
+    instead of the 'system' role."""
+
+
+class CompletionsFinishReason(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Representation of the manner in which a completions response concluded."""
+
+    STOPPED = "stop"
+    """Completions ended normally and reached its end of token generation."""
+    TOKEN_LIMIT_REACHED = "length"
+    """Completions exhausted available token limits before generation could complete."""
+    CONTENT_FILTERED = "content_filter"
+    """Completions generated a response that was identified as potentially sensitive per content
+    moderation policies."""
+    TOOL_CALLS = "tool_calls"
+    """Completion ended with the model calling a provided tool for output."""
+
+
+class EmbeddingEncodingFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The format of the embeddings result.
+    Returns a 422 error if the model doesn't support the value or parameter.
+    """
+
+    BASE64 = "base64"
+    """Base64"""
+    BINARY = "binary"
+    """Binary"""
+    FLOAT = "float"
+    """Floating point"""
+    INT8 = "int8"
+    """Signed 8-bit integer"""
+    UBINARY = "ubinary"
+    """ubinary"""
+    UINT8 = "uint8"
+    """Unsigned 8-bit integer"""
+
+
+class EmbeddingInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Represents the input types used for embedding search."""
+
+    TEXT = "text"
+    """Indicates the input is a general text input."""
+    QUERY = "query"
+    """Indicates the input represents a search query to find the most relevant documents in your
+    vector database."""
+    DOCUMENT = "document"
+    """Indicates the input represents a document that is stored in a vector database."""
+
+
+class ExtraParameters(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Controls what happens if extra parameters, undefined by the REST API, are passed in the JSON
+    request payload.
+    """
+
+    ERROR = "error"
+    """The service will error if it detected extra parameters in the request payload. This is the
+    service default."""
+    DROP = "drop"
+    """The service will ignore (drop) extra parameters in the request payload. It will only pass the
+    known parameters to the back-end AI model."""
+    PASS_THROUGH = "pass-through"
+    """The service will pass extra parameters to the back-end AI model."""
+
+
+class ImageDetailLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """A representation of the possible image detail levels for image-based chat completions message
+    content.
+    """
+
+    AUTO = "auto"
+    """Specifies that the model should determine which detail level to apply using heuristics like
+    image size."""
+    LOW = "low"
+    """Specifies that image evaluation should be constrained to the 'low-res' model that may be faster
+    and consume fewer
+    tokens but may also be less accurate for highly detailed images."""
+    HIGH = "high"
+    """Specifies that image evaluation should enable the 'high-res' model that may be more accurate
+    for highly detailed
+    images but may also be slower and consume more tokens."""
+
+
+class ModelType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The type of AI model."""
+
+    EMBEDDINGS = "embeddings"
+    """A model capable of generating embeddings from a text"""
+    IMAGE_GENERATION = "image_generation"
+    """A model capable of generating images from an image and text description"""
+    TEXT_GENERATION = "text_generation"
+    """A text generation model"""
+    IMAGE_EMBEDDINGS = "image_embeddings"
+    """A model capable of generating embeddings from an image"""
+    AUDIO_GENERATION = "audio_generation"
+    """A text-to-audio generative model"""
+    CHAT_COMPLETION = "chat_completion"
+    """A model capable of taking chat-formatted messages and generate responses"""
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/inference/models/_models.py b/.venv/lib/python3.12/site-packages/azure/ai/inference/models/_models.py
new file mode 100644
index 00000000..53934528
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/inference/models/_models.py
@@ -0,0 +1,1458 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=useless-super-delegation
+
+import datetime
+from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload
+
+from .. import _model_base
+from .._model_base import rest_discriminator, rest_field
+from ._enums import ChatRole
+
+if TYPE_CHECKING:
+    from .. import models as _models
+
+
+class ContentItem(_model_base.Model):
+    """An abstract representation of a structured content item within a chat message.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    ImageContentItem, AudioContentItem, TextContentItem
+
+    :ivar type: The discriminated object type. Required. Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The discriminated object type. Required. Default value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class AudioContentItem(ContentItem, discriminator="input_audio"):
+    """A structured chat content item containing an audio content.
+
+    :ivar type: The discriminated object type: always 'input_audio' for this type. Required.
+     Default value is "input_audio".
+    :vartype type: str
+    :ivar input_audio: The details of the input audio. Required.
+    :vartype input_audio: ~azure.ai.inference.models.InputAudio
+    """
+
+    type: Literal["input_audio"] = rest_discriminator(name="type")  # type: ignore
+    """The discriminated object type: always 'input_audio' for this type. Required. Default value is
+     \"input_audio\"."""
+    input_audio: "_models.InputAudio" = rest_field()
+    """The details of the input audio. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        input_audio: "_models.InputAudio",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="input_audio", **kwargs)
+
+
+class ChatChoice(_model_base.Model):
+    """The representation of a single prompt completion as part of an overall chat completions
+    request.
+    Generally, ``n`` choices are generated per provided prompt with a default value of 1.
+    Token limits and other settings may limit the number of choices generated.
+
+
+    :ivar index: The ordered index associated with this chat completions choice. Required.
+    :vartype index: int
+    :ivar finish_reason: The reason that this chat completions choice completed its generation.
+     Required. Known values are: "stop", "length", "content_filter", and "tool_calls".
+    :vartype finish_reason: str or ~azure.ai.inference.models.CompletionsFinishReason
+    :ivar message: The chat message for a given chat completions prompt. Required.
+    :vartype message: ~azure.ai.inference.models.ChatResponseMessage
+    """
+
+    index: int = rest_field()
+    """The ordered index associated with this chat completions choice. Required."""
+    finish_reason: Union[str, "_models.CompletionsFinishReason"] = rest_field()
+    """The reason that this chat completions choice completed its generated. Required. Known values
+     are: \"stop\", \"length\", \"content_filter\", and \"tool_calls\"."""
+    message: "_models.ChatResponseMessage" = rest_field()
+    """The chat message for a given chat completions prompt. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        finish_reason: Union[str, "_models.CompletionsFinishReason"],
+        message: "_models.ChatResponseMessage",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class ChatCompletions(_model_base.Model):
+    """Representation of the response data from a chat completions request.
+    Completions support a wide variety of tasks and generate text that continues from or
+    "completes"
+    provided prompt data.
+
+
+    :ivar id: A unique identifier associated with this chat completions response. Required.
+    :vartype id: str
+    :ivar created: The first timestamp associated with generation activity for this completions
+     response,
+     represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.
+    :vartype created: ~datetime.datetime
+    :ivar model: The model used for the chat completion. Required.
+    :vartype model: str
+    :ivar choices: The collection of completions choices associated with this completions response.
+     Generally, ``n`` choices are generated per provided prompt with a default value of 1.
+     Token limits and other settings may limit the number of choices generated. Required.
+    :vartype choices: list[~azure.ai.inference.models.ChatChoice]
+    :ivar usage: Usage information for tokens processed and generated as part of this completions
+     operation. Required.
+    :vartype usage: ~azure.ai.inference.models.CompletionsUsage
+    """
+
+    id: str = rest_field()
+    """A unique identifier associated with this chat completions response. Required."""
+    created: datetime.datetime = rest_field(format="unix-timestamp")
+    """The first timestamp associated with generation activity for this completions response,
+     represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required."""
+    model: str = rest_field()
+    """The model used for the chat completion. Required."""
+    choices: List["_models.ChatChoice"] = rest_field()
+    """The collection of completions choices associated with this completions response.
+     Generally, ``n`` choices are generated per provided prompt with a default value of 1.
+     Token limits and other settings may limit the number of choices generated. Required."""
+    usage: "_models.CompletionsUsage" = rest_field()
+    """Usage information for tokens processed and generated as part of this completions operation.
+     Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        created: datetime.datetime,
+        model: str,
+        choices: List["_models.ChatChoice"],
+        usage: "_models.CompletionsUsage",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
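Each model also accepts a single positional mapping of raw JSON (the `mapping` overload above), which is how response payloads are materialized; nested fields are surfaced as typed models on access. A sketch with an illustrative payload:

```python
from azure.ai.inference.models import ChatCompletions

raw = {  # illustrative wire payload
    "id": "cmpl-123",
    "created": 1735689600,  # unix seconds; surfaced as datetime.datetime
    "model": "some-model",
    "choices": [
        {
            "index": 0,
            "finish_reason": "stop",
            "message": {"role": "assistant", "content": "Hello!"},
        }
    ],
    "usage": {"completion_tokens": 2, "prompt_tokens": 5, "total_tokens": 7},
}

completions = ChatCompletions(raw)
print(completions.choices[0].message.content)  # -> "Hello!"
```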
+
+
+class ChatCompletionsNamedToolChoice(_model_base.Model):
+    """A tool selection of a specific, named function tool that will limit chat completions to using
+    the named function.
+
+    :ivar type: The type of the tool. Currently, only ``function`` is supported. Required. Default
+     value is "function".
+    :vartype type: str
+    :ivar function: The function that should be called. Required.
+    :vartype function: ~azure.ai.inference.models.ChatCompletionsNamedToolChoiceFunction
+    """
+
+    type: Literal["function"] = rest_field()
+    """The type of the tool. Currently, only ``function`` is supported. Required. Default value is
+     \"function\"."""
+    function: "_models.ChatCompletionsNamedToolChoiceFunction" = rest_field()
+    """The function that should be called. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        function: "_models.ChatCompletionsNamedToolChoiceFunction",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.type: Literal["function"] = "function"
+
+
+class ChatCompletionsNamedToolChoiceFunction(_model_base.Model):
+    """A tool selection of a specific, named function tool that will limit chat completions to using
+    the named function.
+
+    :ivar name: The name of the function that should be called. Required.
+    :vartype name: str
+    """
+
+    name: str = rest_field()
+    """The name of the function that should be called. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class ChatCompletionsResponseFormat(_model_base.Model):
+    """Represents the format that the model must output. Use this to enable JSON mode instead of the
+    default text mode.
+    Note that to enable JSON mode, some AI models may also require you to instruct the model to
+    produce JSON
+    via a system or user message.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    ChatCompletionsResponseFormatJsonObject, ChatCompletionsResponseFormatJsonSchema,
+    ChatCompletionsResponseFormatText
+
+    :ivar type: The response format type to use for chat completions. Required. Default value is
+     None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The response format type to use for chat completions. Required. Default value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class ChatCompletionsResponseFormatJsonObject(ChatCompletionsResponseFormat, discriminator="json_object"):
+    """A response format for Chat Completions that restricts responses to emitting valid JSON objects.
+    Note that to enable JSON mode, some AI models may also require you to instruct the model to
+    produce JSON
+    via a system or user message.
+
+    :ivar type: Response format type: always 'json_object' for this object. Required. Default value
+     is "json_object".
+    :vartype type: str
+    """
+
+    type: Literal["json_object"] = rest_discriminator(name="type")  # type: ignore
+    """Response format type: always 'json_object' for this object. Required. Default value is
+     \"json_object\"."""
+
+    @overload
+    def __init__(
+        self,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="json_object", **kwargs)
+
+
+class ChatCompletionsResponseFormatJsonSchema(ChatCompletionsResponseFormat, discriminator="json_schema"):
+    """A response format for Chat Completions that restricts responses to emitting valid JSON objects,
+    with a
+    JSON schema specified by the caller.
+
+    :ivar type: The type of response format being defined: ``json_schema``. Required. Default value
+     is "json_schema".
+    :vartype type: str
+    :ivar json_schema: The definition of the required JSON schema in the response, and associated
+     metadata. Required.
+    :vartype json_schema: ~azure.ai.inference.models.JsonSchemaFormat
+    """
+
+    type: Literal["json_schema"] = rest_discriminator(name="type")  # type: ignore
+    """The type of response format being defined: ``json_schema``. Required. Default value is
+     \"json_schema\"."""
+    json_schema: "_models.JsonSchemaFormat" = rest_field()
+    """The definition of the required JSON schema in the response, and associated metadata. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        json_schema: "_models.JsonSchemaFormat",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="json_schema", **kwargs)
+
+
+class ChatCompletionsResponseFormatText(ChatCompletionsResponseFormat, discriminator="text"):
+    """A response format for Chat Completions that emits text responses. This is the default response
+    format.
+
+    :ivar type: Response format type: always 'text' for this object. Required. Default value is
+     "text".
+    :vartype type: str
+    """
+
+    type: Literal["text"] = rest_discriminator(name="type")  # type: ignore
+    """Response format type: always 'text' for this object. Required. Default value is \"text\"."""
+
+    @overload
+    def __init__(
+        self,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="text", **kwargs)
+
+
+class ChatCompletionsToolCall(_model_base.Model):
+    """A function tool call requested by the AI model.
+
+    :ivar id: The ID of the tool call. Required.
+    :vartype id: str
+    :ivar type: The type of tool call. Currently, only ``function`` is supported. Required. Default
+     value is "function".
+    :vartype type: str
+    :ivar function: The details of the function call requested by the AI model. Required.
+    :vartype function: ~azure.ai.inference.models.FunctionCall
+    """
+
+    id: str = rest_field()
+    """The ID of the tool call. Required."""
+    type: Literal["function"] = rest_field()
+    """The type of tool call. Currently, only ``function`` is supported. Required. Default value is
+     \"function\"."""
+    function: "_models.FunctionCall" = rest_field()
+    """The details of the function call requested by the AI model. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        function: "_models.FunctionCall",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.type: Literal["function"] = "function"
+
+
+class ChatCompletionsToolDefinition(_model_base.Model):
+    """The definition of a chat completions tool that can call a function.
+
+    :ivar type: The type of the tool. Currently, only ``function`` is supported. Required. Default
+     value is "function".
+    :vartype type: str
+    :ivar function: The function definition details for the function tool. Required.
+    :vartype function: ~azure.ai.inference.models.FunctionDefinition
+    """
+
+    type: Literal["function"] = rest_field()
+    """The type of the tool. Currently, only ``function`` is supported. Required. Default value is
+     \"function\"."""
+    function: "_models.FunctionDefinition" = rest_field()
+    """The function definition details for the function tool. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        function: "_models.FunctionDefinition",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.type: Literal["function"] = "function"
+
+
+class ChatRequestMessage(_model_base.Model):
+    """An abstract representation of a chat message as provided in a request.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    ChatRequestAssistantMessage, ChatRequestDeveloperMessage, ChatRequestSystemMessage,
+    ChatRequestToolMessage, ChatRequestUserMessage
+
+    :ivar role: The chat role associated with this message. Required. Known values are: "system",
+     "user", "assistant", "tool", and "developer".
+    :vartype role: str or ~azure.ai.inference.models.ChatRole
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    role: str = rest_discriminator(name="role")
+    """The chat role associated with this message. Required. Known values are: \"system\", \"user\",
+     \"assistant\", \"tool\", and \"developer\"."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        role: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class ChatRequestAssistantMessage(ChatRequestMessage, discriminator="assistant"):
+    """A request chat message representing response or action from the assistant.
+
+    :ivar role: The chat role associated with this message, which is always 'assistant' for
+     assistant messages. Required. The role that provides responses to system-instructed,
+     user-prompted input.
+    :vartype role: str or ~azure.ai.inference.models.ASSISTANT
+    :ivar content: The content of the message.
+    :vartype content: str
+    :ivar tool_calls: The tool calls that must be resolved and have their outputs appended to
+     subsequent input messages for the chat
+     completions request to resolve as configured.
+    :vartype tool_calls: list[~azure.ai.inference.models.ChatCompletionsToolCall]
+    """
+
+    role: Literal[ChatRole.ASSISTANT] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'assistant' for assistant messages.
+     Required. The role that provides responses to system-instructed, user-prompted input."""
+    content: Optional[str] = rest_field()
+    """The content of the message."""
+    tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = rest_field()
+    """The tool calls that must be resolved and have their outputs appended to subsequent input
+     messages for the chat
+     completions request to resolve as configured."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        content: Optional[str] = None,
+        tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, role=ChatRole.ASSISTANT, **kwargs)
+
+
+class ChatRequestDeveloperMessage(ChatRequestMessage, discriminator="developer"):
+    """A request chat message containing system instructions that influence how the model will
+    generate a chat completions
+    response. Some AI models support a developer message instead of a system message.
+
+    :ivar role: The chat role associated with this message, which is always 'developer' for
+     developer messages. Required. The role that instructs or sets the behavior of the assistant.
+     Some AI models support this role instead of the 'system' role.
+    :vartype role: str or ~azure.ai.inference.models.DEVELOPER
+    :ivar content: The contents of the developer message. Required.
+    :vartype content: str
+    """
+
+    role: Literal[ChatRole.DEVELOPER] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'developer' for developer messages.
+     Required. The role that instructs or sets the behavior of the assistant. Some AI models support
+     this role instead of the 'system' role."""
+    content: str = rest_field()
+    """The contents of the developer message. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        content: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, role=ChatRole.DEVELOPER, **kwargs)
+
+
+class ChatRequestSystemMessage(ChatRequestMessage, discriminator="system"):
+    """A request chat message containing system instructions that influence how the model will
+    generate a chat completions
+    response.
+
+    :ivar role: The chat role associated with this message, which is always 'system' for system
+     messages. Required. The role that instructs or sets the behavior of the assistant.
+    :vartype role: str or ~azure.ai.inference.models.SYSTEM
+    :ivar content: The contents of the system message. Required.
+    :vartype content: str
+    """
+
+    role: Literal[ChatRole.SYSTEM] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'system' for system messages.
+     Required. The role that instructs or sets the behavior of the assistant."""
+    content: str = rest_field()
+    """The contents of the system message. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        content: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, role=ChatRole.SYSTEM, **kwargs)
+
+
+class ChatRequestToolMessage(ChatRequestMessage, discriminator="tool"):
+    """A request chat message representing requested output from a configured tool.
+
+    :ivar role: The chat role associated with this message, which is always 'tool' for tool
+     messages. Required. The role that represents extension tool activity within a chat completions
+     operation.
+    :vartype role: str or ~azure.ai.inference.models.TOOL
+    :ivar content: The content of the message.
+    :vartype content: str
+    :ivar tool_call_id: The ID of the tool call resolved by the provided content. Required.
+    :vartype tool_call_id: str
+    """
+
+    role: Literal[ChatRole.TOOL] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'tool' for tool messages. Required.
+     The role that represents extension tool activity within a chat completions operation."""
+    content: Optional[str] = rest_field()
+    """The content of the message."""
+    tool_call_id: str = rest_field()
+    """The ID of the tool call resolved by the provided content. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        tool_call_id: str,
+        content: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, role=ChatRole.TOOL, **kwargs)
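A tool-call round trip ties several of these models together: the assistant turn carrying the model's `ChatCompletionsToolCall` is echoed back, followed by a `ChatRequestToolMessage` whose `tool_call_id` matches. A sketch with illustrative IDs and arguments:

```python
import json

from azure.ai.inference.models import (
    ChatCompletionsToolCall,
    ChatRequestAssistantMessage,
    ChatRequestToolMessage,
    FunctionCall,
)

# Suppose the model requested this call (values are illustrative):
call = ChatCompletionsToolCall(
    id="call_abc123",
    function=FunctionCall(name="get_weather", arguments='{"city": "Paris"}'),
)

# Validate arguments before invoking your own function (see FunctionCall docs):
args = json.loads(call.function.arguments)

# Append the assistant turn and the tool's answer to the conversation:
followup = [
    ChatRequestAssistantMessage(tool_calls=[call]),
    ChatRequestToolMessage(tool_call_id=call.id, content='{"temperature_c": 18}'),
]
```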
+
+
+class ChatRequestUserMessage(ChatRequestMessage, discriminator="user"):
+    """A request chat message representing user input to the assistant.
+
+    :ivar role: The chat role associated with this message, which is always 'user' for user
+     messages. Required. The role that provides input for chat completions.
+    :vartype role: str or ~azure.ai.inference.models.USER
+    :ivar content: The contents of the user message, with available input types varying by selected
+     model. Required. Is either a str or a list of ContentItem.
+    :vartype content: str or list[~azure.ai.inference.models.ContentItem]
+    """
+
+    role: Literal[ChatRole.USER] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'user' for user messages. Required.
+     The role that provides input for chat completions."""
+    content: Union["str", List["_models.ContentItem"]] = rest_field()
+    """The contents of the user message, with available input types varying by selected model.
+     Required. Is either a str or a list of ContentItem."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        content: Union[str, List["_models.ContentItem"]],
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, role=ChatRole.USER, **kwargs)
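Since `content` accepts either a string or a list of `ContentItem` parts, multimodal input composes naturally with `TextContentItem` and `ImageContentItem` (both defined in this file). A sketch with an illustrative image URL:

```python
from azure.ai.inference.models import (
    ChatRequestSystemMessage,
    ChatRequestUserMessage,
    ImageContentItem,
    ImageDetailLevel,
    ImageUrl,
    TextContentItem,
)

messages = [
    ChatRequestSystemMessage(content="You are a helpful assistant."),
    ChatRequestUserMessage(
        content=[
            TextContentItem(text="What is shown in this image?"),
            ImageContentItem(
                image_url=ImageUrl(
                    url="https://example.com/cat.png",  # illustrative URL
                    detail=ImageDetailLevel.LOW,
                )
            ),
        ]
    ),
]
```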
+
+
+class ChatResponseMessage(_model_base.Model):
+    """A representation of a chat message as received in a response.
+
+
+    :ivar role: The chat role associated with the message. Required. Known values are: "system",
+     "user", "assistant", "tool", and "developer".
+    :vartype role: str or ~azure.ai.inference.models.ChatRole
+    :ivar content: The content of the message. Required.
+    :vartype content: str
+    :ivar tool_calls: The tool calls that must be resolved and have their outputs appended to
+     subsequent input messages for the chat
+     completions request to resolve as configured.
+    :vartype tool_calls: list[~azure.ai.inference.models.ChatCompletionsToolCall]
+    """
+
+    role: Union[str, "_models.ChatRole"] = rest_field()
+    """The chat role associated with the message. Required. Known values are: \"system\", \"user\",
+     \"assistant\", \"tool\", and \"developer\"."""
+    content: str = rest_field()
+    """The content of the message. Required."""
+    tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = rest_field()
+    """The tool calls that must be resolved and have their outputs appended to subsequent input
+     messages for the chat
+     completions request to resolve as configured."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        role: Union[str, "_models.ChatRole"],
+        content: str,
+        tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class CompletionsUsage(_model_base.Model):
+    """Representation of the token counts processed for a completions request.
+    Counts consider all tokens across prompts, choices, choice alternates, best_of generations, and
+    other consumers.
+
+
+    :ivar completion_tokens: The number of tokens generated across all completions emissions.
+     Required.
+    :vartype completion_tokens: int
+    :ivar prompt_tokens: The number of tokens in the provided prompts for the completions request.
+     Required.
+    :vartype prompt_tokens: int
+    :ivar total_tokens: The total number of tokens processed for the completions request and
+     response. Required.
+    :vartype total_tokens: int
+    """
+
+    completion_tokens: int = rest_field()
+    """The number of tokens generated across all completions emissions. Required."""
+    prompt_tokens: int = rest_field()
+    """The number of tokens in the provided prompts for the completions request. Required."""
+    total_tokens: int = rest_field()
+    """The total number of tokens processed for the completions request and response. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        completion_tokens: int,
+        prompt_tokens: int,
+        total_tokens: int,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class EmbeddingItem(_model_base.Model):
+    """Representation of a single embeddings relatedness comparison.
+
+
+    :ivar embedding: List of embedding values for the input prompt. These represent a measurement
+     of the
+     vector-based relatedness of the provided input. Or a base64 encoded string of the embedding
+     vector. Required. Is either a str or a list of float.
+    :vartype embedding: str or list[float]
+    :ivar index: Index of the prompt to which the EmbeddingItem corresponds. Required.
+    :vartype index: int
+    """
+
+    embedding: Union["str", List[float]] = rest_field()
+    """List of embedding values for the input prompt. These represent a measurement of the
+     vector-based relatedness of the provided input. Or a base64 encoded string of the embedding
+     vector. Required. Is either a str or a list of float."""
+    index: int = rest_field()
+    """Index of the prompt to which the EmbeddingItem corresponds. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        embedding: Union[str, List[float]],
+        index: int,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
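Because `embedding` may arrive as a base64 string (when `EmbeddingEncodingFormat.BASE64` was requested) rather than a list of floats, callers need a small decode step. A hedged sketch that assumes a little-endian float32 payload:

```python
import base64
import struct
from typing import List


def decode_embedding(item) -> List[float]:
    """Return the vector from an EmbeddingItem, decoding base64 if needed."""
    if isinstance(item.embedding, str):
        raw = base64.b64decode(item.embedding)
        # Assumption: the packed payload is little-endian float32.
        return list(struct.unpack(f"<{len(raw) // 4}f", raw))
    return list(item.embedding)
```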
+
+
+class EmbeddingsResult(_model_base.Model):
+    """Representation of the response data from an embeddings request.
+    Embeddings measure the relatedness of text strings and are commonly used for search,
+    clustering,
+    recommendations, and other similar scenarios.
+
+
+    :ivar id: Unique identifier for the embeddings result. Required.
+    :vartype id: str
+    :ivar data: Embedding values for the prompts submitted in the request. Required.
+    :vartype data: list[~azure.ai.inference.models.EmbeddingItem]
+    :ivar usage: Usage counts for tokens input using the embeddings API. Required.
+    :vartype usage: ~azure.ai.inference.models.EmbeddingsUsage
+    :ivar model: The model ID used to generate this result. Required.
+    :vartype model: str
+    """
+
+    id: str = rest_field()
+    """Unique identifier for the embeddings result. Required."""
+    data: List["_models.EmbeddingItem"] = rest_field()
+    """Embedding values for the prompts submitted in the request. Required."""
+    usage: "_models.EmbeddingsUsage" = rest_field()
+    """Usage counts for tokens input using the embeddings API. Required."""
+    model: str = rest_field()
+    """The model ID used to generate this result. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        data: List["_models.EmbeddingItem"],
+        usage: "_models.EmbeddingsUsage",
+        model: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class EmbeddingsUsage(_model_base.Model):
+    """Measurement of the amount of tokens used in this request and response.
+
+
+    :ivar prompt_tokens: Number of tokens in the request. Required.
+    :vartype prompt_tokens: int
+    :ivar total_tokens: Total number of tokens transacted in this request/response. Should equal
+     the
+     number of tokens in the request. Required.
+    :vartype total_tokens: int
+    """
+
+    prompt_tokens: int = rest_field()
+    """Number of tokens in the request. Required."""
+    total_tokens: int = rest_field()
+    """Total number of tokens transacted in this request/response. Should equal the
+     number of tokens in the request. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        prompt_tokens: int,
+        total_tokens: int,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class FunctionCall(_model_base.Model):
+    """The name and arguments of a function that should be called, as generated by the model.
+
+
+    :ivar name: The name of the function to call. Required.
+    :vartype name: str
+    :ivar arguments: The arguments to call the function with, as generated by the model in JSON
+     format.
+     Note that the model does not always generate valid JSON, and may hallucinate parameters
+     not defined by your function schema. Validate the arguments in your code before calling
+     your function. Required.
+    :vartype arguments: str
+    """
+
+    name: str = rest_field()
+    """The name of the function to call. Required."""
+    arguments: str = rest_field()
+    """The arguments to call the function with, as generated by the model in JSON format.
+     Note that the model does not always generate valid JSON, and may hallucinate parameters
+     not defined by your function schema. Validate the arguments in your code before calling
+     your function. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: str,
+        arguments: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class FunctionDefinition(_model_base.Model):
+    """The definition of a caller-specified function that chat completions may invoke in response to
+    matching user input.
+
+    :ivar name: The name of the function to be called. Required.
+    :vartype name: str
+    :ivar description: A description of what the function does. The model will use this description
+     when selecting the function and
+     interpreting its parameters.
+    :vartype description: str
+    :ivar parameters: The parameters the function accepts, described as a JSON Schema object.
+    :vartype parameters: any
+    """
+
+    name: str = rest_field()
+    """The name of the function to be called. Required."""
+    description: Optional[str] = rest_field()
+    """A description of what the function does. The model will use this description when selecting the
+     function and
+     interpreting its parameters."""
+    parameters: Optional[Any] = rest_field()
+    """The parameters the function accepts, described as a JSON Schema object."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: str,
+        description: Optional[str] = None,
+        parameters: Optional[Any] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class ImageContentItem(ContentItem, discriminator="image_url"):
+    """A structured chat content item containing an image reference.
+
+    :ivar type: The discriminated object type: always 'image_url' for this type. Required. Default
+     value is "image_url".
+    :vartype type: str
+    :ivar image_url: An internet location, which must be accessible to the model, from which the
+     image may be retrieved. Required.
+    :vartype image_url: ~azure.ai.inference.models.ImageUrl
+    """
+
+    type: Literal["image_url"] = rest_discriminator(name="type")  # type: ignore
+    """The discriminated object type: always 'image_url' for this type. Required. Default value is
+     \"image_url\"."""
+    image_url: "_models.ImageUrl" = rest_field()
+    """An internet location, which must be accessible to the model,from which the image may be
+     retrieved. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        image_url: "_models.ImageUrl",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="image_url", **kwargs)
+
+
+class ImageEmbeddingInput(_model_base.Model):
+    """Represents an image with optional text.
+
+    :ivar image: The input image encoded in base64 string as a data URL. Example:
+     ``data:image/{format};base64,{data}``. Required.
+    :vartype image: str
+    :ivar text: Optional. The text input to feed into the model (like DINO, CLIP).
+     Returns a 422 error if the model doesn't support the value or parameter.
+    :vartype text: str
+    """
+
+    image: str = rest_field()
+    """The input image encoded in base64 string as a data URL. Example:
+     ``data:image/{format};base64,{data}``. Required."""
+    text: Optional[str] = rest_field()
+    """Optional. The text input to feed into the model (like DINO, CLIP).
+     Returns a 422 error if the model doesn't support the value or parameter."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        image: str,
+        text: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
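The `image` field wants a data URL, not a bare base64 string. A sketch of building one from a local file (the file name and MIME type are illustrative):

```python
import base64

from azure.ai.inference.models import ImageEmbeddingInput

with open("photo.png", "rb") as f:  # hypothetical file
    b64 = base64.b64encode(f.read()).decode("ascii")

embedding_input = ImageEmbeddingInput(
    image=f"data:image/png;base64,{b64}",  # data:image/{format};base64,{data}
    text="a photo of a cat",               # optional; 422 if unsupported
)
```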
+
+
+class ImageUrl(_model_base.Model):
+    """An internet location from which the model may retrieve an image.
+
+    :ivar url: The URL of the image. Required.
+    :vartype url: str
+    :ivar detail: The evaluation quality setting to use, which controls relative prioritization of
+     speed, token consumption, and
+     accuracy. Known values are: "auto", "low", and "high".
+    :vartype detail: str or ~azure.ai.inference.models.ImageDetailLevel
+    """
+
+    url: str = rest_field()
+    """The URL of the image. Required."""
+    detail: Optional[Union[str, "_models.ImageDetailLevel"]] = rest_field()
+    """The evaluation quality setting to use, which controls relative prioritization of speed, token
+     consumption, and
+     accuracy. Known values are: \"auto\", \"low\", and \"high\"."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        url: str,
+        detail: Optional[Union[str, "_models.ImageDetailLevel"]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class InputAudio(_model_base.Model):
+    """The details of an audio chat message content part.
+
+    :ivar data: Base64 encoded audio data. Required.
+    :vartype data: str
+    :ivar format: The audio format of the audio content. Required. Known values are: "wav" and
+     "mp3".
+    :vartype format: str or ~azure.ai.inference.models.AudioContentFormat
+    """
+
+    data: str = rest_field()
+    """Base64 encoded audio data. Required."""
+    format: Union[str, "_models.AudioContentFormat"] = rest_field()
+    """The audio format of the audio content. Required. Known values are: \"wav\" and \"mp3\"."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        data: str,
+        format: Union[str, "_models.AudioContentFormat"],
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class JsonSchemaFormat(_model_base.Model):
+    """Defines the response format for chat completions as JSON with a given schema.
+    The AI model will need to adhere to this schema when generating completions.
+
+    :ivar name: A name that labels this JSON schema. May contain only a-z, A-Z, 0-9, underscores,
+     and dashes, with a maximum length of 64. Required.
+    :vartype name: str
+    :ivar schema: The definition of the JSON schema. See
+     https://json-schema.org/overview/what-is-jsonschema.
+     Note that AI models usually only support a subset of the keywords defined by JSON schema.
+     Consult your AI model documentation to determine what is supported. Required.
+    :vartype schema: dict[str, any]
+    :ivar description: A description of the response format, used by the AI model to determine how
+     to generate responses in this format.
+    :vartype description: str
+    :ivar strict: If set to true, the service will error out if the provided JSON schema contains
+     keywords
+     not supported by the AI model. An example of such keyword may be ``maxLength`` for JSON type
+     ``string``.
+     If false, and the provided JSON schema contains keywords not supported by the AI model,
+     the AI model will not error out. Instead it will ignore the unsupported keywords.
+    :vartype strict: bool
+    """
+
+    name: str = rest_field()
+    """A name that labels this JSON schema. Must be a-z, A-Z, 0-9, or contain underscores and dashes,
+     with a maximum length of 64. Required."""
+    schema: Dict[str, Any] = rest_field()
+    """The definition of the JSON schema. See https://json-schema.org/overview/what-is-jsonschema.
+     Note that AI models usually only support a subset of the keywords defined by JSON schema.
+     Consult your AI model documentation to determine what is supported. Required."""
+    description: Optional[str] = rest_field()
+    """A description of the response format, used by the AI model to determine how to generate
+     responses in this format."""
+    strict: Optional[bool] = rest_field()
+    """If set to true, the service will error out if the provided JSON schema contains keywords
+     not supported by the AI model. An example of such keyword may be ``maxLength`` for JSON type
+     ``string``.
+     If false, and the provided JSON schema contains keywords not supported by the AI model,
+     the AI model will not error out. Instead it will ignore the unsupported keywords."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: str,
+        schema: Dict[str, Any],
+        description: Optional[str] = None,
+        strict: Optional[bool] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
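Pairing `JsonSchemaFormat` with `ChatCompletionsResponseFormatJsonSchema` above constrains responses to a caller-supplied schema. A sketch with an illustrative schema; per the `strict` docstring, `strict=True` makes unsupported schema keywords an error rather than being silently ignored:

```python
from azure.ai.inference.models import (
    ChatCompletionsResponseFormatJsonSchema,
    JsonSchemaFormat,
)

response_format = ChatCompletionsResponseFormatJsonSchema(
    json_schema=JsonSchemaFormat(
        name="weather_report",
        schema={
            "type": "object",
            "properties": {
                "city": {"type": "string"},
                "temperature_c": {"type": "number"},
            },
            "required": ["city", "temperature_c"],
        },
        description="A one-line weather report.",
        strict=True,
    )
)
```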
+
+
+class ModelInfo(_model_base.Model):
+    """Represents some basic information about the AI model.
+
+
+    :ivar model_name: The name of the AI model. For example: ``Phi21``. Required.
+    :vartype model_name: str
+    :ivar model_type: The type of the AI model. A unique identifier for the profile. Required.
+     Known values are: "embeddings", "image_generation", "text_generation", "image_embeddings",
+     "audio_generation", and "chat_completion".
+    :vartype model_type: str or ~azure.ai.inference.models.ModelType
+    :ivar model_provider_name: The model provider name. For example: ``Microsoft Research``.
+     Required.
+    :vartype model_provider_name: str
+    """
+
+    model_name: str = rest_field()
+    """The name of the AI model. For example: ``Phi21``. Required."""
+    model_type: Union[str, "_models.ModelType"] = rest_field()
+    """The type of the AI model. A Unique identifier for the profile. Required. Known values are:
+     \"embeddings\", \"image_generation\", \"text_generation\", \"image_embeddings\",
+     \"audio_generation\", and \"chat_completion\"."""
+    model_provider_name: str = rest_field()
+    """The model provider name. For example: ``Microsoft Research``. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        model_name: str,
+        model_type: Union[str, "_models.ModelType"],
+        model_provider_name: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
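+# Usage sketch (illustrative): a ModelInfo is typically obtained from the service rather than
+# constructed directly; `client` below is assumed to be a ChatCompletionsClient from this
+# package, and `get_model_info()` is assumed to be available on it.
+#
+#     info = client.get_model_info()
+#     print(info.model_name, info.model_type, info.model_provider_name)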
+
+class StreamingChatChoiceUpdate(_model_base.Model):
+    """Represents an update to a single prompt completion when the service is streaming updates
+    using Server Sent Events (SSE).
+    Generally, ``n`` choices are generated per provided prompt with a default value of 1.
+    Token limits and other settings may limit the number of choices generated.
+
+
+    :ivar index: The ordered index associated with this chat completions choice. Required.
+    :vartype index: int
+    :ivar finish_reason: The reason that this chat completions choice completed its generation.
+     Required. Known values are: "stop", "length", "content_filter", and "tool_calls".
+    :vartype finish_reason: str or ~azure.ai.inference.models.CompletionsFinishReason
+    :ivar delta: An update to the chat message for a given chat completions prompt. Required.
+    :vartype delta: ~azure.ai.inference.models.StreamingChatResponseMessageUpdate
+    """
+
+    index: int = rest_field()
+    """The ordered index associated with this chat completions choice. Required."""
+    finish_reason: Union[str, "_models.CompletionsFinishReason"] = rest_field()
+    """The reason that this chat completions choice completed its generated. Required. Known values
+     are: \"stop\", \"length\", \"content_filter\", and \"tool_calls\"."""
+    delta: "_models.StreamingChatResponseMessageUpdate" = rest_field()
+    """An update to the chat message for a given chat completions prompt. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        finish_reason: Union[str, "_models.CompletionsFinishReason"],
+        delta: "_models.StreamingChatResponseMessageUpdate",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class StreamingChatCompletionsUpdate(_model_base.Model):
+    """Represents a response update to a chat completions request, when the service is streaming
+    updates
+    using Server Sent Events (SSE).
+    Completions support a wide variety of tasks and generate text that continues from or
+    "completes"
+    provided prompt data.
+
+
+    :ivar id: A unique identifier associated with this chat completions response. Required.
+    :vartype id: str
+    :ivar created: The first timestamp associated with generation activity for this completions
+     response,
+     represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.
+    :vartype created: ~datetime.datetime
+    :ivar model: The model used for the chat completion. Required.
+    :vartype model: str
+    :ivar choices: An update to the collection of completion choices associated with this
+     completions response.
+     Generally, ``n`` choices are generated per provided prompt with a default value of 1.
+     Token limits and other settings may limit the number of choices generated. Required.
+    :vartype choices: list[~azure.ai.inference.models.StreamingChatChoiceUpdate]
+    :ivar usage: Usage information for tokens processed and generated as part of this completions
+     operation.
+    :vartype usage: ~azure.ai.inference.models.CompletionsUsage
+    """
+
+    id: str = rest_field()
+    """A unique identifier associated with this chat completions response. Required."""
+    created: datetime.datetime = rest_field(format="unix-timestamp")
+    """The first timestamp associated with generation activity for this completions response,
+     represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required."""
+    model: str = rest_field()
+    """The model used for the chat completion. Required."""
+    choices: List["_models.StreamingChatChoiceUpdate"] = rest_field()
+    """An update to the collection of completion choices associated with this completions response.
+     Generally, ``n`` choices are generated per provided prompt with a default value of 1.
+     Token limits and other settings may limit the number of choices generated. Required."""
+    usage: Optional["_models.CompletionsUsage"] = rest_field()
+    """Usage information for tokens processed and generated as part of this completions operation."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        created: datetime.datetime,
+        model: str,
+        choices: List["_models.StreamingChatChoiceUpdate"],
+        usage: Optional["_models.CompletionsUsage"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class StreamingChatResponseMessageUpdate(_model_base.Model):
+    """A representation of a chat message update as received in a streaming response.
+
+    :ivar role: The chat role associated with the message. If present, should always be
+     'assistant'. Known values are: "system", "user", "assistant", "tool", and "developer".
+    :vartype role: str or ~azure.ai.inference.models.ChatRole
+    :ivar content: The content of the message.
+    :vartype content: str
+    :ivar tool_calls: The tool calls that must be resolved and have their outputs appended to
+     subsequent input messages for the chat completions request to resolve as configured.
+    :vartype tool_calls: list[~azure.ai.inference.models.StreamingChatResponseToolCallUpdate]
+    """
+
+    role: Optional[Union[str, "_models.ChatRole"]] = rest_field()
+    """The chat role associated with the message. If present, should always be 'assistant'. Known
+     values are: \"system\", \"user\", \"assistant\", \"tool\", and \"developer\"."""
+    content: Optional[str] = rest_field()
+    """The content of the message."""
+    tool_calls: Optional[List["_models.StreamingChatResponseToolCallUpdate"]] = rest_field()
+    """The tool calls that must be resolved and have their outputs appended to subsequent input
+     messages for the chat
+     completions request to resolve as configured."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        role: Optional[Union[str, "_models.ChatRole"]] = None,
+        content: Optional[str] = None,
+        tool_calls: Optional[List["_models.StreamingChatResponseToolCallUpdate"]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class StreamingChatResponseToolCallUpdate(_model_base.Model):
+    """An update to the function tool call information requested by the AI model.
+
+
+    :ivar id: The ID of the tool call. Required.
+    :vartype id: str
+    :ivar function: Updates to the function call requested by the AI model. Required.
+    :vartype function: ~azure.ai.inference.models.FunctionCall
+    """
+
+    id: str = rest_field()
+    """The ID of the tool call. Required."""
+    function: "_models.FunctionCall" = rest_field()
+    """Updates to the function call requested by the AI model. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        function: "_models.FunctionCall",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class TextContentItem(ContentItem, discriminator="text"):
+    """A structured chat content item containing plain text.
+
+    :ivar type: The discriminated object type: always 'text' for this type. Required. Default value
+     is "text".
+    :vartype type: str
+    :ivar text: The content of the message. Required.
+    :vartype text: str
+    """
+
+    type: Literal["text"] = rest_discriminator(name="type")  # type: ignore
+    """The discriminated object type: always 'text' for this type. Required. Default value is
+     \"text\"."""
+    text: str = rest_field()
+    """The content of the message. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        text: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="text", **kwargs)
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/inference/models/_patch.py b/.venv/lib/python3.12/site-packages/azure/ai/inference/models/_patch.py
new file mode 100644
index 00000000..1bc06799
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/inference/models/_patch.py
@@ -0,0 +1,576 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+import base64
+import json
+import logging
+import queue
+import re
+import sys
+
+from typing import Mapping, Literal, Any, List, AsyncIterator, Iterator, Optional, Union, overload
+from azure.core.rest import HttpResponse, AsyncHttpResponse
+from ._enums import ChatRole
+from .._model_base import rest_discriminator, rest_field
+from ._models import ChatRequestMessage
+from ._models import ImageUrl as ImageUrlGenerated
+from ._models import ChatCompletions as ChatCompletionsGenerated
+from ._models import EmbeddingsResult as EmbeddingsResultGenerated
+from ._models import ImageEmbeddingInput as EmbeddingInputGenerated
+from ._models import InputAudio as InputAudioGenerated
+from .. import models as _models
+
+if sys.version_info >= (3, 11):
+    from typing import Self
+else:
+    from typing_extensions import Self
+
+logger = logging.getLogger(__name__)
+
+
+class UserMessage(ChatRequestMessage, discriminator="user"):
+    """A request chat message representing user input to the assistant.
+
+    :ivar role: The chat role associated with this message, which is always 'user' for user
+     messages. Required. The role that provides input for chat completions.
+    :vartype role: str or ~azure.ai.inference.models.USER
+    :ivar content: The contents of the user message, with available input types varying by selected
+     model. Required. Is either a str or a list of ContentItem.
+    :vartype content: str or list[~azure.ai.inference.models.ContentItem]
+    """
+
+    role: Literal[ChatRole.USER] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'user' for user messages. Required.
+     The role that provides input for chat completions."""
+    content: Union["str", List["_models.ContentItem"]] = rest_field()
+    """The contents of the user message, with available input types varying by selected model.
+     Required. Is either a str type or a [ContentItem] type."""
+
+    @overload
+    def __init__(
+        self,
+        content: Union[str, List["_models.ContentItem"]],
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        if len(args) == 1 and isinstance(args[0], (List, str)):
+            if kwargs.get("content") is not None:
+                raise ValueError("content cannot be provided as positional and keyword arguments")
+            kwargs["content"] = args[0]
+            args = tuple()
+        super().__init__(*args, role=ChatRole.USER, **kwargs)
+
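+# Usage sketch (illustrative): `content` may be passed positionally, either as a plain string or
+# as a list of content items defined in this package; the URL below is hypothetical.
+#
+#     simple = UserMessage("How many feet are in a mile?")
+#     multimodal = UserMessage(
+#         content=[
+#             TextContentItem(text="Describe this picture:"),
+#             ImageContentItem(image_url=ImageUrl(url="https://example.com/cat.png")),
+#         ]
+#     )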
+
+class SystemMessage(ChatRequestMessage, discriminator="system"):
+    """A request chat message containing system instructions that influence how the model will
+    generate a chat completions response.
+
+    :ivar role: The chat role associated with this message, which is always 'system' for system
+     messages. Required.
+    :vartype role: str or ~azure.ai.inference.models.SYSTEM
+    :ivar content: The contents of the system message. Required.
+    :vartype content: str
+    """
+
+    role: Literal[ChatRole.SYSTEM] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'system' for system messages.
+     Required."""
+    content: str = rest_field()
+    """The contents of the system message. Required."""
+
+    @overload
+    def __init__(
+        self,
+        content: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        if len(args) == 1 and isinstance(args[0], str):
+            if kwargs.get("content") is not None:
+                raise ValueError("content cannot be provided as positional and keyword arguments")
+            kwargs["content"] = args[0]
+            args = tuple()
+        super().__init__(*args, role=ChatRole.SYSTEM, **kwargs)
+
+
+class DeveloperMessage(ChatRequestMessage, discriminator="developer"):
+    """A request chat message containing developer instructions that influence how the model will
+    generate a chat completions response. Some AI models support developer messages instead
+    of system messages.
+
+    :ivar role: The chat role associated with this message, which is always 'developer' for developer
+     messages. Required.
+    :vartype role: str or ~azure.ai.inference.models.DEVELOPER
+    :ivar content: The contents of the developer message. Required.
+    :vartype content: str
+    """
+
+    role: Literal[ChatRole.DEVELOPER] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'developer' for developer messages.
+     Required."""
+    content: str = rest_field()
+    """The contents of the developer message. Required."""
+
+    @overload
+    def __init__(
+        self,
+        content: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        if len(args) == 1 and isinstance(args[0], str):
+            if kwargs.get("content") is not None:
+                raise ValueError("content cannot be provided as positional and keyword arguments")
+            kwargs["content"] = args[0]
+            args = tuple()
+        super().__init__(*args, role=ChatRole.DEVELOPER, **kwargs)
+
+
+class AssistantMessage(ChatRequestMessage, discriminator="assistant"):
+    """A request chat message representing response or action from the assistant.
+
+    :ivar role: The chat role associated with this message, which is always 'assistant' for
+     assistant messages. Required. The role that provides responses to system-instructed,
+     user-prompted input.
+    :vartype role: str or ~azure.ai.inference.models.ASSISTANT
+    :ivar content: The content of the message.
+    :vartype content: str
+    :ivar tool_calls: The tool calls that must be resolved and have their outputs appended to
+     subsequent input messages for the chat completions request to resolve as configured.
+    :vartype tool_calls: list[~azure.ai.inference.models.ChatCompletionsToolCall]
+    """
+
+    role: Literal[ChatRole.ASSISTANT] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'assistant' for assistant messages.
+     Required. The role that provides responses to system-instructed, user-prompted input."""
+    content: Optional[str] = rest_field()
+    """The content of the message."""
+    tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = rest_field()
+    """The tool calls that must be resolved and have their outputs appended to subsequent input
+     messages for the chat
+     completions request to resolve as configured."""
+
+    @overload
+    def __init__(
+        self,
+        content: Optional[str] = None,
+        *,
+        tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        if len(args) == 1 and isinstance(args[0], str):
+            if kwargs.get("content") is not None:
+                raise ValueError("content cannot be provided as positional and keyword arguments")
+            kwargs["content"] = args[0]
+            args = tuple()
+        super().__init__(*args, role=ChatRole.ASSISTANT, **kwargs)
+
+
+class ToolMessage(ChatRequestMessage, discriminator="tool"):
+    """A request chat message representing requested output from a configured tool.
+
+    :ivar role: The chat role associated with this message, which is always 'tool' for tool
+     messages. Required. The role that represents extension tool activity within a chat completions
+     operation.
+    :vartype role: str or ~azure.ai.inference.models.TOOL
+    :ivar content: The content of the message.
+    :vartype content: str
+    :ivar tool_call_id: The ID of the tool call resolved by the provided content. Required.
+    :vartype tool_call_id: str
+    """
+
+    role: Literal[ChatRole.TOOL] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'tool' for tool messages. Required.
+     The role that represents extension tool activity within a chat completions operation."""
+    content: Optional[str] = rest_field()
+    """The content of the message."""
+    tool_call_id: str = rest_field()
+    """The ID of the tool call resolved by the provided content. Required."""
+
+    @overload
+    def __init__(
+        self,
+        content: Optional[str] = None,
+        *,
+        tool_call_id: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        if len(args) == 1 and isinstance(args[0], str):
+            if kwargs.get("content") is not None:
+                raise ValueError("content cannot be provided as positional and keyword arguments")
+            kwargs["content"] = args[0]
+            args = tuple()
+        super().__init__(*args, role=ChatRole.TOOL, **kwargs)
+
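+# Usage sketch (illustrative): a tool-call round trip. The tool call id, function name, and
+# arguments below are hypothetical; FunctionCall and ChatCompletionsToolCall are the models
+# defined in this package.
+#
+#     assistant = AssistantMessage(
+#         tool_calls=[
+#             ChatCompletionsToolCall(
+#                 id="call_abc123",
+#                 function=FunctionCall(name="get_weather", arguments='{"city": "Paris"}'),
+#             )
+#         ]
+#     )
+#     tool_result = ToolMessage("22C and sunny", tool_call_id="call_abc123")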
+
+class ChatCompletions(ChatCompletionsGenerated):
+    """Representation of the response data from a chat completions request.
+    Completions support a wide variety of tasks and generate text that continues from or
+    "completes"
+    provided prompt data.
+
+
+    :ivar id: A unique identifier associated with this chat completions response. Required.
+    :vartype id: str
+    :ivar created: The first timestamp associated with generation activity for this completions
+     response,
+     represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.
+    :vartype created: ~datetime.datetime
+    :ivar model: The model used for the chat completion. Required.
+    :vartype model: str
+    :ivar usage: Usage information for tokens processed and generated as part of this completions
+     operation. Required.
+    :vartype usage: ~azure.ai.inference.models.CompletionsUsage
+    :ivar choices: The collection of completions choices associated with this completions response.
+     Generally, ``n`` choices are generated per provided prompt with a default value of 1.
+     Token limits and other settings may limit the number of choices generated. Required.
+    :vartype choices: list[~azure.ai.inference.models.ChatChoice]
+    """
+
+    def __str__(self) -> str:
+        # pylint: disable=client-method-name-no-double-underscore
+        return json.dumps(self.as_dict(), indent=2)
+
+
+class EmbeddingsResult(EmbeddingsResultGenerated):
+    """Representation of the response data from an embeddings request.
+    Embeddings measure the relatedness of text strings and are commonly used for search,
+    clustering,
+    recommendations, and other similar scenarios.
+
+
+    :ivar data: Embedding values for the prompts submitted in the request. Required.
+    :vartype data: list[~azure.ai.inference.models.EmbeddingItem]
+    :ivar usage: Usage counts for tokens input using the embeddings API. Required.
+    :vartype usage: ~azure.ai.inference.models.EmbeddingsUsage
+    :ivar model: The model ID used to generate this result. Required.
+    :vartype model: str
+    """
+
+    def __str__(self) -> str:
+        # pylint: disable=client-method-name-no-double-underscore
+        return json.dumps(self.as_dict(), indent=2)
+
+
+class ImageUrl(ImageUrlGenerated):
+
+    @classmethod
+    def load(
+        cls, *, image_file: str, image_format: str, detail: Optional[Union[str, "_models.ImageDetailLevel"]] = None
+    ) -> Self:
+        """
+        Create an ImageUrl object from a local image file. The method reads the image
+        file and encodes it as a base64 string, which together with the image format
+        is then used to format the JSON `url` value passed in the request payload.
+
+        :keyword image_file: The name of the local image file to load. Required.
+        :paramtype image_file: str
+        :keyword image_format: The MIME type format of the image. For example: "jpeg", "png". Required.
+        :paramtype image_format: str
+        :keyword detail: The evaluation quality setting to use, which controls relative prioritization of
+         speed, token consumption, and accuracy. Known values are: "auto", "low", and "high".
+        :paramtype detail: str or ~azure.ai.inference.models.ImageDetailLevel
+        :return: An ImageUrl object with the image data encoded as a base64 string.
+        :rtype: ~azure.ai.inference.models.ImageUrl
+        :raises FileNotFoundError: when the image file could not be opened.
+        """
+        with open(image_file, "rb") as f:
+            image_data = base64.b64encode(f.read()).decode("utf-8")
+        url = f"data:image/{image_format};base64,{image_data}"
+        return cls(url=url, detail=detail)
+
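+# Usage sketch (illustrative; "photo.png" is a hypothetical local file):
+#
+#     image_url = ImageUrl.load(image_file="photo.png", image_format="png", detail="low")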
+
+class ImageEmbeddingInput(EmbeddingInputGenerated):
+
+    @classmethod
+    def load(cls, *, image_file: str, image_format: str, text: Optional[str] = None) -> Self:
+        """
+        Create an ImageEmbeddingInput object from a local image file. The method reads the image
+        file and encodes it as a base64 string, which together with the image format
+        is then used to format the JSON `url` value passed in the request payload.
+
+        :keyword image_file: The name of the local image file to load. Required.
+        :paramtype image_file: str
+        :keyword image_format: The MIME type format of the image. For example: "jpeg", "png". Required.
+        :paramtype image_format: str
+        :keyword text: Optional. The text input to feed into the model (like DINO, CLIP).
+         Returns a 422 error if the model doesn't support the value or parameter.
+        :paramtype text: str
+        :return: An ImageEmbeddingInput object with the image data encoded as a base64 string.
+        :rtype: ~azure.ai.inference.models.ImageEmbeddingInput
+        :raises FileNotFoundError: when the image file could not be opened.
+        """
+        with open(image_file, "rb") as f:
+            image_data = base64.b64encode(f.read()).decode("utf-8")
+        image_uri = f"data:image/{image_format};base64,{image_data}"
+        return cls(image=image_uri, text=text)
+
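+# Usage sketch (illustrative; "photo.jpg" is a hypothetical local file):
+#
+#     embedding_input = ImageEmbeddingInput.load(
+#         image_file="photo.jpg", image_format="jpeg", text="a cat on a sofa"
+#     )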
+
+class BaseStreamingChatCompletions:
+    """A base class for the sync and async streaming chat completions responses, holding any common code
+    to deserializes the Server Sent Events (SSE) response stream into chat completions updates, each one
+    represented by a StreamingChatCompletionsUpdate object.
+    """
+
+    # Enable detailed logs of SSE parsing. For development only, should be `False` by default.
+    _ENABLE_CLASS_LOGS = False
+
+    # The prefix of each line in the SSE stream that contains a JSON string
+    # to deserialize into a StreamingChatCompletionsUpdate object
+    _SSE_DATA_EVENT_PREFIX = b"data: "
+
+    # The line indicating the end of the SSE stream
+    _SSE_DATA_EVENT_DONE = b"data: [DONE]"
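+    # Illustrative wire format this class parses (payload fields abridged):
+    #     data: {"id":"...","choices":[{"delta":{"content":"Hel"},...}]}\n
+    #     \n
+    #     data: [DONE]\n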
+
+    def __init__(self):
+        self._queue: "queue.Queue[_models.StreamingChatCompletionsUpdate]" = queue.Queue()
+        self._incomplete_line = b""
+        self._done = False  # Will be set to True when reading 'data: [DONE]' line
+
+    # See https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream
+    def _deserialize_and_add_to_queue(self, element: bytes) -> bool:
+
+        if self._ENABLE_CLASS_LOGS:
+            logger.debug("[Original element] %s", repr(element))
+
+        # Clear the queue of StreamingChatCompletionsUpdate before processing the next block
+        self._queue.queue.clear()
+
+        # Split the single input bytes object at new line characters, and get a list of bytes objects, each
+        # representing a single "line". The bytes object at the end of the list may be a partial "line" that
+        # does not contain a new line character at the end.
+        # Note 1: DO NOT try to use something like this here:
+        #   line_list: List[str] = re.split(r"(?<=\n)", element.decode("utf-8"))
+        #   to do full UTF8 decoding of the whole input bytes object, as the last line in the list may be partial, and
+        #   as such may contain a partial UTF8 Chinese character (for example). `decode("utf-8")` will raise an
+        #   exception for such a case. See GitHub issue https://github.com/Azure/azure-sdk-for-python/issues/39565
+        # Note 2: Consider future re-write and simplifications of this code by using:
+        #   `codecs.getincrementaldecoder("utf-8")`
+        line_list: List[bytes] = re.split(re.compile(b"(?<=\n)"), element)
+        for index, line in enumerate(line_list):
+
+            if self._ENABLE_CLASS_LOGS:
+                logger.debug("[Original line] %s", repr(line))
+
+            if index == 0:
+                line = self._incomplete_line + line
+                self._incomplete_line = b""
+
+            if index == len(line_list) - 1 and not line.endswith(b"\n"):
+                self._incomplete_line = line
+                return False
+
+            if self._ENABLE_CLASS_LOGS:
+                logger.debug("[Modified line] %s", repr(line))
+
+            if line == b"\n":  # Empty line, indicating flush output to client
+                continue
+
+            if not line.startswith(self._SSE_DATA_EVENT_PREFIX):
+                raise ValueError(f"SSE event not supported (line `{repr(line)}`)")
+
+            if line.startswith(self._SSE_DATA_EVENT_DONE):
+                if self._ENABLE_CLASS_LOGS:
+                    logger.debug("[Done]")
+                return True
+
+            # If you reached here, the line should contain `data: {...}\n`
+            # where the curly braces contain a valid JSON object.
+            # It is now safe to do UTF8 decoding of the line.
+            line_str = line.decode("utf-8")
+
+            # Deserialize it into a StreamingChatCompletionsUpdate object
+            # and add it to the queue.
+            # pylint: disable=W0212 # Access to a protected member _deserialize of a client class
+            update = _models.StreamingChatCompletionsUpdate._deserialize(
+                json.loads(line_str[len(self._SSE_DATA_EVENT_PREFIX) : -1]), []
+            )
+
+            # We skip any update that has a None or empty choices list, and does not have token usage info.
+            # (this is what OpenAI Python SDK does)
+            if update.choices or update.usage:
+                self._queue.put(update)
+
+            if self._ENABLE_CLASS_LOGS:
+                logger.debug("[Added to queue]")
+
+        return False
+
+
+class StreamingChatCompletions(BaseStreamingChatCompletions):
+    """Represents an interator over StreamingChatCompletionsUpdate objects. It can be used for either synchronous or
+    asynchronous iterations. The class deserializes the Server Sent Events (SSE) response stream
+    into chat completions updates, each one represented by a StreamingChatCompletionsUpdate object.
+    """
+
+    def __init__(self, response: HttpResponse):
+        super().__init__()
+        self._response = response
+        self._bytes_iterator: Iterator[bytes] = response.iter_bytes()
+
+    def __iter__(self) -> Any:
+        return self
+
+    def __next__(self) -> "_models.StreamingChatCompletionsUpdate":
+        while self._queue.empty() and not self._done:
+            self._done = self._read_next_block()
+        if self._queue.empty():
+            raise StopIteration
+        return self._queue.get()
+
+    def _read_next_block(self) -> bool:
+        if self._ENABLE_CLASS_LOGS:
+            logger.debug("[Reading next block]")
+        try:
+            element = self._bytes_iterator.__next__()
+        except StopIteration:
+            self.close()
+            return True
+        return self._deserialize_and_add_to_queue(element)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:  # type: ignore
+        self.close()
+
+    def close(self) -> None:
+        self._response.close()
+
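+# Usage sketch (illustrative): iterating a streamed chat completion. Assumes a synchronous
+# ChatCompletionsClient `client` whose `complete(..., stream=True)` call returns a
+# StreamingChatCompletions.
+#
+#     with client.complete(messages=[UserMessage("Tell me a story")], stream=True) as stream:
+#         for update in stream:
+#             if update.choices and update.choices[0].delta.content:
+#                 print(update.choices[0].delta.content, end="")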
+
+class AsyncStreamingChatCompletions(BaseStreamingChatCompletions):
+    """Represents an async interator over StreamingChatCompletionsUpdate objects.
+    It can be used for either synchronous or asynchronous iterations. The class
+    deserializes the Server Sent Events (SSE) response stream into chat
+    completions updates, each one represented by a StreamingChatCompletionsUpdate object.
+    """
+
+    def __init__(self, response: AsyncHttpResponse):
+        super().__init__()
+        self._response = response
+        self._bytes_iterator: AsyncIterator[bytes] = response.iter_bytes()
+
+    def __aiter__(self) -> Any:
+        return self
+
+    async def __anext__(self) -> "_models.StreamingChatCompletionsUpdate":
+        while self._queue.empty() and not self._done:
+            self._done = await self._read_next_block_async()
+        if self._queue.empty():
+            raise StopAsyncIteration
+        return self._queue.get()
+
+    async def _read_next_block_async(self) -> bool:
+        if self._ENABLE_CLASS_LOGS:
+            logger.debug("[Reading next block]")
+        try:
+            element = await self._bytes_iterator.__anext__()
+        except StopAsyncIteration:
+            await self.aclose()
+            return True
+        return self._deserialize_and_add_to_queue(element)
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:  # type: ignore
+        await self.aclose()
+
+    async def aclose(self) -> None:
+        await self._response.close()
+
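+# Usage sketch (illustrative): same as the synchronous case, but with an assumed async client
+# and `async for`.
+#
+#     stream = await client.complete(messages=[UserMessage("Tell me a story")], stream=True)
+#     async for update in stream:
+#         if update.choices and update.choices[0].delta.content:
+#             print(update.choices[0].delta.content, end="")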
+
+class InputAudio(InputAudioGenerated):
+
+    @classmethod
+    def load(
+        cls,
+        *,
+        audio_file: str,
+        audio_format: str,
+    ) -> Self:
+        """
+        Create an InputAudio object from a local audio file. The method reads the audio
+        file and encodes it as a base64 string, which together with the audio format
+        is then used to create the InputAudio object passed to the request payload.
+
+        :keyword audio_file: The name of the local audio file to load. Required.
+        :paramtype audio_file: str
+        :keyword audio_format: The MIME type format of the audio. For example: "wav", "mp3". Required.
+        :paramtype audio_format: str
+        :return: An InputAudio object with the audio data encoded as a base64 string.
+        :rtype: ~azure.ai.inference.models.InputAudio
+        :raises FileNotFoundError: when the audio file could not be opened.
+        """
+        with open(audio_file, "rb") as f:
+            audio_data = base64.b64encode(f.read()).decode("utf-8")
+        return cls(data=audio_data, format=audio_format)
+
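+# Usage sketch (illustrative; "question.mp3" is a hypothetical local file):
+#
+#     input_audio = InputAudio.load(audio_file="question.mp3", audio_format="mp3")
+#     message = UserMessage(content=[AudioContentItem(input_audio=input_audio)])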
+
+__all__: List[str] = [
+    "AssistantMessage",
+    "AsyncStreamingChatCompletions",
+    "ChatCompletions",
+    "ChatRequestMessage",
+    "EmbeddingsResult",
+    "ImageEmbeddingInput",
+    "ImageUrl",
+    "InputAudio",
+    "StreamingChatCompletions",
+    "SystemMessage",
+    "ToolMessage",
+    "UserMessage",
+    "DeveloperMessage",
+]  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """