Diffstat (limited to '.venv/lib/python3.12/site-packages/openai/cli')
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/__init__.py                  1
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/__init__.py             1
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/_main.py               16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/audio.py              108
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/chat/__init__.py       13
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/chat/completions.py   160
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/completions.py        173
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/files.py               80
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/image.py              139
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/models.py              45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_cli.py                    233
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_errors.py                  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_models.py                  17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_progress.py                59
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_tools/__init__.py           1
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_tools/_main.py             17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_tools/fine_tunes.py        63
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_tools/migrate.py          164
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_utils.py                   45
19 files changed, 1356 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/__init__.py b/.venv/lib/python3.12/site-packages/openai/cli/__init__.py
new file mode 100644
index 00000000..d453d5e1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/__init__.py
@@ -0,0 +1 @@
+from ._cli import main as main
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/__init__.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/__init__.py
new file mode 100644
index 00000000..56a0260a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/__init__.py
@@ -0,0 +1 @@
+from ._main import register_commands as register_commands
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/_main.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/_main.py
new file mode 100644
index 00000000..fe5a5e6f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/_main.py
@@ -0,0 +1,16 @@
+from __future__ import annotations
+
+from argparse import ArgumentParser
+
+from . import chat, audio, files, image, models, completions
+
+
+def register_commands(parser: ArgumentParser) -> None:
+    subparsers = parser.add_subparsers(help="All API subcommands")
+
+    chat.register(subparsers)
+    image.register(subparsers)
+    audio.register(subparsers)
+    files.register(subparsers)
+    models.register(subparsers)
+    completions.register(subparsers)
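A quick sketch of how these registrations are consumed downstream: each subcommand stores its handler via set_defaults(func=...), so after parsing, dispatch is just a call to parsed.func. This is a minimal standalone illustration, not the full wiring in _cli.py below (which also handles args_model and client setup); it assumes OPENAI_API_KEY is set for the models.list call to succeed.

    # Minimal dispatch sketch: register the API subcommands on a bare parser
    # and invoke one handler directly. models.list sets no args_model, so
    # parsed.func() can be called with no arguments.
    from argparse import ArgumentParser

    from openai.cli._api import register_commands

    parser = ArgumentParser(prog="demo")
    register_commands(parser)
    parsed = parser.parse_args(["models.list"])
    parsed.func()  # dispatches to CLIModels.list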
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/audio.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/audio.py
new file mode 100644
index 00000000..269c67df
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/audio.py
@@ -0,0 +1,108 @@
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING, Any, Optional, cast
+from argparse import ArgumentParser
+
+from .._utils import get_client, print_model
+from ..._types import NOT_GIVEN
+from .._models import BaseModel
+from .._progress import BufferReader
+from ...types.audio import Transcription, Translation
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    # transcriptions
+    sub = subparser.add_parser("audio.transcriptions.create")
+
+    # Required
+    sub.add_argument("-m", "--model", type=str, default="whisper-1")
+    sub.add_argument("-f", "--file", type=str, required=True)
+    # Optional
+    sub.add_argument("--response-format", type=str)
+    sub.add_argument("--language", type=str)
+    sub.add_argument("-t", "--temperature", type=float)
+    sub.add_argument("--prompt", type=str)
+    sub.set_defaults(func=CLIAudio.transcribe, args_model=CLITranscribeArgs)
+
+    # translations
+    sub = subparser.add_parser("audio.translations.create")
+
+    # Required
+    sub.add_argument("-f", "--file", type=str, required=True)
+    # Optional
+    sub.add_argument("-m", "--model", type=str, default="whisper-1")
+    sub.add_argument("--response-format", type=str)
+    # TODO: doesn't seem to be supported by the API
+    # sub.add_argument("--language", type=str)
+    sub.add_argument("-t", "--temperature", type=float)
+    sub.add_argument("--prompt", type=str)
+    sub.set_defaults(func=CLIAudio.translate, args_model=CLITranslationArgs)
+
+
+class CLITranscribeArgs(BaseModel):
+    model: str
+    file: str
+    response_format: Optional[str] = None
+    language: Optional[str] = None
+    temperature: Optional[float] = None
+    prompt: Optional[str] = None
+
+
+class CLITranslationArgs(BaseModel):
+    model: str
+    file: str
+    response_format: Optional[str] = None
+    language: Optional[str] = None
+    temperature: Optional[float] = None
+    prompt: Optional[str] = None
+
+
+class CLIAudio:
+    @staticmethod
+    def transcribe(args: CLITranscribeArgs) -> None:
+        with open(args.file, "rb") as file_reader:
+            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
+
+        model = cast(
+            "Transcription | str",
+            get_client().audio.transcriptions.create(
+                file=(args.file, buffer_reader),
+                model=args.model,
+                language=args.language or NOT_GIVEN,
+                temperature=args.temperature or NOT_GIVEN,
+                prompt=args.prompt or NOT_GIVEN,
+                # casts required because the API is typed for enums
+                # but we don't want to validate that here for forwards-compat
+                response_format=cast(Any, args.response_format),
+            ),
+        )
+        if isinstance(model, str):
+            sys.stdout.write(model + "\n")
+        else:
+            print_model(model)
+
+    @staticmethod
+    def translate(args: CLITranslationArgs) -> None:
+        with open(args.file, "rb") as file_reader:
+            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
+
+        model = cast(
+            "Translation | str",
+            get_client().audio.translations.create(
+                file=(args.file, buffer_reader),
+                model=args.model,
+                temperature=args.temperature or NOT_GIVEN,
+                prompt=args.prompt or NOT_GIVEN,
+                # casts required because the API is typed for enums
+                # but we don't want to validate that here for forwards-compat
+                response_format=cast(Any, args.response_format),
+            ),
+        )
+        if isinstance(model, str):
+            sys.stdout.write(model + "\n")
+        else:
+            print_model(model)
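Note that the optional floats are forwarded as `args.temperature or NOT_GIVEN`, so an explicit `--temperature 0` is also treated as not given. A hedged sketch of driving the transcription subcommand programmatically (the filename is hypothetical; OPENAI_API_KEY must be set):

    # Invoke the CLI entry point with a synthetic argv; equivalent to running
    # `openai api audio.transcriptions.create -f speech.mp3 -m whisper-1`.
    import sys

    from openai.cli import main

    sys.argv = ["openai", "api", "audio.transcriptions.create", "-f", "speech.mp3", "-m", "whisper-1"]
    sys.exit(main())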
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/__init__.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/__init__.py
new file mode 100644
index 00000000..87d97163
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/__init__.py
@@ -0,0 +1,13 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from argparse import ArgumentParser
+
+from . import completions
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    completions.register(subparser)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/completions.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/completions.py
new file mode 100644
index 00000000..344eeff3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/completions.py
@@ -0,0 +1,160 @@
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING, List, Optional, cast
+from argparse import ArgumentParser
+from typing_extensions import Literal, NamedTuple
+
+from ..._utils import get_client
+from ..._models import BaseModel
+from ...._streaming import Stream
+from ....types.chat import (
+    ChatCompletionRole,
+    ChatCompletionChunk,
+    CompletionCreateParams,
+)
+from ....types.chat.completion_create_params import (
+    CompletionCreateParamsStreaming,
+    CompletionCreateParamsNonStreaming,
+)
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("chat.completions.create")
+
+    sub._action_groups.pop()
+    req = sub.add_argument_group("required arguments")
+    opt = sub.add_argument_group("optional arguments")
+
+    req.add_argument(
+        "-g",
+        "--message",
+        action="append",
+        nargs=2,
+        metavar=("ROLE", "CONTENT"),
+        help="A message in `{role} {content}` format. Use this argument multiple times to add multiple messages.",
+        required=True,
+    )
+    req.add_argument(
+        "-m",
+        "--model",
+        help="The model to use.",
+        required=True,
+    )
+
+    opt.add_argument(
+        "-n",
+        "--n",
+        help="How many completions to generate for the conversation.",
+        type=int,
+    )
+    opt.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate.", type=int)
+    opt.add_argument(
+        "-t",
+        "--temperature",
+        help="""What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+
+Mutually exclusive with `top_p`.""",
+        type=float,
+    )
+    opt.add_argument(
+        "-P",
+        "--top_p",
+        help="""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
+
+            Mutually exclusive with `temperature`.""",
+        type=float,
+    )
+    opt.add_argument(
+        "--stop",
+        help="A stop sequence at which to stop generating tokens for the message.",
+    )
+    opt.add_argument("--stream", help="Stream messages as they're ready.", action="store_true")
+    sub.set_defaults(func=CLIChatCompletion.create, args_model=CLIChatCompletionCreateArgs)
+
+
+class CLIMessage(NamedTuple):
+    role: ChatCompletionRole
+    content: str
+
+
+class CLIChatCompletionCreateArgs(BaseModel):
+    message: List[CLIMessage]
+    model: str
+    n: Optional[int] = None
+    max_tokens: Optional[int] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    stop: Optional[str] = None
+    stream: bool = False
+
+
+class CLIChatCompletion:
+    @staticmethod
+    def create(args: CLIChatCompletionCreateArgs) -> None:
+        params: CompletionCreateParams = {
+            "model": args.model,
+            "messages": [
+                {"role": cast(Literal["user"], message.role), "content": message.content} for message in args.message
+            ],
+            # type checkers are not good at inferring union types so we have to set stream afterwards
+            "stream": False,
+        }
+        if args.temperature is not None:
+            params["temperature"] = args.temperature
+        if args.stop is not None:
+            params["stop"] = args.stop
+        if args.top_p is not None:
+            params["top_p"] = args.top_p
+        if args.n is not None:
+            params["n"] = args.n
+        if args.stream:
+            params["stream"] = args.stream  # type: ignore
+        if args.max_tokens is not None:
+            params["max_tokens"] = args.max_tokens
+
+        if args.stream:
+            return CLIChatCompletion._stream_create(cast(CompletionCreateParamsStreaming, params))
+
+        return CLIChatCompletion._create(cast(CompletionCreateParamsNonStreaming, params))
+
+    @staticmethod
+    def _create(params: CompletionCreateParamsNonStreaming) -> None:
+        completion = get_client().chat.completions.create(**params)
+        should_print_header = len(completion.choices) > 1
+        for choice in completion.choices:
+            if should_print_header:
+                sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index))
+
+            content = choice.message.content if choice.message.content is not None else "None"
+            sys.stdout.write(content)
+
+            if should_print_header or not content.endswith("\n"):
+                sys.stdout.write("\n")
+
+            sys.stdout.flush()
+
+    @staticmethod
+    def _stream_create(params: CompletionCreateParamsStreaming) -> None:
+        # cast is required for mypy
+        stream = cast(  # pyright: ignore[reportUnnecessaryCast]
+            Stream[ChatCompletionChunk], get_client().chat.completions.create(**params)
+        )
+        for chunk in stream:
+            should_print_header = len(chunk.choices) > 1
+            for choice in chunk.choices:
+                if should_print_header:
+                    sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index))
+
+                content = choice.delta.content or ""
+                sys.stdout.write(content)
+
+                if should_print_header:
+                    sys.stdout.write("\n")
+
+                sys.stdout.flush()
+
+        sys.stdout.write("\n")
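For reference, a minimal sketch of the direct SDK call the streaming branch above reduces to (the model name is illustrative; assumes OPENAI_API_KEY is set):

    # Mirrors CLIChatCompletion._stream_create: iterate chunks and print each
    # choice's delta content as it arrives.
    from openai import OpenAI

    client = OpenAI()
    stream = client.chat.completions.create(
        model="gpt-4o-mini",  # illustrative
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )
    for chunk in stream:
        for choice in chunk.choices:
            print(choice.delta.content or "", end="")
    print()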
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/completions.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/completions.py
new file mode 100644
index 00000000..cbdb35bf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/completions.py
@@ -0,0 +1,173 @@
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING, Optional, cast
+from argparse import ArgumentParser
+from functools import partial
+
+from openai.types.completion import Completion
+
+from .._utils import get_client
+from ..._types import NOT_GIVEN, NotGivenOr
+from ..._utils import is_given
+from .._errors import CLIError
+from .._models import BaseModel
+from ..._streaming import Stream
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("completions.create")
+
+    # Required
+    sub.add_argument(
+        "-m",
+        "--model",
+        help="The model to use",
+        required=True,
+    )
+
+    # Optional
+    sub.add_argument("-p", "--prompt", help="An optional prompt to complete from")
+    sub.add_argument("--stream", help="Stream tokens as they're ready.", action="store_true")
+    sub.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate", type=int)
+    sub.add_argument(
+        "-t",
+        "--temperature",
+        help="""What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+
+Mutually exclusive with `top_p`.""",
+        type=float,
+    )
+    sub.add_argument(
+        "-P",
+        "--top_p",
+        help="""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
+
+            Mutually exclusive with `temperature`.""",
+        type=float,
+    )
+    sub.add_argument(
+        "-n",
+        "--n",
+        help="How many sub-completions to generate for each prompt.",
+        type=int,
+    )
+    sub.add_argument(
+        "--logprobs",
+        help="Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.",
+        type=int,
+    )
+    sub.add_argument(
+        "--best_of",
+        help="Generates `best_of` completions server-side and returns the 'best' (the one with the highest log probability per token). Results cannot be streamed.",
+        type=int,
+    )
+    sub.add_argument(
+        "--echo",
+        help="Echo back the prompt in addition to the completion",
+        action="store_true",
+    )
+    sub.add_argument(
+        "--frequency_penalty",
+        help="Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
+        type=float,
+    )
+    sub.add_argument(
+        "--presence_penalty",
+        help="Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
+        type=float,
+    )
+    sub.add_argument("--suffix", help="The suffix that comes after a completion of inserted text.")
+    sub.add_argument("--stop", help="A stop sequence at which to stop generating tokens.")
+    sub.add_argument(
+        "--user",
+        help="A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.",
+    )
+    # TODO: add support for logit_bias
+    sub.set_defaults(func=CLICompletions.create, args_model=CLICompletionCreateArgs)
+
+
+class CLICompletionCreateArgs(BaseModel):
+    model: str
+    stream: bool = False
+
+    prompt: Optional[str] = None
+    n: NotGivenOr[int] = NOT_GIVEN
+    stop: NotGivenOr[str] = NOT_GIVEN
+    user: NotGivenOr[str] = NOT_GIVEN
+    echo: NotGivenOr[bool] = NOT_GIVEN
+    suffix: NotGivenOr[str] = NOT_GIVEN
+    best_of: NotGivenOr[int] = NOT_GIVEN
+    top_p: NotGivenOr[float] = NOT_GIVEN
+    logprobs: NotGivenOr[int] = NOT_GIVEN
+    max_tokens: NotGivenOr[int] = NOT_GIVEN
+    temperature: NotGivenOr[float] = NOT_GIVEN
+    presence_penalty: NotGivenOr[float] = NOT_GIVEN
+    frequency_penalty: NotGivenOr[float] = NOT_GIVEN
+
+
+class CLICompletions:
+    @staticmethod
+    def create(args: CLICompletionCreateArgs) -> None:
+        if is_given(args.n) and args.n > 1 and args.stream:
+            raise CLIError("Can't stream completions with n>1 with the current CLI")
+
+        make_request = partial(
+            get_client().completions.create,
+            n=args.n,
+            echo=args.echo,
+            stop=args.stop,
+            user=args.user,
+            model=args.model,
+            top_p=args.top_p,
+            prompt=args.prompt,
+            suffix=args.suffix,
+            best_of=args.best_of,
+            logprobs=args.logprobs,
+            max_tokens=args.max_tokens,
+            temperature=args.temperature,
+            presence_penalty=args.presence_penalty,
+            frequency_penalty=args.frequency_penalty,
+        )
+
+        if args.stream:
+            return CLICompletions._stream_create(
+                # mypy doesn't understand the `partial` function but pyright does
+                cast(Stream[Completion], make_request(stream=True))  # pyright: ignore[reportUnnecessaryCast]
+            )
+
+        return CLICompletions._create(make_request())
+
+    @staticmethod
+    def _create(completion: Completion) -> None:
+        should_print_header = len(completion.choices) > 1
+        for choice in completion.choices:
+            if should_print_header:
+                sys.stdout.write("===== Completion {} =====\n".format(choice.index))
+
+            sys.stdout.write(choice.text)
+
+            if should_print_header or not choice.text.endswith("\n"):
+                sys.stdout.write("\n")
+
+            sys.stdout.flush()
+
+    @staticmethod
+    def _stream_create(stream: Stream[Completion]) -> None:
+        for completion in stream:
+            should_print_header = len(completion.choices) > 1
+            for choice in sorted(completion.choices, key=lambda c: c.index):
+                if should_print_header:
+                    sys.stdout.write("===== Completion {} =====\n".format(choice.index))
+
+                sys.stdout.write(choice.text)
+
+                if should_print_header:
+                    sys.stdout.write("\n")
+
+                sys.stdout.flush()
+
+        sys.stdout.write("\n")
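The NotGivenOr/NOT_GIVEN defaults above let functools.partial forward every field unconditionally: the client strips NOT_GIVEN parameters from the request body, so only flags the user actually passed reach the API. A small sketch of the same pattern (model name illustrative; NOT_GIVEN imported from the same internal module the CLI code uses):

    # NOT_GIVEN is a sentinel distinct from None: None is a meaningful JSON
    # value, while NOT_GIVEN means "omit this key from the request entirely".
    from functools import partial

    from openai import OpenAI
    from openai._types import NOT_GIVEN

    make_request = partial(
        OpenAI().completions.create,
        model="gpt-3.5-turbo-instruct",  # illustrative
        prompt="Say hello",
        temperature=NOT_GIVEN,  # never serialized into the request
    )
    completion = make_request()
    print(completion.choices[0].text)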
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/files.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/files.py
new file mode 100644
index 00000000..5f3631b2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/files.py
@@ -0,0 +1,80 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, cast
+from argparse import ArgumentParser
+
+from .._utils import get_client, print_model
+from .._models import BaseModel
+from .._progress import BufferReader
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("files.create")
+
+    sub.add_argument(
+        "-f",
+        "--file",
+        required=True,
+        help="File to upload",
+    )
+    sub.add_argument(
+        "-p",
+        "--purpose",
+        help="Why are you uploading this file? (see https://platform.openai.com/docs/api-reference/ for purposes)",
+        required=True,
+    )
+    sub.set_defaults(func=CLIFile.create, args_model=CLIFileCreateArgs)
+
+    sub = subparser.add_parser("files.retrieve")
+    sub.add_argument("-i", "--id", required=True, help="The file ID")
+    sub.set_defaults(func=CLIFile.get, args_model=CLIFileIDArgs)
+
+    sub = subparser.add_parser("files.delete")
+    sub.add_argument("-i", "--id", required=True, help="The file ID")
+    sub.set_defaults(func=CLIFile.delete, args_model=CLIFileIDArgs)
+
+    sub = subparser.add_parser("files.list")
+    sub.set_defaults(func=CLIFile.list)
+
+
+class CLIFileIDArgs(BaseModel):
+    id: str
+
+
+class CLIFileCreateArgs(BaseModel):
+    file: str
+    purpose: str
+
+
+class CLIFile:
+    @staticmethod
+    def create(args: CLIFileCreateArgs) -> None:
+        with open(args.file, "rb") as file_reader:
+            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
+
+        file = get_client().files.create(
+            file=(args.file, buffer_reader),
+            # casts required because the API is typed for enums
+            # but we don't want to validate that here for forwards-compat
+            purpose=cast(Any, args.purpose),
+        )
+        print_model(file)
+
+    @staticmethod
+    def get(args: CLIFileIDArgs) -> None:
+        file = get_client().files.retrieve(file_id=args.id)
+        print_model(file)
+
+    @staticmethod
+    def delete(args: CLIFileIDArgs) -> None:
+        file = get_client().files.delete(file_id=args.id)
+        print_model(file)
+
+    @staticmethod
+    def list() -> None:
+        files = get_client().files.list()
+        for file in files:
+            print_model(file)
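A hedged sketch of the upload pattern CLIFile.create uses: the file is read fully into a BufferReader (so a progress bar can track consumption) and passed as a (filename, fileobj) tuple. The path is hypothetical; "fine-tune" is one valid purpose:

    from openai import OpenAI
    from openai.cli._progress import BufferReader

    path = "training.jsonl"  # hypothetical local file
    with open(path, "rb") as f:
        buffer_reader = BufferReader(f.read(), desc="Upload progress")

    uploaded = OpenAI().files.create(file=(path, buffer_reader), purpose="fine-tune")
    print(uploaded.id)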
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/image.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/image.py
new file mode 100644
index 00000000..3e2a0a90
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/image.py
@@ -0,0 +1,139 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, cast
+from argparse import ArgumentParser
+
+from .._utils import get_client, print_model
+from ..._types import NOT_GIVEN, NotGiven, NotGivenOr
+from .._models import BaseModel
+from .._progress import BufferReader
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("images.generate")
+    sub.add_argument("-m", "--model", type=str)
+    sub.add_argument("-p", "--prompt", type=str, required=True)
+    sub.add_argument("-n", "--num-images", type=int, default=1)
+    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
+    sub.add_argument("--response-format", type=str, default="url")
+    sub.set_defaults(func=CLIImage.create, args_model=CLIImageCreateArgs)
+
+    sub = subparser.add_parser("images.edit")
+    sub.add_argument("-m", "--model", type=str)
+    sub.add_argument("-p", "--prompt", type=str, required=True)
+    sub.add_argument("-n", "--num-images", type=int, default=1)
+    sub.add_argument(
+        "-I",
+        "--image",
+        type=str,
+        required=True,
+        help="Image to modify. Should be a local path to a PNG-encoded image.",
+    )
+    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
+    sub.add_argument("--response-format", type=str, default="url")
+    sub.add_argument(
+        "-M",
+        "--mask",
+        type=str,
+        required=False,
+        help="Path to a mask image. It should be the same size as the image you're editing and an RGBA-encoded PNG. The alpha channel acts as the mask.",
+    )
+    sub.set_defaults(func=CLIImage.edit, args_model=CLIImageEditArgs)
+
+    sub = subparser.add_parser("images.create_variation")
+    sub.add_argument("-m", "--model", type=str)
+    sub.add_argument("-n", "--num-images", type=int, default=1)
+    sub.add_argument(
+        "-I",
+        "--image",
+        type=str,
+        required=True,
+        help="Image to modify. Should be a local path to a PNG-encoded image.",
+    )
+    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
+    sub.add_argument("--response-format", type=str, default="url")
+    sub.set_defaults(func=CLIImage.create_variation, args_model=CLIImageCreateVariationArgs)
+
+
+class CLIImageCreateArgs(BaseModel):
+    prompt: str
+    num_images: int
+    size: str
+    response_format: str
+    model: NotGivenOr[str] = NOT_GIVEN
+
+
+class CLIImageCreateVariationArgs(BaseModel):
+    image: str
+    num_images: int
+    size: str
+    response_format: str
+    model: NotGivenOr[str] = NOT_GIVEN
+
+
+class CLIImageEditArgs(BaseModel):
+    image: str
+    num_images: int
+    size: str
+    response_format: str
+    prompt: str
+    mask: NotGivenOr[str] = NOT_GIVEN
+    model: NotGivenOr[str] = NOT_GIVEN
+
+
+class CLIImage:
+    @staticmethod
+    def create(args: CLIImageCreateArgs) -> None:
+        image = get_client().images.generate(
+            model=args.model,
+            prompt=args.prompt,
+            n=args.num_images,
+            # casts required because the API is typed for enums
+            # but we don't want to validate that here for forwards-compat
+            size=cast(Any, args.size),
+            response_format=cast(Any, args.response_format),
+        )
+        print_model(image)
+
+    @staticmethod
+    def create_variation(args: CLIImageCreateVariationArgs) -> None:
+        with open(args.image, "rb") as file_reader:
+            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
+
+        image = get_client().images.create_variation(
+            model=args.model,
+            image=("image", buffer_reader),
+            n=args.num_images,
+            # casts required because the API is typed for enums
+            # but we don't want to validate that here for forwards-compat
+            size=cast(Any, args.size),
+            response_format=cast(Any, args.response_format),
+        )
+        print_model(image)
+
+    @staticmethod
+    def edit(args: CLIImageEditArgs) -> None:
+        with open(args.image, "rb") as file_reader:
+            buffer_reader = BufferReader(file_reader.read(), desc="Image upload progress")
+
+        if isinstance(args.mask, NotGiven):
+            mask: NotGivenOr[BufferReader] = NOT_GIVEN
+        else:
+            with open(args.mask, "rb") as file_reader:
+                mask = BufferReader(file_reader.read(), desc="Mask progress")
+
+        image = get_client().images.edit(
+            model=args.model,
+            prompt=args.prompt,
+            image=("image", buffer_reader),
+            n=args.num_images,
+            mask=("mask", mask) if not isinstance(mask, NotGiven) else mask,
+            # casts required because the API is typed for enums
+            # but we don't want to validate that here for forwards-compat
+            size=cast(Any, args.size),
+            response_format=cast(Any, args.response_format),
+        )
+        print_model(image)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/models.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/models.py
new file mode 100644
index 00000000..017218fa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/models.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from argparse import ArgumentParser
+
+from .._utils import get_client, print_model
+from .._models import BaseModel
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("models.list")
+    sub.set_defaults(func=CLIModels.list)
+
+    sub = subparser.add_parser("models.retrieve")
+    sub.add_argument("-i", "--id", required=True, help="The model ID")
+    sub.set_defaults(func=CLIModels.get, args_model=CLIModelIDArgs)
+
+    sub = subparser.add_parser("models.delete")
+    sub.add_argument("-i", "--id", required=True, help="The model ID")
+    sub.set_defaults(func=CLIModels.delete, args_model=CLIModelIDArgs)
+
+
+class CLIModelIDArgs(BaseModel):
+    id: str
+
+
+class CLIModels:
+    @staticmethod
+    def get(args: CLIModelIDArgs) -> None:
+        model = get_client().models.retrieve(model=args.id)
+        print_model(model)
+
+    @staticmethod
+    def delete(args: CLIModelIDArgs) -> None:
+        model = get_client().models.delete(model=args.id)
+        print_model(model)
+
+    @staticmethod
+    def list() -> None:
+        models = get_client().models.list()
+        for model in models:
+            print_model(model)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_cli.py b/.venv/lib/python3.12/site-packages/openai/cli/_cli.py
new file mode 100644
index 00000000..fd165f48
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_cli.py
@@ -0,0 +1,233 @@
+from __future__ import annotations
+
+import sys
+import logging
+import argparse
+from typing import Any, List, Type, Optional
+from typing_extensions import ClassVar
+
+import httpx
+import pydantic
+
+import openai
+
+from . import _tools
+from .. import _ApiType, __version__
+from ._api import register_commands
+from ._utils import can_use_http2
+from ._errors import CLIError, display_error
+from .._compat import PYDANTIC_V2, ConfigDict, model_parse
+from .._models import BaseModel
+from .._exceptions import APIError
+
+logger = logging.getLogger()
+formatter = logging.Formatter("[%(asctime)s] %(message)s")
+handler = logging.StreamHandler(sys.stderr)
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+
+
+class Arguments(BaseModel):
+    if PYDANTIC_V2:
+        model_config: ClassVar[ConfigDict] = ConfigDict(
+            extra="ignore",
+        )
+    else:
+
+        class Config(pydantic.BaseConfig):  # type: ignore
+            extra: Any = pydantic.Extra.ignore  # type: ignore
+
+    verbosity: int
+    version: Optional[str] = None
+
+    api_key: Optional[str]
+    api_base: Optional[str]
+    organization: Optional[str]
+    proxy: Optional[List[str]]
+    api_type: Optional[_ApiType] = None
+    api_version: Optional[str] = None
+
+    # azure
+    azure_endpoint: Optional[str] = None
+    azure_ad_token: Optional[str] = None
+
+    # internal, set by subparsers to parse their specific args
+    args_model: Optional[Type[BaseModel]] = None
+
+    # internal, used so that subparsers can forward unknown arguments
+    unknown_args: List[str] = []
+    allow_unknown_args: bool = False
+
+
+def _build_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(description=None, prog="openai")
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="count",
+        dest="verbosity",
+        default=0,
+        help="Set verbosity.",
+    )
+    parser.add_argument("-b", "--api-base", help="What API base URL to use.")
+    parser.add_argument("-k", "--api-key", help="What API key to use.")
+    parser.add_argument("-p", "--proxy", nargs="+", help="What proxy to use.")
+    parser.add_argument(
+        "-o",
+        "--organization",
+        help="Which organization to run as (will use your default organization if not specified)",
+    )
+    parser.add_argument(
+        "-t",
+        "--api-type",
+        type=str,
+        choices=("openai", "azure"),
+        help="The backend API to call, must be `openai` or `azure`",
+    )
+    parser.add_argument(
+        "--api-version",
+        help="The Azure API version to use; see https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning for valid values",
+    )
+
+    # azure
+    parser.add_argument(
+        "--azure-endpoint",
+        help="The Azure endpoint, e.g. 'https://endpoint.openai.azure.com'",
+    )
+    parser.add_argument(
+        "--azure-ad-token",
+        help="A token from Azure Active Directory, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id",
+    )
+
+    # prints the package version
+    parser.add_argument(
+        "-V",
+        "--version",
+        action="version",
+        version="%(prog)s " + __version__,
+    )
+
+    def help() -> None:
+        parser.print_help()
+
+    parser.set_defaults(func=help)
+
+    subparsers = parser.add_subparsers()
+    sub_api = subparsers.add_parser("api", help="Direct API calls")
+
+    register_commands(sub_api)
+
+    sub_tools = subparsers.add_parser("tools", help="Client side tools for convenience")
+    _tools.register_commands(sub_tools, subparsers)
+
+    return parser
+
+
+def main() -> int:
+    try:
+        _main()
+    except (APIError, CLIError, pydantic.ValidationError) as err:
+        display_error(err)
+        return 1
+    except KeyboardInterrupt:
+        sys.stderr.write("\n")
+        return 1
+    return 0
+
+
+def _parse_args(parser: argparse.ArgumentParser) -> tuple[argparse.Namespace, Arguments, list[str]]:
+    # argparse by default will strip out the `--` but we want to keep it for unknown arguments
+    if "--" in sys.argv:
+        idx = sys.argv.index("--")
+        known_args = sys.argv[1:idx]
+        unknown_args = sys.argv[idx:]
+    else:
+        known_args = sys.argv[1:]
+        unknown_args = []
+
+    parsed, remaining_unknown = parser.parse_known_args(known_args)
+
+    # append any remaining unknown arguments from the initial parsing
+    remaining_unknown.extend(unknown_args)
+
+    args = model_parse(Arguments, vars(parsed))
+    if not args.allow_unknown_args:
+        # we have to parse twice to ensure any unknown arguments
+        # result in an error if that behaviour is desired
+        parser.parse_args()
+
+    return parsed, args, remaining_unknown
+
+
+def _main() -> None:
+    parser = _build_parser()
+    parsed, args, unknown = _parse_args(parser)
+
+    if args.verbosity != 0:
+        sys.stderr.write("Warning: --verbosity isn't supported yet\n")
+
+    proxies: dict[str, httpx.BaseTransport] = {}
+    if args.proxy is not None:
+        for proxy in args.proxy:
+            key = "https://" if proxy.startswith("https") else "http://"
+            if key in proxies:
+                raise CLIError(f"Multiple {key} proxies given - only one proxy per scheme is supported")
+
+            proxies[key] = httpx.HTTPTransport(proxy=httpx.Proxy(httpx.URL(proxy)))
+
+    http_client = httpx.Client(
+        mounts=proxies or None,
+        http2=can_use_http2(),
+    )
+    openai.http_client = http_client
+
+    if args.organization:
+        openai.organization = args.organization
+
+    if args.api_key:
+        openai.api_key = args.api_key
+
+    if args.api_base:
+        openai.base_url = args.api_base
+
+    # azure
+    if args.api_type is not None:
+        openai.api_type = args.api_type
+
+    if args.azure_endpoint is not None:
+        openai.azure_endpoint = args.azure_endpoint
+
+    if args.api_version is not None:
+        openai.api_version = args.api_version
+
+    if args.azure_ad_token is not None:
+        openai.azure_ad_token = args.azure_ad_token
+
+    try:
+        if args.args_model:
+            parsed.func(
+                model_parse(
+                    args.args_model,
+                    {
+                        **{
+                            # we omit None values so that they can be defaulted to `NotGiven`
+                            # and we'll strip it from the API request
+                            key: value
+                            for key, value in vars(parsed).items()
+                            if value is not None
+                        },
+                        "unknown_args": unknown,
+                    },
+                )
+            )
+        else:
+            parsed.func()
+    finally:
+        try:
+            http_client.close()
+        except Exception:
+            pass
+
+
+if __name__ == "__main__":
+    sys.exit(main())
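The `--` handling in _parse_args deserves a note: argparse would normally swallow the separator, so everything from the literal `--` onward is split off up front and kept verbatim in unknown_args for subcommands that set allow_unknown_args=True (the grit/migrate tools below). A self-contained sketch of that split:

    def split_forwarded(argv: list[str]) -> tuple[list[str], list[str]]:
        # Mirrors _parse_args: argv[0] is the program name; the "--" itself is
        # preserved at the head of the forwarded list.
        if "--" in argv:
            idx = argv.index("--")
            return argv[1:idx], argv[idx:]
        return argv[1:], []

    known, forwarded = split_forwarded(["openai", "tools", "grit", "--", "apply", "openai"])
    assert known == ["tools", "grit"]
    assert forwarded == ["--", "apply", "openai"]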
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_errors.py b/.venv/lib/python3.12/site-packages/openai/cli/_errors.py
new file mode 100644
index 00000000..7d0292da
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_errors.py
@@ -0,0 +1,21 @@
+from __future__ import annotations
+
+import sys
+
+import pydantic
+
+from ._utils import Colors, organization_info
+from .._exceptions import APIError, OpenAIError
+
+
+class CLIError(OpenAIError): ...
+
+
+class SilentCLIError(CLIError): ...
+
+
+def display_error(err: CLIError | APIError | pydantic.ValidationError) -> None:
+    if isinstance(err, SilentCLIError):
+        return
+
+    sys.stderr.write("{}{}Error:{} {}\n".format(organization_info(), Colors.FAIL, Colors.ENDC, err))
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_models.py b/.venv/lib/python3.12/site-packages/openai/cli/_models.py
new file mode 100644
index 00000000..5583db26
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_models.py
@@ -0,0 +1,17 @@
+from typing import Any
+from typing_extensions import ClassVar
+
+import pydantic
+
+from .. import _models
+from .._compat import PYDANTIC_V2, ConfigDict
+
+
+class BaseModel(_models.BaseModel):
+    if PYDANTIC_V2:
+        model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True)
+    else:
+
+        class Config(pydantic.BaseConfig):  # type: ignore
+            extra: Any = pydantic.Extra.ignore  # type: ignore
+            arbitrary_types_allowed: bool = True
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_progress.py b/.venv/lib/python3.12/site-packages/openai/cli/_progress.py
new file mode 100644
index 00000000..8a7f2525
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_progress.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import io
+from typing import Callable
+from typing_extensions import override
+
+
+class CancelledError(Exception):
+    def __init__(self, msg: str) -> None:
+        self.msg = msg
+        super().__init__(msg)
+
+    @override
+    def __str__(self) -> str:
+        return self.msg
+
+    __repr__ = __str__
+
+
+class BufferReader(io.BytesIO):
+    def __init__(self, buf: bytes = b"", desc: str | None = None) -> None:
+        super().__init__(buf)
+        self._len = len(buf)
+        self._progress = 0
+        self._callback = progress(len(buf), desc=desc)
+
+    def __len__(self) -> int:
+        return self._len
+
+    @override
+    def read(self, n: int | None = -1) -> bytes:
+        chunk = io.BytesIO.read(self, n)
+        self._progress += len(chunk)
+
+        try:
+            self._callback(self._progress)
+        except Exception as e:  # catches exception from the callback
+            raise CancelledError("The upload was cancelled: {}".format(e)) from e
+
+        return chunk
+
+
+def progress(total: float, desc: str | None) -> Callable[[float], None]:
+    import tqdm
+
+    meter = tqdm.tqdm(total=total, unit_scale=True, desc=desc)
+
+    def incr(progress: float) -> None:
+        meter.n = progress
+        if progress == total:
+            meter.close()
+        else:
+            meter.refresh()
+
+    return incr
+
+
+def MB(i: int) -> int:
+    return int(i // 1024**2)
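A quick standalone sketch of BufferReader: each read() advances a tqdm meter, which is how the upload commands above render their progress bars (tqdm must be installed):

    from openai.cli._progress import BufferReader

    data = b"\0" * (4 * 1024 * 1024)  # 4 MiB of dummy bytes
    reader = BufferReader(data, desc="Upload progress")
    while reader.read(256 * 1024):
        pass  # each read() bumps the meter; it closes itself at 100%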
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_tools/__init__.py b/.venv/lib/python3.12/site-packages/openai/cli/_tools/__init__.py
new file mode 100644
index 00000000..56a0260a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_tools/__init__.py
@@ -0,0 +1 @@
+from ._main import register_commands as register_commands
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_tools/_main.py b/.venv/lib/python3.12/site-packages/openai/cli/_tools/_main.py
new file mode 100644
index 00000000..bd6cda40
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_tools/_main.py
@@ -0,0 +1,17 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from argparse import ArgumentParser
+
+from . import migrate, fine_tunes
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register_commands(parser: ArgumentParser, subparser: _SubParsersAction[ArgumentParser]) -> None:
+    migrate.register(subparser)
+
+    namespaced = parser.add_subparsers(title="Tools", help="Convenience client side tools")
+
+    fine_tunes.register(namespaced)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_tools/fine_tunes.py b/.venv/lib/python3.12/site-packages/openai/cli/_tools/fine_tunes.py
new file mode 100644
index 00000000..2128b889
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_tools/fine_tunes.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING
+from argparse import ArgumentParser
+
+from .._models import BaseModel
+from ...lib._validators import (
+    get_validators,
+    write_out_file,
+    read_any_format,
+    apply_validators,
+    apply_necessary_remediation,
+)
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("fine_tunes.prepare_data")
+    sub.add_argument(
+        "-f",
+        "--file",
+        required=True,
+        help="JSONL, JSON, CSV, TSV, TXT or XLSX file containing prompt-completion examples to be analyzed. "
+        "This should be the local file path.",
+    )
+    sub.add_argument(
+        "-q",
+        "--quiet",
+        required=False,
+        action="store_true",
+        help="Auto accepts all suggestions, without asking for user input. To be used within scripts.",
+    )
+    sub.set_defaults(func=prepare_data, args_model=PrepareDataArgs)
+
+
+class PrepareDataArgs(BaseModel):
+    file: str
+
+    quiet: bool
+
+
+def prepare_data(args: PrepareDataArgs) -> None:
+    sys.stdout.write("Analyzing...\n")
+    fname = args.file
+    auto_accept = args.quiet
+    df, remediation = read_any_format(fname)
+    apply_necessary_remediation(None, remediation)
+
+    validators = get_validators()
+
+    assert df is not None
+
+    apply_validators(
+        df,
+        fname,
+        remediation,
+        validators,
+        auto_accept,
+        write_out_file_func=write_out_file,
+    )
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_tools/migrate.py b/.venv/lib/python3.12/site-packages/openai/cli/_tools/migrate.py
new file mode 100644
index 00000000..841b7775
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_tools/migrate.py
@@ -0,0 +1,164 @@
+from __future__ import annotations
+
+import os
+import sys
+import shutil
+import tarfile
+import platform
+import subprocess
+from typing import TYPE_CHECKING, List
+from pathlib import Path
+from argparse import ArgumentParser
+
+import httpx
+
+from .._errors import CLIError, SilentCLIError
+from .._models import BaseModel
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("migrate")
+    sub.set_defaults(func=migrate, args_model=MigrateArgs, allow_unknown_args=True)
+
+    sub = subparser.add_parser("grit")
+    sub.set_defaults(func=grit, args_model=GritArgs, allow_unknown_args=True)
+
+
+class GritArgs(BaseModel):
+    # internal
+    unknown_args: List[str] = []
+
+
+def grit(args: GritArgs) -> None:
+    grit_path = install()
+
+    try:
+        subprocess.check_call([grit_path, *args.unknown_args])
+    except subprocess.CalledProcessError:
+        # stdout and stderr are forwarded by subprocess so an error will already
+        # have been displayed
+        raise SilentCLIError() from None
+
+
+class MigrateArgs(BaseModel):
+    # internal
+    unknown_args: List[str] = []
+
+
+def migrate(args: MigrateArgs) -> None:
+    grit_path = install()
+
+    try:
+        subprocess.check_call([grit_path, "apply", "openai", *args.unknown_args])
+    except subprocess.CalledProcessError:
+        # stdout and stderr are forwarded by subprocess so an error will already
+        # have been displayed
+        raise SilentCLIError() from None
+
+
+# handles downloading the Grit CLI until they provide their own PyPI package
+
+KEYGEN_ACCOUNT = "custodian-dev"
+
+
+def _cache_dir() -> Path:
+    xdg = os.environ.get("XDG_CACHE_HOME")
+    if xdg is not None:
+        return Path(xdg)
+
+    return Path.home() / ".cache"
+
+
+def _debug(message: str) -> None:
+    if not os.environ.get("DEBUG"):
+        return
+
+    sys.stdout.write(f"[DEBUG]: {message}\n")
+
+
+def install() -> Path:
+    """Installs the Grit CLI and returns the location of the binary"""
+    if sys.platform == "win32":
+        raise CLIError("Windows is not supported yet in the migration CLI")
+
+    _debug("Using Grit installer from GitHub")
+
+    os_name = "apple-darwin" if sys.platform == "darwin" else "unknown-linux-gnu"
+
+    dir_name = _cache_dir() / "openai-python"
+    install_dir = dir_name / ".install"
+    target_dir = install_dir / "bin"
+
+    target_path = target_dir / "grit"
+    temp_file = target_dir / "grit.tmp"
+
+    if target_path.exists():
+        _debug(f"{target_path} already exists")
+        sys.stdout.flush()
+        return target_path
+
+    _debug(f"Using Grit CLI path: {target_path}")
+
+    target_dir.mkdir(parents=True, exist_ok=True)
+
+    if temp_file.exists():
+        temp_file.unlink()
+
+    arch = _get_arch()
+    _debug(f"Using architecture {arch}")
+
+    file_name = f"grit-{arch}-{os_name}"
+    download_url = f"https://github.com/getgrit/gritql/releases/latest/download/{file_name}.tar.gz"
+
+    sys.stdout.write(f"Downloading Grit CLI from {download_url}\n")
+    with httpx.Client() as client:
+        download_response = client.get(download_url, follow_redirects=True)
+        if download_response.status_code != 200:
+            raise CLIError(f"Failed to download Grit CLI from {download_url}")
+        with open(temp_file, "wb") as file:
+            for chunk in download_response.iter_bytes():
+                file.write(chunk)
+
+    unpacked_dir = target_dir / "cli-bin"
+    unpacked_dir.mkdir(parents=True, exist_ok=True)
+
+    with tarfile.open(temp_file, "r:gz") as archive:
+        if sys.version_info >= (3, 12):
+            archive.extractall(unpacked_dir, filter="data")
+        else:
+            archive.extractall(unpacked_dir)
+
+    _move_files_recursively(unpacked_dir, target_dir)
+
+    shutil.rmtree(unpacked_dir)
+    os.remove(temp_file)
+    os.chmod(target_path, 0o755)
+
+    sys.stdout.flush()
+
+    return target_path
+
+
+def _move_files_recursively(source_dir: Path, target_dir: Path) -> None:
+    for item in source_dir.iterdir():
+        if item.is_file():
+            item.rename(target_dir / item.name)
+        elif item.is_dir():
+            _move_files_recursively(item, target_dir)
+
+
+def _get_arch() -> str:
+    architecture = platform.machine().lower()
+
+    # Map the architecture names to Grit equivalents
+    arch_map = {
+        "x86_64": "x86_64",
+        "amd64": "x86_64",
+        "armv7l": "aarch64",
+        "arm64": "aarch64",
+    }
+
+    return arch_map.get(architecture, architecture)
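The binary is cached under the XDG cache directory, so repeated migrate/grit invocations skip the download. A sketch of the path resolution performed by _cache_dir() and install() above:

    import os
    from pathlib import Path

    def grit_cache_path() -> Path:
        # Honor XDG_CACHE_HOME, else fall back to ~/.cache, as _cache_dir does.
        xdg = os.environ.get("XDG_CACHE_HOME")
        base = Path(xdg) if xdg is not None else Path.home() / ".cache"
        return base / "openai-python" / ".install" / "bin" / "grit"

    print(grit_cache_path())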
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_utils.py b/.venv/lib/python3.12/site-packages/openai/cli/_utils.py
new file mode 100644
index 00000000..673eed61
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_utils.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import sys
+
+import openai
+
+from .. import OpenAI, _load_client
+from .._compat import model_json
+from .._models import BaseModel
+
+
+class Colors:
+    HEADER = "\033[95m"
+    OKBLUE = "\033[94m"
+    OKGREEN = "\033[92m"
+    WARNING = "\033[93m"
+    FAIL = "\033[91m"
+    ENDC = "\033[0m"
+    BOLD = "\033[1m"
+    UNDERLINE = "\033[4m"
+
+
+def get_client() -> OpenAI:
+    return _load_client()
+
+
+def organization_info() -> str:
+    organization = openai.organization
+    if organization is not None:
+        return "[organization={}] ".format(organization)
+
+    return ""
+
+
+def print_model(model: BaseModel) -> None:
+    sys.stdout.write(model_json(model, indent=2) + "\n")
+
+
+def can_use_http2() -> bool:
+    try:
+        import h2  # type: ignore  # noqa
+    except ImportError:
+        return False
+
+    return True