Diffstat (limited to '.venv/lib/python3.12/site-packages/openai/resources/chat/completions/messages.py')
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/chat/completions/messages.py  212
1 file changed, 212 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/messages.py b/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/messages.py
new file mode 100644
index 00000000..fac15fba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/messages.py
@@ -0,0 +1,212 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.chat.completions import message_list_params
+from ....types.chat.chat_completion_store_message import ChatCompletionStoreMessage
+
+__all__ = ["Messages", "AsyncMessages"]
+
+
+class Messages(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> MessagesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return MessagesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> MessagesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return MessagesWithStreamingResponse(self)
+
+ def list(
+ self,
+ completion_id: str,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncCursorPage[ChatCompletionStoreMessage]:
+ """Get the messages in a stored chat completion.
+
+ Only Chat Completions that have
+ been created with the `store` parameter set to `true` will be returned.
+
+ Args:
+ after: Identifier for the last message from the previous pagination request.
+
+ limit: Number of messages to retrieve.
+
+ order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
+ for descending order. Defaults to `asc`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not completion_id:
+ raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
+ return self._get_api_list(
+ f"/chat/completions/{completion_id}/messages",
+ page=SyncCursorPage[ChatCompletionStoreMessage],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ message_list_params.MessageListParams,
+ ),
+ ),
+ model=ChatCompletionStoreMessage,
+ )
+
+
+class AsyncMessages(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncMessagesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncMessagesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncMessagesWithStreamingResponse(self)
+
+ def list(
+ self,
+ completion_id: str,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[ChatCompletionStoreMessage, AsyncCursorPage[ChatCompletionStoreMessage]]:
+ """Get the messages in a stored chat completion.
+
+ Only Chat Completions that have
+ been created with the `store` parameter set to `true` will be returned.
+
+ Args:
+ after: Identifier for the last message from the previous pagination request.
+
+ limit: Number of messages to retrieve.
+
+ order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
+ for descending order. Defaults to `asc`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not completion_id:
+ raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
+ return self._get_api_list(
+ f"/chat/completions/{completion_id}/messages",
+ page=AsyncCursorPage[ChatCompletionStoreMessage],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ message_list_params.MessageListParams,
+ ),
+ ),
+ model=ChatCompletionStoreMessage,
+ )
+
+
+class MessagesWithRawResponse:
+ def __init__(self, messages: Messages) -> None:
+ self._messages = messages
+
+ self.list = _legacy_response.to_raw_response_wrapper(
+ messages.list,
+ )
+
+
+class AsyncMessagesWithRawResponse:
+ def __init__(self, messages: AsyncMessages) -> None:
+ self._messages = messages
+
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ messages.list,
+ )
+
+
+class MessagesWithStreamingResponse:
+ def __init__(self, messages: Messages) -> None:
+ self._messages = messages
+
+ self.list = to_streamed_response_wrapper(
+ messages.list,
+ )
+
+
+class AsyncMessagesWithStreamingResponse:
+ def __init__(self, messages: AsyncMessages) -> None:
+ self._messages = messages
+
+ self.list = async_to_streamed_response_wrapper(
+ messages.list,
+ )
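
The resource added by this diff is reached through client.chat.completions.messages. A minimal sync sketch of the list endpoint, assuming OPENAI_API_KEY is set in the environment and using "chatcmpl-abc123" as a placeholder ID for a completion previously created with store=True:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

completion_id = "chatcmpl-abc123"  # placeholder; must refer to a stored completion

# list() returns a SyncCursorPage; iterating it fetches subsequent
# pages automatically using the `after` cursor.
for message in client.chat.completions.messages.list(
    completion_id,
    limit=50,
    order="asc",
):
    print(message.id, message.content)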
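The async resource mirrors the sync one; a sketch using AsyncOpenAI with the same placeholder completion ID (an assumption, not taken from the diff):

import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    completion_id = "chatcmpl-abc123"  # placeholder for a stored completion

    # The AsyncPaginator returned by list() supports `async for`; later
    # pages are requested automatically via the `after` cursor.
    async for message in client.chat.completions.messages.list(completion_id, order="desc"):
        print(message.id, message.role)


asyncio.run(main())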
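The with_raw_response and with_streaming_response properties wrap the same list() call; a sketch of both access patterns (same placeholder ID, with the x-request-id header read only to illustrate raw header access):

from openai import OpenAI

client = OpenAI()
completion_id = "chatcmpl-abc123"  # placeholder

# with_raw_response reads the body eagerly; .parse() yields the page object.
raw = client.chat.completions.messages.with_raw_response.list(completion_id)
print(raw.headers.get("x-request-id"))
page = raw.parse()

# with_streaming_response defers reading the body until it is requested
# inside the context manager.
with client.chat.completions.messages.with_streaming_response.list(completion_id) as response:
    print(response.headers.get("x-request-id"))
    streamed_page = response.parse()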