path: root/.venv/lib/python3.12/site-packages/openai/resources/vector_stores
author    S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit    4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree      ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/openai/resources/vector_stores
parent    cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download  gn-ai-master.tar.gz
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/openai/resources/vector_stores')
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/vector_stores/__init__.py        47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/vector_stores/file_batches.py   801
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/vector_stores/files.py          933
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/vector_stores/vector_stores.py  868
4 files changed, 2649 insertions, 0 deletions
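
For orientation, here is a minimal usage sketch of the helpers added in this diff. The `client.vector_stores` accessor, the `create(name=...)` call (defined in the sibling vector_stores.py, whose contents are not shown here), and all IDs and file paths are assumptions for illustration, not part of the patch.

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Assumed helper from vector_stores.py: create a store to attach files to.
vector_store = client.vector_stores.create(name="support-docs")

# files.py helper: upload one file and block until it is processed.
with open("handbook.pdf", "rb") as fh:
    vs_file = client.vector_stores.files.upload_and_poll(
        vector_store_id=vector_store.id,
        file=fh,
    )
print(vs_file.status)

# file_batches.py helper: upload several files as one batch and poll the batch.
streams = [open(path, "rb") for path in ("faq.md", "pricing.md")]
try:
    batch = client.vector_stores.file_batches.upload_and_poll(
        vector_store.id,
        files=streams,
    )
finally:
    for stream in streams:
        stream.close()
print(batch.status, batch.file_counts)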
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/__init__.py
new file mode 100644
index 00000000..96ae16c3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .files import (
+ Files,
+ AsyncFiles,
+ FilesWithRawResponse,
+ AsyncFilesWithRawResponse,
+ FilesWithStreamingResponse,
+ AsyncFilesWithStreamingResponse,
+)
+from .file_batches import (
+ FileBatches,
+ AsyncFileBatches,
+ FileBatchesWithRawResponse,
+ AsyncFileBatchesWithRawResponse,
+ FileBatchesWithStreamingResponse,
+ AsyncFileBatchesWithStreamingResponse,
+)
+from .vector_stores import (
+ VectorStores,
+ AsyncVectorStores,
+ VectorStoresWithRawResponse,
+ AsyncVectorStoresWithRawResponse,
+ VectorStoresWithStreamingResponse,
+ AsyncVectorStoresWithStreamingResponse,
+)
+
+__all__ = [
+ "Files",
+ "AsyncFiles",
+ "FilesWithRawResponse",
+ "AsyncFilesWithRawResponse",
+ "FilesWithStreamingResponse",
+ "AsyncFilesWithStreamingResponse",
+ "FileBatches",
+ "AsyncFileBatches",
+ "FileBatchesWithRawResponse",
+ "AsyncFileBatchesWithRawResponse",
+ "FileBatchesWithStreamingResponse",
+ "AsyncFileBatchesWithStreamingResponse",
+ "VectorStores",
+ "AsyncVectorStores",
+ "VectorStoresWithRawResponse",
+ "AsyncVectorStoresWithRawResponse",
+ "VectorStoresWithStreamingResponse",
+ "AsyncVectorStoresWithStreamingResponse",
+]
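
The wrapper classes re-exported above back the `.with_raw_response` and `.with_streaming_response` accessors on the parent resources. A hedged sketch of how they surface to callers, continuing the assumed client and placeholder IDs from the sketch above:

# Raw variant: parse explicitly and inspect HTTP metadata.
raw = client.vector_stores.files.with_raw_response.retrieve(
    "file-abc123",
    vector_store_id="vs_abc123",
)
print(raw.headers.get("x-request-id"))
vs_file = raw.parse()

# Streaming variant: the response body is not read until you consume it.
with client.vector_stores.files.with_streaming_response.retrieve(
    "file-abc123",
    vector_store_id="vs_abc123",
) as response:
    print(response.headers.get("x-request-id"))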
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/file_batches.py b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/file_batches.py
new file mode 100644
index 00000000..9b4b64d3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/file_batches.py
@@ -0,0 +1,801 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import asyncio
+from typing import Dict, List, Iterable, Optional
+from typing_extensions import Union, Literal
+from concurrent.futures import Future, ThreadPoolExecutor, as_completed
+
+import httpx
+import sniffio
+
+from ... import _legacy_response
+from ...types import FileChunkingStrategyParam
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from ..._utils import (
+ is_given,
+ maybe_transform,
+ async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncCursorPage, AsyncCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.file_object import FileObject
+from ...types.vector_stores import file_batch_create_params, file_batch_list_files_params
+from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
+from ...types.vector_stores.vector_store_file import VectorStoreFile
+from ...types.vector_stores.vector_store_file_batch import VectorStoreFileBatch
+
+__all__ = ["FileBatches", "AsyncFileBatches"]
+
+
+class FileBatches(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> FileBatchesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return FileBatchesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> FileBatchesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return FileBatchesWithStreamingResponse(self)
+
+ def create(
+ self,
+ vector_store_id: str,
+ *,
+ file_ids: List[str],
+ attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """
+ Create a vector store file batch.
+
+ Args:
+ file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+
+ attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters, booleans, or numbers.
+
+ chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ strategy. Only applicable if `file_ids` is non-empty.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._post(
+ f"/vector_stores/{vector_store_id}/file_batches",
+ body=maybe_transform(
+ {
+ "file_ids": file_ids,
+ "attributes": attributes,
+ "chunking_strategy": chunking_strategy,
+ },
+ file_batch_create_params.FileBatchCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ def retrieve(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """
+ Retrieves a vector store file batch.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ def cancel(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Cancel a vector store file batch.
+
+ This attempts to cancel the processing of
+ files in this batch as soon as possible.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._post(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ def create_and_poll(
+ self,
+ vector_store_id: str,
+ *,
+ file_ids: List[str],
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Create a vector store batch and poll until all files have been processed."""
+ batch = self.create(
+ vector_store_id=vector_store_id,
+ file_ids=file_ids,
+ chunking_strategy=chunking_strategy,
+ )
+ # TODO: don't poll unless necessary??
+ return self.poll(
+ batch.id,
+ vector_store_id=vector_store_id,
+ poll_interval_ms=poll_interval_ms,
+ )
+
+ def list_files(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ after: str | NotGiven = NOT_GIVEN,
+ before: str | NotGiven = NOT_GIVEN,
+ filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncCursorPage[VectorStoreFile]:
+ """
+ Returns a list of vector store files in a batch.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
+
+ filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ page=SyncCursorPage[VectorStoreFile],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "filter": filter,
+ "limit": limit,
+ "order": order,
+ },
+ file_batch_list_files_params.FileBatchListFilesParams,
+ ),
+ ),
+ model=VectorStoreFile,
+ )
+
+ def poll(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Wait for the given file batch to be processed.
+
+ Note: this will return even if one of the files failed to process; you need to
+ check batch.file_counts.failed_count to handle this case.
+ """
+ headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+ if is_given(poll_interval_ms):
+ headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+ while True:
+ response = self.with_raw_response.retrieve(
+ batch_id,
+ vector_store_id=vector_store_id,
+ extra_headers=headers,
+ )
+
+ batch = response.parse()
+ if batch.file_counts.in_progress > 0:
+ if not is_given(poll_interval_ms):
+ from_header = response.headers.get("openai-poll-after-ms")
+ if from_header is not None:
+ poll_interval_ms = int(from_header)
+ else:
+ poll_interval_ms = 1000
+
+ self._sleep(poll_interval_ms / 1000)
+ continue
+
+ return batch
+
+ def upload_and_poll(
+ self,
+ vector_store_id: str,
+ *,
+ files: Iterable[FileTypes],
+ max_concurrency: int = 5,
+ file_ids: List[str] = [],
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Uploads the given files concurrently and then creates a vector store file batch.
+
+ If you've already uploaded certain files that you want to include in this batch
+ then you can pass their IDs through the `file_ids` argument.
+
+ By default, if any file upload fails then an exception will be eagerly raised.
+
+ The number of concurrent uploads is configurable using the `max_concurrency`
+ parameter.
+
+ Note: this synchronous method uploads files concurrently using a thread pool.
+ """
+ results: list[FileObject] = []
+
+ with ThreadPoolExecutor(max_workers=max_concurrency) as executor:
+ futures: list[Future[FileObject]] = [
+ executor.submit(
+ self._client.files.create,
+ file=file,
+ purpose="assistants",
+ )
+ for file in files
+ ]
+
+ for future in as_completed(futures):
+ exc = future.exception()
+ if exc:
+ raise exc
+
+ results.append(future.result())
+
+ batch = self.create_and_poll(
+ vector_store_id=vector_store_id,
+ file_ids=[*file_ids, *(f.id for f in results)],
+ poll_interval_ms=poll_interval_ms,
+ chunking_strategy=chunking_strategy,
+ )
+ return batch
+
+
+class AsyncFileBatches(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncFileBatchesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncFileBatchesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncFileBatchesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncFileBatchesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ vector_store_id: str,
+ *,
+ file_ids: List[str],
+ attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """
+ Create a vector store file batch.
+
+ Args:
+ file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+
+ attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters, booleans, or numbers.
+
+ chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ strategy. Only applicable if `file_ids` is non-empty.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._post(
+ f"/vector_stores/{vector_store_id}/file_batches",
+ body=await async_maybe_transform(
+ {
+ "file_ids": file_ids,
+ "attributes": attributes,
+ "chunking_strategy": chunking_strategy,
+ },
+ file_batch_create_params.FileBatchCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ async def retrieve(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """
+ Retrieves a vector store file batch.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._get(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ async def cancel(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Cancel a vector store file batch.
+
+ This attempts to cancel the processing of
+ files in this batch as soon as possible.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._post(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ async def create_and_poll(
+ self,
+ vector_store_id: str,
+ *,
+ file_ids: List[str],
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Create a vector store batch and poll until all files have been processed."""
+ batch = await self.create(
+ vector_store_id=vector_store_id,
+ file_ids=file_ids,
+ chunking_strategy=chunking_strategy,
+ )
+ # TODO: don't poll unless necessary??
+ return await self.poll(
+ batch.id,
+ vector_store_id=vector_store_id,
+ poll_interval_ms=poll_interval_ms,
+ )
+
+ def list_files(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ after: str | NotGiven = NOT_GIVEN,
+ before: str | NotGiven = NOT_GIVEN,
+ filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[VectorStoreFile, AsyncCursorPage[VectorStoreFile]]:
+ """
+ Returns a list of vector store files in a batch.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
+
+ filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ page=AsyncCursorPage[VectorStoreFile],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "filter": filter,
+ "limit": limit,
+ "order": order,
+ },
+ file_batch_list_files_params.FileBatchListFilesParams,
+ ),
+ ),
+ model=VectorStoreFile,
+ )
+
+ async def poll(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Wait for the given file batch to be processed.
+
+ Note: this will return even if one of the files failed to process; you need to
+ check batch.file_counts.failed_count to handle this case.
+ """
+ headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+ if is_given(poll_interval_ms):
+ headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+ while True:
+ response = await self.with_raw_response.retrieve(
+ batch_id,
+ vector_store_id=vector_store_id,
+ extra_headers=headers,
+ )
+
+ batch = response.parse()
+ if batch.file_counts.in_progress > 0:
+ if not is_given(poll_interval_ms):
+ from_header = response.headers.get("openai-poll-after-ms")
+ if from_header is not None:
+ poll_interval_ms = int(from_header)
+ else:
+ poll_interval_ms = 1000
+
+ await self._sleep(poll_interval_ms / 1000)
+ continue
+
+ return batch
+
+ async def upload_and_poll(
+ self,
+ vector_store_id: str,
+ *,
+ files: Iterable[FileTypes],
+ max_concurrency: int = 5,
+ file_ids: List[str] = [],
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Uploads the given files concurrently and then creates a vector store file batch.
+
+ If you've already uploaded certain files that you want to include in this batch
+ then you can pass their IDs through the `file_ids` argument.
+
+ By default, if any file upload fails then an exception will be eagerly raised.
+
+ The number of concurrent uploads is configurable using the `max_concurrency`
+ parameter.
+
+ Note: this method only supports `asyncio` or `trio` as the backing async
+ runtime.
+ """
+ uploaded_files: list[FileObject] = []
+
+ async_library = sniffio.current_async_library()
+
+ if async_library == "asyncio":
+
+ async def asyncio_upload_file(semaphore: asyncio.Semaphore, file: FileTypes) -> None:
+ async with semaphore:
+ file_obj = await self._client.files.create(
+ file=file,
+ purpose="assistants",
+ )
+ uploaded_files.append(file_obj)
+
+ semaphore = asyncio.Semaphore(max_concurrency)
+
+ tasks = [asyncio_upload_file(semaphore, file) for file in files]
+
+ await asyncio.gather(*tasks)
+ elif async_library == "trio":
+ # We only import if the library is being used.
+ # We support Python 3.7, so we are using an older version of trio that does not have type information
+ import trio # type: ignore # pyright: ignore[reportMissingTypeStubs]
+
+ async def trio_upload_file(limiter: trio.CapacityLimiter, file: FileTypes) -> None:
+ async with limiter:
+ file_obj = await self._client.files.create(
+ file=file,
+ purpose="assistants",
+ )
+ uploaded_files.append(file_obj)
+
+ limiter = trio.CapacityLimiter(max_concurrency)
+
+ async with trio.open_nursery() as nursery:
+ for file in files:
+ nursery.start_soon(trio_upload_file, limiter, file) # pyright: ignore [reportUnknownMemberType]
+ else:
+ raise RuntimeError(
+ f"Async runtime {async_library} is not supported yet. Only asyncio or trio is supported",
+ )
+
+ batch = await self.create_and_poll(
+ vector_store_id=vector_store_id,
+ file_ids=[*file_ids, *(f.id for f in uploaded_files)],
+ poll_interval_ms=poll_interval_ms,
+ chunking_strategy=chunking_strategy,
+ )
+ return batch
+
+
+class FileBatchesWithRawResponse:
+ def __init__(self, file_batches: FileBatches) -> None:
+ self._file_batches = file_batches
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ file_batches.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ file_batches.retrieve,
+ )
+ self.cancel = _legacy_response.to_raw_response_wrapper(
+ file_batches.cancel,
+ )
+ self.list_files = _legacy_response.to_raw_response_wrapper(
+ file_batches.list_files,
+ )
+
+
+class AsyncFileBatchesWithRawResponse:
+ def __init__(self, file_batches: AsyncFileBatches) -> None:
+ self._file_batches = file_batches
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ file_batches.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ file_batches.retrieve,
+ )
+ self.cancel = _legacy_response.async_to_raw_response_wrapper(
+ file_batches.cancel,
+ )
+ self.list_files = _legacy_response.async_to_raw_response_wrapper(
+ file_batches.list_files,
+ )
+
+
+class FileBatchesWithStreamingResponse:
+ def __init__(self, file_batches: FileBatches) -> None:
+ self._file_batches = file_batches
+
+ self.create = to_streamed_response_wrapper(
+ file_batches.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ file_batches.retrieve,
+ )
+ self.cancel = to_streamed_response_wrapper(
+ file_batches.cancel,
+ )
+ self.list_files = to_streamed_response_wrapper(
+ file_batches.list_files,
+ )
+
+
+class AsyncFileBatchesWithStreamingResponse:
+ def __init__(self, file_batches: AsyncFileBatches) -> None:
+ self._file_batches = file_batches
+
+ self.create = async_to_streamed_response_wrapper(
+ file_batches.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ file_batches.retrieve,
+ )
+ self.cancel = async_to_streamed_response_wrapper(
+ file_batches.cancel,
+ )
+ self.list_files = async_to_streamed_response_wrapper(
+ file_batches.list_files,
+ )
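
For comparison with the synchronous thread-pool path above, a hedged async sketch of AsyncFileBatches.upload_and_poll under asyncio; the client construction, vector store ID, and file paths are illustrative assumptions.

import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment
    streams = [open(path, "rb") for path in ("faq.md", "pricing.md")]
    try:
        # Uploads run concurrently (bounded by max_concurrency), then the batch is polled.
        batch = await client.vector_stores.file_batches.upload_and_poll(
            "vs_abc123",
            files=streams,
            max_concurrency=5,
        )
    finally:
        for stream in streams:
            stream.close()
    print(batch.status, batch.file_counts)


asyncio.run(main())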
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/files.py b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/files.py
new file mode 100644
index 00000000..7d93798a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/files.py
@@ -0,0 +1,933 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Dict, Union, Optional
+from typing_extensions import Literal, assert_never
+
+import httpx
+
+from ... import _legacy_response
+from ...types import FileChunkingStrategyParam
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from ..._utils import (
+ is_given,
+ maybe_transform,
+ async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.vector_stores import file_list_params, file_create_params, file_update_params
+from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
+from ...types.vector_stores.vector_store_file import VectorStoreFile
+from ...types.vector_stores.file_content_response import FileContentResponse
+from ...types.vector_stores.vector_store_file_deleted import VectorStoreFileDeleted
+
+__all__ = ["Files", "AsyncFiles"]
+
+
+class Files(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> FilesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return FilesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> FilesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return FilesWithStreamingResponse(self)
+
+ def create(
+ self,
+ vector_store_id: str,
+ *,
+ file_id: str,
+ attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """
+ Create a vector store file by attaching a
+ [File](https://platform.openai.com/docs/api-reference/files) to a
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).
+
+ Args:
+ file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+ vector store should use. Useful for tools like `file_search` that can access
+ files.
+
+ attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters, booleans, or numbers.
+
+ chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ strategy. Only applicable if `file_ids` is non-empty.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._post(
+ f"/vector_stores/{vector_store_id}/files",
+ body=maybe_transform(
+ {
+ "file_id": file_id,
+ "attributes": attributes,
+ "chunking_strategy": chunking_strategy,
+ },
+ file_create_params.FileCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFile,
+ )
+
+ def retrieve(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """
+ Retrieves a vector store file.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get(
+ f"/vector_stores/{vector_store_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFile,
+ )
+
+ def update(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ attributes: Optional[Dict[str, Union[str, float, bool]]],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """
+ Update attributes on a vector store file.
+
+ Args:
+ attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters, booleans, or numbers.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._post(
+ f"/vector_stores/{vector_store_id}/files/{file_id}",
+ body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFile,
+ )
+
+ def list(
+ self,
+ vector_store_id: str,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ before: str | NotGiven = NOT_GIVEN,
+ filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncCursorPage[VectorStoreFile]:
+ """
+ Returns a list of vector store files.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
+
+ filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ f"/vector_stores/{vector_store_id}/files",
+ page=SyncCursorPage[VectorStoreFile],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "filter": filter,
+ "limit": limit,
+ "order": order,
+ },
+ file_list_params.FileListParams,
+ ),
+ ),
+ model=VectorStoreFile,
+ )
+
+ def delete(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileDeleted:
+ """Delete a vector store file.
+
+ This will remove the file from the vector store but
+ the file itself will not be deleted. To delete the file, use the
+ [delete file](https://platform.openai.com/docs/api-reference/files/delete)
+ endpoint.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._delete(
+ f"/vector_stores/{vector_store_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileDeleted,
+ )
+
+ def create_and_poll(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Attach a file to the given vector store and wait for it to be processed."""
+ self.create(vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy)
+
+ return self.poll(
+ file_id,
+ vector_store_id=vector_store_id,
+ poll_interval_ms=poll_interval_ms,
+ )
+
+ def poll(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Wait for the vector store file to finish processing.
+
+ Note: this will return even if the file failed to process; you need to check
+ file.last_error and file.status to handle these cases.
+ """
+ headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+ if is_given(poll_interval_ms):
+ headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+ while True:
+ response = self.with_raw_response.retrieve(
+ file_id,
+ vector_store_id=vector_store_id,
+ extra_headers=headers,
+ )
+
+ file = response.parse()
+ if file.status == "in_progress":
+ if not is_given(poll_interval_ms):
+ from_header = response.headers.get("openai-poll-after-ms")
+ if from_header is not None:
+ poll_interval_ms = int(from_header)
+ else:
+ poll_interval_ms = 1000
+
+ self._sleep(poll_interval_ms / 1000)
+ elif file.status == "cancelled" or file.status == "completed" or file.status == "failed":
+ return file
+ else:
+ if TYPE_CHECKING: # type: ignore[unreachable]
+ assert_never(file.status)
+ else:
+ return file
+
+ def upload(
+ self,
+ *,
+ vector_store_id: str,
+ file: FileTypes,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Upload a file to the `files` API and then attach it to the given vector store.
+
+ Note the file will be asynchronously processed (you can use the alternative
+ polling helper method to wait for processing to complete).
+ """
+ file_obj = self._client.files.create(file=file, purpose="assistants")
+ return self.create(vector_store_id=vector_store_id, file_id=file_obj.id, chunking_strategy=chunking_strategy)
+
+ def upload_and_poll(
+ self,
+ *,
+ vector_store_id: str,
+ file: FileTypes,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Add a file to a vector store and poll until processing is complete."""
+ file_obj = self._client.files.create(file=file, purpose="assistants")
+ return self.create_and_poll(
+ vector_store_id=vector_store_id,
+ file_id=file_obj.id,
+ chunking_strategy=chunking_strategy,
+ poll_interval_ms=poll_interval_ms,
+ )
+
+ def content(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncPage[FileContentResponse]:
+ """
+ Retrieve the parsed contents of a vector store file.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ f"/vector_stores/{vector_store_id}/files/{file_id}/content",
+ page=SyncPage[FileContentResponse],
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ model=FileContentResponse,
+ )
+
+
+class AsyncFiles(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncFilesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncFilesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncFilesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ vector_store_id: str,
+ *,
+ file_id: str,
+ attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """
+ Create a vector store file by attaching a
+ [File](https://platform.openai.com/docs/api-reference/files) to a
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).
+
+ Args:
+ file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+ vector store should use. Useful for tools like `file_search` that can access
+ files.
+
+ attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters, booleans, or numbers.
+
+ chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ strategy. Only applicable if `file_ids` is non-empty.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._post(
+ f"/vector_stores/{vector_store_id}/files",
+ body=await async_maybe_transform(
+ {
+ "file_id": file_id,
+ "attributes": attributes,
+ "chunking_strategy": chunking_strategy,
+ },
+ file_create_params.FileCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFile,
+ )
+
+ async def retrieve(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """
+ Retrieves a vector store file.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._get(
+ f"/vector_stores/{vector_store_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFile,
+ )
+
+ async def update(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ attributes: Optional[Dict[str, Union[str, float, bool]]],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """
+ Update attributes on a vector store file.
+
+ Args:
+ attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters, booleans, or numbers.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._post(
+ f"/vector_stores/{vector_store_id}/files/{file_id}",
+ body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFile,
+ )
+
+ def list(
+ self,
+ vector_store_id: str,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ before: str | NotGiven = NOT_GIVEN,
+ filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[VectorStoreFile, AsyncCursorPage[VectorStoreFile]]:
+ """
+ Returns a list of vector store files.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
+
+ filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ f"/vector_stores/{vector_store_id}/files",
+ page=AsyncCursorPage[VectorStoreFile],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "filter": filter,
+ "limit": limit,
+ "order": order,
+ },
+ file_list_params.FileListParams,
+ ),
+ ),
+ model=VectorStoreFile,
+ )
+
+ async def delete(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileDeleted:
+ """Delete a vector store file.
+
+ This will remove the file from the vector store but
+ the file itself will not be deleted. To delete the file, use the
+ [delete file](https://platform.openai.com/docs/api-reference/files/delete)
+ endpoint.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._delete(
+ f"/vector_stores/{vector_store_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileDeleted,
+ )
+
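+ # Usage sketch (illustrative, not part of the generated module): per the
+ # docstring above, `delete` only detaches the file from the vector store;
+ # removing the underlying Files-API object is a separate call. Ids are
+ # placeholders and `client` is assumed to be an `AsyncOpenAI` instance.
+ #
+ #     await client.vector_stores.files.delete(
+ #         "file-abc123",
+ #         vector_store_id="vs_abc123",
+ #     )
+ #     await client.files.delete("file-abc123")  # optional: delete the file itself
+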
+ async def create_and_poll(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Attach a file to the given vector store and wait for it to be processed."""
+ await self.create(vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy)
+
+ return await self.poll(
+ file_id,
+ vector_store_id=vector_store_id,
+ poll_interval_ms=poll_interval_ms,
+ )
+
+ async def poll(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Wait for the vector store file to finish processing.
+
+ Note: this will return even if the file failed to process; you need to check
+ `file.last_error` and `file.status` to handle those cases.
+ """
+ headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+ if is_given(poll_interval_ms):
+ headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+ while True:
+ response = await self.with_raw_response.retrieve(
+ file_id,
+ vector_store_id=vector_store_id,
+ extra_headers=headers,
+ )
+
+ file = response.parse()
+ if file.status == "in_progress":
+ if not is_given(poll_interval_ms):
+ from_header = response.headers.get("openai-poll-after-ms")
+ if from_header is not None:
+ poll_interval_ms = int(from_header)
+ else:
+ poll_interval_ms = 1000
+
+ await self._sleep(poll_interval_ms / 1000)
+ elif file.status == "cancelled" or file.status == "completed" or file.status == "failed":
+ return file
+ else:
+ if TYPE_CHECKING: # type: ignore[unreachable]
+ assert_never(file.status)
+ else:
+ return file
+
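+ # Usage sketch (illustrative, not part of the generated module): the polling
+ # helpers return the file even when processing fails, so the status must be
+ # inspected. Assumes an `AsyncOpenAI` client bound to `client` and existing
+ # ids; `poll_interval_ms` simply overrides the server-suggested interval.
+ #
+ #     vs_file = await client.vector_stores.files.create_and_poll(
+ #         "file-abc123",
+ #         vector_store_id="vs_abc123",
+ #         poll_interval_ms=500,
+ #     )
+ #     if vs_file.status == "failed":
+ #         print("processing failed:", vs_file.last_error)
+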
+ async def upload(
+ self,
+ *,
+ vector_store_id: str,
+ file: FileTypes,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Upload a file to the `files` API and then attach it to the given vector store.
+
+ Note that the file will be processed asynchronously (you can use the polling
+ helper method to wait for processing to complete).
+ """
+ file_obj = await self._client.files.create(file=file, purpose="assistants")
+ return await self.create(
+ vector_store_id=vector_store_id, file_id=file_obj.id, chunking_strategy=chunking_strategy
+ )
+
+ async def upload_and_poll(
+ self,
+ *,
+ vector_store_id: str,
+ file: FileTypes,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Add a file to a vector store and poll until processing is complete."""
+ file_obj = await self._client.files.create(file=file, purpose="assistants")
+ return await self.create_and_poll(
+ vector_store_id=vector_store_id,
+ file_id=file_obj.id,
+ poll_interval_ms=poll_interval_ms,
+ chunking_strategy=chunking_strategy,
+ )
+
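+ # Usage sketch (illustrative, not part of the generated module): uploading a
+ # local file and waiting for it to be indexed. `upload_and_poll` first creates
+ # a Files-API object with purpose "assistants" and then reuses
+ # `create_and_poll` above. The path is a placeholder.
+ #
+ #     with open("notes.pdf", "rb") as fh:
+ #         vs_file = await client.vector_stores.files.upload_and_poll(
+ #             vector_store_id="vs_abc123",
+ #             file=fh,
+ #         )
+ #     print(vs_file.status)
+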
+ def content(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[FileContentResponse, AsyncPage[FileContentResponse]]:
+ """
+ Retrieve the parsed contents of a vector store file.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ f"/vector_stores/{vector_store_id}/files/{file_id}/content",
+ page=AsyncPage[FileContentResponse],
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ model=FileContentResponse,
+ )
+
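+ # Usage sketch (illustrative, not part of the generated module): reading the
+ # parsed chunks of a processed file. The `text` attribute on each chunk is an
+ # assumption based on the FileContentResponse model; adjust to the actual
+ # fields if they differ.
+ #
+ #     async for chunk in client.vector_stores.files.content(
+ #         "file-abc123",
+ #         vector_store_id="vs_abc123",
+ #     ):
+ #         print(chunk.text)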
+
+class FilesWithRawResponse:
+ def __init__(self, files: Files) -> None:
+ self._files = files
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ files.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ files.retrieve,
+ )
+ self.update = _legacy_response.to_raw_response_wrapper(
+ files.update,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ files.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ files.delete,
+ )
+ self.content = _legacy_response.to_raw_response_wrapper(
+ files.content,
+ )
+
+
+class AsyncFilesWithRawResponse:
+ def __init__(self, files: AsyncFiles) -> None:
+ self._files = files
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ files.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ files.retrieve,
+ )
+ self.update = _legacy_response.async_to_raw_response_wrapper(
+ files.update,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ files.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ files.delete,
+ )
+ self.content = _legacy_response.async_to_raw_response_wrapper(
+ files.content,
+ )
+
+
+class FilesWithStreamingResponse:
+ def __init__(self, files: Files) -> None:
+ self._files = files
+
+ self.create = to_streamed_response_wrapper(
+ files.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ files.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ files.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ files.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ files.delete,
+ )
+ self.content = to_streamed_response_wrapper(
+ files.content,
+ )
+
+
+class AsyncFilesWithStreamingResponse:
+ def __init__(self, files: AsyncFiles) -> None:
+ self._files = files
+
+ self.create = async_to_streamed_response_wrapper(
+ files.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ files.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ files.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ files.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ files.delete,
+ )
+ self.content = async_to_streamed_response_wrapper(
+ files.content,
+ )
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/vector_stores.py b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/vector_stores.py
new file mode 100644
index 00000000..aaa6ed27
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/vector_stores.py
@@ -0,0 +1,868 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+import httpx
+
+from ... import _legacy_response
+from .files import (
+ Files,
+ AsyncFiles,
+ FilesWithRawResponse,
+ AsyncFilesWithRawResponse,
+ FilesWithStreamingResponse,
+ AsyncFilesWithStreamingResponse,
+)
+from ...types import (
+ FileChunkingStrategyParam,
+ vector_store_list_params,
+ vector_store_create_params,
+ vector_store_search_params,
+ vector_store_update_params,
+)
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import (
+ maybe_transform,
+ async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
+from .file_batches import (
+ FileBatches,
+ AsyncFileBatches,
+ FileBatchesWithRawResponse,
+ AsyncFileBatchesWithRawResponse,
+ FileBatchesWithStreamingResponse,
+ AsyncFileBatchesWithStreamingResponse,
+)
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.vector_store import VectorStore
+from ...types.vector_store_deleted import VectorStoreDeleted
+from ...types.shared_params.metadata import Metadata
+from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
+from ...types.vector_store_search_response import VectorStoreSearchResponse
+
+__all__ = ["VectorStores", "AsyncVectorStores"]
+
+
+class VectorStores(SyncAPIResource):
+ @cached_property
+ def files(self) -> Files:
+ return Files(self._client)
+
+ @cached_property
+ def file_batches(self) -> FileBatches:
+ return FileBatches(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> VectorStoresWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return VectorStoresWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> VectorStoresWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return VectorStoresWithStreamingResponse(self)
+
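+ # Usage sketch (illustrative, not part of the generated module): the two
+ # wrappers documented above. `.with_raw_response` returns the HTTP response and
+ # defers parsing to `.parse()`; `.with_streaming_response` is used as a context
+ # manager so the body is read lazily. Assumes `client = OpenAI()`.
+ #
+ #     raw = client.vector_stores.with_raw_response.retrieve("vs_abc123")
+ #     print(raw.headers.get("x-request-id"))
+ #     vector_store = raw.parse()
+ #
+ #     with client.vector_stores.with_streaming_response.retrieve(
+ #         "vs_abc123"
+ #     ) as response:
+ #         print(response.headers.get("content-type"))
+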
+ def create(
+ self,
+ *,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+ file_ids: List[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Create a vector store.
+
+ Args:
+ chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ strategy. Only applicable if `file_ids` is non-empty.
+
+ expires_after: The expiration policy for a vector store.
+
+ file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ name: The name of the vector store.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._post(
+ "/vector_stores",
+ body=maybe_transform(
+ {
+ "chunking_strategy": chunking_strategy,
+ "expires_after": expires_after,
+ "file_ids": file_ids,
+ "metadata": metadata,
+ "name": name,
+ },
+ vector_store_create_params.VectorStoreCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
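+ # Usage sketch (illustrative, not part of the generated module): creating a
+ # store with the synchronous client. Assumes `client = OpenAI()`; the
+ # `expires_after` shape follows the ExpiresAfter param referenced in the
+ # signature, and the ids/names are placeholders.
+ #
+ #     vector_store = client.vector_stores.create(
+ #         name="support-docs",
+ #         file_ids=["file-abc123"],
+ #         expires_after={"anchor": "last_active_at", "days": 7},
+ #         metadata={"team": "support"},
+ #     )
+ #     print(vector_store.id)
+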
+ def retrieve(
+ self,
+ vector_store_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Retrieves a vector store.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get(
+ f"/vector_stores/{vector_store_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
+ def update(
+ self,
+ vector_store_id: str,
+ *,
+ expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+ name: Optional[str] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Modifies a vector store.
+
+ Args:
+ expires_after: The expiration policy for a vector store.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ name: The name of the vector store.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._post(
+ f"/vector_stores/{vector_store_id}",
+ body=maybe_transform(
+ {
+ "expires_after": expires_after,
+ "metadata": metadata,
+ "name": name,
+ },
+ vector_store_update_params.VectorStoreUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ before: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncCursorPage[VectorStore]:
+ """Returns a list of vector stores.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ "/vector_stores",
+ page=SyncCursorPage[VectorStore],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "limit": limit,
+ "order": order,
+ },
+ vector_store_list_params.VectorStoreListParams,
+ ),
+ ),
+ model=VectorStore,
+ )
+
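+ # Usage sketch (illustrative, not part of the generated module): the
+ # SyncCursorPage returned by `list` can be iterated directly and the client
+ # follows the `after` cursor across pages automatically.
+ #
+ #     for store in client.vector_stores.list(limit=20, order="desc"):
+ #         print(store.id, store.name)
+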
+ def delete(
+ self,
+ vector_store_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreDeleted:
+ """
+ Delete a vector store.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._delete(
+ f"/vector_stores/{vector_store_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreDeleted,
+ )
+
+ def search(
+ self,
+ vector_store_id: str,
+ *,
+ query: Union[str, List[str]],
+ filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN,
+ max_num_results: int | NotGiven = NOT_GIVEN,
+ ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN,
+ rewrite_query: bool | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncPage[VectorStoreSearchResponse]:
+ """
+ Search a vector store for relevant chunks based on a query and file attributes
+ filter.
+
+ Args:
+ query: A query string for the search.
+
+ filters: A filter to apply based on file attributes.
+
+ max_num_results: The maximum number of results to return. This number should be between 1 and 50
+ inclusive.
+
+ ranking_options: Ranking options for search.
+
+ rewrite_query: Whether to rewrite the natural language query for vector search.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ f"/vector_stores/{vector_store_id}/search",
+ page=SyncPage[VectorStoreSearchResponse],
+ body=maybe_transform(
+ {
+ "query": query,
+ "filters": filters,
+ "max_num_results": max_num_results,
+ "ranking_options": ranking_options,
+ "rewrite_query": rewrite_query,
+ },
+ vector_store_search_params.VectorStoreSearchParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ model=VectorStoreSearchResponse,
+ method="post",
+ )
+
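+ # Usage sketch (illustrative, not part of the generated module): a simple
+ # search over an existing store. The result fields used below (`score`,
+ # `filename`) are assumptions based on the VectorStoreSearchResponse model.
+ #
+ #     results = client.vector_stores.search(
+ #         "vs_abc123",
+ #         query="How do I rotate an API key?",
+ #         max_num_results=5,
+ #     )
+ #     for hit in results:
+ #         print(hit.score, hit.filename)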
+
+class AsyncVectorStores(AsyncAPIResource):
+ @cached_property
+ def files(self) -> AsyncFiles:
+ return AsyncFiles(self._client)
+
+ @cached_property
+ def file_batches(self) -> AsyncFileBatches:
+ return AsyncFileBatches(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncVectorStoresWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncVectorStoresWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncVectorStoresWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncVectorStoresWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+ expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+ file_ids: List[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Create a vector store.
+
+ Args:
+ chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ strategy. Only applicable if `file_ids` is non-empty.
+
+ expires_after: The expiration policy for a vector store.
+
+ file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ name: The name of the vector store.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._post(
+ "/vector_stores",
+ body=await async_maybe_transform(
+ {
+ "chunking_strategy": chunking_strategy,
+ "expires_after": expires_after,
+ "file_ids": file_ids,
+ "metadata": metadata,
+ "name": name,
+ },
+ vector_store_create_params.VectorStoreCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
+ async def retrieve(
+ self,
+ vector_store_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Retrieves a vector store.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._get(
+ f"/vector_stores/{vector_store_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
+ async def update(
+ self,
+ vector_store_id: str,
+ *,
+ expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+ name: Optional[str] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Modifies a vector store.
+
+ Args:
+ expires_after: The expiration policy for a vector store.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ name: The name of the vector store.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._post(
+ f"/vector_stores/{vector_store_id}",
+ body=await async_maybe_transform(
+ {
+ "expires_after": expires_after,
+ "metadata": metadata,
+ "name": name,
+ },
+ vector_store_update_params.VectorStoreUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ before: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[VectorStore, AsyncCursorPage[VectorStore]]:
+ """Returns a list of vector stores.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ "/vector_stores",
+ page=AsyncCursorPage[VectorStore],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "limit": limit,
+ "order": order,
+ },
+ vector_store_list_params.VectorStoreListParams,
+ ),
+ ),
+ model=VectorStore,
+ )
+
+ async def delete(
+ self,
+ vector_store_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreDeleted:
+ """
+ Delete a vector store.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._delete(
+ f"/vector_stores/{vector_store_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreDeleted,
+ )
+
+ def search(
+ self,
+ vector_store_id: str,
+ *,
+ query: Union[str, List[str]],
+ filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN,
+ max_num_results: int | NotGiven = NOT_GIVEN,
+ ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN,
+ rewrite_query: bool | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[VectorStoreSearchResponse, AsyncPage[VectorStoreSearchResponse]]:
+ """
+ Search a vector store for relevant chunks based on a query and file attributes
+ filter.
+
+ Args:
+ query: A query string for the search.
+
+ filters: A filter to apply based on file attributes.
+
+ max_num_results: The maximum number of results to return. This number should be between 1 and 50
+ inclusive.
+
+ ranking_options: Ranking options for search.
+
+ rewrite_query: Whether to rewrite the natural language query for vector search.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ f"/vector_stores/{vector_store_id}/search",
+ page=AsyncPage[VectorStoreSearchResponse],
+ body=maybe_transform(
+ {
+ "query": query,
+ "filters": filters,
+ "max_num_results": max_num_results,
+ "ranking_options": ranking_options,
+ "rewrite_query": rewrite_query,
+ },
+ vector_store_search_params.VectorStoreSearchParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ model=VectorStoreSearchResponse,
+ method="post",
+ )
+
+
+class VectorStoresWithRawResponse:
+ def __init__(self, vector_stores: VectorStores) -> None:
+ self._vector_stores = vector_stores
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ vector_stores.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ vector_stores.retrieve,
+ )
+ self.update = _legacy_response.to_raw_response_wrapper(
+ vector_stores.update,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ vector_stores.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ vector_stores.delete,
+ )
+ self.search = _legacy_response.to_raw_response_wrapper(
+ vector_stores.search,
+ )
+
+ @cached_property
+ def files(self) -> FilesWithRawResponse:
+ return FilesWithRawResponse(self._vector_stores.files)
+
+ @cached_property
+ def file_batches(self) -> FileBatchesWithRawResponse:
+ return FileBatchesWithRawResponse(self._vector_stores.file_batches)
+
+
+class AsyncVectorStoresWithRawResponse:
+ def __init__(self, vector_stores: AsyncVectorStores) -> None:
+ self._vector_stores = vector_stores
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ vector_stores.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ vector_stores.retrieve,
+ )
+ self.update = _legacy_response.async_to_raw_response_wrapper(
+ vector_stores.update,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ vector_stores.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ vector_stores.delete,
+ )
+ self.search = _legacy_response.async_to_raw_response_wrapper(
+ vector_stores.search,
+ )
+
+ @cached_property
+ def files(self) -> AsyncFilesWithRawResponse:
+ return AsyncFilesWithRawResponse(self._vector_stores.files)
+
+ @cached_property
+ def file_batches(self) -> AsyncFileBatchesWithRawResponse:
+ return AsyncFileBatchesWithRawResponse(self._vector_stores.file_batches)
+
+
+class VectorStoresWithStreamingResponse:
+ def __init__(self, vector_stores: VectorStores) -> None:
+ self._vector_stores = vector_stores
+
+ self.create = to_streamed_response_wrapper(
+ vector_stores.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ vector_stores.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ vector_stores.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ vector_stores.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ vector_stores.delete,
+ )
+ self.search = to_streamed_response_wrapper(
+ vector_stores.search,
+ )
+
+ @cached_property
+ def files(self) -> FilesWithStreamingResponse:
+ return FilesWithStreamingResponse(self._vector_stores.files)
+
+ @cached_property
+ def file_batches(self) -> FileBatchesWithStreamingResponse:
+ return FileBatchesWithStreamingResponse(self._vector_stores.file_batches)
+
+
+class AsyncVectorStoresWithStreamingResponse:
+ def __init__(self, vector_stores: AsyncVectorStores) -> None:
+ self._vector_stores = vector_stores
+
+ self.create = async_to_streamed_response_wrapper(
+ vector_stores.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ vector_stores.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ vector_stores.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ vector_stores.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ vector_stores.delete,
+ )
+ self.search = async_to_streamed_response_wrapper(
+ vector_stores.search,
+ )
+
+ @cached_property
+ def files(self) -> AsyncFilesWithStreamingResponse:
+ return AsyncFilesWithStreamingResponse(self._vector_stores.files)
+
+ @cached_property
+ def file_batches(self) -> AsyncFileBatchesWithStreamingResponse:
+ return AsyncFileBatchesWithStreamingResponse(self._vector_stores.file_batches)