about summary refs log tree commit diff
path: root/.venv/lib/python3.12/site-packages/sdk/asnyc_methods
diff options
context:
space:
mode:
Diffstat (limited to '.venv/lib/python3.12/site-packages/sdk/asnyc_methods')
-rw-r--r--.venv/lib/python3.12/site-packages/sdk/asnyc_methods/__init__.py23
-rw-r--r--.venv/lib/python3.12/site-packages/sdk/asnyc_methods/chunks.py186
-rw-r--r--.venv/lib/python3.12/site-packages/sdk/asnyc_methods/collections.py343
-rw-r--r--.venv/lib/python3.12/site-packages/sdk/asnyc_methods/conversations.py310
-rw-r--r--.venv/lib/python3.12/site-packages/sdk/asnyc_methods/documents.py706
-rw-r--r--.venv/lib/python3.12/site-packages/sdk/asnyc_methods/graphs.py614
-rw-r--r--.venv/lib/python3.12/site-packages/sdk/asnyc_methods/indices.py116
-rw-r--r--.venv/lib/python3.12/site-packages/sdk/asnyc_methods/prompts.py128
-rw-r--r--.venv/lib/python3.12/site-packages/sdk/asnyc_methods/retrieval.py394
-rw-r--r--.venv/lib/python3.12/site-packages/sdk/asnyc_methods/system.py43
-rw-r--r--.venv/lib/python3.12/site-packages/sdk/asnyc_methods/users.py589
11 files changed, 3452 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/__init__.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/__init__.py
new file mode 100644
index 00000000..efa520d6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/__init__.py
@@ -0,0 +1,23 @@
+from .chunks import ChunksSDK
+from .collections import CollectionsSDK
+from .conversations import ConversationsSDK
+from .documents import DocumentsSDK
+from .graphs import GraphsSDK
+from .indices import IndicesSDK
+from .prompts import PromptsSDK
+from .retrieval import RetrievalSDK
+from .system import SystemSDK
+from .users import UsersSDK
+
+__all__ = [
+    "ChunksSDK",
+    "CollectionsSDK",
+    "ConversationsSDK",
+    "DocumentsSDK",
+    "GraphsSDK",
+    "IndicesSDK",
+    "PromptsSDK",
+    "RetrievalSDK",
+    "SystemSDK",
+    "UsersSDK",
+]
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/chunks.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/chunks.py
new file mode 100644
index 00000000..a64142d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/chunks.py
@@ -0,0 +1,186 @@
+import json
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedChunkResponse,
+    WrappedChunksResponse,
+    WrappedVectorSearchResponse,
+)
+
+from ..models import SearchSettings
+
+
class ChunksSDK:
    """SDK for interacting with chunks in the v3 API."""

    def __init__(self, client):
        # `client` is the async HTTP client shared by all v3 SDK namespaces;
        # it provides `_make_request(method, path, ...)`.
        self.client = client

    async def update(
        self,
        chunk: dict[str, str],
    ) -> WrappedChunkResponse:
        """Update an existing chunk.

        Args:
            chunk (dict[str, str]): Chunk to update. Should contain:
                - id: UUID of the chunk
                - metadata: Dictionary of metadata

        Returns:
            WrappedChunkResponse

        Raises:
            KeyError: If ``chunk`` does not contain an ``id`` key.
        """
        response_dict = await self.client._make_request(
            "POST",
            f"chunks/{str(chunk['id'])}",
            json=chunk,
            version="v3",
        )

        return WrappedChunkResponse(**response_dict)

    async def retrieve(
        self,
        id: str | UUID,
    ) -> WrappedChunkResponse:
        """Get a specific chunk.

        Args:
            id (str | UUID): Chunk ID to retrieve

        Returns:
            WrappedChunkResponse
        """
        # str(id) so UUID objects render consistently, matching the other
        # endpoints in this class.
        response_dict = await self.client._make_request(
            "GET",
            f"chunks/{str(id)}",
            version="v3",
        )

        return WrappedChunkResponse(**response_dict)

    # FIXME: Is this the most appropriate name for this method?
    async def list_by_document(
        self,
        document_id: str | UUID,
        metadata_filter: Optional[dict] = None,
        offset: Optional[int] = 0,
        limit: Optional[int] = 100,
    ) -> WrappedChunksResponse:
        """List chunks for a specific document.

        Args:
            document_id (str | UUID): Document ID to get chunks for
            metadata_filter (Optional[dict]): Filter chunks by metadata
            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.

        Returns:
            WrappedChunksResponse
        """
        params: dict = {
            "offset": offset,
            "limit": limit,
        }
        # The filter is JSON-serialized because it travels as a query
        # string parameter, not as a request body.
        if metadata_filter:
            params["metadata_filter"] = json.dumps(metadata_filter)

        response_dict = await self.client._make_request(
            "GET",
            f"documents/{str(document_id)}/chunks",
            params=params,
            version="v3",
        )

        return WrappedChunksResponse(**response_dict)

    async def delete(
        self,
        id: str | UUID,
    ) -> WrappedBooleanResponse:
        """Delete a specific chunk.

        Args:
            id (str | UUID): ID of chunk to delete

        Returns:
            WrappedBooleanResponse
        """
        response_dict = await self.client._make_request(
            "DELETE",
            f"chunks/{str(id)}",
            version="v3",
        )

        return WrappedBooleanResponse(**response_dict)

    async def list(
        self,
        include_vectors: bool = False,
        metadata_filter: Optional[dict] = None,
        offset: Optional[int] = 0,
        limit: Optional[int] = 100,
        filters: Optional[dict] = None,
    ) -> WrappedChunksResponse:
        """List chunks with pagination support.

        Args:
            include_vectors (bool, optional): Include vector data in response. Defaults to False.
            metadata_filter (Optional[dict], optional): Filter by metadata. Defaults to None.
            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
            filters (Optional[dict], optional): Additional filters to apply, JSON-serialized
                into the query string. Defaults to None.

        Returns:
            WrappedChunksResponse
        """
        params: dict = {
            "offset": offset,
            "limit": limit,
            "include_vectors": include_vectors,
        }
        # Both filter arguments are forwarded independently; the backend
        # decides how they combine.
        if filters:
            params["filters"] = json.dumps(filters)

        if metadata_filter:
            params["metadata_filter"] = json.dumps(metadata_filter)

        response_dict = await self.client._make_request(
            "GET",
            "chunks",
            params=params,
            version="v3",
        )

        return WrappedChunksResponse(**response_dict)

    async def search(
        self,
        query: str,
        search_settings: Optional[dict | SearchSettings] = None,
    ) -> WrappedVectorSearchResponse:
        """Conduct a vector and/or graph search.

        Args:
            query (str): The query to search for.
            search_settings (Optional[dict | SearchSettings]): Vector search settings.

        Returns:
            WrappedVectorSearchResponse
        """
        # Normalize a SearchSettings model to a plain dict so it is JSON
        # serializable in the request body.
        if search_settings and not isinstance(search_settings, dict):
            search_settings = search_settings.model_dump()

        data: dict[str, Any] = {
            "query": query,
            "search_settings": search_settings,
        }
        response_dict = await self.client._make_request(
            "POST",
            "chunks/search",
            json=data,
            version="v3",
        )

        return WrappedVectorSearchResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/collections.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/collections.py
new file mode 100644
index 00000000..a768b72e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/collections.py
@@ -0,0 +1,343 @@
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedCollectionResponse,
+    WrappedCollectionsResponse,
+    WrappedDocumentsResponse,
+    WrappedGenericMessageResponse,
+    WrappedUsersResponse,
+)
+
+
class CollectionsSDK:
    """SDK for interacting with collections in the v3 API."""

    def __init__(self, client):
        # `client` is the async HTTP client shared by all v3 SDK namespaces.
        self.client = client

    async def create(
        self,
        name: str,
        description: Optional[str] = None,
    ) -> WrappedCollectionResponse:
        """Create a new collection.

        Args:
            name (str): Name of the collection
            description (Optional[str]): Description of the collection

        Returns:
            WrappedCollectionResponse
        """
        # `description` is sent even when None; presumably the API treats a
        # null description as "unset" — TODO(review) confirm server behavior.
        data: dict[str, Any] = {"name": name, "description": description}
        response_dict = await self.client._make_request(
            "POST",
            "collections",
            json=data,
            version="v3",
        )

        return WrappedCollectionResponse(**response_dict)

    async def list(
        self,
        ids: Optional[list[str | UUID]] = None,
        offset: Optional[int] = 0,
        limit: Optional[int] = 100,
    ) -> WrappedCollectionsResponse:
        """List collections with pagination and filtering options.

        Args:
            ids (Optional[list[str | UUID]]): Filter collections by ids
            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.

        Returns:
            WrappedCollectionsResponse
        """
        params: dict = {
            "offset": offset,
            "limit": limit,
        }
        if ids:
            params["ids"] = ids

        response_dict = await self.client._make_request(
            "GET", "collections", params=params, version="v3"
        )

        return WrappedCollectionsResponse(**response_dict)

    async def retrieve(
        self,
        id: str | UUID,
    ) -> WrappedCollectionResponse:
        """Get detailed information about a specific collection.

        Args:
            id (str | UUID): Collection ID to retrieve

        Returns:
            WrappedCollectionResponse
        """
        response_dict = await self.client._make_request(
            "GET", f"collections/{str(id)}", version="v3"
        )

        return WrappedCollectionResponse(**response_dict)

    async def update(
        self,
        id: str | UUID,
        name: Optional[str] = None,
        description: Optional[str] = None,
        generate_description: Optional[bool] = False,
    ) -> WrappedCollectionResponse:
        """Update collection information.

        Args:
            id (str | UUID): Collection ID to update
            name (Optional[str]): Optional new name for the collection
            description (Optional[str]): Optional new description for the collection
            generate_description (Optional[bool]): Whether to generate a new synthetic description for the collection.

        Returns:
            WrappedCollectionResponse
        """
        # Only the fields actually supplied are included, so an omitted
        # field is left untouched server-side.
        data: dict[str, Any] = {}
        if name is not None:
            data["name"] = name
        if description is not None:
            data["description"] = description
        if generate_description:
            # NOTE(review): the flag is serialized as the string "True"
            # rather than a JSON boolean — confirm the API expects this.
            data["generate_description"] = str(generate_description)

        response_dict = await self.client._make_request(
            "POST",
            f"collections/{str(id)}",
            json=data,
            version="v3",
        )

        return WrappedCollectionResponse(**response_dict)

    async def delete(
        self,
        id: str | UUID,
    ) -> WrappedBooleanResponse:
        """Delete a collection.

        Args:
            id (str | UUID): Collection ID to delete

        Returns:
            WrappedBooleanResponse
        """
        response_dict = await self.client._make_request(
            "DELETE", f"collections/{str(id)}", version="v3"
        )

        return WrappedBooleanResponse(**response_dict)

    async def list_documents(
        self,
        id: str | UUID,
        offset: Optional[int] = 0,
        limit: Optional[int] = 100,
    ) -> WrappedDocumentsResponse:
        """List all documents in a collection.

        Args:
            id (str | UUID): Collection ID
            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.

        Returns:
            WrappedDocumentsResponse
        """
        params: dict = {
            "offset": offset,
            "limit": limit,
        }

        response_dict = await self.client._make_request(
            "GET",
            f"collections/{str(id)}/documents",
            params=params,
            version="v3",
        )

        return WrappedDocumentsResponse(**response_dict)

    async def add_document(
        self,
        id: str | UUID,
        document_id: str | UUID,
    ) -> WrappedGenericMessageResponse:
        """Add a document to a collection.

        Args:
            id (str | UUID): Collection ID
            document_id (str | UUID): Document ID to add

        Returns:
            WrappedGenericMessageResponse
        """
        response_dict = await self.client._make_request(
            "POST",
            f"collections/{str(id)}/documents/{str(document_id)}",
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def remove_document(
        self,
        id: str | UUID,
        document_id: str | UUID,
    ) -> WrappedBooleanResponse:
        """Remove a document from a collection.

        Args:
            id (str | UUID): Collection ID
            document_id (str | UUID): Document ID to remove

        Returns:
            WrappedBooleanResponse
        """
        response_dict = await self.client._make_request(
            "DELETE",
            f"collections/{str(id)}/documents/{str(document_id)}",
            version="v3",
        )

        return WrappedBooleanResponse(**response_dict)

    async def list_users(
        self,
        id: str | UUID,
        offset: Optional[int] = 0,
        limit: Optional[int] = 100,
    ) -> WrappedUsersResponse:
        """List all users in a collection.

        Args:
            id (str | UUID): Collection ID
            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.

        Returns:
            WrappedUsersResponse
        """
        params: dict = {
            "offset": offset,
            "limit": limit,
        }

        response_dict = await self.client._make_request(
            "GET", f"collections/{str(id)}/users", params=params, version="v3"
        )

        return WrappedUsersResponse(**response_dict)

    async def add_user(
        self,
        id: str | UUID,
        user_id: str | UUID,
    ) -> WrappedBooleanResponse:
        """Add a user to a collection.

        Args:
            id (str | UUID): Collection ID
            user_id (str | UUID): User ID to add

        Returns:
            WrappedBooleanResponse
        """
        response_dict = await self.client._make_request(
            "POST", f"collections/{str(id)}/users/{str(user_id)}", version="v3"
        )

        return WrappedBooleanResponse(**response_dict)

    async def remove_user(
        self,
        id: str | UUID,
        user_id: str | UUID,
    ) -> WrappedBooleanResponse:
        """Remove a user from a collection.

        Args:
            id (str | UUID): Collection ID
            user_id (str | UUID): User ID to remove

        Returns:
            WrappedBooleanResponse
        """
        response_dict = await self.client._make_request(
            "DELETE",
            f"collections/{str(id)}/users/{str(user_id)}",
            version="v3",
        )

        return WrappedBooleanResponse(**response_dict)

    async def extract(
        self,
        id: str | UUID,
        settings: Optional[dict] = None,
        run_with_orchestration: Optional[bool] = True,
    ) -> WrappedGenericMessageResponse:
        """Extract entities and relationships from documents in a collection.

        Args:
            id (str | UUID): Collection ID to extract from
            settings (Optional[dict]): Settings for the entities and relationships extraction process
            run_with_orchestration (Optional[bool]): Whether to run the extraction process with orchestration.
                Defaults to True

        Returns:
            WrappedGenericMessageResponse
        """
        params = {"run_with_orchestration": run_with_orchestration}

        data: dict[str, Any] = {}
        if settings is not None:
            data["settings"] = settings

        # `data or None` omits the JSON body entirely when no settings
        # were provided.
        response_dict = await self.client._make_request(
            "POST",
            f"collections/{str(id)}/extract",
            params=params,
            json=data or None,
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def retrieve_by_name(
        self, name: str, owner_id: Optional[str] = None
    ) -> WrappedCollectionResponse:
        """Retrieve a collection by its name.

        For non-superusers, the backend will use the authenticated user's ID.
        For superusers, the caller must supply an owner_id to restrict the search.

        Args:
            name (str): The name of the collection to retrieve.
            owner_id (Optional[str]): The owner ID to restrict the search. Required for superusers.

        Returns:
            WrappedCollectionResponse
        """
        query_params: dict[str, Any] = {}
        if owner_id is not None:
            query_params["owner_id"] = owner_id

        response_dict = await self.client._make_request(
            "GET",
            f"collections/name/{name}",
            params=query_params,
            version="v3",
        )
        return WrappedCollectionResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/conversations.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/conversations.py
new file mode 100644
index 00000000..885f9fc5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/conversations.py
@@ -0,0 +1,310 @@
+from builtins import list as _list
+from pathlib import Path
+from typing import Any, Optional
+from uuid import UUID
+
+import aiofiles
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedConversationMessagesResponse,
+    WrappedConversationResponse,
+    WrappedConversationsResponse,
+    WrappedMessageResponse,
+)
+
+
class ConversationsSDK:
    """SDK for interacting with conversations in the v3 API."""

    def __init__(self, client):
        # `client` is the async HTTP client shared by all v3 SDK namespaces;
        # the CSV exports additionally use its raw `session` and `base_url`.
        self.client = client

    async def create(
        self,
        name: Optional[str] = None,
    ) -> WrappedConversationResponse:
        """Create a new conversation.

        Args:
            name (Optional[str]): Optional name for the new conversation

        Returns:
            WrappedConversationResponse
        """
        data: dict[str, Any] = {}
        if name:
            data["name"] = name

        # Fixed: previously this passed `data=data` (form-encoded body) while
        # every other mutating call in this SDK sends `json=...`; the v3 API
        # expects a JSON body.
        response_dict = await self.client._make_request(
            "POST",
            "conversations",
            json=data,
            version="v3",
        )

        return WrappedConversationResponse(**response_dict)

    async def list(
        self,
        ids: Optional[list[str | UUID]] = None,
        offset: Optional[int] = 0,
        limit: Optional[int] = 100,
    ) -> WrappedConversationsResponse:
        """List conversations with pagination and sorting options.

        Args:
            ids (Optional[list[str | UUID]]): List of conversation IDs to retrieve
            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.

        Returns:
            WrappedConversationsResponse
        """
        params: dict = {
            "offset": offset,
            "limit": limit,
        }
        if ids:
            params["ids"] = ids

        response_dict = await self.client._make_request(
            "GET",
            "conversations",
            params=params,
            version="v3",
        )

        return WrappedConversationsResponse(**response_dict)

    async def retrieve(
        self,
        id: str | UUID,
    ) -> WrappedConversationMessagesResponse:
        """Get detailed information about a specific conversation.

        Args:
            id (str | UUID): The ID of the conversation to retrieve

        Returns:
            WrappedConversationMessagesResponse
        """
        response_dict = await self.client._make_request(
            "GET",
            f"conversations/{str(id)}",
            version="v3",
        )

        return WrappedConversationMessagesResponse(**response_dict)

    async def update(
        self,
        id: str | UUID,
        name: str,
    ) -> WrappedConversationResponse:
        """Update an existing conversation.

        Args:
            id (str | UUID): The ID of the conversation to update
            name (str): The new name of the conversation

        Returns:
            WrappedConversationResponse
        """
        data: dict[str, Any] = {
            "name": name,
        }

        response_dict = await self.client._make_request(
            "POST",
            f"conversations/{str(id)}",
            json=data,
            version="v3",
        )

        return WrappedConversationResponse(**response_dict)

    async def delete(
        self,
        id: str | UUID,
    ) -> WrappedBooleanResponse:
        """Delete a conversation.

        Args:
            id (str | UUID): The ID of the conversation to delete

        Returns:
            WrappedBooleanResponse
        """
        response_dict = await self.client._make_request(
            "DELETE",
            f"conversations/{str(id)}",
            version="v3",
        )

        return WrappedBooleanResponse(**response_dict)

    async def add_message(
        self,
        id: str | UUID,
        content: str,
        role: str,
        metadata: Optional[dict] = None,
        parent_id: Optional[str] = None,
    ) -> WrappedMessageResponse:
        """Add a new message to a conversation.

        Args:
            id (str | UUID): The ID of the conversation to add the message to
            content (str): The content of the message
            role (str): The role of the message (e.g., "user" or "assistant")
            metadata (Optional[dict]): Additional metadata to attach to the message
            parent_id (Optional[str]): The ID of the parent message

        Returns:
            WrappedMessageResponse
        """
        data: dict[str, Any] = {
            "content": content,
            "role": role,
        }
        if parent_id:
            data["parent_id"] = parent_id
        if metadata:
            data["metadata"] = metadata

        response_dict = await self.client._make_request(
            "POST",
            f"conversations/{str(id)}/messages",
            json=data,
            version="v3",
        )

        return WrappedMessageResponse(**response_dict)

    async def update_message(
        self,
        id: str | UUID,
        message_id: str,
        content: Optional[str] = None,
        metadata: Optional[dict] = None,
    ) -> WrappedMessageResponse:
        """Update an existing message in a conversation.

        Args:
            id (str | UUID): The ID of the conversation containing the message
            message_id (str): The ID of the message to update
            content (Optional[str]): The new content of the message; sent as
                null when omitted
            metadata (Optional[dict]): Additional metadata to attach to the message

        Returns:
            WrappedMessageResponse
        """
        data: dict[str, Any] = {"content": content}
        if metadata:
            data["metadata"] = metadata
        response_dict = await self.client._make_request(
            "POST",
            f"conversations/{str(id)}/messages/{message_id}",
            json=data,
            version="v3",
        )

        return WrappedMessageResponse(**response_dict)

    async def _export_to_csv(
        self,
        endpoint: str,
        output_path: str | Path,
        columns: Optional[_list[str]] = None,
        filters: Optional[dict] = None,
        include_header: bool = True,
    ) -> None:
        """Stream a CSV export from *endpoint* directly to *output_path*.

        Shared implementation behind :meth:`export` and
        :meth:`export_messages`.

        Raises:
            ValueError: If the server responds with a non-200 status.
        """
        # Convert path to string if it's a Path object
        output_path = (
            str(output_path) if isinstance(output_path, Path) else output_path
        )

        # Prepare request data
        data: dict[str, Any] = {"include_header": include_header}
        if columns:
            data["columns"] = columns
        if filters:
            data["filters"] = filters

        # Stream response directly to file
        async with aiofiles.open(output_path, "wb") as f:
            async with self.client.session.post(
                f"{self.client.base_url}/v3/{endpoint}",
                json=data,
                headers={
                    "Accept": "text/csv",
                    **self.client._get_auth_header(),
                },
            ) as response:
                if response.status != 200:
                    raise ValueError(
                        f"Export failed with status {response.status}",
                        response,
                    )

                # iter_chunks() yields (payload_bytes, end_of_http_chunk)
                # tuples; only the payload is written to disk.
                async for chunk in response.content.iter_chunks():
                    if chunk:
                        await f.write(chunk[0])

    async def export(
        self,
        output_path: str | Path,
        columns: Optional[_list[str]] = None,
        filters: Optional[dict] = None,
        include_header: bool = True,
    ) -> None:
        """Export conversations to a CSV file, streaming the results directly
        to disk.

        Args:
            output_path (str | Path): Local path where the CSV file should be saved
            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
            filters (Optional[dict]): Optional filters to apply when selecting conversations
            include_header (bool): Whether to include column headers in the CSV (default: True)

        Returns:
            None
        """
        await self._export_to_csv(
            "conversations/export",
            output_path,
            columns=columns,
            filters=filters,
            include_header=include_header,
        )

    async def export_messages(
        self,
        output_path: str | Path,
        columns: Optional[_list[str]] = None,
        filters: Optional[dict] = None,
        include_header: bool = True,
    ) -> None:
        """Export messages to a CSV file, streaming the results directly to
        disk.

        Args:
            output_path (str | Path): Local path where the CSV file should be saved
            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
            filters (Optional[dict]): Optional filters to apply when selecting messages
            include_header (bool): Whether to include column headers in the CSV (default: True)

        Returns:
            None
        """
        await self._export_to_csv(
            "conversations/export_messages",
            output_path,
            columns=columns,
            filters=filters,
            include_header=include_header,
        )
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/documents.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/documents.py
new file mode 100644
index 00000000..b8a254ee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/documents.py
@@ -0,0 +1,706 @@
+import json
+from datetime import datetime
+from io import BytesIO
+from pathlib import Path
+from typing import Any, Optional
+from uuid import UUID
+
+import aiofiles
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedChunksResponse,
+    WrappedCollectionsResponse,
+    WrappedDocumentResponse,
+    WrappedDocumentSearchResponse,
+    WrappedDocumentsResponse,
+    WrappedEntitiesResponse,
+    WrappedGenericMessageResponse,
+    WrappedIngestionResponse,
+    WrappedRelationshipsResponse,
+)
+
+from ..models import IngestionMode, SearchMode, SearchSettings
+
+
+class DocumentsSDK:
+    """SDK for interacting with documents in the v3 API."""
+
    def __init__(self, client):
        # Shared R2R HTTP client used for every request issued by this SDK.
        self.client = client
+
+    async def create(
+        self,
+        file_path: Optional[str] = None,
+        raw_text: Optional[str] = None,
+        chunks: Optional[list[str]] = None,
+        id: Optional[str | UUID] = None,
+        ingestion_mode: Optional[str] = None,
+        collection_ids: Optional[list[str | UUID]] = None,
+        metadata: Optional[dict] = None,
+        ingestion_config: Optional[dict | IngestionMode] = None,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedIngestionResponse:
+        """Create a new document from either a file or content.
+
+        Args:
+            file_path (Optional[str]): The file to upload, if any
+            content (Optional[str]): Optional text content to upload, if no file path is provided
+            id (Optional[str | UUID]): Optional ID to assign to the document
+            collection_ids (Optional[list[str | UUID]]): Collection IDs to associate with the document. If none are provided, the document will be assigned to the user's default collection.
+            metadata (Optional[dict]): Optional metadata to assign to the document
+            ingestion_config (Optional[dict]): Optional ingestion configuration to use
+            run_with_orchestration (Optional[bool]): Whether to run with orchestration
+
+        Returns:
+            WrappedIngestionResponse
+        """
+        if not file_path and not raw_text and not chunks:
+            raise ValueError(
+                "Either `file_path`, `raw_text` or `chunks` must be provided"
+            )
+        if (
+            (file_path and raw_text)
+            or (file_path and chunks)
+            or (raw_text and chunks)
+        ):
+            raise ValueError(
+                "Only one of `file_path`, `raw_text` or `chunks` may be provided"
+            )
+
+        data: dict[str, Any] = {}
+        files = None
+
+        if id:
+            data["id"] = str(id)
+        if metadata:
+            data["metadata"] = json.dumps(metadata)
+        if ingestion_config:
+            if isinstance(ingestion_config, IngestionMode):
+                ingestion_config = {"mode": ingestion_config.value}
+            app_config: dict[str, Any] = (
+                {}
+                if isinstance(ingestion_config, dict)
+                else ingestion_config["app"]
+            )
+            ingestion_config = dict(ingestion_config)
+            ingestion_config["app"] = app_config
+            data["ingestion_config"] = json.dumps(ingestion_config)
+        if collection_ids:
+            collection_ids = [
+                str(collection_id) for collection_id in collection_ids
+            ]  # type: ignore
+            data["collection_ids"] = json.dumps(collection_ids)
+        if run_with_orchestration is not None:
+            data["run_with_orchestration"] = str(run_with_orchestration)
+        if ingestion_mode is not None:
+            data["ingestion_mode"] = ingestion_mode
+        if file_path:
+            # Create a new file instance that will remain open during the request
+            file_instance = open(file_path, "rb")
+            files = [
+                (
+                    "file",
+                    (file_path, file_instance, "application/octet-stream"),
+                )
+            ]
+            try:
+                response_dict = await self.client._make_request(
+                    "POST",
+                    "documents",
+                    data=data,
+                    files=files,
+                    version="v3",
+                )
+            finally:
+                # Ensure we close the file after the request is complete
+                file_instance.close()
+        elif raw_text:
+            data["raw_text"] = raw_text  # type: ignore
+            response_dict = await self.client._make_request(
+                "POST",
+                "documents",
+                data=data,
+                version="v3",
+            )
+        else:
+            data["chunks"] = json.dumps(chunks)
+            response_dict = await self.client._make_request(
+                "POST",
+                "documents",
+                data=data,
+                version="v3",
+            )
+
+        return WrappedIngestionResponse(**response_dict)
+
+    async def append_metadata(
+        self,
+        id: str | UUID,
+        metadata: list[dict],
+    ) -> WrappedDocumentResponse:
+        """Append metadata to a document.
+
+        Args:
+            id (str | UUID): ID of document to append metadata to
+            metadata (list[dict]): Metadata to append
+
+        Returns:
+            WrappedDocumentResponse
+        """
+        data = json.dumps(metadata)
+        response_dict = await self.client._make_request(
+            "PATCH",
+            f"documents/{str(id)}/metadata",
+            data=data,
+            version="v3",
+        )
+
+        return WrappedDocumentResponse(**response_dict)
+
+    async def replace_metadata(
+        self,
+        id: str | UUID,
+        metadata: list[dict],
+    ) -> WrappedDocumentResponse:
+        """Replace metadata for a document.
+
+        Args:
+            id (str | UUID): ID of document to replace metadata for
+            metadata (list[dict]): The metadata that will replace the existing metadata
+        """
+        data = json.dumps(metadata)
+        response_dict = await self.client._make_request(
+            "PUT",
+            f"documents/{str(id)}/metadata",
+            data=data,
+            version="v3",
+        )
+
+        return WrappedDocumentResponse(**response_dict)
+
+    async def retrieve(
+        self,
+        id: str | UUID,
+    ) -> WrappedDocumentResponse:
+        """Get a specific document by ID.
+
+        Args:
+            id (str | UUID): ID of document to retrieve
+
+        Returns:
+            WrappedDocumentResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"documents/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedDocumentResponse(**response_dict)
+
+    async def download(
+        self,
+        id: str | UUID,
+    ) -> BytesIO:
+        response = await self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/download",
+            version="v3",
+        )
+        if not isinstance(response, BytesIO):
+            raise ValueError("Expected BytesIO response")
+        return response
+
+    async def download_zip(
+        self,
+        document_ids: Optional[list[str | UUID]] = None,
+        start_date: Optional[datetime] = None,
+        end_date: Optional[datetime] = None,
+        output_path: Optional[str | Path] = None,
+    ) -> BytesIO | None:
+        """Download multiple documents as a zip file."""
+        params: dict[str, Any] = {}
+        if document_ids:
+            params["document_ids"] = [str(doc_id) for doc_id in document_ids]
+        if start_date:
+            params["start_date"] = start_date.isoformat()
+        if end_date:
+            params["end_date"] = end_date.isoformat()
+
+        response = await self.client._make_request(
+            "GET",
+            "documents/download_zip",
+            params=params,
+            version="v3",
+        )
+
+        if not isinstance(response, BytesIO):
+            raise ValueError("Expected BytesIO response")
+
+        if output_path:
+            output_path = (
+                Path(output_path)
+                if isinstance(output_path, str)
+                else output_path
+            )
+            async with aiofiles.open(output_path, "wb") as f:
+                await f.write(response.getvalue())
+            return None
+
+        return response
+
+    async def export(
+        self,
+        output_path: str | Path,
+        columns: Optional[list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export documents to a CSV file, streaming the results directly to
+        disk.
+
+        Args:
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting documents
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        async with aiofiles.open(output_path, "wb") as f:
+            async with self.client.session.post(
+                f"{self.client.base_url}/v3/documents/export",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            ) as response:
+                if response.status != 200:
+                    raise ValueError(
+                        f"Export failed with status {response.status}",
+                        response,
+                    )
+
+                async for chunk in response.content.iter_chunks():
+                    if chunk:
+                        await f.write(chunk[0])
+
    async def export_entities(
        self,
        id: str | UUID,
        output_path: str | Path,
        columns: Optional[list[str]] = None,
        filters: Optional[dict] = None,
        include_header: bool = True,
    ) -> None:
        """Export the entities of a document to a CSV file, streaming the
        results directly to disk.

        Args:
            id (str | UUID): ID of the document whose entities are exported
            output_path (str | Path): Local path where the CSV file should be saved
            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
            filters (Optional[dict]): Optional filters to apply when selecting entities
            include_header (bool): Whether to include column headers in the CSV (default: True)

        Returns:
            None

        Raises:
            ValueError: If the server responds with a non-200 status.
        """
        # Convert path to string if it's a Path object
        output_path = (
            str(output_path) if isinstance(output_path, Path) else output_path
        )

        # Prepare request data
        data: dict[str, Any] = {"include_header": include_header}
        if columns:
            data["columns"] = columns
        if filters:
            data["filters"] = filters

        # Stream response directly to file
        async with aiofiles.open(output_path, "wb") as f:
            async with self.client.session.post(
                f"{self.client.base_url}/v3/documents/{str(id)}/entities/export",
                json=data,
                headers={
                    "Accept": "text/csv",
                    **self.client._get_auth_header(),
                },
            ) as response:
                if response.status != 200:
                    raise ValueError(
                        f"Export failed with status {response.status}",
                        response,
                    )

                async for chunk in response.content.iter_chunks():
                    if chunk:
                        await f.write(chunk[0])
+
+    async def export_relationships(
+        self,
+        id: str | UUID,
+        output_path: str | Path,
+        columns: Optional[list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export document relationships to a CSV file, streaming the results
+        directly to disk.
+
+        Args:
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting documents
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        # Prepare request data
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        async with aiofiles.open(output_path, "wb") as f:
+            async with self.client.session.post(
+                f"{self.client.base_url}/v3/documents/{str(id)}/relationships/export",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            ) as response:
+                if response.status != 200:
+                    raise ValueError(
+                        f"Export failed with status {response.status}",
+                        response,
+                    )
+
+                async for chunk in response.content.iter_chunks():
+                    if chunk:
+                        await f.write(chunk[0])
+
+    async def delete(
+        self,
+        id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Delete a specific document.
+
+        Args:
+            id (str | UUID): ID of document to delete
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"documents/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def list_chunks(
+        self,
+        id: str | UUID,
+        include_vectors: Optional[bool] = False,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedChunksResponse:
+        """Get chunks for a specific document.
+
+        Args:
+            id (str | UUID): ID of document to retrieve chunks for
+            include_vectors (Optional[bool]): Whether to include vector embeddings in the response
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedChunksResponse
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+            "include_vectors": include_vectors,
+        }
+        response_dict = await self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/chunks",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedChunksResponse(**response_dict)
+
    async def list_collections(
        self,
        id: str | UUID,
        include_vectors: Optional[bool] = False,
        offset: Optional[int] = 0,
        limit: Optional[int] = 100,
    ) -> WrappedCollectionsResponse:
        """List collections for a specific document.

        Args:
            id (str | UUID): ID of document to retrieve collections for
            include_vectors (Optional[bool]): Accepted but currently unused;
                it is never forwarded to the server.
            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.

        Returns:
            WrappedCollectionsResponse
        """
        # NOTE(review): `include_vectors` is not included in `params` below —
        # likely copy-paste from list_chunks; confirm whether the server
        # supports it here or the parameter should be dropped.
        params = {
            "offset": offset,
            "limit": limit,
        }

        response_dict = await self.client._make_request(
            "GET",
            f"documents/{str(id)}/collections",
            params=params,
            version="v3",
        )

        return WrappedCollectionsResponse(**response_dict)
+
+    async def delete_by_filter(
+        self,
+        filters: dict,
+    ) -> WrappedBooleanResponse:
+        """Delete documents based on filters.
+
+        Args:
+            filters (dict): Filters to apply when selecting documents to delete
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        filters_json = json.dumps(filters)
+        response_dict = await self.client._make_request(
+            "DELETE",
+            "documents/by-filter",
+            data=filters_json,
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def extract(
+        self,
+        id: str | UUID,
+        settings: Optional[dict] = None,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedGenericMessageResponse:
+        """Extract entities and relationships from a document.
+
+        Args:
+            id (str, UUID): ID of document to extract from
+            settings (Optional[dict]): Settings for extraction process
+            run_with_orchestration (Optional[bool]): Whether to run with orchestration
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        data: dict[str, Any] = {}
+        if settings:
+            data["settings"] = json.dumps(settings)
+        if run_with_orchestration is not None:
+            data["run_with_orchestration"] = str(run_with_orchestration)
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"documents/{str(id)}/extract",
+            params=data,
+            version="v3",
+        )
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def list_entities(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+        include_embeddings: Optional[bool] = False,
+    ) -> WrappedEntitiesResponse:
+        """List entities extracted from a document.
+
+        Args:
+            id (str | UUID): ID of document to get entities from
+            offset (Optional[int]): Number of items to skip
+            limit (Optional[int]): Max number of items to return
+            include_embeddings (Optional[bool]): Whether to include embeddings
+
+        Returns:
+            WrappedEntitiesResponse
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+            "include_embeddings": include_embeddings,
+        }
+        response_dict = await self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/entities",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedEntitiesResponse(**response_dict)
+
+    async def list_relationships(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+        entity_names: Optional[list[str]] = None,
+        relationship_types: Optional[list[str]] = None,
+    ) -> WrappedRelationshipsResponse:
+        """List relationships extracted from a document.
+
+        Args:
+            id (str | UUID): ID of document to get relationships from
+            offset (Optional[int]): Number of items to skip
+            limit (Optional[int]): Max number of items to return
+            entity_names (Optional[list[str]]): Filter by entity names
+            relationship_types (Optional[list[str]]): Filter by relationship types
+
+        Returns:
+            WrappedRelationshipsResponse
+        """
+        params: dict[str, Any] = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if entity_names:
+            params["entity_names"] = entity_names
+        if relationship_types:
+            params["relationship_types"] = relationship_types
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/relationships",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedRelationshipsResponse(**response_dict)
+
+    async def list(
+        self,
+        ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedDocumentsResponse:
+        """List documents with pagination.
+
+        Args:
+            ids (Optional[list[str | UUID]]): Optional list of document IDs to filter by
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedDocumentsResponse
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if ids:
+            params["ids"] = [str(doc_id) for doc_id in ids]  # type: ignore
+
+        response_dict = await self.client._make_request(
+            "GET",
+            "documents",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedDocumentsResponse(**response_dict)
+
+    async def search(
+        self,
+        query: str,
+        search_mode: Optional[str | SearchMode] = "custom",
+        search_settings: Optional[dict | SearchSettings] = None,
+    ) -> WrappedDocumentSearchResponse:
+        """Conduct a vector and/or graph search.
+
+        Args:
+            query (str): The query to search for.
+            search_settings (Optional[dict, SearchSettings]]): Vector search settings.
+
+        Returns:
+            WrappedDocumentSearchResponse
+        """
+        if search_settings and not isinstance(search_settings, dict):
+            search_settings = search_settings.model_dump()
+        data: dict[str, Any] = {
+            "query": query,
+            "search_settings": search_settings,
+        }
+        if search_mode:
+            data["search_mode"] = search_mode
+
+        response_dict = await self.client._make_request(
+            "POST",
+            "documents/search",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedDocumentSearchResponse(**response_dict)
+
+    async def deduplicate(
+        self,
+        id: str | UUID,
+        settings: Optional[dict] = None,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedGenericMessageResponse:
+        """Deduplicate entities and relationships from a document.
+
+        Args:
+            id (str, UUID): ID of document to extract from
+            settings (Optional[dict]): Settings for extraction process
+            run_with_orchestration (Optional[bool]): Whether to run with orchestration
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        data: dict[str, Any] = {}
+        if settings:
+            data["settings"] = json.dumps(settings)
+        if run_with_orchestration is not None:
+            data["run_with_orchestration"] = str(run_with_orchestration)
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"documents/{str(id)}/deduplicate",
+            params=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/graphs.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/graphs.py
new file mode 100644
index 00000000..676aceaa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/graphs.py
@@ -0,0 +1,614 @@
+from builtins import list as _list
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedCommunitiesResponse,
+    WrappedCommunityResponse,
+    WrappedEntitiesResponse,
+    WrappedEntityResponse,
+    WrappedGenericMessageResponse,
+    WrappedGraphResponse,
+    WrappedGraphsResponse,
+    WrappedRelationshipResponse,
+    WrappedRelationshipsResponse,
+)
+
+
+class GraphsSDK:
+    """SDK for interacting with knowledge graphs in the v3 API."""
+
    def __init__(self, client):
        # Shared R2R HTTP client used for every request issued by this SDK.
        self.client = client
+
+    async def list(
+        self,
+        collection_ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedGraphsResponse:
+        """List graphs with pagination and filtering options.
+
+        Args:
+            ids (Optional[list[str | UUID]]): Filter graphs by ids
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedGraphsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if collection_ids:
+            params["collection_ids"] = collection_ids
+
+        response_dict = await self.client._make_request(
+            "GET", "graphs", params=params, version="v3"
+        )
+
+        return WrappedGraphsResponse(**response_dict)
+
+    async def retrieve(
+        self,
+        collection_id: str | UUID,
+    ) -> WrappedGraphResponse:
+        """Get detailed information about a specific graph.
+
+        Args:
+            collection_id (str | UUID): Graph ID to retrieve
+
+        Returns:
+            WrappedGraphResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET", f"graphs/{str(collection_id)}", version="v3"
+        )
+
+        return WrappedGraphResponse(**response_dict)
+
+    async def reset(
+        self,
+        collection_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Deletes a graph and all its associated data.
+
+        This endpoint permanently removes the specified graph along with all
+        entities and relationships that belong to only this graph.
+
+        Entities and relationships extracted from documents are not deleted.
+
+        Args:
+            collection_id (str | UUID): Graph ID to reset
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "POST", f"graphs/{str(collection_id)}/reset", version="v3"
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def update(
+        self,
+        collection_id: str | UUID,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+    ) -> WrappedGraphResponse:
+        """Update graph information.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            name (Optional[str]): Optional new name for the graph
+            description (Optional[str]): Optional new description for the graph
+
+        Returns:
+            WrappedGraphResponse
+        """
+        data: dict[str, Any] = {}
+        if name is not None:
+            data["name"] = name
+        if description is not None:
+            data["description"] = description
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGraphResponse(**response_dict)
+
+    async def list_entities(
+        self,
+        collection_id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedEntitiesResponse:
+        """List entities in a graph.
+
+        Args:
+            collection_id (str | UUID): Graph ID to list entities from
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedEntitiesResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/entities",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedEntitiesResponse(**response_dict)
+
+    async def get_entity(
+        self,
+        collection_id: str | UUID,
+        entity_id: str | UUID,
+    ) -> WrappedEntityResponse:
+        """Get entity information in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            entity_id (str | UUID): Entity ID to get from the graph
+
+        Returns:
+            WrappedEntityResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/entities/{str(entity_id)}",
+            version="v3",
+        )
+
+        return WrappedEntityResponse(**response_dict)
+
+    async def remove_entity(
+        self,
+        collection_id: str | UUID,
+        entity_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove an entity from a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            entity_id (str | UUID): Entity ID to remove from the graph
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        return await self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/entities/{str(entity_id)}",
+            version="v3",
+        )
+
+    async def list_relationships(
+        self,
+        collection_id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedRelationshipsResponse:
+        """List relationships in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedRelationshipsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/relationships",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedRelationshipsResponse(**response_dict)
+
+    async def get_relationship(
+        self,
+        collection_id: str | UUID,
+        relationship_id: str | UUID,
+    ) -> WrappedRelationshipResponse:
+        """Get relationship information in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            relationship_id (str | UUID): Relationship ID to get from the graph
+
+        Returns:
+            WrappedRelationshipResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/relationships/{str(relationship_id)}",
+            version="v3",
+        )
+
+        return WrappedRelationshipResponse(**response_dict)
+
+    async def remove_relationship(
+        self,
+        collection_id: str | UUID,
+        relationship_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a relationship from a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            relationship_id (str | UUID): Relationship ID to remove from the graph
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/relationships/{str(relationship_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def build(
+        self,
+        collection_id: str | UUID,
+        settings: Optional[dict] = None,
+        run_with_orchestration: bool = True,
+    ) -> WrappedGenericMessageResponse:
+        """Build a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            settings (dict): Settings for the build
+            run_with_orchestration (bool, optional): Whether to run with orchestration. Defaults to True.
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        data: dict[str, Any] = {
+            "run_with_orchestration": run_with_orchestration,
+        }
+        if settings:
+            data["settings"] = settings
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/communities/build",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def list_communities(
+        self,
+        collection_id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedCommunitiesResponse:
+        """List communities in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedCommunitiesResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/communities",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedCommunitiesResponse(**response_dict)
+
+    async def get_community(
+        self,
+        collection_id: str | UUID,
+        community_id: str | UUID,
+    ) -> WrappedCommunityResponse:
+        """Get community information in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            community_id (str | UUID): Community ID to get from the graph
+
+        Returns:
+            WrappedCommunityResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/communities/{str(community_id)}",
+            version="v3",
+        )
+
+        return WrappedCommunityResponse(**response_dict)
+
+    async def update_community(
+        self,
+        collection_id: str | UUID,
+        community_id: str | UUID,
+        name: Optional[str] = None,
+        summary: Optional[str] = None,
+        findings: Optional[_list[str]] = None,
+        rating: Optional[int] = None,
+        rating_explanation: Optional[str] = None,
+        level: Optional[int] = None,
+        attributes: Optional[dict] = None,
+    ) -> WrappedCommunityResponse:
+        """Update community information.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            community_id (str | UUID): Community ID to update
+            name (Optional[str]): Optional new name for the community
+            summary (Optional[str]): Optional new summary for the community
+            findings (Optional[list[str]]): Optional new findings for the community
+            rating (Optional[int]): Optional new rating for the community
+            rating_explanation (Optional[str]): Optional new rating explanation for the community
+            level (Optional[int]): Optional new level for the community
+            attributes (Optional[dict]): Optional new attributes for the community
+
+        Returns:
+            WrappedCommunityResponse
+        """
+        data: dict[str, Any] = {}
+        if name is not None:
+            data["name"] = name
+        if summary is not None:
+            data["summary"] = summary
+        if findings is not None:
+            data["findings"] = findings
+        if rating is not None:
+            data["rating"] = str(rating)
+        if rating_explanation is not None:
+            data["rating_explanation"] = rating_explanation
+        if level is not None:
+            data["level"] = level
+        if attributes is not None:
+            data["attributes"] = attributes
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/communities/{str(community_id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedCommunityResponse(**response_dict)
+
+    async def delete_community(
+        self,
+        collection_id: str | UUID,
+        community_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a community from a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            community_id (str | UUID): Community ID to remove from the graph
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/communities/{str(community_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def pull(
+        self,
+        collection_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Adds documents to a graph by copying their entities and
+        relationships.
+
+        This endpoint:
+            1. Copies document entities to the graphs_entities table
+            2. Copies document relationships to the graphs_relationships table
+            3. Associates the documents with the graph
+
+        When a document is added:
+            - Its entities and relationships are copied to graph-specific tables
+            - Existing entities/relationships are updated by merging their properties
+            - The document ID is recorded in the graph's document_ids array
+
+        Documents added to a graph will contribute their knowledge to:
+            - Graph analysis and querying
+            - Community detection
+            - Knowledge graph enrichment
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/pull",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def remove_document(
+        self,
+        collection_id: str | UUID,
+        document_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Removes a document from a graph and removes any associated entities.
+
+        This endpoint:
+            1. Removes the document ID from the graph's document_ids array
+            2. Optionally deletes the document's copied entities and relationships
+
+        The user must have access to both the graph and the document being removed.
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/documents/{str(document_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def create_entity(
+        self,
+        collection_id: str | UUID,
+        name: str,
+        description: str,
+        category: Optional[str] = None,
+        metadata: Optional[dict] = None,
+    ) -> WrappedEntityResponse:
+        """Creates a new entity in the graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            name (str): The name of the entity to create
+            description (Optional[str]): The description of the entity
+            category (Optional[str]): The category of the entity
+            metadata (Optional[dict]): Additional metadata for the entity
+
+        Returns:
+            WrappedEntityResponse
+        """
+        data: dict[str, Any] = {
+            "name": name,
+            "description": description,
+        }
+        if category is not None:
+            data["category"] = category
+        if metadata is not None:
+            data["metadata"] = metadata
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/entities",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedEntityResponse(**response_dict)
+
+    async def create_relationship(
+        self,
+        collection_id: str | UUID,
+        subject: str,
+        subject_id: str | UUID,
+        predicate: str,
+        object: str,
+        object_id: str | UUID,
+        description: str,
+        weight: Optional[float] = None,
+        metadata: Optional[dict] = None,
+    ) -> WrappedRelationshipResponse:
+        """Creates a new relationship in the graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            subject (str): The subject of the relationship
+            subject_id (str | UUID): The ID of the subject entity
+            predicate (str): The predicate/type of the relationship
+            object (str): The object of the relationship
+            object_id (str | UUID): The ID of the object entity
+            description (Optional[str]): Description of the relationship
+            weight (Optional[float]): Weight/strength of the relationship
+            metadata (Optional[dict]): Additional metadata for the relationship
+
+        Returns:
+            WrappedRelationshipResponse
+        """
+        data: dict[str, Any] = {
+            "subject": subject,
+            "subject_id": str(subject_id),
+            "predicate": predicate,
+            "object": object,
+            "object_id": str(object_id),
+            "description": description,
+        }
+        if weight is not None:
+            data["weight"] = weight
+        if metadata is not None:
+            data["metadata"] = metadata
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/relationships",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedRelationshipResponse(**response_dict)
+
+    async def create_community(
+        self,
+        collection_id: str | UUID,
+        name: str,
+        summary: str,
+        findings: Optional[_list[str]] = None,
+        rating: Optional[float] = None,
+        rating_explanation: Optional[str] = None,
+    ) -> WrappedCommunityResponse:
+        """Creates a new community in the graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            name (str): The name of the community
+            summary (str): A summary description of the community
+            findings (Optional[list[str]]): List of findings about the community
+            rating (Optional[float]): Rating between 1 and 10
+            rating_explanation (Optional[str]): Explanation for the rating
+
+        Returns:
+            WrappedCommunityResponse
+        """
+        data: dict[str, Any] = {
+            "name": name,
+            "summary": summary,
+        }
+        if findings is not None:
+            data["findings"] = findings
+        if rating is not None:
+            data["rating"] = rating
+        if rating_explanation is not None:
+            data["rating_explanation"] = rating_explanation
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/communities",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedCommunityResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/indices.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/indices.py
new file mode 100644
index 00000000..966023ed
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/indices.py
@@ -0,0 +1,116 @@
+import json
+from typing import Any, Optional
+
+from shared.api.models import (
+    WrappedGenericMessageResponse,
+    WrappedVectorIndexResponse,
+    WrappedVectorIndicesResponse,
+)
+
+
class IndicesSDK:
    """SDK for managing vector similarity search indices (v3 API, async)."""

    def __init__(self, client):
        # Shared async HTTP client; all methods delegate to client._make_request.
        self.client = client

    async def create(
        self,
        config: dict,
        run_with_orchestration: Optional[bool] = True,
    ) -> WrappedGenericMessageResponse:
        """Create a new vector similarity search index in the database.

        Args:
            config (dict | IndexConfig): Configuration for the vector index.
            run_with_orchestration (Optional[bool]): Whether to run index creation as an orchestrated task.

        Returns:
            WrappedGenericMessageResponse
        """
        # Accept either a plain dict or a pydantic model for convenience.
        if not isinstance(config, dict):
            config = config.model_dump()

        data: dict[str, Any] = {
            "config": config,
            "run_with_orchestration": run_with_orchestration,
        }
        response_dict = await self.client._make_request(
            "POST",
            "indices",
            json=data,
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def list(
        self,
        filters: Optional[dict] = None,
        offset: Optional[int] = 0,
        limit: Optional[int] = 10,
    ) -> WrappedVectorIndicesResponse:
        """List existing vector similarity search indices with pagination
        support.

        Args:
            filters (Optional[dict]): Filter criteria for indices.
            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 10.

        Returns:
            WrappedVectorIndicesResponse
        """
        params: dict = {
            "offset": offset,
            "limit": limit,
        }
        if filters:
            # Filters are passed as a JSON-encoded query-string parameter.
            params["filters"] = json.dumps(filters)
        response_dict = await self.client._make_request(
            "GET",
            "indices",
            params=params,
            version="v3",
        )

        return WrappedVectorIndicesResponse(**response_dict)

    async def retrieve(
        self,
        index_name: str,
        table_name: str = "vectors",
    ) -> WrappedVectorIndexResponse:
        """Get detailed information about a specific vector index.

        Args:
            index_name (str): The name of the index to retrieve.
            table_name (str): The name of the table where the index is stored.

        Returns:
            WrappedVectorIndexResponse: The response containing the index details.
        """
        response_dict = await self.client._make_request(
            "GET",
            f"indices/{table_name}/{index_name}",
            version="v3",
        )

        return WrappedVectorIndexResponse(**response_dict)

    async def delete(
        self,
        index_name: str,
        table_name: str = "vectors",
    ) -> WrappedGenericMessageResponse:
        """Delete an existing vector index.

        Args:
            index_name (str): The name of the index to delete.
            table_name (str): The name of the table where the index is stored.

        Returns:
            WrappedGenericMessageResponse: Confirmation of the deletion.
        """
        response_dict = await self.client._make_request(
            "DELETE",
            f"indices/{table_name}/{index_name}",
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/prompts.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/prompts.py
new file mode 100644
index 00000000..c7cdb50a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/prompts.py
@@ -0,0 +1,128 @@
+import json
+from typing import Any, Optional
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedGenericMessageResponse,
+    WrappedPromptResponse,
+    WrappedPromptsResponse,
+)
+
+
class PromptsSDK:
    """SDK for managing prompt templates (v3 API, async)."""

    def __init__(self, client):
        # Shared async HTTP client; all methods delegate to client._make_request.
        self.client = client

    async def create(
        self, name: str, template: str, input_types: dict
    ) -> WrappedGenericMessageResponse:
        """Create a new prompt.

        Args:
            name (str): The name of the prompt
            template (str): The template string for the prompt
            input_types (dict): A dictionary mapping input names to their types

        Returns:
            WrappedGenericMessageResponse: Confirmation of prompt creation
        """
        data: dict[str, Any] = {
            "name": name,
            "template": template,
            "input_types": input_types,
        }
        response_dict = await self.client._make_request(
            "POST",
            "prompts",
            json=data,
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def list(self) -> WrappedPromptsResponse:
        """List all available prompts.

        Returns:
            WrappedPromptsResponse: List of all available prompts
        """
        response_dict = await self.client._make_request(
            "GET",
            "prompts",
            version="v3",
        )

        return WrappedPromptsResponse(**response_dict)

    async def retrieve(
        self,
        name: str,
        inputs: Optional[dict] = None,
        prompt_override: Optional[str] = None,
    ) -> WrappedPromptResponse:
        """Get a specific prompt by name, optionally with inputs and override.

        Args:
            name (str): The name of the prompt to retrieve
            inputs (Optional[dict]): JSON-encoded inputs for the prompt
            prompt_override (Optional[str]): An override for the prompt template

        Returns:
            WrappedPromptResponse: The requested prompt with applied inputs and/or override
        """
        params = {}
        if inputs:
            # Inputs travel as a JSON string in the query parameters.
            params["inputs"] = json.dumps(inputs)
        if prompt_override:
            params["prompt_override"] = prompt_override
        # Retrieval uses POST so inputs/overrides can be applied server-side.
        response_dict = await self.client._make_request(
            "POST",
            f"prompts/{name}",
            params=params,
            version="v3",
        )

        return WrappedPromptResponse(**response_dict)

    async def update(
        self,
        name: str,
        template: Optional[str] = None,
        input_types: Optional[dict] = None,
    ) -> WrappedGenericMessageResponse:
        """Update an existing prompt's template and/or input types.

        Args:
            name (str): The name of the prompt to update
            template (Optional[str]): The updated template string for the prompt
            input_types (Optional[dict]): The updated dictionary mapping input names to their types

        Returns:
            WrappedGenericMessageResponse: Confirmation of the update
        """
        data: dict = {}
        if template:
            data["template"] = template
        if input_types:
            # input_types is sent as a JSON-encoded string inside the JSON body.
            data["input_types"] = json.dumps(input_types)
        response_dict = await self.client._make_request(
            "PUT",
            f"prompts/{name}",
            json=data,
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def delete(self, name: str) -> WrappedBooleanResponse:
        """Delete a prompt by name.

        Args:
            name (str): The name of the prompt to delete

        Returns:
            WrappedBooleanResponse: True if deletion was successful
        """
        response_dict = await self.client._make_request(
            "DELETE",
            f"prompts/{name}",
            version="v3",
        )

        return WrappedBooleanResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/retrieval.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/retrieval.py
new file mode 100644
index 00000000..d825a91f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/retrieval.py
@@ -0,0 +1,394 @@
+from typing import Generator
+
+from shared.api.models import (
+    CitationEvent,
+    FinalAnswerEvent,
+    MessageEvent,
+    SearchResultsEvent,
+    ThinkingEvent,
+    ToolCallEvent,
+    ToolResultEvent,
+    UnknownEvent,
+    WrappedAgentResponse,
+    WrappedRAGResponse,
+    WrappedSearchResponse,
+)
+
+from ..models import (
+    Message,
+)
+from ..sync_methods.retrieval import parse_retrieval_event
+
+
+class RetrievalSDK:
+    """
+    SDK for interacting with documents in the v3 API (Asynchronous).
+    """
+
    def __init__(self, client):
        # Shared async HTTP client; every retrieval endpoint wrapper delegates
        # to client._make_request / client._make_streaming_request.
        self.client = client
+
+    async def search(self, **kwargs) -> WrappedSearchResponse:
+        """
+        Conduct a vector and/or graph search (async).
+
+        Args:
+            query (str): Search query to find relevant documents.
+            search_mode (Optional[str | SearchMode]): Pre-configured search modes: "basic", "advanced", or "custom".
+            search_settings (Optional[dict | SearchSettings]): The search configuration object. If search_mode is "custom",
+                these settings are used as-is. For "basic" or "advanced", these settings
+                will override the default mode configuration.
+
+        Returns:
+            WrappedSearchResponse: The search results.
+        """
+        # Extract the required query parameter
+        query = kwargs.pop("query", None)
+        if query is None:
+            raise ValueError("'query' is a required parameter for search")
+
+        # Process common parameters
+        search_mode = kwargs.pop("search_mode", "custom")
+        search_settings = kwargs.pop("search_settings", None)
+
+        # Handle type conversions
+        if search_mode and not isinstance(search_mode, str):
+            search_mode = search_mode.value
+        if search_settings and not isinstance(search_settings, dict):
+            search_settings = search_settings.model_dump()
+
+        # Build payload
+        payload = {
+            "query": query,
+            "search_mode": search_mode,
+            "search_settings": search_settings,
+            **kwargs,  # Include any additional parameters
+        }
+
+        # Filter out None values
+        payload = {k: v for k, v in payload.items() if v is not None}
+
+        response_dict = await self.client._make_request(
+            "POST",
+            "retrieval/search",
+            json=payload,
+            version="v3",
+        )
+        return WrappedSearchResponse(**response_dict)
+
+    async def completion(self, **kwargs):
+        """
+        Get a completion from the model (async).
+
+        Args:
+            messages (list[dict | Message]): List of messages to generate completion for. Each message
+                should have a 'role' and 'content'.
+            generation_config (Optional[dict | GenerationConfig]): Configuration for text generation.
+
+        Returns:
+            The completion response.
+        """
+        # Extract required parameters
+        messages = kwargs.pop("messages", None)
+        if messages is None:
+            raise ValueError(
+                "'messages' is a required parameter for completion"
+            )
+
+        # Process optional parameters
+        generation_config = kwargs.pop("generation_config", None)
+
+        # Handle type conversions
+        cast_messages = [
+            Message(**msg) if isinstance(msg, dict) else msg
+            for msg in messages
+        ]
+        if generation_config and not isinstance(generation_config, dict):
+            generation_config = generation_config.model_dump()
+
+        # Build payload
+        payload = {
+            "messages": [msg.model_dump() for msg in cast_messages],
+            "generation_config": generation_config,
+            **kwargs,  # Include any additional parameters
+        }
+
+        # Filter out None values
+        payload = {k: v for k, v in payload.items() if v is not None}
+
+        return await self.client._make_request(
+            "POST",
+            "retrieval/completion",
+            json=payload,
+            version="v3",
+        )
+
+    async def embedding(self, **kwargs):
+        """
+        Generate an embedding for given text (async).
+
+        Args:
+            text (str): Text to generate embeddings for.
+
+        Returns:
+            The embedding vector.
+        """
+        # Extract required parameters
+        text = kwargs.pop("text", None)
+        if text is None:
+            raise ValueError("'text' is a required parameter for embedding")
+
+        # Build payload
+        payload = {"text": text, **kwargs}  # Include any additional parameters
+
+        return await self.client._make_request(
+            "POST",
+            "retrieval/embedding",
+            data=payload,
+            version="v3",
+        )
+
+    async def rag(
+        self, **kwargs
+    ) -> (
+        WrappedRAGResponse
+        | Generator[
+            ThinkingEvent
+            | SearchResultsEvent
+            | MessageEvent
+            | CitationEvent
+            | FinalAnswerEvent
+            | ToolCallEvent
+            | ToolResultEvent
+            | UnknownEvent
+            | None,
+            None,
+            None,
+        ]
+    ):
+        """
+        Conducts a Retrieval Augmented Generation (RAG) search (async).
+        May return a `WrappedRAGResponse` or a streaming generator if `stream=True`.
+
+        Args:
+            query (str): The search query.
+            rag_generation_config (Optional[dict | GenerationConfig]): Configuration for RAG generation.
+            search_mode (Optional[str | SearchMode]): Pre-configured search modes: "basic", "advanced", or "custom".
+            search_settings (Optional[dict | SearchSettings]): The search configuration object.
+            task_prompt (Optional[str]): Optional custom prompt to override default.
+            include_title_if_available (Optional[bool]): Include document titles in responses when available.
+            include_web_search (Optional[bool]): Include web search results provided to the LLM.
+
+        Returns:
+            Either a WrappedRAGResponse or an AsyncGenerator for streaming.
+        """
+        # Extract required parameters
+        query = kwargs.pop("query", None)
+        if query is None:
+            raise ValueError("'query' is a required parameter for rag")
+
+        # Process optional parameters
+        rag_generation_config = kwargs.pop("rag_generation_config", None)
+        search_mode = kwargs.pop("search_mode", "custom")
+        search_settings = kwargs.pop("search_settings", None)
+        task_prompt = kwargs.pop("task_prompt", None)
+        include_title_if_available = kwargs.pop(
+            "include_title_if_available", False
+        )
+        include_web_search = kwargs.pop("include_web_search", False)
+
+        # Handle type conversions
+        if rag_generation_config and not isinstance(
+            rag_generation_config, dict
+        ):
+            rag_generation_config = rag_generation_config.model_dump()
+        if search_mode and not isinstance(search_mode, str):
+            search_mode = search_mode.value
+        if search_settings and not isinstance(search_settings, dict):
+            search_settings = search_settings.model_dump()
+
+        # Build payload
+        payload = {
+            "query": query,
+            "rag_generation_config": rag_generation_config,
+            "search_mode": search_mode,
+            "search_settings": search_settings,
+            "task_prompt": task_prompt,
+            "include_title_if_available": include_title_if_available,
+            "include_web_search": include_web_search,
+            **kwargs,  # Include any additional parameters
+        }
+
+        # Filter out None values
+        payload = {k: v for k, v in payload.items() if v is not None}
+
+        # Check if streaming is enabled
+        is_stream = False
+        if rag_generation_config and rag_generation_config.get(
+            "stream", False
+        ):
+            is_stream = True
+
+        if is_stream:
+            # Return an async streaming generator
+            raw_stream = self.client._make_streaming_request(
+                "POST",
+                "retrieval/rag",
+                json=payload,
+                version="v3",
+            )
+            # Wrap each raw SSE event with parse_rag_event
+            return (parse_retrieval_event(event) for event in raw_stream)
+
+        # Otherwise, request fully and parse response
+        response_dict = await self.client._make_request(
+            "POST",
+            "retrieval/rag",
+            json=payload,
+            version="v3",
+        )
+        return WrappedRAGResponse(**response_dict)
+
+    async def agent(
+        self, **kwargs
+    ) -> (
+        WrappedAgentResponse
+        | Generator[
+            ThinkingEvent
+            | SearchResultsEvent
+            | MessageEvent
+            | CitationEvent
+            | FinalAnswerEvent
+            | ToolCallEvent
+            | ToolResultEvent
+            | UnknownEvent
+            | None,
+            None,
+            None,
+        ]
+    ):
+        """
+        Performs a single turn in a conversation with a RAG agent (async).
+        May return a `WrappedAgentResponse` or a streaming generator if `stream=True`.
+
+        Args:
+            message (Optional[dict | Message]): Current message to process.
+            messages (Optional[list[dict | Message]]): List of messages (deprecated, use message instead).
+            rag_generation_config (Optional[dict | GenerationConfig]): Configuration for RAG generation in 'rag' mode.
+            research_generation_config (Optional[dict | GenerationConfig]): Configuration for generation in 'research' mode.
+            search_mode (Optional[str | SearchMode]): Pre-configured search modes: "basic", "advanced", or "custom".
+            search_settings (Optional[dict | SearchSettings]): The search configuration object.
+            task_prompt (Optional[str]): Optional custom prompt to override default.
+            include_title_if_available (Optional[bool]): Include document titles from search results.
+            conversation_id (Optional[str | uuid.UUID]): ID of the conversation.
+            tools (Optional[list[str]]): List of tools to execute (deprecated).
+            rag_tools (Optional[list[str]]): List of tools to enable for RAG mode.
+            research_tools (Optional[list[str]]): List of tools to enable for Research mode.
+            max_tool_context_length (Optional[int]): Maximum length of returned tool context.
+            use_system_context (Optional[bool]): Use extended prompt for generation.
+            mode (Optional[Literal["rag", "research"]]): Mode to use for generation: 'rag' or 'research'.
+
+        Returns:
+            Either a WrappedAgentResponse or an AsyncGenerator for streaming.
+        """
+        # Extract parameters
+        message = kwargs.pop("message", None)
+        messages = kwargs.pop("messages", None)  # Deprecated
+        rag_generation_config = kwargs.pop("rag_generation_config", None)
+        research_generation_config = kwargs.pop(
+            "research_generation_config", None
+        )
+        search_mode = kwargs.pop("search_mode", "custom")
+        search_settings = kwargs.pop("search_settings", None)
+        task_prompt = kwargs.pop("task_prompt", None)
+        include_title_if_available = kwargs.pop(
+            "include_title_if_available", True
+        )
+        conversation_id = kwargs.pop("conversation_id", None)
+        tools = kwargs.pop("tools", None)  # Deprecated
+        rag_tools = kwargs.pop("rag_tools", None)
+        research_tools = kwargs.pop("research_tools", None)
+        max_tool_context_length = kwargs.pop("max_tool_context_length", 32768)
+        use_system_context = kwargs.pop("use_system_context", True)
+        mode = kwargs.pop("mode", "rag")
+
+        # Handle type conversions
+        if message and isinstance(message, dict):
+            message = Message(**message).model_dump()
+        elif message:
+            message = message.model_dump()
+
+        if rag_generation_config and not isinstance(
+            rag_generation_config, dict
+        ):
+            rag_generation_config = rag_generation_config.model_dump()
+
+        if research_generation_config and not isinstance(
+            research_generation_config, dict
+        ):
+            research_generation_config = (
+                research_generation_config.model_dump()
+            )
+
+        if search_mode and not isinstance(search_mode, str):
+            search_mode = search_mode.value
+
+        if search_settings and not isinstance(search_settings, dict):
+            search_settings = search_settings.model_dump()
+
+        # Build payload
+        payload = {
+            "message": message,
+            "messages": messages,  # Deprecated but included for backward compatibility
+            "rag_generation_config": rag_generation_config,
+            "research_generation_config": research_generation_config,
+            "search_mode": search_mode,
+            "search_settings": search_settings,
+            "task_prompt": task_prompt,
+            "include_title_if_available": include_title_if_available,
+            "conversation_id": (
+                str(conversation_id) if conversation_id else None
+            ),
+            "tools": tools,  # Deprecated but included for backward compatibility
+            "rag_tools": rag_tools,
+            "research_tools": research_tools,
+            "max_tool_context_length": max_tool_context_length,
+            "use_system_context": use_system_context,
+            "mode": mode,
+            **kwargs,  # Include any additional parameters
+        }
+
+        # Remove None values
+        payload = {k: v for k, v in payload.items() if v is not None}
+
+        # Check if streaming is enabled
+        is_stream = False
+        if rag_generation_config and rag_generation_config.get(
+            "stream", False
+        ):
+            is_stream = True
+        elif (
+            research_generation_config
+            and mode == "research"
+            and research_generation_config.get("stream", False)
+        ):
+            is_stream = True
+
+        if is_stream:
+            # Return an async streaming generator
+            raw_stream = self.client._make_streaming_request(
+                "POST",
+                "retrieval/agent",
+                json=payload,
+                version="v3",
+            )
+            # Parse each event in the stream
+            return (parse_retrieval_event(event) for event in raw_stream)
+
+        response_dict = await self.client._make_request(
+            "POST",
+            "retrieval/agent",
+            json=payload,
+            version="v3",
+        )
+        return WrappedAgentResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/system.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/system.py
new file mode 100644
index 00000000..6c57def0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/system.py
@@ -0,0 +1,43 @@
+from shared.api.models import (
+    WrappedGenericMessageResponse,
+    WrappedServerStatsResponse,
+    WrappedSettingsResponse,
+)
+
+
class SystemSDK:
    """Async SDK for the R2R server's health, settings, and status routes."""

    def __init__(self, client):
        # `client` must provide an awaitable `_make_request` method.
        self.client = client

    async def health(self) -> WrappedGenericMessageResponse:
        """Check the health of the R2R server."""
        payload = await self.client._make_request(
            "GET", "health", version="v3"
        )
        return WrappedGenericMessageResponse(**payload)

    async def settings(self) -> WrappedSettingsResponse:
        """Get the configuration settings for the R2R server.

        Returns:
            WrappedSettingsResponse: The server settings.
        """
        payload = await self.client._make_request(
            "GET", "system/settings", version="v3"
        )
        return WrappedSettingsResponse(**payload)

    async def status(self) -> WrappedServerStatsResponse:
        """Get server statistics: start time, uptime, CPU and memory usage.

        Returns:
            WrappedServerStatsResponse: The server statistics.
        """
        payload = await self.client._make_request(
            "GET", "system/status", version="v3"
        )
        return WrappedServerStatsResponse(**payload)
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/users.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/users.py
new file mode 100644
index 00000000..207c3cf4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/users.py
@@ -0,0 +1,589 @@
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedAPIKeyResponse,
+    WrappedAPIKeysResponse,
+    WrappedBooleanResponse,
+    WrappedCollectionsResponse,
+    WrappedGenericMessageResponse,
+    WrappedLimitsResponse,
+    WrappedLoginResponse,
+    WrappedTokenResponse,
+    WrappedUserResponse,
+    WrappedUsersResponse,
+)
+
+
class UsersSDK:
    """Async SDK for the R2R v3 ``/users`` endpoints: registration,
    authentication, profile management, collection membership, API keys,
    rate limits, and OAuth 2.0 flows.

    The wrapped ``client`` must expose ``_make_request``, ``access_token``,
    ``_refresh_token``, ``_user_id``, and ``api_key``.
    """

    def __init__(self, client):
        self.client = client

    async def create(
        self,
        email: str,
        password: str,
        name: Optional[str] = None,
        bio: Optional[str] = None,
        profile_picture: Optional[str] = None,
    ) -> WrappedUserResponse:
        """Register a new user.

        Args:
            email (str): User's email address
            password (str): User's password
            name (Optional[str]): The name for the new user
            bio (Optional[str]): The bio for the new user
            profile_picture (Optional[str]): New user profile picture

        Returns:
            WrappedUserResponse: New user information
        """
        data: dict = {"email": email, "password": password}

        # Only send optional fields the caller actually provided.
        if name is not None:
            data["name"] = name
        if bio is not None:
            data["bio"] = bio
        if profile_picture is not None:
            data["profile_picture"] = profile_picture

        response_dict = await self.client._make_request(
            "POST",
            "users",
            json=data,
            version="v3",
        )

        return WrappedUserResponse(**response_dict)

    async def send_verification_email(
        self, email: str
    ) -> WrappedGenericMessageResponse:
        """Request that a verification email be sent to a user.

        Args:
            email (str): Email address to send the verification email to

        Returns:
            WrappedGenericMessageResponse
        """
        response_dict = await self.client._make_request(
            "POST",
            "users/send-verification-email",
            json=email,
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def delete(
        self, id: str | UUID, password: str
    ) -> WrappedBooleanResponse:
        """Delete a specific user. Users can only delete their own account
        unless they are superusers.

        Clears the locally cached access and refresh tokens.

        Args:
            id (str | UUID): User ID to delete
            password (str): User's password

        Returns:
            WrappedBooleanResponse: Deletion result
        """
        data: dict[str, Any] = {"password": password}
        response_dict = await self.client._make_request(
            "DELETE",
            f"users/{str(id)}",
            json=data,
            version="v3",
        )
        # The deleted account's session is no longer valid.
        self.client.access_token = None
        self.client._refresh_token = None

        return WrappedBooleanResponse(**response_dict)

    async def verify_email(
        self, email: str, verification_code: str
    ) -> WrappedGenericMessageResponse:
        """Verify a user's email address.

        Args:
            email (str): User's email address
            verification_code (str): Verification code sent to the user's email

        Returns:
            WrappedGenericMessageResponse: Verification result
        """
        data: dict[str, Any] = {
            "email": email,
            "verification_code": verification_code,
        }
        response_dict = await self.client._make_request(
            "POST",
            "users/verify-email",
            json=data,
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def login(self, email: str, password: str) -> WrappedLoginResponse:
        """Log in a user and store the session tokens on the client.

        Args:
            email (str): User's email address
            password (str): User's password

        Returns:
            WrappedLoginResponse

        Raises:
            ValueError: If an API key is already configured on the client.
        """
        if self.client.api_key:
            raise ValueError(
                "Cannot log in after setting an API key, please unset your R2R_API_KEY variable or call client.set_api_key(None)"
            )
        # The login route is form-encoded and expects `username`, not `email`.
        data: dict[str, Any] = {"username": email, "password": password}
        response_dict = await self.client._make_request(
            "POST",
            "users/login",
            data=data,
            version="v3",
        )

        login_response = WrappedLoginResponse(**response_dict)
        self.client.access_token = login_response.results.access_token.token
        self.client._refresh_token = login_response.results.refresh_token.token

        # Resolve and cache the authenticated user's id for later calls
        # (e.g. get_limits).
        user = await self.client._make_request(
            "GET",
            "users/me",
            version="v3",
        )

        user_response = WrappedUserResponse(**user)
        self.client._user_id = user_response.results.id

        return login_response

    async def logout(self) -> WrappedGenericMessageResponse | None:
        """Log out the current user.

        Always clears the locally cached tokens; only calls the server
        when an access token is present.

        Returns:
            WrappedGenericMessageResponse if a server logout occurred,
            otherwise None.
        """
        if self.client.access_token:
            response_dict = await self.client._make_request(
                "POST",
                "users/logout",
                version="v3",
            )
            self.client.access_token = None
            self.client._refresh_token = None

            return WrappedGenericMessageResponse(**response_dict)

        self.client.access_token = None
        self.client._refresh_token = None
        return None

    async def refresh_token(self) -> WrappedTokenResponse:
        """Refresh the access token using the stored refresh token.

        Returns:
            WrappedTokenResponse: The new access and refresh tokens.

        Raises:
            ValueError: If no refresh token is stored (the user never
                logged in, or has logged out).
        """
        # Fail fast with a clear message. Previously, a missing refresh
        # token skipped the request but still dereferenced `response_dict`,
        # raising a confusing NameError.
        if not self.client._refresh_token:
            raise ValueError(
                "No refresh token available; please log in first."
            )

        response_dict = await self.client._make_request(
            "POST",
            "users/refresh-token",
            json=self.client._refresh_token,
            version="v3",
        )
        self.client.access_token = response_dict["results"]["access_token"][
            "token"
        ]
        self.client._refresh_token = response_dict["results"]["refresh_token"][
            "token"
        ]

        return WrappedTokenResponse(**response_dict)

    async def change_password(
        self, current_password: str, new_password: str
    ) -> WrappedGenericMessageResponse:
        """Change the user's password.

        Args:
            current_password (str): User's current password
            new_password (str): User's new password

        Returns:
            WrappedGenericMessageResponse: Change password result
        """
        data: dict[str, Any] = {
            "current_password": current_password,
            "new_password": new_password,
        }
        response_dict = await self.client._make_request(
            "POST",
            "users/change-password",
            json=data,
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def request_password_reset(
        self, email: str
    ) -> WrappedGenericMessageResponse:
        """Request a password reset.

        Args:
            email (str): User's email address

        Returns:
            WrappedGenericMessageResponse: Password reset request result
        """
        response_dict = await self.client._make_request(
            "POST",
            "users/request-password-reset",
            json=email,
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def reset_password(
        self, reset_token: str, new_password: str
    ) -> WrappedGenericMessageResponse:
        """Reset password using a reset token.

        Args:
            reset_token (str): Password reset token
            new_password (str): New password

        Returns:
            WrappedGenericMessageResponse: Password reset result
        """
        data: dict[str, Any] = {
            "reset_token": reset_token,
            "new_password": new_password,
        }
        response_dict = await self.client._make_request(
            "POST",
            "users/reset-password",
            json=data,
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def list(
        self,
        ids: Optional[list[str | UUID]] = None,
        offset: Optional[int] = 0,
        limit: Optional[int] = 100,
    ) -> WrappedUsersResponse:
        """List users with pagination and filtering options.

        Args:
            ids (Optional[list[str | UUID]]): Restrict results to these user IDs.
            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.

        Returns:
            WrappedUsersResponse: List of users and pagination information
        """
        params = {
            "offset": offset,
            "limit": limit,
        }
        if ids:
            params["ids"] = [str(user_id) for user_id in ids]  # type: ignore

        response_dict = await self.client._make_request(
            "GET",
            "users",
            params=params,
            version="v3",
        )

        return WrappedUsersResponse(**response_dict)

    async def retrieve(
        self,
        id: str | UUID,
    ) -> WrappedUserResponse:
        """Get a specific user.

        Args:
            id (str | UUID): User ID to retrieve

        Returns:
            WrappedUserResponse: Detailed user information
        """
        response_dict = await self.client._make_request(
            "GET",
            f"users/{str(id)}",
            version="v3",
        )

        return WrappedUserResponse(**response_dict)

    async def me(
        self,
    ) -> WrappedUserResponse:
        """Get detailed information about the currently authenticated user.

        Returns:
            WrappedUserResponse: Detailed user information
        """
        response_dict = await self.client._make_request(
            "GET",
            "users/me",
            version="v3",
        )

        return WrappedUserResponse(**response_dict)

    async def update(
        self,
        id: str | UUID,
        email: Optional[str] = None,
        is_superuser: Optional[bool] = None,
        name: Optional[str] = None,
        bio: Optional[str] = None,
        profile_picture: Optional[str] = None,
        limits_overrides: dict | None = None,
        metadata: dict[str, str | None] | None = None,
    ) -> WrappedUserResponse:
        """Update user information. Only fields explicitly provided are sent.

        Args:
            id (str | UUID): User ID to update
            email (Optional[str]): New email address
            is_superuser (Optional[bool]): Update superuser status
            name (Optional[str]): New name
            bio (Optional[str]): New bio
            profile_picture (Optional[str]): New profile picture
            limits_overrides (Optional[dict]): Per-user limit overrides
            metadata (Optional[dict[str, str | None]]): User metadata

        Returns:
            WrappedUserResponse: Updated user information
        """
        data: dict = {}
        if email is not None:
            data["email"] = email
        if is_superuser is not None:
            data["is_superuser"] = is_superuser
        if name is not None:
            data["name"] = name
        if bio is not None:
            data["bio"] = bio
        if profile_picture is not None:
            data["profile_picture"] = profile_picture
        if limits_overrides is not None:
            data["limits_overrides"] = limits_overrides
        if metadata is not None:
            data["metadata"] = metadata

        response_dict = await self.client._make_request(
            "POST",
            f"users/{str(id)}",
            json=data,
            version="v3",
        )

        return WrappedUserResponse(**response_dict)

    async def list_collections(
        self,
        id: str | UUID,
        offset: Optional[int] = 0,
        limit: Optional[int] = 100,
    ) -> WrappedCollectionsResponse:
        """Get all collections associated with a specific user.

        Args:
            id (str | UUID): User ID to get collections for
            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.

        Returns:
            WrappedCollectionsResponse: List of collections and pagination information
        """
        params = {
            "offset": offset,
            "limit": limit,
        }

        response_dict = await self.client._make_request(
            "GET",
            f"users/{str(id)}/collections",
            params=params,
            version="v3",
        )

        return WrappedCollectionsResponse(**response_dict)

    async def add_to_collection(
        self,
        id: str | UUID,
        collection_id: str | UUID,
    ) -> WrappedBooleanResponse:
        """Add a user to a collection.

        Args:
            id (str | UUID): User ID to add
            collection_id (str | UUID): Collection ID to add user to

        Returns:
            WrappedBooleanResponse
        """
        response_dict = await self.client._make_request(
            "POST",
            f"users/{str(id)}/collections/{str(collection_id)}",
            version="v3",
        )

        return WrappedBooleanResponse(**response_dict)

    async def remove_from_collection(
        self,
        id: str | UUID,
        collection_id: str | UUID,
    ) -> WrappedBooleanResponse:
        """Remove a user from a collection.

        Args:
            id (str | UUID): User ID to remove
            collection_id (str | UUID): Collection ID to remove user from

        Returns:
            WrappedBooleanResponse: True if successful
        """
        response_dict = await self.client._make_request(
            "DELETE",
            f"users/{str(id)}/collections/{str(collection_id)}",
            version="v3",
        )

        return WrappedBooleanResponse(**response_dict)

    async def create_api_key(
        self,
        id: str | UUID,
        name: Optional[str] = None,
        description: Optional[str] = None,
    ) -> WrappedAPIKeyResponse:
        """Create a new API key for the specified user.

        Args:
            id (str | UUID): User ID to create API key for
            name (Optional[str]): Name of the API key
            description (Optional[str]): Description of the API key

        Returns:
            WrappedAPIKeyResponse: { "message": "API key created successfully", "api_key": "key_id.raw_api_key" }
        """
        data: dict[str, Any] = {}
        if name:
            data["name"] = name
        if description:
            data["description"] = description

        response_dict = await self.client._make_request(
            "POST",
            f"users/{str(id)}/api-keys",
            json=data,
            version="v3",
        )

        return WrappedAPIKeyResponse(**response_dict)

    async def list_api_keys(
        self,
        id: str | UUID,
    ) -> WrappedAPIKeysResponse:
        """List all API keys for the specified user.

        Args:
            id (str | UUID): User ID to list API keys for

        Returns:
            WrappedAPIKeysResponse
        """
        resp_dict = await self.client._make_request(
            "GET",
            f"users/{str(id)}/api-keys",
            version="v3",
        )

        return WrappedAPIKeysResponse(**resp_dict)

    async def delete_api_key(
        self,
        id: str | UUID,
        key_id: str | UUID,
    ) -> WrappedBooleanResponse:
        """Delete a specific API key for the specified user.

        Args:
            id (str | UUID): User ID
            key_id (str | UUID): API key ID to delete

        Returns:
            WrappedBooleanResponse: { "message": "API key deleted successfully" }
        """
        response_dict = await self.client._make_request(
            "DELETE",
            f"users/{str(id)}/api-keys/{str(key_id)}",
            version="v3",
        )

        return WrappedBooleanResponse(**response_dict)

    async def get_limits(self) -> WrappedLimitsResponse:
        """Get the rate limits for the currently authenticated user.

        Uses the user id cached on the client by `login`.

        Returns:
            WrappedLimitsResponse
        """
        response_dict = await self.client._make_request(
            "GET",
            f"users/{str(self.client._user_id)}/limits",
            version="v3",
        )

        return WrappedLimitsResponse(**response_dict)

    async def oauth_google_authorize(self) -> WrappedGenericMessageResponse:
        """Get Google OAuth 2.0 authorization URL from the server.

        Returns:
            WrappedGenericMessageResponse
        """
        response_dict = await self.client._make_request(
            "GET",
            "users/oauth/google/authorize",
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def oauth_github_authorize(self) -> WrappedGenericMessageResponse:
        """Get GitHub OAuth 2.0 authorization URL from the server.

        Returns:
            WrappedGenericMessageResponse
        """
        response_dict = await self.client._make_request(
            "GET",
            "users/oauth/github/authorize",
            version="v3",
        )

        return WrappedGenericMessageResponse(**response_dict)

    async def oauth_google_callback(
        self, code: str, state: str
    ) -> WrappedLoginResponse:
        """Exchange `code` and `state` with the Google OAuth 2.0 callback
        route.

        Returns:
            WrappedLoginResponse
        """
        response_dict = await self.client._make_request(
            "GET",
            "users/oauth/google/callback",
            params={"code": code, "state": state},
            version="v3",
        )

        return WrappedLoginResponse(**response_dict)

    async def oauth_github_callback(
        self, code: str, state: str
    ) -> WrappedLoginResponse:
        """Exchange `code` and `state` with the GitHub OAuth 2.0 callback
        route.

        Returns:
            WrappedLoginResponse
        """
        response_dict = await self.client._make_request(
            "GET",
            "users/oauth/github/callback",
            params={"code": code, "state": state},
            version="v3",
        )

        return WrappedLoginResponse(**response_dict)