Diffstat (limited to '.venv/lib/python3.12/site-packages/sdk')
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/__init__.py                    |   4
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/asnyc_methods/__init__.py      |  23
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/asnyc_methods/chunks.py        | 186
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/asnyc_methods/collections.py   | 343
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/asnyc_methods/conversations.py | 310
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/asnyc_methods/documents.py     | 706
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/asnyc_methods/graphs.py        | 614
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/asnyc_methods/indices.py       | 116
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/asnyc_methods/prompts.py       | 128
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/asnyc_methods/retrieval.py     | 394
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/asnyc_methods/system.py        |  43
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/asnyc_methods/users.py         | 589
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/async_client.py                | 135
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/base/__init_.py                |   0
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/base/base_client.py            |  52
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/models.py                      | 100
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_client.py                 | 165
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_methods/__init__.py       |  23
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_methods/chunks.py         | 186
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_methods/collections.py    | 343
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_methods/conversations.py  | 308
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_methods/documents.py      | 761
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_methods/graphs.py         | 616
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_methods/indices.py        | 119
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_methods/prompts.py        | 128
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_methods/retrieval.py      | 554
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_methods/system.py         |  43
-rw-r--r--  .venv/lib/python3.12/site-packages/sdk/sync_methods/users.py          | 587
28 files changed, 7576 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/sdk/__init__.py b/.venv/lib/python3.12/site-packages/sdk/__init__.py
new file mode 100644
index 00000000..092eba63
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/__init__.py
@@ -0,0 +1,4 @@
+from .async_client import R2RAsyncClient
+from .sync_client import R2RClient
+
+__all__ = ["R2RAsyncClient", "R2RClient"]
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/__init__.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/__init__.py
new file mode 100644
index 00000000..efa520d6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/__init__.py
@@ -0,0 +1,23 @@
+from .chunks import ChunksSDK
+from .collections import CollectionsSDK
+from .conversations import ConversationsSDK
+from .documents import DocumentsSDK
+from .graphs import GraphsSDK
+from .indices import IndicesSDK
+from .prompts import PromptsSDK
+from .retrieval import RetrievalSDK
+from .system import SystemSDK
+from .users import UsersSDK
+
+__all__ = [
+    "ChunksSDK",
+    "CollectionsSDK",
+    "ConversationsSDK",
+    "DocumentsSDK",
+    "GraphsSDK",
+    "IndicesSDK",
+    "PromptsSDK",
+    "RetrievalSDK",
+    "SystemSDK",
+    "UsersSDK",
+]
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/chunks.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/chunks.py
new file mode 100644
index 00000000..a64142d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/chunks.py
@@ -0,0 +1,186 @@
+import json
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedChunkResponse,
+    WrappedChunksResponse,
+    WrappedVectorSearchResponse,
+)
+
+from ..models import SearchSettings
+
+
+class ChunksSDK:
+    """SDK for interacting with chunks in the v3 API."""
+
+    def __init__(self, client):
+        self.client = client
+
+    async def update(
+        self,
+        chunk: dict[str, Any],
+    ) -> WrappedChunkResponse:
+        """Update an existing chunk.
+
+        Args:
+            chunk (dict[str, Any]): Chunk to update. Should contain:
+                - id: UUID of the chunk
+                - metadata: Dictionary of metadata
+        Returns:
+            WrappedChunkResponse
+        """
+        response_dict = await self.client._make_request(
+            "POST",
+            f"chunks/{str(chunk['id'])}",
+            json=chunk,
+            version="v3",
+        )
+
+        return WrappedChunkResponse(**response_dict)
+
+    async def retrieve(
+        self,
+        id: str | UUID,
+    ) -> WrappedChunkResponse:
+        """Get a specific chunk.
+
+        Args:
+            id (str | UUID): Chunk ID to retrieve
+
+        Returns:
+            WrappedChunkResponse
+        """
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"chunks/{id}",
+            version="v3",
+        )
+
+        return WrappedChunkResponse(**response_dict)
+
+    # FIXME: Is this the most appropriate name for this method?
+    async def list_by_document(
+        self,
+        document_id: str | UUID,
+        metadata_filter: Optional[dict] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedChunksResponse:
+        """List chunks for a specific document.
+
+        Args:
+            document_id (str | UUID): Document ID to get chunks for
+            metadata_filter (Optional[dict]): Filter chunks by metadata
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedChunksResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if metadata_filter:
+            params["metadata_filter"] = json.dumps(metadata_filter)
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"documents/{str(document_id)}/chunks",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedChunksResponse(**response_dict)
+
+    async def delete(
+        self,
+        id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Delete a specific chunk.
+
+        Args:
+            id (str | UUID): ID of chunk to delete
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"chunks/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def list(
+        self,
+        include_vectors: bool = False,
+        metadata_filter: Optional[dict] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+        filters: Optional[dict] = None,
+    ) -> WrappedChunksResponse:
+        """List chunks with pagination support.
+
+        Args:
+            include_vectors (bool, optional): Include vector data in response. Defaults to False.
+            metadata_filter (Optional[dict], optional): Filter by metadata. Defaults to None.
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+            filters (Optional[dict], optional): Additional filters to apply when listing chunks. Defaults to None.
+
+        Returns:
+            WrappedChunksResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+            "include_vectors": include_vectors,
+        }
+        if filters:
+            params["filters"] = json.dumps(filters)
+
+        if metadata_filter:
+            params["metadata_filter"] = json.dumps(metadata_filter)
+
+        response_dict = await self.client._make_request(
+            "GET",
+            "chunks",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedChunksResponse(**response_dict)
+
+    async def search(
+        self,
+        query: str,
+        search_settings: Optional[dict | SearchSettings] = None,
+    ) -> WrappedVectorSearchResponse:
+        """Conduct a vector and/or graph search.
+
+        Args:
+            query (str): The query to search for.
+            search_settings (Optional[dict | SearchSettings]): Vector search settings.
+
+        Returns:
+            WrappedVectorSearchResponse
+        """
+        if search_settings and not isinstance(search_settings, dict):
+            search_settings = search_settings.model_dump()
+
+        data: dict[str, Any] = {
+            "query": query,
+            "search_settings": search_settings,
+        }
+        response_dict = await self.client._make_request(
+            "POST",
+            "chunks/search",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedVectorSearchResponse(**response_dict)
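For orientation, a hedged sketch of ChunksSDK usage through the async client. It assumes R2RAsyncClient wires this class up as a `chunks` attribute and that a server is reachable; neither is shown in this diff.

    import asyncio
    from sdk import R2RAsyncClient

    async def main():
        client = R2RAsyncClient(base_url="http://localhost:7272")  # assumed URL
        # Vector search over chunks; the settings dict mirrors SearchSettings
        results = await client.chunks.search(
            query="example query",
            search_settings={"limit": 5},
        )
        # Paginated listing, filtered by chunk metadata
        page = await client.chunks.list(
            metadata_filter={"source": "docs"},
            offset=0,
            limit=10,
        )

    asyncio.run(main())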
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/collections.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/collections.py
new file mode 100644
index 00000000..a768b72e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/collections.py
@@ -0,0 +1,343 @@
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedCollectionResponse,
+    WrappedCollectionsResponse,
+    WrappedDocumentsResponse,
+    WrappedGenericMessageResponse,
+    WrappedUsersResponse,
+)
+
+
+class CollectionsSDK:
+    def __init__(self, client):
+        self.client = client
+
+    async def create(
+        self,
+        name: str,
+        description: Optional[str] = None,
+    ) -> WrappedCollectionResponse:
+        """Create a new collection.
+
+        Args:
+            name (str): Name of the collection
+            description (Optional[str]): Description of the collection
+
+        Returns:
+            WrappedCollectionResponse
+        """
+        data: dict[str, Any] = {"name": name, "description": description}
+        response_dict = await self.client._make_request(
+            "POST",
+            "collections",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedCollectionResponse(**response_dict)
+
+    async def list(
+        self,
+        ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedCollectionsResponse:
+        """List collections with pagination and filtering options.
+
+        Args:
+            ids (Optional[list[str | UUID]]): Filter collections by ids
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedCollectionsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if ids:
+            params["ids"] = ids
+
+        response_dict = await self.client._make_request(
+            "GET", "collections", params=params, version="v3"
+        )
+
+        return WrappedCollectionsResponse(**response_dict)
+
+    async def retrieve(
+        self,
+        id: str | UUID,
+    ) -> WrappedCollectionResponse:
+        """Get detailed information about a specific collection.
+
+        Args:
+            id (str | UUID): Collection ID to retrieve
+
+        Returns:
+            WrappedCollectionResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET", f"collections/{str(id)}", version="v3"
+        )
+
+        return WrappedCollectionResponse(**response_dict)
+
+    async def update(
+        self,
+        id: str | UUID,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        generate_description: Optional[bool] = False,
+    ) -> WrappedCollectionResponse:
+        """Update collection information.
+
+        Args:
+            id (str | UUID): Collection ID to update
+            name (Optional[str]): Optional new name for the collection
+            description (Optional[str]): Optional new description for the collection
+            generate_description (Optional[bool]): Whether to generate a new synthetic description for the collection.
+
+        Returns:
+            WrappedCollectionResponse
+        """
+        data: dict[str, Any] = {}
+        if name is not None:
+            data["name"] = name
+        if description is not None:
+            data["description"] = description
+        if generate_description:
+            data["generate_description"] = str(generate_description)
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"collections/{str(id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedCollectionResponse(**response_dict)
+
+    async def delete(
+        self,
+        id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Delete a collection.
+
+        Args:
+            id (str | UUID): Collection ID to delete
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE", f"collections/{str(id)}", version="v3"
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def list_documents(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedDocumentsResponse:
+        """List all documents in a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedDocumentsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"collections/{str(id)}/documents",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedDocumentsResponse(**response_dict)
+
+    async def add_document(
+        self,
+        id: str | UUID,
+        document_id: str | UUID,
+    ) -> WrappedGenericMessageResponse:
+        """Add a document to a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            document_id (str | UUID): Document ID to add
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        response_dict = await self.client._make_request(
+            "POST",
+            f"collections/{str(id)}/documents/{str(document_id)}",
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def remove_document(
+        self,
+        id: str | UUID,
+        document_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a document from a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            document_id (str | UUID): Document ID to remove
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"collections/{str(id)}/documents/{str(document_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def list_users(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedUsersResponse:
+        """List all users in a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedUsersResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = await self.client._make_request(
+            "GET", f"collections/{str(id)}/users", params=params, version="v3"
+        )
+
+        return WrappedUsersResponse(**response_dict)
+
+    async def add_user(
+        self,
+        id: str | UUID,
+        user_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Add a user to a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            user_id (str | UUID): User ID to add
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "POST", f"collections/{str(id)}/users/{str(user_id)}", version="v3"
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def remove_user(
+        self,
+        id: str | UUID,
+        user_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a user from a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            user_id (str | UUID): User ID to remove
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"collections/{str(id)}/users/{str(user_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def extract(
+        self,
+        id: str | UUID,
+        settings: Optional[dict] = None,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedGenericMessageResponse:
+        """Extract entities and relationships from documents in a collection.
+
+        Args:
+            id (str | UUID): Collection ID to extract from
+            settings (Optional[dict]): Settings for the entities and relationships extraction process
+            run_with_orchestration (Optional[bool]): Whether to run the extraction process with orchestration.
+                Defaults to True
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        params = {"run_with_orchestration": run_with_orchestration}
+
+        data: dict[str, Any] = {}
+        if settings is not None:
+            data["settings"] = settings
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"collections/{str(id)}/extract",
+            params=params,
+            json=data or None,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def retrieve_by_name(
+        self, name: str, owner_id: Optional[str] = None
+    ) -> WrappedCollectionResponse:
+        """Retrieve a collection by its name.
+
+        For non-superusers, the backend will use the authenticated user's ID.
+        For superusers, the caller must supply an owner_id to restrict the search.
+
+        Args:
+            name (str): The name of the collection to retrieve.
+            owner_id (Optional[str]): The owner ID to restrict the search. Required for superusers.
+
+        Returns:
+            WrappedCollectionResponse
+        """
+        query_params: dict[str, Any] = {}
+        if owner_id is not None:
+            query_params["owner_id"] = owner_id
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"collections/name/{name}",
+            params=query_params,
+            version="v3",
+        )
+        return WrappedCollectionResponse(**response_dict)
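A sketch of the collection lifecycle these methods support, under the same assumptions as above (the client attribute name and the `results.id` field of the wrapped response are inferred, not shown in this diff):

    async def collection_lifecycle(client, document_id):
        created = await client.collections.create(
            name="research", description="Papers under review"
        )
        collection_id = created.results.id  # field layout assumed from the Wrapped* models
        await client.collections.add_document(collection_id, document_id)
        docs = await client.collections.list_documents(collection_id, limit=50)
        await client.collections.remove_document(collection_id, document_id)
        await client.collections.delete(collection_id)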
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/conversations.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/conversations.py
new file mode 100644
index 00000000..885f9fc5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/conversations.py
@@ -0,0 +1,310 @@
+from builtins import list as _list
+from pathlib import Path
+from typing import Any, Optional
+from uuid import UUID
+
+import aiofiles
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedConversationMessagesResponse,
+    WrappedConversationResponse,
+    WrappedConversationsResponse,
+    WrappedMessageResponse,
+)
+
+
+class ConversationsSDK:
+    def __init__(self, client):
+        self.client = client
+
+    async def create(
+        self,
+        name: Optional[str] = None,
+    ) -> WrappedConversationResponse:
+        """Create a new conversation.
+
+        Returns:
+            WrappedConversationResponse
+        """
+        data: dict[str, Any] = {}
+        if name:
+            data["name"] = name
+
+        response_dict = await self.client._make_request(
+            "POST",
+            "conversations",
+            data=data,
+            version="v3",
+        )
+
+        return WrappedConversationResponse(**response_dict)
+
+    async def list(
+        self,
+        ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedConversationsResponse:
+        """List conversations with pagination and sorting options.
+
+        Args:
+            ids (Optional[list[str | UUID]]): List of conversation IDs to retrieve
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedConversationsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if ids:
+            params["ids"] = ids
+
+        response_dict = await self.client._make_request(
+            "GET",
+            "conversations",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedConversationsResponse(**response_dict)
+
+    async def retrieve(
+        self,
+        id: str | UUID,
+    ) -> WrappedConversationMessagesResponse:
+        """Get detailed information about a specific conversation.
+
+        Args:
+            id (str | UUID): The ID of the conversation to retrieve
+
+        Returns:
+            WrappedConversationMessagesResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"conversations/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedConversationMessagesResponse(**response_dict)
+
+    async def update(
+        self,
+        id: str | UUID,
+        name: str,
+    ) -> WrappedConversationResponse:
+        """Update an existing conversation.
+
+        Args:
+            id (str | UUID): The ID of the conversation to update
+            name (str): The new name of the conversation
+
+        Returns:
+            WrappedConversationResponse
+        """
+        data: dict[str, Any] = {
+            "name": name,
+        }
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"conversations/{str(id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedConversationResponse(**response_dict)
+
+    async def delete(
+        self,
+        id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Delete a conversation.
+
+        Args:
+            id (str | UUID): The ID of the conversation to delete
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"conversations/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def add_message(
+        self,
+        id: str | UUID,
+        content: str,
+        role: str,
+        metadata: Optional[dict] = None,
+        parent_id: Optional[str] = None,
+    ) -> WrappedMessageResponse:
+        """Add a new message to a conversation.
+
+        Args:
+            id (str | UUID): The ID of the conversation to add the message to
+            content (str): The content of the message
+            role (str): The role of the message (e.g., "user" or "assistant")
+            parent_id (Optional[str]): The ID of the parent message
+            metadata (Optional[dict]): Additional metadata to attach to the message
+
+        Returns:
+            WrappedMessageResponse
+        """
+        data: dict[str, Any] = {
+            "content": content,
+            "role": role,
+        }
+        if parent_id:
+            data["parent_id"] = parent_id
+        if metadata:
+            data["metadata"] = metadata
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"conversations/{str(id)}/messages",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedMessageResponse(**response_dict)
+
+    async def update_message(
+        self,
+        id: str | UUID,
+        message_id: str,
+        content: Optional[str] = None,
+        metadata: Optional[dict] = None,
+    ) -> WrappedMessageResponse:
+        """Update an existing message in a conversation.
+
+        Args:
+            id (str | UUID): The ID of the conversation containing the message
+            message_id (str): The ID of the message to update
+            content (Optional[str]): The new content of the message
+            metadata (Optional[dict]): Additional metadata to attach to the message
+
+        Returns:
+            WrappedMessageResponse
+        """
+        data: dict[str, Any] = {"content": content}
+        if metadata:
+            data["metadata"] = metadata
+        response_dict = await self.client._make_request(
+            "POST",
+            f"conversations/{str(id)}/messages/{message_id}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedMessageResponse(**response_dict)
+
+    async def export(
+        self,
+        output_path: str | Path,
+        columns: Optional[_list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export conversations to a CSV file, streaming the results directly
+        to disk.
+
+        Args:
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting conversations
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        # Prepare request data
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        async with aiofiles.open(output_path, "wb") as f:
+            async with self.client.session.post(
+                f"{self.client.base_url}/v3/conversations/export",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            ) as response:
+                if response.status != 200:
+                    raise ValueError(
+                        f"Export failed with status {response.status}",
+                        response,
+                    )
+
+                async for chunk in response.content.iter_chunks():
+                    if chunk:
+                        await f.write(chunk[0])
+
+    async def export_messages(
+        self,
+        output_path: str | Path,
+        columns: Optional[_list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export messages to a CSV file, streaming the results directly to
+        disk.
+
+        Args:
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting messages
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        # Prepare request data
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        async with aiofiles.open(output_path, "wb") as f:
+            async with self.client.session.post(
+                f"{self.client.base_url}/v3/conversations/export_messages",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            ) as response:
+                if response.status != 200:
+                    raise ValueError(
+                        f"Export failed with status {response.status}",
+                        response,
+                    )
+
+                async for chunk in response.content.iter_chunks():
+                    if chunk:
+                        await f.write(chunk[0])
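A hedged sketch tying the conversation methods together (client attribute names and response field layout assumed, as before):

    async def run_conversation(client):
        conv = await client.conversations.create(name="support-thread")
        conv_id = conv.results.id  # field layout assumed
        msg = await client.conversations.add_message(
            conv_id, content="Hello", role="user"
        )
        # Stream all conversations to disk as CSV
        await client.conversations.export("conversations.csv")
        await client.conversations.delete(conv_id)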
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/documents.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/documents.py
new file mode 100644
index 00000000..b8a254ee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/documents.py
@@ -0,0 +1,706 @@
+import json
+from datetime import datetime
+from io import BytesIO
+from pathlib import Path
+from typing import Any, Optional
+from uuid import UUID
+
+import aiofiles
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedChunksResponse,
+    WrappedCollectionsResponse,
+    WrappedDocumentResponse,
+    WrappedDocumentSearchResponse,
+    WrappedDocumentsResponse,
+    WrappedEntitiesResponse,
+    WrappedGenericMessageResponse,
+    WrappedIngestionResponse,
+    WrappedRelationshipsResponse,
+)
+
+from ..models import IngestionMode, SearchMode, SearchSettings
+
+
+class DocumentsSDK:
+    """SDK for interacting with documents in the v3 API."""
+
+    def __init__(self, client):
+        self.client = client
+
+    async def create(
+        self,
+        file_path: Optional[str] = None,
+        raw_text: Optional[str] = None,
+        chunks: Optional[list[str]] = None,
+        id: Optional[str | UUID] = None,
+        ingestion_mode: Optional[str] = None,
+        collection_ids: Optional[list[str | UUID]] = None,
+        metadata: Optional[dict] = None,
+        ingestion_config: Optional[dict | IngestionMode] = None,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedIngestionResponse:
+        """Create a new document from either a file or content.
+
+        Args:
+            file_path (Optional[str]): The file to upload, if any
+            raw_text (Optional[str]): Optional text content to upload, if no file path is provided
+            chunks (Optional[list[str]]): Optional list of pre-chunked text to upload, if neither a file path nor raw text is provided
+            id (Optional[str | UUID]): Optional ID to assign to the document
+            ingestion_mode (Optional[str]): Optional ingestion mode for the document
+            collection_ids (Optional[list[str | UUID]]): Collection IDs to associate with the document. If none are provided, the document will be assigned to the user's default collection.
+            metadata (Optional[dict]): Optional metadata to assign to the document
+            ingestion_config (Optional[dict | IngestionMode]): Optional ingestion configuration to use
+            run_with_orchestration (Optional[bool]): Whether to run with orchestration
+
+        Returns:
+            WrappedIngestionResponse
+        """
+        if not file_path and not raw_text and not chunks:
+            raise ValueError(
+                "Either `file_path`, `raw_text` or `chunks` must be provided"
+            )
+        if (
+            (file_path and raw_text)
+            or (file_path and chunks)
+            or (raw_text and chunks)
+        ):
+            raise ValueError(
+                "Only one of `file_path`, `raw_text` or `chunks` may be provided"
+            )
+
+        data: dict[str, Any] = {}
+        files = None
+
+        if id:
+            data["id"] = str(id)
+        if metadata:
+            data["metadata"] = json.dumps(metadata)
+        if ingestion_config:
+            if isinstance(ingestion_config, IngestionMode):
+                ingestion_config = {"mode": ingestion_config.value}
+            # After the conversion above, ingestion_config is always a dict;
+            # keep any caller-supplied "app" sub-config rather than
+            # overwriting it with an empty one.
+            ingestion_config = dict(ingestion_config)
+            ingestion_config.setdefault("app", {})
+            data["ingestion_config"] = json.dumps(ingestion_config)
+        if collection_ids:
+            collection_ids = [
+                str(collection_id) for collection_id in collection_ids
+            ]  # type: ignore
+            data["collection_ids"] = json.dumps(collection_ids)
+        if run_with_orchestration is not None:
+            data["run_with_orchestration"] = str(run_with_orchestration)
+        if ingestion_mode is not None:
+            data["ingestion_mode"] = ingestion_mode
+        if file_path:
+            # Create a new file instance that will remain open during the request
+            file_instance = open(file_path, "rb")
+            files = [
+                (
+                    "file",
+                    (file_path, file_instance, "application/octet-stream"),
+                )
+            ]
+            try:
+                response_dict = await self.client._make_request(
+                    "POST",
+                    "documents",
+                    data=data,
+                    files=files,
+                    version="v3",
+                )
+            finally:
+                # Ensure we close the file after the request is complete
+                file_instance.close()
+        elif raw_text:
+            data["raw_text"] = raw_text  # type: ignore
+            response_dict = await self.client._make_request(
+                "POST",
+                "documents",
+                data=data,
+                version="v3",
+            )
+        else:
+            data["chunks"] = json.dumps(chunks)
+            response_dict = await self.client._make_request(
+                "POST",
+                "documents",
+                data=data,
+                version="v3",
+            )
+
+        return WrappedIngestionResponse(**response_dict)
+
+    async def append_metadata(
+        self,
+        id: str | UUID,
+        metadata: list[dict],
+    ) -> WrappedDocumentResponse:
+        """Append metadata to a document.
+
+        Args:
+            id (str | UUID): ID of document to append metadata to
+            metadata (list[dict]): Metadata to append
+
+        Returns:
+            WrappedDocumentResponse
+        """
+        data = json.dumps(metadata)
+        response_dict = await self.client._make_request(
+            "PATCH",
+            f"documents/{str(id)}/metadata",
+            data=data,
+            version="v3",
+        )
+
+        return WrappedDocumentResponse(**response_dict)
+
+    async def replace_metadata(
+        self,
+        id: str | UUID,
+        metadata: list[dict],
+    ) -> WrappedDocumentResponse:
+        """Replace metadata for a document.
+
+        Args:
+            id (str | UUID): ID of document to replace metadata for
+            metadata (list[dict]): The metadata that will replace the existing metadata
+        """
+        data = json.dumps(metadata)
+        response_dict = await self.client._make_request(
+            "PUT",
+            f"documents/{str(id)}/metadata",
+            data=data,
+            version="v3",
+        )
+
+        return WrappedDocumentResponse(**response_dict)
+
+    async def retrieve(
+        self,
+        id: str | UUID,
+    ) -> WrappedDocumentResponse:
+        """Get a specific document by ID.
+
+        Args:
+            id (str | UUID): ID of document to retrieve
+
+        Returns:
+            WrappedDocumentResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"documents/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedDocumentResponse(**response_dict)
+
+    async def download(
+        self,
+        id: str | UUID,
+    ) -> BytesIO:
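+        """Download a document's original file content.
+
+        Args:
+            id (str | UUID): ID of document to download
+
+        Returns:
+            BytesIO: In-memory buffer containing the raw file bytes
+        """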
+        response = await self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/download",
+            version="v3",
+        )
+        if not isinstance(response, BytesIO):
+            raise ValueError("Expected BytesIO response")
+        return response
+
+    async def download_zip(
+        self,
+        document_ids: Optional[list[str | UUID]] = None,
+        start_date: Optional[datetime] = None,
+        end_date: Optional[datetime] = None,
+        output_path: Optional[str | Path] = None,
+    ) -> BytesIO | None:
+        """Download multiple documents as a zip file."""
+        params: dict[str, Any] = {}
+        if document_ids:
+            params["document_ids"] = [str(doc_id) for doc_id in document_ids]
+        if start_date:
+            params["start_date"] = start_date.isoformat()
+        if end_date:
+            params["end_date"] = end_date.isoformat()
+
+        response = await self.client._make_request(
+            "GET",
+            "documents/download_zip",
+            params=params,
+            version="v3",
+        )
+
+        if not isinstance(response, BytesIO):
+            raise ValueError("Expected BytesIO response")
+
+        if output_path:
+            output_path = (
+                Path(output_path)
+                if isinstance(output_path, str)
+                else output_path
+            )
+            async with aiofiles.open(output_path, "wb") as f:
+                await f.write(response.getvalue())
+            return None
+
+        return response
+
+    async def export(
+        self,
+        output_path: str | Path,
+        columns: Optional[list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export documents to a CSV file, streaming the results directly to
+        disk.
+
+        Args:
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting documents
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        async with aiofiles.open(output_path, "wb") as f:
+            async with self.client.session.post(
+                f"{self.client.base_url}/v3/documents/export",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            ) as response:
+                if response.status != 200:
+                    raise ValueError(
+                        f"Export failed with status {response.status}",
+                        response,
+                    )
+
+                async for chunk in response.content.iter_chunks():
+                    if chunk:
+                        await f.write(chunk[0])
+
+    async def export_entities(
+        self,
+        id: str | UUID,
+        output_path: str | Path,
+        columns: Optional[list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export documents to a CSV file, streaming the results directly to
+        disk.
+
+        Args:
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting documents
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        # Prepare request data
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        async with aiofiles.open(output_path, "wb") as f:
+            async with self.client.session.post(
+                f"{self.client.base_url}/v3/documents/{str(id)}/entities/export",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            ) as response:
+                if response.status != 200:
+                    raise ValueError(
+                        f"Export failed with status {response.status}",
+                        response,
+                    )
+
+                async for chunk in response.content.iter_chunks():
+                    if chunk:
+                        await f.write(chunk[0])
+
+    async def export_relationships(
+        self,
+        id: str | UUID,
+        output_path: str | Path,
+        columns: Optional[list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export document relationships to a CSV file, streaming the results
+        directly to disk.
+
+        Args:
+            id (str | UUID): ID of the document whose relationships should be exported
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting relationships
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        # Prepare request data
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        async with aiofiles.open(output_path, "wb") as f:
+            async with self.client.session.post(
+                f"{self.client.base_url}/v3/documents/{str(id)}/relationships/export",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            ) as response:
+                if response.status != 200:
+                    raise ValueError(
+                        f"Export failed with status {response.status}",
+                        response,
+                    )
+
+                async for chunk in response.content.iter_chunks():
+                    if chunk:
+                        await f.write(chunk[0])
+
+    async def delete(
+        self,
+        id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Delete a specific document.
+
+        Args:
+            id (str | UUID): ID of document to delete
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"documents/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def list_chunks(
+        self,
+        id: str | UUID,
+        include_vectors: Optional[bool] = False,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedChunksResponse:
+        """Get chunks for a specific document.
+
+        Args:
+            id (str | UUID): ID of document to retrieve chunks for
+            include_vectors (Optional[bool]): Whether to include vector embeddings in the response
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedChunksResponse
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+            "include_vectors": include_vectors,
+        }
+        response_dict = await self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/chunks",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedChunksResponse(**response_dict)
+
+    async def list_collections(
+        self,
+        id: str | UUID,
+        include_vectors: Optional[bool] = False,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedCollectionsResponse:
+        """List collections for a specific document.
+
+        Args:
+            id (str | UUID): ID of document to retrieve collections for
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedCollectionsResponse
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/collections",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedCollectionsResponse(**response_dict)
+
+    async def delete_by_filter(
+        self,
+        filters: dict,
+    ) -> WrappedBooleanResponse:
+        """Delete documents based on filters.
+
+        Args:
+            filters (dict): Filters to apply when selecting documents to delete
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        filters_json = json.dumps(filters)
+        response_dict = await self.client._make_request(
+            "DELETE",
+            "documents/by-filter",
+            data=filters_json,
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def extract(
+        self,
+        id: str | UUID,
+        settings: Optional[dict] = None,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedGenericMessageResponse:
+        """Extract entities and relationships from a document.
+
+        Args:
+            id (str | UUID): ID of document to extract from
+            settings (Optional[dict]): Settings for extraction process
+            run_with_orchestration (Optional[bool]): Whether to run with orchestration
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        data: dict[str, Any] = {}
+        if settings:
+            data["settings"] = json.dumps(settings)
+        if run_with_orchestration is not None:
+            data["run_with_orchestration"] = str(run_with_orchestration)
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"documents/{str(id)}/extract",
+            params=data,
+            version="v3",
+        )
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def list_entities(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+        include_embeddings: Optional[bool] = False,
+    ) -> WrappedEntitiesResponse:
+        """List entities extracted from a document.
+
+        Args:
+            id (str | UUID): ID of document to get entities from
+            offset (Optional[int]): Number of items to skip
+            limit (Optional[int]): Max number of items to return
+            include_embeddings (Optional[bool]): Whether to include embeddings
+
+        Returns:
+            WrappedEntitiesResponse
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+            "include_embeddings": include_embeddings,
+        }
+        response_dict = await self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/entities",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedEntitiesResponse(**response_dict)
+
+    async def list_relationships(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+        entity_names: Optional[list[str]] = None,
+        relationship_types: Optional[list[str]] = None,
+    ) -> WrappedRelationshipsResponse:
+        """List relationships extracted from a document.
+
+        Args:
+            id (str | UUID): ID of document to get relationships from
+            offset (Optional[int]): Number of items to skip
+            limit (Optional[int]): Max number of items to return
+            entity_names (Optional[list[str]]): Filter by entity names
+            relationship_types (Optional[list[str]]): Filter by relationship types
+
+        Returns:
+            WrappedRelationshipsResponse
+        """
+        params: dict[str, Any] = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if entity_names:
+            params["entity_names"] = entity_names
+        if relationship_types:
+            params["relationship_types"] = relationship_types
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/relationships",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedRelationshipsResponse(**response_dict)
+
+    async def list(
+        self,
+        ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedDocumentsResponse:
+        """List documents with pagination.
+
+        Args:
+            ids (Optional[list[str | UUID]]): Optional list of document IDs to filter by
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedDocumentsResponse
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if ids:
+            params["ids"] = [str(doc_id) for doc_id in ids]  # type: ignore
+
+        response_dict = await self.client._make_request(
+            "GET",
+            "documents",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedDocumentsResponse(**response_dict)
+
+    async def search(
+        self,
+        query: str,
+        search_mode: Optional[str | SearchMode] = "custom",
+        search_settings: Optional[dict | SearchSettings] = None,
+    ) -> WrappedDocumentSearchResponse:
+        """Conduct a vector and/or graph search.
+
+        Args:
+            query (str): The query to search for.
+            search_mode (Optional[str | SearchMode]): The search mode to use. Defaults to "custom".
+            search_settings (Optional[dict | SearchSettings]): Vector search settings.
+
+        Returns:
+            WrappedDocumentSearchResponse
+        """
+        if search_settings and not isinstance(search_settings, dict):
+            search_settings = search_settings.model_dump()
+        data: dict[str, Any] = {
+            "query": query,
+            "search_settings": search_settings,
+        }
+        if search_mode:
+            data["search_mode"] = search_mode
+
+        response_dict = await self.client._make_request(
+            "POST",
+            "documents/search",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedDocumentSearchResponse(**response_dict)
+
+    async def deduplicate(
+        self,
+        id: str | UUID,
+        settings: Optional[dict] = None,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedGenericMessageResponse:
+        """Deduplicate entities and relationships from a document.
+
+        Args:
+            id (str | UUID): ID of document to deduplicate
+            settings (Optional[dict]): Settings for extraction process
+            run_with_orchestration (Optional[bool]): Whether to run with orchestration
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        data: dict[str, Any] = {}
+        if settings:
+            data["settings"] = json.dumps(settings)
+        if run_with_orchestration is not None:
+            data["run_with_orchestration"] = str(run_with_orchestration)
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"documents/{str(id)}/deduplicate",
+            params=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
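Closing out documents.py, a hedged ingestion-and-search sketch (same assumptions about client wiring; the text and metadata values are illustrative only):

    async def ingest_and_search(client):
        # Exactly one of file_path, raw_text, or chunks may be supplied
        ingested = await client.documents.create(
            raw_text="R2R is a retrieval system.",
            metadata={"topic": "retrieval"},
        )
        hits = await client.documents.search(
            query="What is R2R?",
            search_mode="custom",
            search_settings={"limit": 3},
        )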
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/graphs.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/graphs.py
new file mode 100644
index 00000000..676aceaa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/graphs.py
@@ -0,0 +1,614 @@
+from builtins import list as _list
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedCommunitiesResponse,
+    WrappedCommunityResponse,
+    WrappedEntitiesResponse,
+    WrappedEntityResponse,
+    WrappedGenericMessageResponse,
+    WrappedGraphResponse,
+    WrappedGraphsResponse,
+    WrappedRelationshipResponse,
+    WrappedRelationshipsResponse,
+)
+
+
+class GraphsSDK:
+    """SDK for interacting with knowledge graphs in the v3 API."""
+
+    def __init__(self, client):
+        self.client = client
+
+    async def list(
+        self,
+        collection_ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedGraphsResponse:
+        """List graphs with pagination and filtering options.
+
+        Args:
+            collection_ids (Optional[list[str | UUID]]): Filter graphs by collection ids
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedGraphsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if collection_ids:
+            params["collection_ids"] = collection_ids
+
+        response_dict = await self.client._make_request(
+            "GET", "graphs", params=params, version="v3"
+        )
+
+        return WrappedGraphsResponse(**response_dict)
+
+    async def retrieve(
+        self,
+        collection_id: str | UUID,
+    ) -> WrappedGraphResponse:
+        """Get detailed information about a specific graph.
+
+        Args:
+            collection_id (str | UUID): Graph ID to retrieve
+
+        Returns:
+            WrappedGraphResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET", f"graphs/{str(collection_id)}", version="v3"
+        )
+
+        return WrappedGraphResponse(**response_dict)
+
+    async def reset(
+        self,
+        collection_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Deletes a graph and all its associated data.
+
+        This endpoint permanently removes the specified graph along with all
+        entities and relationships that belong only to this graph.
+
+        Entities and relationships extracted from documents are not deleted.
+
+        Args:
+            collection_id (str | UUID): Graph ID to reset
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "POST", f"graphs/{str(collection_id)}/reset", version="v3"
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def update(
+        self,
+        collection_id: str | UUID,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+    ) -> WrappedGraphResponse:
+        """Update graph information.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            name (Optional[str]): Optional new name for the graph
+            description (Optional[str]): Optional new description for the graph
+
+        Returns:
+            WrappedGraphResponse
+        """
+        data: dict[str, Any] = {}
+        if name is not None:
+            data["name"] = name
+        if description is not None:
+            data["description"] = description
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGraphResponse(**response_dict)
+
+    async def list_entities(
+        self,
+        collection_id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedEntitiesResponse:
+        """List entities in a graph.
+
+        Args:
+            collection_id (str | UUID): Graph ID to list entities from
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedEntitiesResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/entities",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedEntitiesResponse(**response_dict)
+
+    async def get_entity(
+        self,
+        collection_id: str | UUID,
+        entity_id: str | UUID,
+    ) -> WrappedEntityResponse:
+        """Get entity information in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            entity_id (str | UUID): Entity ID to get from the graph
+
+        Returns:
+            WrappedEntityResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/entities/{str(entity_id)}",
+            version="v3",
+        )
+
+        return WrappedEntityResponse(**response_dict)
+
+    async def remove_entity(
+        self,
+        collection_id: str | UUID,
+        entity_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove an entity from a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            entity_id (str | UUID): Entity ID to remove from the graph
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/entities/{str(entity_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def list_relationships(
+        self,
+        collection_id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedRelationshipsResponse:
+        """List relationships in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedRelationshipsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/relationships",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedRelationshipsResponse(**response_dict)
+
+    async def get_relationship(
+        self,
+        collection_id: str | UUID,
+        relationship_id: str | UUID,
+    ) -> WrappedRelationshipResponse:
+        """Get relationship information in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            relationship_id (str | UUID): Relationship ID to get from the graph
+
+        Returns:
+            WrappedRelationshipResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/relationships/{str(relationship_id)}",
+            version="v3",
+        )
+
+        return WrappedRelationshipResponse(**response_dict)
+
+    async def remove_relationship(
+        self,
+        collection_id: str | UUID,
+        relationship_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a relationship from a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            relationship_id (str | UUID): Relationship ID to remove from the graph
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/relationships/{str(relationship_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def build(
+        self,
+        collection_id: str | UUID,
+        settings: Optional[dict] = None,
+        run_with_orchestration: bool = True,
+    ) -> WrappedGenericMessageResponse:
+        """Build a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            settings (Optional[dict]): Settings for the build process
+            run_with_orchestration (bool, optional): Whether to run with orchestration. Defaults to True.
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        data: dict[str, Any] = {
+            "run_with_orchestration": run_with_orchestration,
+        }
+        if settings:
+            data["settings"] = settings
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/communities/build",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def list_communities(
+        self,
+        collection_id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedCommunitiesResponse:
+        """List communities in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedCommunitiesResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/communities",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedCommunitiesResponse(**response_dict)
+
+    async def get_community(
+        self,
+        collection_id: str | UUID,
+        community_id: str | UUID,
+    ) -> WrappedCommunityResponse:
+        """Get community information in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            community_id (str | UUID): Community ID to get from the graph
+
+        Returns:
+            WrappedCommunityResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/communities/{str(community_id)}",
+            version="v3",
+        )
+
+        return WrappedCommunityResponse(**response_dict)
+
+    async def update_community(
+        self,
+        collection_id: str | UUID,
+        community_id: str | UUID,
+        name: Optional[str] = None,
+        summary: Optional[str] = None,
+        findings: Optional[_list[str]] = None,
+        rating: Optional[int] = None,
+        rating_explanation: Optional[str] = None,
+        level: Optional[int] = None,
+        attributes: Optional[dict] = None,
+    ) -> WrappedCommunityResponse:
+        """Update community information.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            community_id (str | UUID): Community ID to update
+            name (Optional[str]): Optional new name for the community
+            summary (Optional[str]): Optional new summary for the community
+            findings (Optional[list[str]]): Optional new findings for the community
+            rating (Optional[int]): Optional new rating for the community
+            rating_explanation (Optional[str]): Optional new rating explanation for the community
+            level (Optional[int]): Optional new level for the community
+            attributes (Optional[dict]): Optional new attributes for the community
+
+        Returns:
+            WrappedCommunityResponse
+        """
+        data: dict[str, Any] = {}
+        if name is not None:
+            data["name"] = name
+        if summary is not None:
+            data["summary"] = summary
+        if findings is not None:
+            data["findings"] = findings
+        if rating is not None:
+            data["rating"] = str(rating)
+        if rating_explanation is not None:
+            data["rating_explanation"] = rating_explanation
+        if level is not None:
+            data["level"] = level
+        if attributes is not None:
+            data["attributes"] = attributes
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/communities/{str(community_id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedCommunityResponse(**response_dict)
+
+    async def delete_community(
+        self,
+        collection_id: str | UUID,
+        community_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a community from a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            community_id (str | UUID): Community ID to remove from the graph
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/communities/{str(community_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def pull(
+        self,
+        collection_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Adds documents to a graph by copying their entities and
+        relationships.
+
+        This endpoint:
+            1. Copies document entities to the graphs_entities table
+            2. Copies document relationships to the graphs_relationships table
+            3. Associates the documents with the graph
+
+        When a document is added:
+            - Its entities and relationships are copied to graph-specific tables
+            - Existing entities/relationships are updated by merging their properties
+            - The document ID is recorded in the graph's document_ids array
+
+        Documents added to a graph will contribute their knowledge to:
+            - Graph analysis and querying
+            - Community detection
+            - Knowledge graph enrichment
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/pull",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def remove_document(
+        self,
+        collection_id: str | UUID,
+        document_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Removes a document from a graph and removes any associated entities.
+
+        This endpoint:
+            1. Removes the document ID from the graph's document_ids array
+            2. Optionally deletes the document's copied entities and relationships
+
+        The user must have access to both the graph and the document being removed.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            document_id (str | UUID): Document ID to remove from the graph
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/documents/{str(document_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def create_entity(
+        self,
+        collection_id: str | UUID,
+        name: str,
+        description: str,
+        category: Optional[str] = None,
+        metadata: Optional[dict] = None,
+    ) -> WrappedEntityResponse:
+        """Creates a new entity in the graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            name (str): The name of the entity to create
+            description (str): The description of the entity
+            category (Optional[str]): The category of the entity
+            metadata (Optional[dict]): Additional metadata for the entity
+
+        Returns:
+            WrappedEntityResponse
+        """
+        data: dict[str, Any] = {
+            "name": name,
+            "description": description,
+        }
+        if category is not None:
+            data["category"] = category
+        if metadata is not None:
+            data["metadata"] = metadata
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/entities",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedEntityResponse(**response_dict)
+
+    async def create_relationship(
+        self,
+        collection_id: str | UUID,
+        subject: str,
+        subject_id: str | UUID,
+        predicate: str,
+        object: str,
+        object_id: str | UUID,
+        description: str,
+        weight: Optional[float] = None,
+        metadata: Optional[dict] = None,
+    ) -> WrappedRelationshipResponse:
+        """Creates a new relationship in the graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            subject (str): The subject of the relationship
+            subject_id (str | UUID): The ID of the subject entity
+            predicate (str): The predicate/type of the relationship
+            object (str): The object of the relationship
+            object_id (str | UUID): The ID of the object entity
+            description (str): Description of the relationship
+            weight (Optional[float]): Weight/strength of the relationship
+            metadata (Optional[dict]): Additional metadata for the relationship
+
+        Returns:
+            WrappedRelationshipResponse
+        """
+        data: dict[str, Any] = {
+            "subject": subject,
+            "subject_id": str(subject_id),
+            "predicate": predicate,
+            "object": object,
+            "object_id": str(object_id),
+            "description": description,
+        }
+        if weight is not None:
+            data["weight"] = weight
+        if metadata is not None:
+            data["metadata"] = metadata
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/relationships",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedRelationshipResponse(**response_dict)
+
+    async def create_community(
+        self,
+        collection_id: str | UUID,
+        name: str,
+        summary: str,
+        findings: Optional[_list[str]] = None,
+        rating: Optional[float] = None,
+        rating_explanation: Optional[str] = None,
+    ) -> WrappedCommunityResponse:
+        """Creates a new community in the graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            name (str): The name of the community
+            summary (str): A summary description of the community
+            findings (Optional[list[str]]): List of findings about the community
+            rating (Optional[float]): Rating between 1 and 10
+            rating_explanation (Optional[str]): Explanation for the rating
+
+        Returns:
+            WrappedCommunityResponse
+        """
+        data: dict[str, Any] = {
+            "name": name,
+            "summary": summary,
+        }
+        if findings is not None:
+            data["findings"] = findings
+        if rating is not None:
+            data["rating"] = rating
+        if rating_explanation is not None:
+            data["rating_explanation"] = rating_explanation
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/communities",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedCommunityResponse(**response_dict)
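
A hedged sketch of the typical graph lifecycle using the methods above, assuming client.graphs is this GraphsSDK and collection_id names an existing collection; run inside an async function:

    # Copy document entities and relationships into the graph, then build it.
    await client.graphs.pull(collection_id)
    await client.graphs.build(collection_id, run_with_orchestration=True)
    # Inspect what was produced.
    entities = await client.graphs.list_entities(collection_id, limit=50)
    relationships = await client.graphs.list_relationships(collection_id)
    communities = await client.graphs.list_communities(collection_id)
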
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/indices.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/indices.py
new file mode 100644
index 00000000..966023ed
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/indices.py
@@ -0,0 +1,116 @@
+import json
+from typing import Any, Optional
+
+from shared.api.models import (
+    WrappedGenericMessageResponse,
+    WrappedVectorIndexResponse,
+    WrappedVectorIndicesResponse,
+)
+
+
+class IndicesSDK:
+    def __init__(self, client):
+        self.client = client
+
+    async def create(
+        self,
+        config: dict,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedGenericMessageResponse:
+        """Create a new vector similarity search index in the database.
+
+        Args:
+            config (dict | IndexConfig): Configuration for the vector index.
+            run_with_orchestration (Optional[bool]): Whether to run index creation as an orchestrated task.
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        if not isinstance(config, dict):
+            config = config.model_dump()
+
+        data: dict[str, Any] = {
+            "config": config,
+            "run_with_orchestration": run_with_orchestration,
+        }
+        response_dict = await self.client._make_request(
+            "POST",
+            "indices",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def list(
+        self,
+        filters: Optional[dict] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 10,
+    ) -> WrappedVectorIndicesResponse:
+        """List existing vector similarity search indices with pagination
+        support.
+
+        Args:
+            filters (Optional[dict]): Filter criteria for indices.
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 10.
+
+        Returns:
+            WrappedVectorIndicesResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if filters:
+            params["filters"] = json.dumps(filters)
+        response_dict = await self.client._make_request(
+            "GET",
+            "indices",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedVectorIndicesResponse(**response_dict)
+
+    async def retrieve(
+        self,
+        index_name: str,
+        table_name: str = "vectors",
+    ) -> WrappedVectorIndexResponse:
+        """Get detailed information about a specific vector index.
+
+        Args:
+            index_name (str): The name of the index to retrieve.
+            table_name (str): The name of the table where the index is stored.
+
+        Returns:
+            WrappedVectorIndexResponse: The response containing the index details.
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"indices/{table_name}/{index_name}",
+            version="v3",
+        )
+
+        return WrappedVectorIndexResponse(**response_dict)
+
+    async def delete(
+        self,
+        index_name: str,
+        table_name: str = "vectors",
+    ) -> WrappedGenericMessageResponse:
+        """Delete an existing vector index.
+
+        Args:
+            index_name (str): The name of the index to delete.
+            table_name (str): The name of the table where the index is stored.
+
+        Returns:
+            WrappedGenericMessageResponse: Confirmation of the deletion.
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"indices/{table_name}/{index_name}",
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
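
A sketch of index management with IndicesSDK, assuming client.indices; the config keys are assumptions, since the index schema is defined server-side and not shown in this diff:

    # Create a vector index (config keys are assumed, not confirmed here).
    await client.indices.create(
        config={"index_method": "hnsw", "index_measure": "cosine_distance"},
        run_with_orchestration=True,
    )
    # List, retrieve, and delete by name (default table is "vectors").
    indices = await client.indices.list(offset=0, limit=10)
    detail = await client.indices.retrieve(index_name="my_index")
    await client.indices.delete(index_name="my_index")
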
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/prompts.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/prompts.py
new file mode 100644
index 00000000..c7cdb50a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/prompts.py
@@ -0,0 +1,128 @@
+import json
+from typing import Any, Optional
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedGenericMessageResponse,
+    WrappedPromptResponse,
+    WrappedPromptsResponse,
+)
+
+
+class PromptsSDK:
+    def __init__(self, client):
+        self.client = client
+
+    async def create(
+        self, name: str, template: str, input_types: dict
+    ) -> WrappedGenericMessageResponse:
+        """Create a new prompt.
+
+        Args:
+            name (str): The name of the prompt
+            template (str): The template string for the prompt
+            input_types (dict): A dictionary mapping input names to their types
+        Returns:
+            WrappedGenericMessageResponse: Confirmation that the prompt was created
+        """
+        data: dict[str, Any] = {
+            "name": name,
+            "template": template,
+            "input_types": input_types,
+        }
+        response_dict = await self.client._make_request(
+            "POST",
+            "prompts",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def list(self) -> WrappedPromptsResponse:
+        """List all available prompts.
+
+        Returns:
+            WrappedPromptsResponse: List of all available prompts
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            "prompts",
+            version="v3",
+        )
+
+        return WrappedPromptsResponse(**response_dict)
+
+    async def retrieve(
+        self,
+        name: str,
+        inputs: Optional[dict] = None,
+        prompt_override: Optional[str] = None,
+    ) -> WrappedPromptResponse:
+        """Get a specific prompt by name, optionally with inputs and override.
+
+        Args:
+            name (str): The name of the prompt to retrieve
+            inputs (Optional[dict]): Inputs to substitute into the prompt template (sent JSON-encoded)
+            prompt_override (Optional[str]): An override for the prompt template
+        Returns:
+            WrappedPromptResponse: The requested prompt with applied inputs and/or override
+        """
+        params = {}
+        if inputs:
+            params["inputs"] = json.dumps(inputs)
+        if prompt_override:
+            params["prompt_override"] = prompt_override
+        response_dict = await self.client._make_request(
+            "POST",
+            f"prompts/{name}",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedPromptResponse(**response_dict)
+
+    async def update(
+        self,
+        name: str,
+        template: Optional[str] = None,
+        input_types: Optional[dict] = None,
+    ) -> WrappedGenericMessageResponse:
+        """Update an existing prompt's template and/or input types.
+
+        Args:
+            name (str): The name of the prompt to update
+            template (Optional[str]): The updated template string for the prompt
+            input_types (Optional[dict]): The updated dictionary mapping input names to their types
+        Returns:
+            WrappedGenericMessageResponse: Confirmation that the prompt was updated
+        """
+        data: dict = {}
+        if template:
+            data["template"] = template
+        if input_types:
+            data["input_types"] = json.dumps(input_types)
+        response_dict = await self.client._make_request(
+            "PUT",
+            f"prompts/{name}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def delete(self, name: str) -> WrappedBooleanResponse:
+        """Delete a prompt by name.
+
+        Args:
+            name (str): The name of the prompt to delete
+        Returns:
+            WrappedBooleanResponse: True if deletion was successful
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"prompts/{name}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
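
A sketch of the prompt CRUD surface above, assuming client.prompts; the prompt name, template, and inputs are illustrative:

    await client.prompts.create(
        name="greeting",
        template="Say hello to {name}.",
        input_types={"name": "str"},
    )
    # retrieve applies the given inputs to the stored template.
    prompt = await client.prompts.retrieve(name="greeting", inputs={"name": "Ada"})
    await client.prompts.update(name="greeting", template="Greet {name} warmly.")
    await client.prompts.delete(name="greeting")
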
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/retrieval.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/retrieval.py
new file mode 100644
index 00000000..d825a91f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/retrieval.py
@@ -0,0 +1,394 @@
+from typing import Generator
+
+from shared.api.models import (
+    CitationEvent,
+    FinalAnswerEvent,
+    MessageEvent,
+    SearchResultsEvent,
+    ThinkingEvent,
+    ToolCallEvent,
+    ToolResultEvent,
+    UnknownEvent,
+    WrappedAgentResponse,
+    WrappedRAGResponse,
+    WrappedSearchResponse,
+)
+
+from ..models import (
+    Message,
+)
+from ..sync_methods.retrieval import parse_retrieval_event
+
+
+class RetrievalSDK:
+    """
+    SDK for retrieval operations (search, completions, RAG, and agent) in the v3 API (asynchronous).
+    """
+
+    def __init__(self, client):
+        self.client = client
+
+    async def search(self, **kwargs) -> WrappedSearchResponse:
+        """
+        Conduct a vector and/or graph search (async).
+
+        Args:
+            query (str): Search query to find relevant documents.
+            search_mode (Optional[str | SearchMode]): Pre-configured search modes: "basic", "advanced", or "custom".
+            search_settings (Optional[dict | SearchSettings]): The search configuration object. If search_mode is "custom",
+                these settings are used as-is. For "basic" or "advanced", these settings
+                will override the default mode configuration.
+
+        Returns:
+            WrappedSearchResponse: The search results.
+        """
+        # Extract the required query parameter
+        query = kwargs.pop("query", None)
+        if query is None:
+            raise ValueError("'query' is a required parameter for search")
+
+        # Process common parameters
+        search_mode = kwargs.pop("search_mode", "custom")
+        search_settings = kwargs.pop("search_settings", None)
+
+        # Handle type conversions
+        if search_mode and not isinstance(search_mode, str):
+            search_mode = search_mode.value
+        if search_settings and not isinstance(search_settings, dict):
+            search_settings = search_settings.model_dump()
+
+        # Build payload
+        payload = {
+            "query": query,
+            "search_mode": search_mode,
+            "search_settings": search_settings,
+            **kwargs,  # Include any additional parameters
+        }
+
+        # Filter out None values
+        payload = {k: v for k, v in payload.items() if v is not None}
+
+        response_dict = await self.client._make_request(
+            "POST",
+            "retrieval/search",
+            json=payload,
+            version="v3",
+        )
+        return WrappedSearchResponse(**response_dict)
+
+    async def completion(self, **kwargs):
+        """
+        Get a completion from the model (async).
+
+        Args:
+            messages (list[dict | Message]): List of messages to generate completion for. Each message
+                should have a 'role' and 'content'.
+            generation_config (Optional[dict | GenerationConfig]): Configuration for text generation.
+
+        Returns:
+            The completion response.
+        """
+        # Extract required parameters
+        messages = kwargs.pop("messages", None)
+        if messages is None:
+            raise ValueError(
+                "'messages' is a required parameter for completion"
+            )
+
+        # Process optional parameters
+        generation_config = kwargs.pop("generation_config", None)
+
+        # Handle type conversions
+        cast_messages = [
+            Message(**msg) if isinstance(msg, dict) else msg
+            for msg in messages
+        ]
+        if generation_config and not isinstance(generation_config, dict):
+            generation_config = generation_config.model_dump()
+
+        # Build payload
+        payload = {
+            "messages": [msg.model_dump() for msg in cast_messages],
+            "generation_config": generation_config,
+            **kwargs,  # Include any additional parameters
+        }
+
+        # Filter out None values
+        payload = {k: v for k, v in payload.items() if v is not None}
+
+        return await self.client._make_request(
+            "POST",
+            "retrieval/completion",
+            json=payload,
+            version="v3",
+        )
+
+    async def embedding(self, **kwargs):
+        """
+        Generate an embedding for given text (async).
+
+        Args:
+            text (str): Text to generate embeddings for.
+
+        Returns:
+            The embedding vector.
+        """
+        # Extract required parameters
+        text = kwargs.pop("text", None)
+        if text is None:
+            raise ValueError("'text' is a required parameter for embedding")
+
+        # Build payload
+        payload = {"text": text, **kwargs}  # Include any additional parameters
+
+        return await self.client._make_request(
+            "POST",
+            "retrieval/embedding",
+            data=payload,
+            version="v3",
+        )
+
+    async def rag(
+        self, **kwargs
+    ) -> (
+        WrappedRAGResponse
+        | Generator[
+            ThinkingEvent
+            | SearchResultsEvent
+            | MessageEvent
+            | CitationEvent
+            | FinalAnswerEvent
+            | ToolCallEvent
+            | ToolResultEvent
+            | UnknownEvent
+            | None,
+            None,
+            None,
+        ]
+    ):
+        """
+        Conducts a Retrieval Augmented Generation (RAG) search (async).
+        May return a `WrappedRAGResponse` or a streaming generator if `stream=True`.
+
+        Args:
+            query (str): The search query.
+            rag_generation_config (Optional[dict | GenerationConfig]): Configuration for RAG generation.
+            search_mode (Optional[str | SearchMode]): Pre-configured search modes: "basic", "advanced", or "custom".
+            search_settings (Optional[dict | SearchSettings]): The search configuration object.
+            task_prompt (Optional[str]): Optional custom prompt to override default.
+            include_title_if_available (Optional[bool]): Include document titles in responses when available.
+            include_web_search (Optional[bool]): Include web search results in the context provided to the LLM.
+
+        Returns:
+            Either a WrappedRAGResponse or an AsyncGenerator for streaming.
+        """
+        # Extract required parameters
+        query = kwargs.pop("query", None)
+        if query is None:
+            raise ValueError("'query' is a required parameter for rag")
+
+        # Process optional parameters
+        rag_generation_config = kwargs.pop("rag_generation_config", None)
+        search_mode = kwargs.pop("search_mode", "custom")
+        search_settings = kwargs.pop("search_settings", None)
+        task_prompt = kwargs.pop("task_prompt", None)
+        include_title_if_available = kwargs.pop(
+            "include_title_if_available", False
+        )
+        include_web_search = kwargs.pop("include_web_search", False)
+
+        # Handle type conversions
+        if rag_generation_config and not isinstance(
+            rag_generation_config, dict
+        ):
+            rag_generation_config = rag_generation_config.model_dump()
+        if search_mode and not isinstance(search_mode, str):
+            search_mode = search_mode.value
+        if search_settings and not isinstance(search_settings, dict):
+            search_settings = search_settings.model_dump()
+
+        # Build payload
+        payload = {
+            "query": query,
+            "rag_generation_config": rag_generation_config,
+            "search_mode": search_mode,
+            "search_settings": search_settings,
+            "task_prompt": task_prompt,
+            "include_title_if_available": include_title_if_available,
+            "include_web_search": include_web_search,
+            **kwargs,  # Include any additional parameters
+        }
+
+        # Filter out None values
+        payload = {k: v for k, v in payload.items() if v is not None}
+
+        # Check if streaming is enabled
+        is_stream = False
+        if rag_generation_config and rag_generation_config.get(
+            "stream", False
+        ):
+            is_stream = True
+
+        if is_stream:
+            # Return an async streaming generator
+            raw_stream = self.client._make_streaming_request(
+                "POST",
+                "retrieval/rag",
+                json=payload,
+                version="v3",
+            )
+            # Parse each raw SSE event with parse_retrieval_event
+            return (parse_retrieval_event(event) for event in raw_stream)
+
+        # Otherwise, request fully and parse response
+        response_dict = await self.client._make_request(
+            "POST",
+            "retrieval/rag",
+            json=payload,
+            version="v3",
+        )
+        return WrappedRAGResponse(**response_dict)
+
+    async def agent(
+        self, **kwargs
+    ) -> (
+        WrappedAgentResponse
+        | Generator[
+            ThinkingEvent
+            | SearchResultsEvent
+            | MessageEvent
+            | CitationEvent
+            | FinalAnswerEvent
+            | ToolCallEvent
+            | ToolResultEvent
+            | UnknownEvent
+            | None,
+            None,
+            None,
+        ]
+    ):
+        """
+        Performs a single turn in a conversation with a RAG agent (async).
+        May return a `WrappedAgentResponse` or a streaming generator if `stream=True`.
+
+        Args:
+            message (Optional[dict | Message]): Current message to process.
+            messages (Optional[list[dict | Message]]): List of messages (deprecated, use message instead).
+            rag_generation_config (Optional[dict | GenerationConfig]): Configuration for RAG generation in 'rag' mode.
+            research_generation_config (Optional[dict | GenerationConfig]): Configuration for generation in 'research' mode.
+            search_mode (Optional[str | SearchMode]): Pre-configured search modes: "basic", "advanced", or "custom".
+            search_settings (Optional[dict | SearchSettings]): The search configuration object.
+            task_prompt (Optional[str]): Optional custom prompt to override default.
+            include_title_if_available (Optional[bool]): Include document titles from search results.
+            conversation_id (Optional[str | uuid.UUID]): ID of the conversation.
+            tools (Optional[list[str]]): List of tools to execute (deprecated).
+            rag_tools (Optional[list[str]]): List of tools to enable for RAG mode.
+            research_tools (Optional[list[str]]): List of tools to enable for Research mode.
+            max_tool_context_length (Optional[int]): Maximum length of returned tool context.
+            use_system_context (Optional[bool]): Use extended prompt for generation.
+            mode (Optional[Literal["rag", "research"]]): Mode to use for generation: 'rag' or 'research'.
+
+        Returns:
+            Either a WrappedAgentResponse or an AsyncGenerator for streaming.
+        """
+        # Extract parameters
+        message = kwargs.pop("message", None)
+        messages = kwargs.pop("messages", None)  # Deprecated
+        rag_generation_config = kwargs.pop("rag_generation_config", None)
+        research_generation_config = kwargs.pop(
+            "research_generation_config", None
+        )
+        search_mode = kwargs.pop("search_mode", "custom")
+        search_settings = kwargs.pop("search_settings", None)
+        task_prompt = kwargs.pop("task_prompt", None)
+        include_title_if_available = kwargs.pop(
+            "include_title_if_available", True
+        )
+        conversation_id = kwargs.pop("conversation_id", None)
+        tools = kwargs.pop("tools", None)  # Deprecated
+        rag_tools = kwargs.pop("rag_tools", None)
+        research_tools = kwargs.pop("research_tools", None)
+        max_tool_context_length = kwargs.pop("max_tool_context_length", 32768)
+        use_system_context = kwargs.pop("use_system_context", True)
+        mode = kwargs.pop("mode", "rag")
+
+        # Handle type conversions
+        if message and isinstance(message, dict):
+            message = Message(**message).model_dump()
+        elif message:
+            message = message.model_dump()
+
+        if rag_generation_config and not isinstance(
+            rag_generation_config, dict
+        ):
+            rag_generation_config = rag_generation_config.model_dump()
+
+        if research_generation_config and not isinstance(
+            research_generation_config, dict
+        ):
+            research_generation_config = (
+                research_generation_config.model_dump()
+            )
+
+        if search_mode and not isinstance(search_mode, str):
+            search_mode = search_mode.value
+
+        if search_settings and not isinstance(search_settings, dict):
+            search_settings = search_settings.model_dump()
+
+        # Build payload
+        payload = {
+            "message": message,
+            "messages": messages,  # Deprecated but included for backward compatibility
+            "rag_generation_config": rag_generation_config,
+            "research_generation_config": research_generation_config,
+            "search_mode": search_mode,
+            "search_settings": search_settings,
+            "task_prompt": task_prompt,
+            "include_title_if_available": include_title_if_available,
+            "conversation_id": (
+                str(conversation_id) if conversation_id else None
+            ),
+            "tools": tools,  # Deprecated but included for backward compatibility
+            "rag_tools": rag_tools,
+            "research_tools": research_tools,
+            "max_tool_context_length": max_tool_context_length,
+            "use_system_context": use_system_context,
+            "mode": mode,
+            **kwargs,  # Include any additional parameters
+        }
+
+        # Remove None values
+        payload = {k: v for k, v in payload.items() if v is not None}
+
+        # Check if streaming is enabled
+        is_stream = False
+        if rag_generation_config and rag_generation_config.get(
+            "stream", False
+        ):
+            is_stream = True
+        elif (
+            research_generation_config
+            and mode == "research"
+            and research_generation_config.get("stream", False)
+        ):
+            is_stream = True
+
+        if is_stream:
+            # Return an async streaming generator
+            raw_stream = self.client._make_streaming_request(
+                "POST",
+                "retrieval/agent",
+                json=payload,
+                version="v3",
+            )
+            # Parse each event in the stream
+            return (parse_retrieval_event(event) for event in raw_stream)
+
+        response_dict = await self.client._make_request(
+            "POST",
+            "retrieval/agent",
+            json=payload,
+            version="v3",
+        )
+        return WrappedAgentResponse(**response_dict)
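
A sketch of the retrieval surface above, assuming client.retrieval; note that when rag_generation_config sets stream=True, rag returns a generator of parsed events rather than a WrappedRAGResponse:

    from shared.api.models import FinalAnswerEvent

    results = await client.retrieval.search(query="climate policy", search_mode="basic")
    stream = await client.retrieval.rag(
        query="Summarize the key findings.",
        rag_generation_config={"stream": True},
    )
    for event in stream:  # assumes the generator yields events synchronously, as written above
        if isinstance(event, FinalAnswerEvent):
            print(event)
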
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/system.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/system.py
new file mode 100644
index 00000000..6c57def0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/system.py
@@ -0,0 +1,43 @@
+from shared.api.models import (
+    WrappedGenericMessageResponse,
+    WrappedServerStatsResponse,
+    WrappedSettingsResponse,
+)
+
+
+class SystemSDK:
+    def __init__(self, client):
+        self.client = client
+
+    async def health(self) -> WrappedGenericMessageResponse:
+        """Check the health of the R2R server."""
+        response_dict = await self.client._make_request(
+            "GET", "health", version="v3"
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def settings(self) -> WrappedSettingsResponse:
+        """Get the configuration settings for the R2R server.
+
+        Returns:
+            dict: The server settings.
+        """
+        response_dict = await self.client._make_request(
+            "GET", "system/settings", version="v3"
+        )
+
+        return WrappedSettingsResponse(**response_dict)
+
+    async def status(self) -> WrappedServerStatsResponse:
+        """Get statistics about the server, including the start time, uptime,
+        CPU usage, and memory usage.
+
+        Returns:
+            dict: The server statistics.
+        """
+        response_dict = await self.client._make_request(
+            "GET", "system/status", version="v3"
+        )
+
+        return WrappedServerStatsResponse(**response_dict)
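
A quick sketch of the system endpoints above, assuming client.system:

    health = await client.system.health()      # server health check
    settings = await client.system.settings()  # server configuration
    status = await client.system.status()      # uptime, CPU, and memory stats
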
diff --git a/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/users.py b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/users.py
new file mode 100644
index 00000000..207c3cf4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/asnyc_methods/users.py
@@ -0,0 +1,589 @@
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedAPIKeyResponse,
+    WrappedAPIKeysResponse,
+    WrappedBooleanResponse,
+    WrappedCollectionsResponse,
+    WrappedGenericMessageResponse,
+    WrappedLimitsResponse,
+    WrappedLoginResponse,
+    WrappedTokenResponse,
+    WrappedUserResponse,
+    WrappedUsersResponse,
+)
+
+
+class UsersSDK:
+    def __init__(self, client):
+        self.client = client
+
+    async def create(
+        self,
+        email: str,
+        password: str,
+        name: Optional[str] = None,
+        bio: Optional[str] = None,
+        profile_picture: Optional[str] = None,
+    ) -> WrappedUserResponse:
+        """Register a new user.
+
+        Args:
+            email (str): User's email address
+            password (str): User's password
+            name (Optional[str]): The name for the new user
+            bio (Optional[str]): The bio for the new user
+            profile_picture (Optional[str]): New user profile picture
+
+        Returns:
+            WrappedUserResponse: New user information
+        """
+
+        data: dict = {"email": email, "password": password}
+
+        if name is not None:
+            data["name"] = name
+        if bio is not None:
+            data["bio"] = bio
+        if profile_picture is not None:
+            data["profile_picture"] = profile_picture
+
+        response_dict = await self.client._make_request(
+            "POST",
+            "users",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedUserResponse(**response_dict)
+
+    async def send_verification_email(
+        self, email: str
+    ) -> WrappedGenericMessageResponse:
+        """Request that a verification email to a user."""
+        response_dict = await self.client._make_request(
+            "POST",
+            "users/send-verification-email",
+            json=email,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def delete(
+        self, id: str | UUID, password: str
+    ) -> WrappedBooleanResponse:
+        """Delete a specific user. Users can only delete their own account
+        unless they are superusers.
+
+        Args:
+            id (str | UUID): User ID to delete
+            password (str): User's password
+
+        Returns:
+            WrappedBooleanResponse: Deletion result
+        """
+        data: dict[str, Any] = {"password": password}
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"users/{str(id)}",
+            json=data,
+            version="v3",
+        )
+        self.client.access_token = None
+        self.client._refresh_token = None
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def verify_email(
+        self, email: str, verification_code: str
+    ) -> WrappedGenericMessageResponse:
+        """Verify a user's email address.
+
+        Args:
+            email (str): User's email address
+            verification_code (str): Verification code sent to the user's email
+
+        Returns:
+            WrappedGenericMessageResponse: Verification result
+        """
+        data: dict[str, Any] = {
+            "email": email,
+            "verification_code": verification_code,
+        }
+        response_dict = await self.client._make_request(
+            "POST",
+            "users/verify-email",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def login(self, email: str, password: str) -> WrappedLoginResponse:
+        """Log in a user.
+
+        Args:
+            email (str): User's email address
+            password (str): User's password
+
+        Returns:
+            WrappedLoginResponse
+        """
+        if self.client.api_key:
+            raise ValueError(
+                "Cannot log in after setting an API key, please unset your R2R_API_KEY variable or call client.set_api_key(None)"
+            )
+        data: dict[str, Any] = {"username": email, "password": password}
+        response_dict = await self.client._make_request(
+            "POST",
+            "users/login",
+            data=data,
+            version="v3",
+        )
+
+        login_response = WrappedLoginResponse(**response_dict)
+        self.client.access_token = login_response.results.access_token.token
+        self.client._refresh_token = login_response.results.refresh_token.token
+
+        user = await self.client._make_request(
+            "GET",
+            "users/me",
+            version="v3",
+        )
+
+        user_response = WrappedUserResponse(**user)
+        self.client._user_id = user_response.results.id
+
+        return login_response
+
+    async def logout(self) -> WrappedGenericMessageResponse | None:
+        """Log out the current user."""
+        if self.client.access_token:
+            response_dict = await self.client._make_request(
+                "POST",
+                "users/logout",
+                version="v3",
+            )
+            self.client.access_token = None
+            self.client._refresh_token = None
+
+            return WrappedGenericMessageResponse(**response_dict)
+
+        self.client.access_token = None
+        self.client._refresh_token = None
+        return None
+
+    async def refresh_token(self) -> WrappedTokenResponse:
+        """Refresh the access token using the refresh token."""
+        if not self.client._refresh_token:
+            raise ValueError(
+                "No refresh token available; please log in again."
+            )
+        response_dict = await self.client._make_request(
+            "POST",
+            "users/refresh-token",
+            json=self.client._refresh_token,
+            version="v3",
+        )
+        self.client.access_token = response_dict["results"]["access_token"][
+            "token"
+        ]
+        self.client._refresh_token = response_dict["results"]["refresh_token"][
+            "token"
+        ]
+
+        return WrappedTokenResponse(**response_dict)
+
+    async def change_password(
+        self, current_password: str, new_password: str
+    ) -> WrappedGenericMessageResponse:
+        """Change the user's password.
+
+        Args:
+            current_password (str): User's current password
+            new_password (str): User's new password
+
+        Returns:
+            WrappedGenericMessageResponse: Change password result
+        """
+        data: dict[str, Any] = {
+            "current_password": current_password,
+            "new_password": new_password,
+        }
+        response_dict = await self.client._make_request(
+            "POST",
+            "users/change-password",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def request_password_reset(
+        self, email: str
+    ) -> WrappedGenericMessageResponse:
+        """Request a password reset.
+
+        Args:
+            email (str): User's email address
+
+        Returns:
+            WrappedGenericMessageResponse: Password reset request result
+        """
+        response_dict = await self.client._make_request(
+            "POST",
+            "users/request-password-reset",
+            json=email,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def reset_password(
+        self, reset_token: str, new_password: str
+    ) -> WrappedGenericMessageResponse:
+        """Reset password using a reset token.
+
+        Args:
+            reset_token (str): Password reset token
+            new_password (str): New password
+
+        Returns:
+            WrappedGenericMessageResponse: Password reset result
+        """
+        data: dict[str, Any] = {
+            "reset_token": reset_token,
+            "new_password": new_password,
+        }
+        response_dict = await self.client._make_request(
+            "POST",
+            "users/reset-password",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def list(
+        self,
+        ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedUsersResponse:
+        """List users with pagination and filtering options.
+
+        Args:
+            ids (Optional[list[str | UUID]]): Filter users by IDs
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedUsersResponse: List of users and pagination information
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if ids:
+            params["ids"] = [str(user_id) for user_id in ids]  # type: ignore
+
+        response_dict = await self.client._make_request(
+            "GET",
+            "users",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedUsersResponse(**response_dict)
+
+    async def retrieve(
+        self,
+        id: str | UUID,
+    ) -> WrappedUserResponse:
+        """Get a specific user.
+
+        Args:
+            id (str | UUID): User ID to retrieve
+
+        Returns:
+            WrappedUserResponse: Detailed user information
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"users/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedUserResponse(**response_dict)
+
+    async def me(
+        self,
+    ) -> WrappedUserResponse:
+        """Get detailed information about the currently authenticated user.
+
+        Returns:
+            WrappedUserResponse: Detailed user information
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            "users/me",
+            version="v3",
+        )
+
+        return WrappedUserResponse(**response_dict)
+
+    async def update(
+        self,
+        id: str | UUID,
+        email: Optional[str] = None,
+        is_superuser: Optional[bool] = None,
+        name: Optional[str] = None,
+        bio: Optional[str] = None,
+        profile_picture: Optional[str] = None,
+        limits_overrides: dict | None = None,
+        metadata: dict[str, str | None] | None = None,
+    ) -> WrappedUserResponse:
+        """Update user information.
+
+        Args:
+            id (str | UUID): User ID to update
+            email (Optional[str]): New email address
+            is_superuser (Optional[bool]): Update superuser status
+            name (Optional[str]): New name
+            bio (Optional[str]): New bio
+            profile_picture (Optional[str]): New profile picture
+            limits_overrides (Optional[dict]): Overrides for the user's rate limits
+            metadata (Optional[dict[str, str | None]]): Metadata to associate with the user
+
+        Returns:
+            WrappedUserResponse: Updated user information
+        """
+        data: dict = {}
+        if email is not None:
+            data["email"] = email
+        if is_superuser is not None:
+            data["is_superuser"] = is_superuser
+        if name is not None:
+            data["name"] = name
+        if bio is not None:
+            data["bio"] = bio
+        if profile_picture is not None:
+            data["profile_picture"] = profile_picture
+        if limits_overrides is not None:
+            data["limits_overrides"] = limits_overrides
+        if metadata is not None:
+            data["metadata"] = metadata
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"users/{str(id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedUserResponse(**response_dict)
+
+    async def list_collections(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedCollectionsResponse:
+        """Get all collections associated with a specific user.
+
+        Args:
+            id (str | UUID): User ID to get collections for
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedCollectionsResponse: Paginated list of collections
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = await self.client._make_request(
+            "GET",
+            f"users/{str(id)}/collections",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedCollectionsResponse(**response_dict)
+
+    async def add_to_collection(
+        self,
+        id: str | UUID,
+        collection_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Add a user to a collection.
+
+        Args:
+            id (str | UUID): User ID to add
+            collection_id (str | UUID): Collection ID to add user to
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = await self.client._make_request(
+            "POST",
+            f"users/{str(id)}/collections/{str(collection_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def remove_from_collection(
+        self,
+        id: str | UUID,
+        collection_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a user from a collection.
+
+        Args:
+            id (str | UUID): User ID to remove
+            collection_id (str | UUID): Collection ID to remove user from
+
+        Returns:
+            WrappedBooleanResponse: True on success
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"users/{str(id)}/collections/{str(collection_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    async def create_api_key(
+        self,
+        id: str | UUID,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+    ) -> WrappedAPIKeyResponse:
+        """Create a new API key for the specified user.
+
+        Args:
+            id (str | UUID): User ID to create API key for
+            name (Optional[str]): Name of the API key
+            description (Optional[str]): Description of the API key
+
+        Returns:
+            dict: { "message": "API key created successfully", "api_key": "key_id.raw_api_key" }
+        """
+        data: dict[str, Any] = {}
+        if name:
+            data["name"] = name
+        if description:
+            data["description"] = description
+
+        response_dict = await self.client._make_request(
+            "POST",
+            f"users/{str(id)}/api-keys",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedAPIKeyResponse(**response_dict)
+
+    async def list_api_keys(
+        self,
+        id: str | UUID,
+    ) -> WrappedAPIKeysResponse:
+        """List all API keys for the specified user.
+
+        Args:
+            id (str | UUID): User ID to list API keys for
+
+        Returns:
+            WrappedAPIKeysResponse
+        """
+        resp_dict = await self.client._make_request(
+            "GET",
+            f"users/{str(id)}/api-keys",
+            version="v3",
+        )
+
+        return WrappedAPIKeysResponse(**resp_dict)
+
+    async def delete_api_key(
+        self,
+        id: str | UUID,
+        key_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Delete a specific API key for the specified user.
+
+        Args:
+            id (str | UUID): User ID
+            key_id (str | UUID): API key ID to delete
+
+        Returns:
+            dict: { "message": "API key deleted successfully" }
+        """
+        response_dict = await self.client._make_request(
+            "DELETE",
+            f"users/{str(id)}/api-keys/{str(key_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
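A minimal sketch of the API-key flow above, assuming a local R2R server and an existing user; the user id and key names are placeholders:

```python
import asyncio

from sdk import R2RAsyncClient

USER_ID = "00000000-0000-0000-0000-000000000000"  # placeholder user id


async def main() -> None:
    client = R2RAsyncClient(base_url="http://localhost:7272")
    try:
        # Issue a new key; the raw secret appears only in this response.
        created = await client.users.create_api_key(
            USER_ID, name="ci-bot", description="key for CI jobs"
        )
        print(created)

        # Enumerate the user's keys; entry shape depends on the server's
        # WrappedAPIKeysResponse model.
        keys = await client.users.list_api_keys(USER_ID)
        print(keys)
    finally:
        await client.close()


asyncio.run(main())
```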
+    async def get_limits(self) -> WrappedLimitsResponse:
+        """Get effective limits for the currently authenticated user.
+
+        Returns:
+            WrappedLimitsResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            f"users/{str(self.client._user_id)}/limits",
+            version="v3",
+        )
+
+        return WrappedLimitsResponse(**response_dict)
+
+    async def oauth_google_authorize(self) -> WrappedGenericMessageResponse:
+        """Get Google OAuth 2.0 authorization URL from the server.
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            "users/oauth/google/authorize",
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def oauth_github_authorize(self) -> WrappedGenericMessageResponse:
+        """Get GitHub OAuth 2.0 authorization URL from the server.
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        response_dict = await self.client._make_request(
+            "GET",
+            "users/oauth/github/authorize",
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    async def oauth_google_callback(
+        self, code: str, state: str
+    ) -> WrappedLoginResponse:
+        """Exchange `code` and `state` with the Google OAuth 2.0 callback
+        route."""
+        response_dict = await self.client._make_request(
+            "GET",
+            "users/oauth/google/callback",
+            params={"code": code, "state": state},
+            version="v3",
+        )
+
+        return WrappedLoginResponse(**response_dict)
+
+    async def oauth_github_callback(
+        self, code: str, state: str
+    ) -> WrappedLoginResponse:
+        """Exchange `code` and `state` with the GitHub OAuth 2.0 callback
+        route."""
+        response_dict = await self.client._make_request(
+            "GET",
+            "users/oauth/github/callback",
+            params={"code": code, "state": state},
+            version="v3",
+        )
+
+        return WrappedLoginResponse(**response_dict)
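The OAuth helpers map onto the usual two-leg flow: `oauth_*_authorize` returns the provider URL to send the user to, and `oauth_*_callback` exchanges the redirect's `code` and `state` for a login session. A hedged sketch with placeholder callback values:

```python
import asyncio

from sdk import R2RAsyncClient


async def main() -> None:
    client = R2RAsyncClient(base_url="http://localhost:7272")
    try:
        # Leg 1: obtain the Google authorization URL and redirect the
        # user to it (printed here for brevity).
        authorize = await client.users.oauth_google_authorize()
        print(authorize)

        # Leg 2: once the provider redirects back, exchange the query
        # parameters for tokens. Both values below are placeholders.
        login = await client.users.oauth_google_callback(
            code="<code-from-redirect>", state="<state-from-redirect>"
        )
        print(login)
    finally:
        await client.close()


asyncio.run(main())
```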
diff --git a/.venv/lib/python3.12/site-packages/sdk/async_client.py b/.venv/lib/python3.12/site-packages/sdk/async_client.py
new file mode 100644
index 00000000..ada14f5a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/async_client.py
@@ -0,0 +1,135 @@
+import json
+from io import BytesIO
+from typing import Any, AsyncGenerator
+
+import httpx
+
+from shared.abstractions import R2RException
+
+from .asnyc_methods import (
+    ChunksSDK,
+    CollectionsSDK,
+    ConversationsSDK,
+    DocumentsSDK,
+    GraphsSDK,
+    IndicesSDK,
+    PromptsSDK,
+    RetrievalSDK,
+    SystemSDK,
+    UsersSDK,
+)
+from .base.base_client import BaseClient
+
+
+class R2RAsyncClient(BaseClient):
+    """Asynchronous client for interacting with the R2R API."""
+
+    def __init__(
+        self,
+        base_url: str | None = None,
+        timeout: float = 300.0,
+        custom_client=None,
+    ):
+        super().__init__(base_url, timeout)
+        self.client = custom_client or httpx.AsyncClient(timeout=timeout)
+        self.chunks = ChunksSDK(self)
+        self.collections = CollectionsSDK(self)
+        self.conversations = ConversationsSDK(self)
+        self.documents = DocumentsSDK(self)
+        self.graphs = GraphsSDK(self)
+        self.indices = IndicesSDK(self)
+        self.prompts = PromptsSDK(self)
+        self.retrieval = RetrievalSDK(self)
+        self.system = SystemSDK(self)
+        self.users = UsersSDK(self)
+
+    async def _make_request(
+        self, method: str, endpoint: str, version: str = "v3", **kwargs
+    ):
+        url = self._get_full_url(endpoint, version)
+        if (
+            "https://api.sciphi.ai" in url
+            and ("login" not in endpoint)
+            and ("create" not in endpoint)
+            and ("users" not in endpoint)
+            and ("health" not in endpoint)
+            and (not self.access_token and not self.api_key)
+        ):
+            raise R2RException(
+                status_code=401,
+                message="Access token or api key is required to access `https://api.sciphi.ai`. To change the base url, use `set_base_url` method or set the local environment variable `R2R_API_BASE` to `http://localhost:7272`.",
+            )
+        request_args = self._prepare_request_args(endpoint, **kwargs)
+
+        try:
+            response = await self.client.request(method, url, **request_args)
+            await self._handle_response(response)
+            if "application/json" in response.headers.get("Content-Type", ""):
+                return response.json() if response.content else None
+            else:
+                return BytesIO(response.content)
+
+        except httpx.RequestError as e:
+            raise R2RException(
+                status_code=500,
+                message=f"Request failed: {str(e)}",
+            ) from e
+
+    async def _make_streaming_request(
+        self, method: str, endpoint: str, version: str = "v3", **kwargs
+    ) -> AsyncGenerator[Any, None]:
+        url = self._get_full_url(endpoint, version)
+        request_args = self._prepare_request_args(endpoint, **kwargs)
+
+        async with httpx.AsyncClient(timeout=self.timeout) as client:
+            async with client.stream(method, url, **request_args) as response:
+                await self._handle_response(response)
+                async for line in response.aiter_lines():
+                    if line.strip():  # Ignore empty lines
+                        try:
+                            yield json.loads(line)
+                        except Exception:
+                            yield line
+
+    async def _handle_response(self, response):
+        if response.status_code >= 400:
+            try:
+                error_content = response.json()
+                if isinstance(error_content, dict):
+                    message = (
+                        error_content.get("detail", {}).get(
+                            "message", str(error_content)
+                        )
+                        if isinstance(error_content.get("detail"), dict)
+                        else error_content.get("detail", str(error_content))
+                    )
+                else:
+                    message = str(error_content)
+            except json.JSONDecodeError:
+                message = response.text
+            except Exception as e:
+                message = str(e)
+
+            raise R2RException(
+                status_code=response.status_code, message=message
+            )
+
+    async def close(self):
+        await self.client.aclose()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        await self.close()
+
+    def set_api_key(self, api_key: str) -> None:
+        if self.access_token:
+            raise ValueError("Cannot have both access token and api key.")
+        self.api_key = api_key
+
+    def unset_api_key(self) -> None:
+        self.api_key = None
+
+    def set_base_url(self, base_url: str) -> None:
+        self.base_url = base_url
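Since `R2RAsyncClient` implements `__aenter__`/`__aexit__`, the underlying `httpx.AsyncClient` is closed automatically when used as a context manager. A minimal sketch against a local server (the `health` route is one of the endpoints exempted from the hosted-API credential check above):

```python
import asyncio

from sdk import R2RAsyncClient


async def main() -> None:
    # The context manager calls close() -> httpx.AsyncClient.aclose().
    async with R2RAsyncClient(base_url="http://localhost:7272") as client:
        # Exercise the internal request helper directly; JSON responses
        # come back as dicts, anything else as a BytesIO buffer.
        health = await client._make_request("GET", "health")
        print(health)


asyncio.run(main())
```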
diff --git a/.venv/lib/python3.12/site-packages/sdk/base/__init_.py b/.venv/lib/python3.12/site-packages/sdk/base/__init_.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/base/__init_.py
diff --git a/.venv/lib/python3.12/site-packages/sdk/base/base_client.py b/.venv/lib/python3.12/site-packages/sdk/base/base_client.py
new file mode 100644
index 00000000..c8de8c0c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/base/base_client.py
@@ -0,0 +1,52 @@
+import os
+from typing import Optional
+
+from shared.abstractions import R2RException
+
+
+class BaseClient:
+    def __init__(
+        self,
+        base_url: str | None = None,
+        timeout: float = 300.0,
+    ):
+        self.base_url = base_url or os.getenv(
+            "R2R_API_BASE", "https://api.sciphi.ai"
+        )
+        self.timeout = timeout
+        self.access_token: Optional[str] = None
+        self._refresh_token: Optional[str] = None
+        self._user_id: Optional[str] = None
+        self.api_key: Optional[str] = os.getenv("R2R_API_KEY", None)
+
+    def _get_auth_header(self) -> dict[str, str]:
+        if self.access_token and self.api_key:
+            raise R2RException(
+                status_code=400,
+                message="Cannot have both access token and api key.",
+            )
+        if self.access_token:
+            return {"Authorization": f"Bearer {self.access_token}"}
+        elif self.api_key:
+            return {"x-api-key": self.api_key}
+        else:
+            return {}
+
+    def _get_full_url(self, endpoint: str, version: str = "v3") -> str:
+        return f"{self.base_url}/{version}/{endpoint}"
+
+    def _prepare_request_args(self, endpoint: str, **kwargs) -> dict:
+        headers = kwargs.pop("headers", {})
+        if (self.access_token or self.api_key) and endpoint not in [
+            "register",
+            "login",
+            "verify_email",
+        ]:
+            headers.update(self._get_auth_header())
+        if (
+            kwargs.get("params", None) == {}
+            or kwargs.get("params", None) is None
+        ):
+            kwargs.pop("params", None)
+
+        return {"headers": headers, **kwargs}
diff --git a/.venv/lib/python3.12/site-packages/sdk/models.py b/.venv/lib/python3.12/site-packages/sdk/models.py
new file mode 100644
index 00000000..81e7f9f0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/models.py
@@ -0,0 +1,100 @@
+from shared.abstractions import (
+    AggregateSearchResult,
+    ChunkSearchResult,
+    GenerationConfig,
+    GraphCommunityResult,
+    GraphEntityResult,
+    GraphRelationshipResult,
+    GraphSearchResult,
+    GraphSearchResultType,
+    GraphSearchSettings,
+    HybridSearchSettings,
+    IngestionMode,
+    Message,
+    MessageType,
+    R2RException,
+    R2RSerializable,
+    SearchMode,
+    SearchSettings,
+    Token,
+    User,
+    select_search_filters,
+)
+from shared.abstractions.graph import (
+    GraphCreationSettings,
+    GraphEnrichmentSettings,
+)
+from shared.api.models import (
+    AgentEvent,
+    AgentResponse,
+    Citation,
+    CitationData,
+    CitationEvent,
+    Delta,
+    DeltaPayload,
+    FinalAnswerData,
+    FinalAnswerEvent,
+    MessageData,
+    MessageDelta,
+    MessageEvent,
+    RAGResponse,
+    SearchResultsData,
+    SearchResultsEvent,
+    SSEEventBase,
+    ThinkingData,
+    ThinkingEvent,
+    ToolCallData,
+    ToolCallEvent,
+    ToolResultData,
+    ToolResultEvent,
+    UnknownEvent,
+)
+
+__all__ = [
+    "AggregateSearchResult",
+    "GenerationConfig",
+    "HybridSearchSettings",
+    "GraphCommunityResult",
+    "GraphCreationSettings",
+    "GraphEnrichmentSettings",
+    "GraphEntityResult",
+    "GraphRelationshipResult",
+    "GraphSearchResult",
+    "GraphSearchResultType",
+    "GraphSearchSettings",
+    "Message",
+    "MessageType",
+    "R2RException",
+    "R2RSerializable",
+    "Token",
+    "ChunkSearchResult",
+    "SearchSettings",
+    "select_search_filters",
+    "IngestionMode",
+    "SearchMode",
+    # "RAGResponse",
+    "Citation",
+    "RAGResponse",
+    "AgentEvent",
+    "AgentResponse",
+    "SSEEventBase",
+    "SearchResultsData",
+    "SearchResultsEvent",
+    "MessageData",
+    "MessageDelta",
+    "MessageEvent",
+    "DeltaPayload",
+    "Delta",
+    "CitationData",
+    "CitationEvent",
+    "FinalAnswerData",
+    "FinalAnswerEvent",
+    "ToolCallData",
+    "ToolCallEvent",
+    "ToolResultData",
+    "ToolResultEvent",
+    "ThinkingEvent",
+    "ThinkingData",
+    "UnknownEvent",
+    "User",
+]
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_client.py b/.venv/lib/python3.12/site-packages/sdk/sync_client.py
new file mode 100644
index 00000000..93db993f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_client.py
@@ -0,0 +1,165 @@
+import json
+from io import BytesIO
+from typing import Any, Generator
+
+from httpx import Client, RequestError, Response
+
+from shared.abstractions import R2RException
+
+from .base.base_client import BaseClient
+from .sync_methods import (
+    ChunksSDK,
+    CollectionsSDK,
+    ConversationsSDK,
+    DocumentsSDK,
+    GraphsSDK,
+    IndicesSDK,
+    PromptsSDK,
+    RetrievalSDK,
+    SystemSDK,
+    UsersSDK,
+)
+
+
+class R2RClient(BaseClient):
+    def __init__(
+        self,
+        base_url: str | None = None,
+        timeout: float = 300.0,
+        custom_client=None,
+    ):
+        super().__init__(base_url, timeout)
+        self.client = custom_client or Client(timeout=timeout)
+        self.chunks = ChunksSDK(self)
+        self.collections = CollectionsSDK(self)
+        self.conversations = ConversationsSDK(self)
+        self.documents = DocumentsSDK(self)
+        self.graphs = GraphsSDK(self)
+        self.indices = IndicesSDK(self)
+        self.prompts = PromptsSDK(self)
+        self.retrieval = RetrievalSDK(self)
+        self.system = SystemSDK(self)
+        self.users = UsersSDK(self)
+
+    def _make_request(
+        self, method: str, endpoint: str, version: str = "v3", **kwargs
+    ) -> dict[str, Any] | BytesIO | None:
+        url = self._get_full_url(endpoint, version)
+        if (
+            "https://api.sciphi.ai" in url
+            and ("login" not in endpoint)
+            and ("create" not in endpoint)
+            and ("users" not in endpoint)
+            and ("health" not in endpoint)
+            and (not self.access_token and not self.api_key)
+        ):
+            raise R2RException(
+                status_code=401,
+                message="Access token or api key is required to access `https://api.sciphi.ai`. To change the base url, use `set_base_url` method or set the local environment variable `R2R_API_BASE` to `http://localhost:7272`.",
+            )
+        request_args = self._prepare_request_args(endpoint, **kwargs)
+
+        try:
+            response = self.client.request(method, url, **request_args)
+            self._handle_response(response)
+
+            if "application/json" in response.headers.get("Content-Type", ""):
+                return response.json() if response.content else None
+            else:
+                return BytesIO(response.content)
+
+        except RequestError as e:
+            raise R2RException(
+                status_code=500,
+                message=f"Request failed: {str(e)}",
+            ) from e
+
+    def _make_streaming_request(
+        self, method: str, endpoint: str, version: str = "v3", **kwargs
+    ) -> Generator[dict[str, str], None, None]:
+        """
+        Make a streaming request, parsing Server-Sent Events (SSE) in multiline form.
+
+        Yields a dictionary with keys:
+        - "event": the event type (or "unknown" if not provided)
+        - "data": the JSON string (possibly spanning multiple lines) accumulated from the event's data lines
+        """
+        url = self._get_full_url(endpoint, version)
+        request_args = self._prepare_request_args(endpoint, **kwargs)
+
+        with Client(timeout=self.timeout) as client:
+            with client.stream(method, url, **request_args) as response:
+                self._handle_response(response)
+
+                sse_event_block: dict[str, Any] = {"event": None, "data": []}
+
+                for line in response.iter_lines():
+                    if isinstance(line, bytes):
+                        line = line.decode("utf-8", "replace")
+
+                    # Blank line -> end of this SSE event
+                    if line == "":
+                        # If there's any accumulated data, yield this event
+                        if sse_event_block["data"]:
+                            data_str = "".join(sse_event_block["data"])
+                            yield {
+                                "event": sse_event_block["event"] or "unknown",
+                                "data": data_str,
+                            }
+                        # Reset the block
+                        sse_event_block = {"event": None, "data": []}
+                        continue
+
+                    # Otherwise, parse the line
+                    if line.startswith("event:"):
+                        sse_event_block["event"] = line[
+                            len("event:") :
+                        ].lstrip()
+                    elif line.startswith("data:"):
+                        # Accumulate the exact substring after "data:"
+                        # Notice we do *not* strip() the entire line
+                        chunk = line[len("data:") :]
+                        sse_event_block["data"].append(chunk)
+                    # Optionally handle id:, retry:, etc. if needed
+
+                # If something remains in the buffer at the end
+                if sse_event_block["data"]:
+                    data_str = "".join(sse_event_block["data"])
+                    yield {
+                        "event": sse_event_block["event"] or "unknown",
+                        "data": data_str,
+                    }
+
+    def _handle_response(self, response: Response) -> None:
+        if response.status_code >= 400:
+            try:
+                error_content = response.json()
+                if isinstance(error_content, dict):
+                    message = (
+                        error_content.get("detail", {}).get(
+                            "message", str(error_content)
+                        )
+                        if isinstance(error_content.get("detail"), dict)
+                        else error_content.get("detail", str(error_content))
+                    )
+                else:
+                    message = str(error_content)
+            except json.JSONDecodeError:
+                message = response.text
+            except Exception as e:
+                message = str(e)
+
+            raise R2RException(
+                status_code=response.status_code, message=message
+            )
+
+    def set_api_key(self, api_key: str) -> None:
+        if self.access_token:
+            raise ValueError("Cannot have both access token and api key.")
+        self.api_key = api_key
+
+    def unset_api_key(self) -> None:
+        self.api_key = None
+
+    def set_base_url(self, base_url: str) -> None:
+        self.base_url = base_url
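`_make_streaming_request` yields one dict per SSE event, with the multiline `data:` payload already joined. A hedged consumer sketch; the endpoint and request body below are illustrative placeholders rather than a documented contract:

```python
import json

from sdk import R2RClient

client = R2RClient(base_url="http://localhost:7272")

# Each yielded item looks like {"event": "<type-or-unknown>", "data": "<str>"}.
for sse in client._make_streaming_request(
    "POST",
    "retrieval/rag",  # placeholder streaming endpoint
    json={"query": "What is R2R?"},
):
    try:
        payload = json.loads(sse["data"])
    except json.JSONDecodeError:
        payload = sse["data"]  # keep non-JSON data lines as raw text
    print(sse["event"], payload)
```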
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_methods/__init__.py b/.venv/lib/python3.12/site-packages/sdk/sync_methods/__init__.py
new file mode 100644
index 00000000..efa520d6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_methods/__init__.py
@@ -0,0 +1,23 @@
+from .chunks import ChunksSDK
+from .collections import CollectionsSDK
+from .conversations import ConversationsSDK
+from .documents import DocumentsSDK
+from .graphs import GraphsSDK
+from .indices import IndicesSDK
+from .prompts import PromptsSDK
+from .retrieval import RetrievalSDK
+from .system import SystemSDK
+from .users import UsersSDK
+
+__all__ = [
+    "ChunksSDK",
+    "CollectionsSDK",
+    "ConversationsSDK",
+    "DocumentsSDK",
+    "GraphsSDK",
+    "IndicesSDK",
+    "PromptsSDK",
+    "RetrievalSDK",
+    "SystemSDK",
+    "UsersSDK",
+]
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_methods/chunks.py b/.venv/lib/python3.12/site-packages/sdk/sync_methods/chunks.py
new file mode 100644
index 00000000..b7e2124f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_methods/chunks.py
@@ -0,0 +1,186 @@
+import json
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedChunkResponse,
+    WrappedChunksResponse,
+    WrappedVectorSearchResponse,
+)
+
+from ..models import SearchSettings
+
+
+class ChunksSDK:
+    """SDK for interacting with chunks in the v3 API."""
+
+    def __init__(self, client):
+        self.client = client
+
+    def update(
+        self,
+        chunk: dict[str, str],
+    ) -> WrappedChunkResponse:
+        """Update an existing chunk.
+
+        Args:
+            chunk (dict[str, str]): Chunk to update. Should contain:
+                - id: UUID of the chunk
+                - metadata: Dictionary of metadata
+        Returns:
+            WrappedChunkResponse
+        """
+        response_dict = self.client._make_request(
+            "POST",
+            f"chunks/{str(chunk['id'])}",
+            json=chunk,
+            version="v3",
+        )
+
+        return WrappedChunkResponse(**response_dict)
+
+    def retrieve(
+        self,
+        id: str | UUID,
+    ) -> WrappedChunkResponse:
+        """Get a specific chunk.
+
+        Args:
+            id (str | UUID): Chunk ID to retrieve
+
+        Returns:
+            WrappedChunkResponse
+        """
+
+        response_dict = self.client._make_request(
+            "GET",
+            f"chunks/{id}",
+            version="v3",
+        )
+
+        return WrappedChunkResponse(**response_dict)
+
+    # FIXME: Is this the most appropriate name for this method?
+    def list_by_document(
+        self,
+        document_id: str | UUID,
+        metadata_filter: Optional[dict] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedChunksResponse:
+        """List chunks for a specific document.
+
+        Args:
+            document_id (str | UUID): Document ID to get chunks for
+            metadata_filter (Optional[dict]): Filter chunks by metadata
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedChunksResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if metadata_filter:
+            params["metadata_filter"] = json.dumps(metadata_filter)
+
+        response_dict = self.client._make_request(
+            "GET",
+            f"documents/{str(document_id)}/chunks",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedChunksResponse(**response_dict)
+
+    def delete(
+        self,
+        id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Delete a specific chunk.
+
+        Args:
+            id (str | UUID): ID of chunk to delete
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"chunks/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def list(
+        self,
+        include_vectors: bool = False,
+        metadata_filter: Optional[dict] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+        filters: Optional[dict] = None,
+    ) -> WrappedChunksResponse:
+        """List chunks with pagination support.
+
+        Args:
+            include_vectors (bool, optional): Include vector data in response. Defaults to False.
+            metadata_filter (Optional[dict], optional): Filter by metadata. Defaults to None.
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+            filters (Optional[dict], optional): Structured filters to apply. Defaults to None.
+
+        Returns:
+            WrappedChunksResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+            "include_vectors": include_vectors,
+        }
+        if filters:
+            params["filters"] = json.dumps(filters)
+
+        if metadata_filter:
+            params["metadata_filter"] = json.dumps(metadata_filter)
+
+        response_dict = self.client._make_request(
+            "GET",
+            "chunks",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedChunksResponse(**response_dict)
+
+    def search(
+        self,
+        query: str,
+        search_settings: Optional[dict | SearchSettings] = None,
+    ) -> WrappedVectorSearchResponse:
+        """Conduct a vector and/or graph search.
+
+        Args:
+            query (str): The query to search for.
+            search_settings (Optional[dict, SearchSettings]]): Vector search settings.
+
+        Returns:
+            WrappedVectorSearchResponse
+        """
+        if search_settings and not isinstance(search_settings, dict):
+            search_settings = search_settings.model_dump()
+
+        data: dict[str, Any] = {
+            "query": query,
+            "search_settings": search_settings,
+        }
+        response_dict = self.client._make_request(
+            "POST",
+            "chunks/search",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedVectorSearchResponse(**response_dict)
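`search` accepts settings either as a plain dict or as a `SearchSettings` model (which it converts via `model_dump()`). A minimal sketch using the dict form; the settings key is a placeholder:

```python
from sdk import R2RClient

client = R2RClient(base_url="http://localhost:7272")

# A plain dict passes through unchanged; a SearchSettings instance
# would be serialized with model_dump() first.
results = client.chunks.search(
    query="climate policy",
    search_settings={"limit": 5},
)
print(results)
```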
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_methods/collections.py b/.venv/lib/python3.12/site-packages/sdk/sync_methods/collections.py
new file mode 100644
index 00000000..69e9d7b4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_methods/collections.py
@@ -0,0 +1,343 @@
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedCollectionResponse,
+    WrappedCollectionsResponse,
+    WrappedDocumentsResponse,
+    WrappedGenericMessageResponse,
+    WrappedUsersResponse,
+)
+
+
+class CollectionsSDK:
+    def __init__(self, client):
+        self.client = client
+
+    def create(
+        self,
+        name: str,
+        description: Optional[str] = None,
+    ) -> WrappedCollectionResponse:
+        """Create a new collection.
+
+        Args:
+            name (str): Name of the collection
+            description (Optional[str]): Description of the collection
+
+        Returns:
+            WrappedCollectionResponse
+        """
+        data: dict[str, Any] = {"name": name, "description": description}
+        response_dict = self.client._make_request(
+            "POST",
+            "collections",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedCollectionResponse(**response_dict)
+
+    def list(
+        self,
+        ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedCollectionsResponse:
+        """List collections with pagination and filtering options.
+
+        Args:
+            ids (Optional[list[str | UUID]]): Filter collections by ids
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedCollectionsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if ids:
+            params["ids"] = ids
+
+        response_dict = self.client._make_request(
+            "GET", "collections", params=params, version="v3"
+        )
+
+        return WrappedCollectionsResponse(**response_dict)
+
+    def retrieve(
+        self,
+        id: str | UUID,
+    ) -> WrappedCollectionResponse:
+        """Get detailed information about a specific collection.
+
+        Args:
+            id (str | UUID): Collection ID to retrieve
+
+        Returns:
+            WrappedCollectionResponse
+        """
+        response_dict = self.client._make_request(
+            "GET", f"collections/{str(id)}", version="v3"
+        )
+
+        return WrappedCollectionResponse(**response_dict)
+
+    def update(
+        self,
+        id: str | UUID,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        generate_description: Optional[bool] = False,
+    ) -> WrappedCollectionResponse:
+        """Update collection information.
+
+        Args:
+            id (str | UUID): Collection ID to update
+            name (Optional[str]): Optional new name for the collection
+            description (Optional[str]): Optional new description for the collection
+            generate_description (Optional[bool]): Whether to generate a new synthetic description for the collection.
+
+        Returns:
+            WrappedCollectionResponse
+        """
+        data: dict[str, Any] = {}
+        if name is not None:
+            data["name"] = name
+        if description is not None:
+            data["description"] = description
+        if generate_description:
+            data["generate_description"] = str(generate_description)
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"collections/{str(id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedCollectionResponse(**response_dict)
+
+    def delete(
+        self,
+        id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Delete a collection.
+
+        Args:
+            id (str | UUID): Collection ID to delete
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE", f"collections/{str(id)}", version="v3"
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def list_documents(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedDocumentsResponse:
+        """List all documents in a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedDocumentsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = self.client._make_request(
+            "GET",
+            f"collections/{str(id)}/documents",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedDocumentsResponse(**response_dict)
+
+    def add_document(
+        self,
+        id: str | UUID,
+        document_id: str | UUID,
+    ) -> WrappedGenericMessageResponse:
+        """Add a document to a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            document_id (str | UUID): Document ID to add
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        response_dict = self.client._make_request(
+            "POST",
+            f"collections/{str(id)}/documents/{str(document_id)}",
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def remove_document(
+        self,
+        id: str | UUID,
+        document_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a document from a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            document_id (str | UUID): Document ID to remove
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"collections/{str(id)}/documents/{str(document_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def list_users(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedUsersResponse:
+        """List all users in a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedUsersResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = self.client._make_request(
+            "GET", f"collections/{str(id)}/users", params=params, version="v3"
+        )
+
+        return WrappedUsersResponse(**response_dict)
+
+    def add_user(
+        self,
+        id: str | UUID,
+        user_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Add a user to a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            user_id (str | UUID): User ID to add
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "POST", f"collections/{str(id)}/users/{str(user_id)}", version="v3"
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def remove_user(
+        self,
+        id: str | UUID,
+        user_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a user from a collection.
+
+        Args:
+            id (str | UUID): Collection ID
+            user_id (str | UUID): User ID to remove
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"collections/{str(id)}/users/{str(user_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def extract(
+        self,
+        id: str | UUID,
+        settings: Optional[dict] = None,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedGenericMessageResponse:
+        """Extract entities and relationships from documents in a collection.
+
+        Args:
+            id (str | UUID): Collection ID to extract from
+            settings (Optional[dict]): Settings for the entities and relationships extraction process
+            run_with_orchestration (Optional[bool]): Whether to run the extraction process with orchestration.
+                Defaults to True
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        params = {"run_with_orchestration": run_with_orchestration}
+
+        data: dict[str, Any] = {}
+        if settings is not None:
+            data["settings"] = settings
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"collections/{str(id)}/extract",
+            params=params,
+            json=data or None,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def retrieve_by_name(
+        self, name: str, owner_id: Optional[str] = None
+    ) -> WrappedCollectionResponse:
+        """Retrieve a collection by its name.
+
+        For non-superusers, the backend will use the authenticated user's ID.
+        For superusers, the caller must supply an owner_id to restrict the search.
+
+        Args:
+            name (str): The name of the collection to retrieve.
+            owner_id (Optional[str]): The owner ID to restrict the search. Required for superusers.
+
+        Returns:
+            WrappedCollectionResponse
+        """
+        query_params: dict[str, Any] = {}
+        if owner_id is not None:
+            query_params["owner_id"] = owner_id
+
+        response_dict = self.client._make_request(
+            "GET",
+            f"collections/name/{name}",
+            params=query_params,
+            version="v3",
+        )
+        return WrappedCollectionResponse(**response_dict)
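A sketch tying the collection endpoints together, assuming the wrapped responses expose their payload under `.results` and that the referenced document and user already exist (ids are placeholders):

```python
from sdk import R2RClient

client = R2RClient(base_url="http://localhost:7272")

collection = client.collections.create(
    name="quarterly-reports",
    description="Finance PDFs for Q3",
)
collection_id = collection.results.id  # assumes payload under .results

client.collections.add_document(collection_id, document_id="<document-id>")
client.collections.add_user(collection_id, user_id="<user-id>")

# Collections can also be looked up again by name.
same = client.collections.retrieve_by_name("quarterly-reports")
print(same)
```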
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_methods/conversations.py b/.venv/lib/python3.12/site-packages/sdk/sync_methods/conversations.py
new file mode 100644
index 00000000..d3da6bb3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_methods/conversations.py
@@ -0,0 +1,308 @@
+from builtins import list as _list
+from pathlib import Path
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedConversationMessagesResponse,
+    WrappedConversationResponse,
+    WrappedConversationsResponse,
+    WrappedMessageResponse,
+)
+
+
+class ConversationsSDK:
+    def __init__(self, client):
+        self.client = client
+
+    def create(
+        self,
+        name: Optional[str] = None,
+    ) -> WrappedConversationResponse:
+        """Create a new conversation.
+
+        Returns:
+            WrappedConversationResponse
+        """
+        data: dict[str, Any] = {}
+        if name:
+            data["name"] = name
+
+        response_dict = self.client._make_request(
+            "POST",
+            "conversations",
+            data=data,
+            version="v3",
+        )
+
+        return WrappedConversationResponse(**response_dict)
+
+    def list(
+        self,
+        ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedConversationsResponse:
+        """List conversations with pagination and sorting options.
+
+        Args:
+            ids (Optional[list[str | UUID]]): List of conversation IDs to retrieve
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedConversationsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if ids:
+            params["ids"] = ids
+
+        response_dict = self.client._make_request(
+            "GET",
+            "conversations",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedConversationsResponse(**response_dict)
+
+    def retrieve(
+        self,
+        id: str | UUID,
+    ) -> WrappedConversationMessagesResponse:
+        """Get detailed information about a specific conversation.
+
+        Args:
+            id (str | UUID): The ID of the conversation to retrieve
+
+        Returns:
+            WrappedConversationMessagesResponse
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            f"conversations/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedConversationMessagesResponse(**response_dict)
+
+    def update(
+        self,
+        id: str | UUID,
+        name: str,
+    ) -> WrappedConversationResponse:
+        """Update an existing conversation.
+
+        Args:
+            id (str | UUID): The ID of the conversation to update
+            name (str): The new name of the conversation
+
+        Returns:
+            WrappedConversationResponse
+        """
+        data: dict[str, Any] = {
+            "name": name,
+        }
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"conversations/{str(id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedConversationResponse(**response_dict)
+
+    def delete(
+        self,
+        id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Delete a conversation.
+
+        Args:
+            id (str | UUID): The ID of the conversation to delete
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"conversations/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def add_message(
+        self,
+        id: str | UUID,
+        content: str,
+        role: str,
+        metadata: Optional[dict] = None,
+        parent_id: Optional[str] = None,
+    ) -> WrappedMessageResponse:
+        """Add a new message to a conversation.
+
+        Args:
+            id (str | UUID): The ID of the conversation to add the message to
+            content (str): The content of the message
+            role (str): The role of the message (e.g., "user" or "assistant")
+            parent_id (Optional[str]): The ID of the parent message
+            metadata (Optional[dict]): Additional metadata to attach to the message
+
+        Returns:
+            WrappedMessageResponse
+        """
+        data: dict[str, Any] = {
+            "content": content,
+            "role": role,
+        }
+        if parent_id:
+            data["parent_id"] = parent_id
+        if metadata:
+            data["metadata"] = metadata
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"conversations/{str(id)}/messages",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedMessageResponse(**response_dict)
+
+    def update_message(
+        self,
+        id: str | UUID,
+        message_id: str,
+        content: Optional[str] = None,
+        metadata: Optional[dict] = None,
+    ) -> WrappedMessageResponse:
+        """Update an existing message in a conversation.
+
+        Args:
+            id (str | UUID): The ID of the conversation containing the message
+            message_id (str): The ID of the message to update
+            content (Optional[str]): The new content of the message
+            metadata (Optional[dict]): Additional metadata to attach to the message
+
+        Returns:
+            WrappedMessageResponse
+        """
+        data: dict[str, Any] = {"content": content}
+        if metadata:
+            data["metadata"] = metadata
+        response_dict = self.client._make_request(
+            "POST",
+            f"conversations/{str(id)}/messages/{message_id}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedMessageResponse(**response_dict)
+
+    def export(
+        self,
+        output_path: str | Path,
+        columns: Optional[_list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export conversations to a CSV file, streaming the results directly
+        to disk.
+
+        Args:
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting conversations
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        # Prepare request data
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        with open(output_path, "wb") as f:
+            response = self.client.client.post(
+                f"{self.client.base_url}/v3/conversations/export",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            )
+            if response.status_code != 200:
+                raise ValueError(
+                    f"Export failed with status {response.status_code}",
+                    response,
+                )
+
+            for chunk in response.iter_bytes():
+                if chunk:
+                    f.write(chunk)
+
+    def export_messages(
+        self,
+        output_path: str | Path,
+        columns: Optional[_list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export messages to a CSV file, streaming the results directly to
+        disk.
+
+        Args:
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting messages
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        # Prepare request data
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        with open(output_path, "wb") as f:
+            response = self.client.client.post(
+                f"{self.client.base_url}/v3/conversations/export_messages",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            )
+            if response.status_code != 200:
+                raise ValueError(
+                    f"Export failed with status {response.status_code}",
+                    response,
+                )
+
+            for chunk in response.iter_bytes():
+                if chunk:
+                    f.write(chunk)
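A short end-to-end sketch for the conversation methods above, again assuming the wrapped payloads live under `.results`:

```python
from sdk import R2RClient

client = R2RClient(base_url="http://localhost:7272")

conversation = client.conversations.create(name="support-session")
conversation_id = conversation.results.id  # assumes payload under .results

client.conversations.add_message(
    conversation_id,
    content="How do I rotate my API key?",
    role="user",
)

# Dump every conversation to CSV on disk.
client.conversations.export("conversations.csv", include_header=True)
```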
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_methods/documents.py b/.venv/lib/python3.12/site-packages/sdk/sync_methods/documents.py
new file mode 100644
index 00000000..0c9ad3bb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_methods/documents.py
@@ -0,0 +1,761 @@
+import json
+import os
+import tempfile
+import uuid
+from datetime import datetime
+from io import BytesIO
+from pathlib import Path
+from typing import Any, Optional
+from urllib.parse import urlparse
+from uuid import UUID
+
+import requests
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedChunksResponse,
+    WrappedCollectionsResponse,
+    WrappedDocumentResponse,
+    WrappedDocumentSearchResponse,
+    WrappedDocumentsResponse,
+    WrappedEntitiesResponse,
+    WrappedGenericMessageResponse,
+    WrappedIngestionResponse,
+    WrappedRelationshipsResponse,
+)
+
+from ..models import IngestionMode, SearchMode, SearchSettings
+
+
+class DocumentsSDK:
+    """SDK for interacting with documents in the v3 API."""
+
+    def __init__(self, client):
+        self.client = client
+
+    def create(
+        self,
+        file_path: Optional[str] = None,
+        raw_text: Optional[str] = None,
+        chunks: Optional[list[str]] = None,
+        id: Optional[str | UUID] = None,
+        ingestion_mode: Optional[str] = None,
+        collection_ids: Optional[list[str | UUID]] = None,
+        metadata: Optional[dict] = None,
+        ingestion_config: Optional[dict | IngestionMode] = None,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedIngestionResponse:
+        """Create a new document from either a file or content.
+
+        Args:
+            file_path (Optional[str]): The file to upload, if any
+            raw_text (Optional[str]): Raw text content to upload, if no file path is provided
+            chunks (Optional[list[str]]): Pre-chunked text to upload, if neither a file nor raw text is provided
+            id (Optional[str | UUID]): Optional ID to assign to the document
+            ingestion_mode (Optional[str]): Optional ingestion mode to use
+            collection_ids (Optional[list[str | UUID]]): Collection IDs to associate with the document. If none are provided, the document will be assigned to the user's default collection.
+            metadata (Optional[dict]): Optional metadata to assign to the document
+            ingestion_config (Optional[dict | IngestionMode]): Optional ingestion configuration to use
+            run_with_orchestration (Optional[bool]): Whether to run with orchestration
+
+        Returns:
+            WrappedIngestionResponse
+        """
+        if not file_path and not raw_text and not chunks:
+            raise ValueError(
+                "Either `file_path`, `raw_text` or `chunks` must be provided"
+            )
+        if (
+            (file_path and raw_text)
+            or (file_path and chunks)
+            or (raw_text and chunks)
+        ):
+            raise ValueError(
+                "Only one of `file_path`, `raw_text` or `chunks` may be provided"
+            )
+
+        data: dict[str, Any] = {}
+        files = None
+
+        if id:
+            data["id"] = str(id)
+        if metadata:
+            data["metadata"] = json.dumps(metadata)
+        if ingestion_config:
+            if isinstance(ingestion_config, IngestionMode):
+                ingestion_config = {"mode": ingestion_config.value}
+            app_config: dict[str, Any] = (
+                {}
+                if isinstance(ingestion_config, dict)
+                else ingestion_config["app"]
+            )
+            ingestion_config = dict(ingestion_config)
+            ingestion_config["app"] = app_config
+            data["ingestion_config"] = json.dumps(ingestion_config)
+        if collection_ids:
+            collection_ids = [
+                str(collection_id) for collection_id in collection_ids
+            ]  # type: ignore
+            data["collection_ids"] = json.dumps(collection_ids)
+        if run_with_orchestration is not None:
+            data["run_with_orchestration"] = str(run_with_orchestration)
+        if ingestion_mode is not None:
+            data["ingestion_mode"] = ingestion_mode
+        if file_path:
+            # Create a new file instance that will remain open during the request
+            file_instance = open(file_path, "rb")
+            files = [
+                (
+                    "file",
+                    (file_path, file_instance, "application/octet-stream"),
+                )
+            ]
+            try:
+                response_dict = self.client._make_request(
+                    "POST",
+                    "documents",
+                    data=data,
+                    files=files,
+                    version="v3",
+                )
+            finally:
+                # Ensure we close the file after the request is complete
+                file_instance.close()
+        elif raw_text:
+            data["raw_text"] = raw_text  # type: ignore
+            response_dict = self.client._make_request(
+                "POST",
+                "documents",
+                data=data,
+                version="v3",
+            )
+        else:
+            data["chunks"] = json.dumps(chunks)
+            response_dict = self.client._make_request(
+                "POST",
+                "documents",
+                data=data,
+                version="v3",
+            )
+
+        return WrappedIngestionResponse(**response_dict)
+
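`create` takes exactly one content source per call; a sketch of the three mutually exclusive forms (the file path is hypothetical):

```python
from sdk import R2RClient

client = R2RClient(base_url="http://localhost:7272")

# 1. Raw text is sent in the form body.
client.documents.create(raw_text="R2R ships a sync and an async client.")

# 2. Pre-chunked text is JSON-encoded into the form body.
client.documents.create(
    chunks=["first pre-chunked passage", "second pre-chunked passage"],
    metadata={"source": "example"},
)

# 3. A file upload streams the file as multipart form data.
# client.documents.create(file_path="report.pdf")  # hypothetical local file

# Passing more than one of the three raises ValueError client-side.
```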
+    def append_metadata(
+        self,
+        id: str | UUID,
+        metadata: list[dict],
+    ) -> WrappedDocumentResponse:
+        """Append metadata to a document.
+
+        Args:
+            id (str | UUID): ID of document to append metadata to
+            metadata (list[dict]): Metadata to append
+
+        Returns:
+            WrappedDocumentResponse
+        """
+        data = json.dumps(metadata)
+        response_dict = self.client._make_request(
+            "PATCH",
+            f"documents/{str(id)}/metadata",
+            data=data,
+            version="v3",
+        )
+
+        return WrappedDocumentResponse(**response_dict)
+
+    def replace_metadata(
+        self,
+        id: str | UUID,
+        metadata: list[dict],
+    ) -> WrappedDocumentResponse:
+        """Replace metadata for a document.
+
+        Args:
+            id (str | UUID): ID of document to replace metadata for
+            metadata (list[dict]): The metadata that will replace the existing metadata
+
+        Returns:
+            WrappedDocumentResponse
+        """
+        data = json.dumps(metadata)
+        response_dict = self.client._make_request(
+            "PUT",
+            f"documents/{str(id)}/metadata",
+            data=data,
+            version="v3",
+        )
+
+        return WrappedDocumentResponse(**response_dict)
+
+    def retrieve(
+        self,
+        id: str | UUID,
+    ) -> WrappedDocumentResponse:
+        """Get a specific document by ID.
+
+        Args:
+            id (str | UUID): ID of document to retrieve
+
+        Returns:
+            WrappedDocumentResponse
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            f"documents/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedDocumentResponse(**response_dict)
+
+    def download(
+        self,
+        id: str | UUID,
+    ) -> BytesIO:
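+        """Download a document's original file content.
+
+        Args:
+            id (str | UUID): ID of document to download
+
+        Returns:
+            BytesIO: An in-memory buffer containing the raw file bytes
+        """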
+        response = self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/download",
+            version="v3",
+        )
+        if not isinstance(response, BytesIO):
+            raise ValueError("Expected BytesIO response")
+        return response
+
+    def download_zip(
+        self,
+        document_ids: Optional[list[str | UUID]] = None,
+        start_date: Optional[datetime] = None,
+        end_date: Optional[datetime] = None,
+        output_path: Optional[str | Path] = None,
+    ) -> BytesIO | None:
+        """Download multiple documents as a zip file."""
+        params: dict[str, Any] = {}
+        if document_ids:
+            params["document_ids"] = [str(doc_id) for doc_id in document_ids]
+        if start_date:
+            params["start_date"] = start_date.isoformat()
+        if end_date:
+            params["end_date"] = end_date.isoformat()
+
+        response = self.client._make_request(
+            "GET",
+            "documents/download_zip",
+            params=params,
+            version="v3",
+        )
+
+        if not isinstance(response, BytesIO):
+            raise ValueError("Expected BytesIO response")
+
+        if output_path:
+            output_path = (
+                Path(output_path)
+                if isinstance(output_path, str)
+                else output_path
+            )
+            with open(output_path, "wb") as f:
+                f.write(response.getvalue())
+            return None
+
+        return response
+
+    def export(
+        self,
+        output_path: str | Path,
+        columns: Optional[list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export documents to a CSV file, streaming the results directly to
+        disk.
+
+        Args:
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting documents
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
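+
+        Example (a minimal sketch; the column names shown are assumptions
+        and depend on the server's document schema):
+            >>> client.documents.export(
+            ...     output_path="documents.csv",
+            ...     columns=["id", "title", "created_at"],
+            ...     include_header=True,
+            ... )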
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        with open(output_path, "wb") as f:
+            response = self.client.client.post(
+                f"{self.client.base_url}/v3/documents/export",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            )
+            if response.status_code != 200:
+                raise ValueError(
+                    f"Export failed with status {response.status_code}",
+                    response,
+                )
+
+            for chunk in response.iter_bytes():
+                if chunk:
+                    f.write(chunk)
+
+    def export_entities(
+        self,
+        id: str | UUID,
+        output_path: str | Path,
+        columns: Optional[list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export documents to a CSV file, streaming the results directly to
+        disk.
+
+        Args:
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting documents
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        # Prepare request data
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        with open(output_path, "wb") as f:
+            response = self.client.client.post(
+                f"{self.client.base_url}/v3/documents/{str(id)}/entities/export",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            )
+            if response.status_code != 200:
+                raise ValueError(
+                    f"Export failed with status {response.status_code}",
+                    response,
+                )
+
+            for chunk in response.iter_bytes():
+                if chunk:
+                    f.write(chunk)
+
+    def export_relationships(
+        self,
+        id: str | UUID,
+        output_path: str | Path,
+        columns: Optional[list[str]] = None,
+        filters: Optional[dict] = None,
+        include_header: bool = True,
+    ) -> None:
+        """Export document relationships to a CSV file, streaming the results
+        directly to disk.
+
+        Args:
+            id (str | UUID): ID of the document whose relationships should be exported
+            output_path (str | Path): Local path where the CSV file should be saved
+            columns (Optional[list[str]]): Specific columns to export. If None, exports default columns
+            filters (Optional[dict]): Optional filters to apply when selecting relationships
+            include_header (bool): Whether to include column headers in the CSV (default: True)
+
+        Returns:
+            None
+        """
+        # Convert path to string if it's a Path object
+        output_path = (
+            str(output_path) if isinstance(output_path, Path) else output_path
+        )
+
+        # Prepare request data
+        data: dict[str, Any] = {"include_header": include_header}
+        if columns:
+            data["columns"] = columns
+        if filters:
+            data["filters"] = filters
+
+        # Stream response directly to file
+        with open(output_path, "wb") as f:
+            response = self.client.client.post(
+                f"{self.client.base_url}/v3/documents/{str(id)}/relationships/export",
+                json=data,
+                headers={
+                    "Accept": "text/csv",
+                    **self.client._get_auth_header(),
+                },
+            )
+            if response.status_code != 200:
+                raise ValueError(
+                    f"Export failed with status {response.status_code}",
+                    response,
+                )
+
+            for chunk in response.iter_bytes():
+                if chunk:
+                    f.write(chunk)
+
+    def delete(
+        self,
+        id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Delete a specific document.
+
+        Args:
+            id (str | UUID): ID of document to delete
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"documents/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def list_chunks(
+        self,
+        id: str | UUID,
+        include_vectors: Optional[bool] = False,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedChunksResponse:
+        """Get chunks for a specific document.
+
+        Args:
+            id (str | UUID): ID of document to retrieve chunks for
+            include_vectors (Optional[bool]): Whether to include vector embeddings in the response
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedChunksResponse
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+            "include_vectors": include_vectors,
+        }
+        response_dict = self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/chunks",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedChunksResponse(**response_dict)
+
+    def list_collections(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedCollectionsResponse:
+        """List collections for a specific document.
+
+        Args:
+            id (str | UUID): ID of document to retrieve collections for
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedCollectionsResponse
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/collections",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedCollectionsResponse(**response_dict)
+
+    def delete_by_filter(
+        self,
+        filters: dict,
+    ) -> WrappedBooleanResponse:
+        """Delete documents based on filters.
+
+        Args:
+            filters (dict): Filters to apply when selecting documents to delete
+
+        Returns:
+            WrappedBooleanResponse
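+
+        Example (illustrative; assumes the server supports Mongo-style
+        filter operators such as "$eq"):
+            >>> client.documents.delete_by_filter(
+            ...     {"document_type": {"$eq": "txt"}}
+            ... )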
+        """
+        filters_json = json.dumps(filters)
+        response_dict = self.client._make_request(
+            "DELETE",
+            "documents/by-filter",
+            data=filters_json,
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def extract(
+        self,
+        id: str | UUID,
+        settings: Optional[dict] = None,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedGenericMessageResponse:
+        """Extract entities and relationships from a document.
+
+        Args:
+            id (str | UUID): ID of document to extract from
+            settings (Optional[dict]): Settings for extraction process
+            run_with_orchestration (Optional[bool]): Whether to run with orchestration
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        data: dict[str, Any] = {}
+        if settings:
+            data["settings"] = json.dumps(settings)
+        if run_with_orchestration is not None:
+            data["run_with_orchestration"] = str(run_with_orchestration)
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"documents/{str(id)}/extract",
+            params=data,
+            version="v3",
+        )
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def list_entities(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+        include_embeddings: Optional[bool] = False,
+    ) -> WrappedEntitiesResponse:
+        """List entities extracted from a document.
+
+        Args:
+            id (str | UUID): ID of document to get entities from
+            offset (Optional[int]): Number of items to skip
+            limit (Optional[int]): Max number of items to return
+            include_embeddings (Optional[bool]): Whether to include embeddings
+
+        Returns:
+            WrappedEntitiesResponse
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+            "include_embeddings": include_embeddings,
+        }
+        response_dict = self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/entities",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedEntitiesResponse(**response_dict)
+
+    def list_relationships(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+        entity_names: Optional[list[str]] = None,
+        relationship_types: Optional[list[str]] = None,
+    ) -> WrappedRelationshipsResponse:
+        """List relationships extracted from a document.
+
+        Args:
+            id (str | UUID): ID of document to get relationships from
+            offset (Optional[int]): Number of items to skip
+            limit (Optional[int]): Max number of items to return
+            entity_names (Optional[list[str]]): Filter by entity names
+            relationship_types (Optional[list[str]]): Filter by relationship types
+
+        Returns:
+            WrappedRelationshipsResponse
+        """
+        params: dict[str, Any] = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if entity_names:
+            params["entity_names"] = entity_names
+        if relationship_types:
+            params["relationship_types"] = relationship_types
+
+        response_dict = self.client._make_request(
+            "GET",
+            f"documents/{str(id)}/relationships",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedRelationshipsResponse(**response_dict)
+
+    def list(
+        self,
+        ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedDocumentsResponse:
+        """List documents with pagination.
+
+        Args:
+            ids (Optional[list[str | UUID]]): Optional list of document IDs to filter by
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedDocumentsResponse
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if ids:
+            params["ids"] = [str(doc_id) for doc_id in ids]  # type: ignore
+
+        response_dict = self.client._make_request(
+            "GET",
+            "documents",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedDocumentsResponse(**response_dict)
+
+    def search(
+        self,
+        query: str,
+        search_mode: Optional[str | SearchMode] = "custom",
+        search_settings: Optional[dict | SearchSettings] = None,
+    ) -> WrappedDocumentSearchResponse:
+        """Conduct a vector and/or graph search.
+
+        Args:
+            query (str): The query to search for.
+            search_mode (Optional[str | SearchMode]): The search mode to use. Defaults to "custom".
+            search_settings (Optional[dict | SearchSettings]): Vector search settings.
+
+        Returns:
+            WrappedDocumentSearchResponse
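+
+        Example (a minimal sketch; the settings keys shown are assumptions
+        mirroring common SearchSettings fields):
+            >>> client.documents.search(
+            ...     query="What is DeepSeek R1?",
+            ...     search_settings={"limit": 10},
+            ... )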
+        """
+        if search_settings and not isinstance(search_settings, dict):
+            search_settings = search_settings.model_dump()
+        data: dict[str, Any] = {
+            "query": query,
+            "search_settings": search_settings,
+        }
+        if search_mode:
+            data["search_mode"] = (
+                search_mode
+                if isinstance(search_mode, str)
+                else search_mode.value
+            )
+
+        response_dict = self.client._make_request(
+            "POST",
+            "documents/search",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedDocumentSearchResponse(**response_dict)
+
+    def deduplicate(
+        self,
+        id: str | UUID,
+        settings: Optional[dict] = None,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedGenericMessageResponse:
+        """Deduplicate entities and relationships from a document.
+
+        Args:
+            id (str | UUID): ID of document to deduplicate
+            settings (Optional[dict]): Settings for the deduplication process
+            run_with_orchestration (Optional[bool]): Whether to run with orchestration
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        data: dict[str, Any] = {}
+        if settings:
+            data["settings"] = json.dumps(settings)
+        if run_with_orchestration is not None:
+            data["run_with_orchestration"] = str(run_with_orchestration)
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"documents/{str(id)}/deduplicate",
+            params=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def create_sample(self, hi_res: bool = False) -> WrappedIngestionResponse:
+        """Ingest a sample document into R2R.
+
+        This method downloads a sample file from a predefined URL, saves it
+        as a temporary file, and ingests it using the `create` method. The
+        temporary file is removed after ingestion.
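+
+        Args:
+            hi_res (bool): If True, ingest using the "hi-res" ingestion mode. Defaults to False.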
+
+        Returns:
+            WrappedIngestionResponse: The response from the ingestion request.
+        """
+        # Define the sample file URL
+        sample_file_url = "https://raw.githubusercontent.com/SciPhi-AI/R2R/main/py/core/examples/data/DeepSeek_R1.pdf"
+        # Parse the URL to extract the filename
+        parsed_url = urlparse(sample_file_url)
+        filename = os.path.basename(parsed_url.path)
+
+        # Create a temporary file.
+        # We use binary mode ("wb") for both PDFs and text files because the `create`
+        # method will open the file in binary mode.
+        temp_file = tempfile.NamedTemporaryFile(
+            mode="wb", delete=False, suffix=f"_{filename}"
+        )
+        try:
+            response = requests.get(sample_file_url)
+            response.raise_for_status()
+            # Write the downloaded content to the temporary file.
+            # (For text files, using response.content avoids any potential encoding issues
+            # when the file is later opened in binary mode.)
+            temp_file.write(response.content)
+            temp_file.close()
+
+            # Prepare metadata and generate a stable document ID based on the URL
+            metadata = {"title": filename}
+            doc_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, sample_file_url))
+
+            # Call the SDK's create method to ingest the file.
+            ingestion_response = self.create(
+                file_path=temp_file.name,
+                metadata=metadata,
+                id=doc_id,
+                ingestion_mode="hi-res" if hi_res else None,
+            )
+            return ingestion_response
+        finally:
+            # Remove the temporary file regardless of whether ingestion succeeded.
+            try:
+                os.unlink(temp_file.name)
+            except Exception:
+                pass
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_methods/graphs.py b/.venv/lib/python3.12/site-packages/sdk/sync_methods/graphs.py
new file mode 100644
index 00000000..d6903209
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_methods/graphs.py
@@ -0,0 +1,616 @@
+from builtins import list as _list
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedCommunitiesResponse,
+    WrappedCommunityResponse,
+    WrappedEntitiesResponse,
+    WrappedEntityResponse,
+    WrappedGenericMessageResponse,
+    WrappedGraphResponse,
+    WrappedGraphsResponse,
+    WrappedRelationshipResponse,
+    WrappedRelationshipsResponse,
+)
+
+
+class GraphsSDK:
+    """SDK for interacting with knowledge graphs in the v3 API."""
+
+    def __init__(self, client):
+        self.client = client
+
+    def list(
+        self,
+        collection_ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedGraphsResponse:
+        """List graphs with pagination and filtering options.
+
+        Args:
+            collection_ids (Optional[list[str | UUID]]): Filter graphs by collection IDs
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedGraphsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if collection_ids:
+            params["collection_ids"] = collection_ids
+
+        response_dict = self.client._make_request(
+            "GET", "graphs", params=params, version="v3"
+        )
+
+        return WrappedGraphsResponse(**response_dict)
+
+    def retrieve(
+        self,
+        collection_id: str | UUID,
+    ) -> WrappedGraphResponse:
+        """Get detailed information about a specific graph.
+
+        Args:
+            collection_id (str | UUID): Graph ID to retrieve
+
+        Returns:
+            WrappedGraphResponse
+        """
+        response_dict = self.client._make_request(
+            "GET", f"graphs/{str(collection_id)}", version="v3"
+        )
+
+        return WrappedGraphResponse(**response_dict)
+
+    def reset(
+        self,
+        collection_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Deletes a graph and all its associated data.
+
+        This endpoint permanently removes the specified graph along with all
+        entities and relationships that belong to only this graph.
+
+        Entities and relationships extracted from documents are not deleted.
+
+        Args:
+            collection_id (str | UUID): Graph ID to reset
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "POST", f"graphs/{str(collection_id)}/reset", version="v3"
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def update(
+        self,
+        collection_id: str | UUID,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+    ) -> WrappedGraphResponse:
+        """Update graph information.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            name (Optional[str]): Optional new name for the graph
+            description (Optional[str]): Optional new description for the graph
+
+        Returns:
+            WrappedGraphResponse
+        """
+        data: dict[str, Any] = {}
+        if name is not None:
+            data["name"] = name
+        if description is not None:
+            data["description"] = description
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGraphResponse(**response_dict)
+
+    def list_entities(
+        self,
+        collection_id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedEntitiesResponse:
+        """List entities in a graph.
+
+        Args:
+            collection_id (str | UUID): Graph ID to list entities from
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedEntitiesResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/entities",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedEntitiesResponse(**response_dict)
+
+    def get_entity(
+        self,
+        collection_id: str | UUID,
+        entity_id: str | UUID,
+    ) -> WrappedEntityResponse:
+        """Get entity information in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            entity_id (str | UUID): Entity ID to get from the graph
+
+        Returns:
+            WrappedEntityResponse
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/entities/{str(entity_id)}",
+            version="v3",
+        )
+
+        return WrappedEntityResponse(**response_dict)
+
+    def remove_entity(
+        self,
+        collection_id: str | UUID,
+        entity_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove an entity from a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            entity_id (str | UUID): Entity ID to remove from the graph
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/entities/{str(entity_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def list_relationships(
+        self,
+        collection_id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedRelationshipsResponse:
+        """List relationships in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedRelationshipsResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/relationships",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedRelationshipsResponse(**response_dict)
+
+    def get_relationship(
+        self,
+        collection_id: str | UUID,
+        relationship_id: str | UUID,
+    ) -> WrappedRelationshipResponse:
+        """Get relationship information in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            relationship_id (str | UUID): Relationship ID to get from the graph
+
+        Returns:
+            WrappedRelationshipResponse
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/relationships/{str(relationship_id)}",
+            version="v3",
+        )
+
+        return WrappedRelationshipResponse(**response_dict)
+
+    def remove_relationship(
+        self,
+        collection_id: str | UUID,
+        relationship_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a relationship from a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            relationship_id (str | UUID): Relationship ID to remove from the graph
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/relationships/{str(relationship_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def build(
+        self,
+        collection_id: str | UUID,
+        settings: Optional[dict] = None,
+        run_with_orchestration: bool = True,
+    ) -> WrappedGenericMessageResponse:
+        """Build a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            settings (dict): Settings for the build
+            run_with_orchestration (bool, optional): Whether to run with orchestration. Defaults to True.
+
+        Returns:
+            WrappedGenericMessageResponse
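+
+        Example (illustrative; the available settings keys are assumptions
+        and depend on the server configuration):
+            >>> client.graphs.build(
+            ...     collection_id="<collection-id>",
+            ...     settings={"generation_config": {"model": "openai/gpt-4o-mini"}},
+            ... )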
+        """
+        data: dict[str, Any] = {
+            "run_with_orchestration": run_with_orchestration,
+        }
+        if settings:
+            data["settings"] = settings
+        response_dict = self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/communities/build",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def list_communities(
+        self,
+        collection_id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedCommunitiesResponse:
+        """List communities in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedCommunitiesResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/communities",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedCommunitiesResponse(**response_dict)
+
+    def get_community(
+        self,
+        collection_id: str | UUID,
+        community_id: str | UUID,
+    ) -> WrappedCommunityResponse:
+        """Get community information in a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            community_id (str | UUID): Community ID to get from the graph
+
+        Returns:
+            WrappedCommunityResponse
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            f"graphs/{str(collection_id)}/communities/{str(community_id)}",
+            version="v3",
+        )
+
+        return WrappedCommunityResponse(**response_dict)
+
+    def update_community(
+        self,
+        collection_id: str | UUID,
+        community_id: str | UUID,
+        name: Optional[str] = None,
+        summary: Optional[str] = None,
+        findings: Optional[_list[str]] = None,
+        rating: Optional[int] = None,
+        rating_explanation: Optional[str] = None,
+        level: Optional[int] = None,
+        attributes: Optional[dict] = None,
+    ) -> WrappedCommunityResponse:
+        """Update community information.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            community_id (str | UUID): Community ID to update
+            name (Optional[str]): Optional new name for the community
+            summary (Optional[str]): Optional new summary for the community
+            findings (Optional[list[str]]): Optional new findings for the community
+            rating (Optional[int]): Optional new rating for the community
+            rating_explanation (Optional[str]): Optional new rating explanation for the community
+            level (Optional[int]): Optional new level for the community
+            attributes (Optional[dict]): Optional new attributes for the community
+
+        Returns:
+            WrappedCommunityResponse
+        """
+        data: dict[str, Any] = {}
+        if name is not None:
+            data["name"] = name
+        if summary is not None:
+            data["summary"] = summary
+        if findings is not None:
+            data["findings"] = findings
+        if rating is not None:
+            data["rating"] = str(rating)
+        if rating_explanation is not None:
+            data["rating_explanation"] = rating_explanation
+        if level is not None:
+            data["level"] = level
+        if attributes is not None:
+            data["attributes"] = attributes
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/communities/{str(community_id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedCommunityResponse(**response_dict)
+
+    def delete_community(
+        self,
+        collection_id: str | UUID,
+        community_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a community from a graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            community_id (str | UUID): Community ID to remove from the graph
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/communities/{str(community_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def pull(
+        self,
+        collection_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Adds documents to a graph by copying their entities and
+        relationships.
+
+        This endpoint:
+            1. Copies document entities to the graphs_entities table
+            2. Copies document relationships to the graphs_relationships table
+            3. Associates the documents with the graph
+
+        When a document is added:
+            - Its entities and relationships are copied to graph-specific tables
+            - Existing entities/relationships are updated by merging their properties
+            - The document ID is recorded in the graph's document_ids array
+
+        Documents added to a graph will contribute their knowledge to:
+            - Graph analysis and querying
+            - Community detection
+            - Knowledge graph enrichment
+
+        Returns:
+            WrappedBooleanResponse
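+
+        Example (illustrative; a typical sequence might pull documents into
+        the graph and then build communities):
+            >>> client.graphs.pull(collection_id="<collection-id>")
+            >>> client.graphs.build(collection_id="<collection-id>")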
+        """
+        response_dict = self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/pull",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def remove_document(
+        self,
+        collection_id: str | UUID,
+        document_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Removes a document from a graph and removes any associated entities.
+
+        This endpoint:
+            1. Removes the document ID from the graph's document_ids array
+            2. Optionally deletes the document's copied entities and relationships
+
+        The user must have access to both the graph and the document being removed.
+
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"graphs/{str(collection_id)}/documents/{str(document_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def create_entity(
+        self,
+        collection_id: str | UUID,
+        name: str,
+        description: str,
+        category: Optional[str] = None,
+        metadata: Optional[dict] = None,
+    ) -> WrappedEntityResponse:
+        """Creates a new entity in the graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            name (str): The name of the entity to create
+            description (str): The description of the entity
+            category (Optional[str]): The category of the entity
+            metadata (Optional[dict]): Additional metadata for the entity
+
+        Returns:
+            WrappedEntityResponse
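+
+        Example (a minimal sketch with placeholder values):
+            >>> client.graphs.create_entity(
+            ...     collection_id="<collection-id>",
+            ...     name="Marie Curie",
+            ...     description="Polish-French physicist and chemist.",
+            ...     category="person",
+            ... )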
+        """
+        data: dict[str, Any] = {
+            "name": name,
+            "description": description,
+        }
+        if category is not None:
+            data["category"] = category
+        if metadata is not None:
+            data["metadata"] = metadata
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/entities",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedEntityResponse(**response_dict)
+
+    def create_relationship(
+        self,
+        collection_id: str | UUID,
+        subject: str,
+        subject_id: str | UUID,
+        predicate: str,
+        object: str,
+        object_id: str | UUID,
+        description: str,
+        weight: Optional[float] = None,
+        metadata: Optional[dict] = None,
+    ) -> WrappedRelationshipResponse:
+        """Creates a new relationship in the graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            subject (str): The subject of the relationship
+            subject_id (str | UUID): The ID of the subject entity
+            predicate (str): The predicate/type of the relationship
+            object (str): The object of the relationship
+            object_id (str | UUID): The ID of the object entity
+            description (str): Description of the relationship
+            weight (Optional[float]): Weight/strength of the relationship
+            metadata (Optional[dict]): Additional metadata for the relationship
+
+        Returns:
+            WrappedRelationshipResponse
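+
+        Example (a minimal sketch with hypothetical entity IDs):
+            >>> client.graphs.create_relationship(
+            ...     collection_id="<collection-id>",
+            ...     subject="Marie Curie",
+            ...     subject_id="<subject-entity-id>",
+            ...     predicate="won",
+            ...     object="Nobel Prize in Physics",
+            ...     object_id="<object-entity-id>",
+            ...     description="Marie Curie won the Nobel Prize in Physics in 1903.",
+            ... )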
+        """
+        data: dict[str, Any] = {
+            "subject": subject,
+            "subject_id": str(subject_id),
+            "predicate": predicate,
+            "object": object,
+            "object_id": str(object_id),
+            "description": description,
+        }
+        if weight is not None:
+            data["weight"] = weight
+        if metadata is not None:
+            data["metadata"] = metadata
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/relationships",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedRelationshipResponse(**response_dict)
+
+    def create_community(
+        self,
+        collection_id: str | UUID,
+        name: str,
+        summary: str,
+        findings: Optional[_list[str]] = None,
+        rating: Optional[float] = None,
+        rating_explanation: Optional[str] = None,
+    ) -> WrappedCommunityResponse:
+        """Creates a new community in the graph.
+
+        Args:
+            collection_id (str | UUID): The collection ID corresponding to the graph
+            name (str): The name of the community
+            summary (str): A summary description of the community
+            findings (Optional[list[str]]): List of findings about the community
+            rating (Optional[float]): Rating between 1 and 10
+            rating_explanation (Optional[str]): Explanation for the rating
+
+        Returns:
+            WrappedCommunityResponse
+        """
+        data: dict[str, Any] = {
+            "name": name,
+            "summary": summary,
+        }
+        if findings is not None:
+            data["findings"] = findings
+        if rating is not None:
+            data["rating"] = rating
+        if rating_explanation is not None:
+            data["rating_explanation"] = rating_explanation
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"graphs/{str(collection_id)}/communities",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedCommunityResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_methods/indices.py b/.venv/lib/python3.12/site-packages/sdk/sync_methods/indices.py
new file mode 100644
index 00000000..1db9afc4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_methods/indices.py
@@ -0,0 +1,119 @@
+import json
+from typing import Any, Optional
+
+from shared.api.models import (
+    WrappedGenericMessageResponse,
+    WrappedVectorIndexResponse,
+    WrappedVectorIndicesResponse,
+)
+
+
+class IndicesSDK:
+    def __init__(self, client):
+        self.client = client
+
+    def create(
+        self,
+        config: dict,
+        run_with_orchestration: Optional[bool] = True,
+    ) -> WrappedGenericMessageResponse:
+        """Create a new vector similarity search index in the database.
+
+        Args:
+            config (dict | IndexConfig): Configuration for the vector index.
+            run_with_orchestration (Optional[bool]): Whether to run index creation as an orchestrated task.
+
+        Returns:
+            WrappedGenericMessageResponse
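+
+        Example (illustrative; the config keys shown are assumptions based
+        on a typical HNSW index configuration):
+            >>> client.indices.create(
+            ...     config={
+            ...         "table_name": "vectors",
+            ...         "index_method": "hnsw",
+            ...         "index_measure": "cosine_distance",
+            ...         "index_arguments": {"m": 16, "ef_construction": 64},
+            ...     },
+            ... )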
+        """
+        if not isinstance(config, dict):
+            config = config.model_dump()
+
+        data: dict[str, Any] = {
+            "config": config,
+            "run_with_orchestration": run_with_orchestration,
+        }
+        response_dict = self.client._make_request(
+            "POST",
+            "indices",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def list(
+        self,
+        filters: Optional[dict] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 10,
+    ) -> WrappedVectorIndicesResponse:
+        """List existing vector similarity search indices with pagination
+        support.
+
+        Args:
+            filters (Optional[dict]): Filter criteria for indices.
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 10.
+
+        Returns:
+            WrappedVectorIndicesResponse
+        """
+        params: dict = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if filters:
+            params["filters"] = json.dumps(filters)
+        response_dict = self.client._make_request(
+            "GET",
+            "indices",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedVectorIndicesResponse(**response_dict)
+
+    def retrieve(
+        self,
+        index_name: str,
+        table_name: str = "vectors",
+    ) -> WrappedVectorIndexResponse:
+        """Get detailed information about a specific vector index.
+
+        Args:
+            index_name (str): The name of the index to retrieve.
+            table_name (str): The name of the table where the index is stored.
+
+        Returns:
+            WrappedVectorIndexResponse
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            f"indices/{table_name}/{index_name}",
+            version="v3",
+        )
+
+        return WrappedVectorIndexResponse(**response_dict)
+
+    def delete(
+        self,
+        index_name: str,
+        table_name: str = "vectors",
+    ) -> WrappedGenericMessageResponse:
+        """Delete an existing vector index.
+
+        Args:
+            index_name (str): The name of the index to delete.
+            table_name (str): The name of the table where the index is stored.
+
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"indices/{table_name}/{index_name}",
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_methods/prompts.py b/.venv/lib/python3.12/site-packages/sdk/sync_methods/prompts.py
new file mode 100644
index 00000000..b2200b4d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_methods/prompts.py
@@ -0,0 +1,128 @@
+import json
+from typing import Any, Optional
+
+from shared.api.models import (
+    WrappedBooleanResponse,
+    WrappedGenericMessageResponse,
+    WrappedPromptResponse,
+    WrappedPromptsResponse,
+)
+
+
+class PromptsSDK:
+    def __init__(self, client):
+        self.client = client
+
+    def create(
+        self, name: str, template: str, input_types: dict
+    ) -> WrappedGenericMessageResponse:
+        """Create a new prompt.
+
+        Args:
+            name (str): The name of the prompt
+            template (str): The template string for the prompt
+            input_types (dict): A dictionary mapping input names to their types
+        Returns:
+            WrappedGenericMessageResponse
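+
+        Example (a minimal sketch; the input type names are assumptions):
+            >>> client.prompts.create(
+            ...     name="greeting_prompt",
+            ...     template="Hello, {name}!",
+            ...     input_types={"name": "str"},
+            ... )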
+        """
+        data: dict[str, Any] = {
+            "name": name,
+            "template": template,
+            "input_types": input_types,
+        }
+        response_dict = self.client._make_request(
+            "POST",
+            "prompts",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def list(self) -> WrappedPromptsResponse:
+        """List all available prompts.
+
+        Returns:
+            WrappedPromptsResponse
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            "prompts",
+            version="v3",
+        )
+
+        return WrappedPromptsResponse(**response_dict)
+
+    def retrieve(
+        self,
+        name: str,
+        inputs: Optional[dict] = None,
+        prompt_override: Optional[str] = None,
+    ) -> WrappedPromptResponse:
+        """Get a specific prompt by name, optionally with inputs and override.
+
+        Args:
+            name (str): The name of the prompt to retrieve
+            inputs (Optional[dict]): JSON-encoded inputs for the prompt
+            prompt_override (Optional[str]): An override for the prompt template
+        Returns:
+            WrappedPromptResponse: The prompt with inputs and/or override applied
+        """
+        params = {}
+        if inputs:
+            params["inputs"] = json.dumps(inputs)
+        if prompt_override:
+            params["prompt_override"] = prompt_override
+        response_dict = self.client._make_request(
+            "POST",
+            f"prompts/{name}",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedPromptResponse(**response_dict)
+
+    def update(
+        self,
+        name: str,
+        template: Optional[str] = None,
+        input_types: Optional[dict] = None,
+    ) -> WrappedGenericMessageResponse:
+        """Update an existing prompt's template and/or input types.
+
+        Args:
+            name (str): The name of the prompt to update
+            template (Optional[str]): The updated template string for the prompt
+            input_types (Optional[dict]): The updated dictionary mapping input names to their types
+        Returns:
+            WrappedGenericMessageResponse
+        """
+        data: dict = {}
+        if template:
+            data["template"] = template
+        if input_types:
+            data["input_types"] = input_types
+        response_dict = self.client._make_request(
+            "PUT",
+            f"prompts/{name}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def delete(self, name: str) -> WrappedBooleanResponse:
+        """Delete a prompt by name.
+
+        Args:
+            name (str): The name of the prompt to delete
+        Returns:
+            WrappedBooleanResponse
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"prompts/{name}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_methods/retrieval.py b/.venv/lib/python3.12/site-packages/sdk/sync_methods/retrieval.py
new file mode 100644
index 00000000..4a927014
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_methods/retrieval.py
@@ -0,0 +1,554 @@
+import json
+import uuid
+from typing import Any, Generator, Optional
+
+from shared.api.models import (
+    WrappedAgentResponse,
+    WrappedEmbeddingResponse,
+    WrappedLLMChatCompletion,
+    WrappedRAGResponse,
+    WrappedSearchResponse,
+)
+
+from ..models import (
+    AgentEvent,
+    CitationData,
+    CitationEvent,
+    Delta,
+    DeltaPayload,
+    FinalAnswerData,
+    FinalAnswerEvent,
+    GenerationConfig,
+    Message,
+    MessageData,
+    MessageDelta,
+    MessageEvent,
+    SearchMode,
+    SearchResultsData,
+    SearchResultsEvent,
+    SearchSettings,
+    ThinkingData,
+    ThinkingEvent,
+    ToolCallData,
+    ToolCallEvent,
+    ToolResultData,
+    ToolResultEvent,
+    UnknownEvent,
+)
+
+
+def _parse_delta_payload(data_obj: dict) -> dict:
+    """Normalize the nested ``delta`` structure of a raw SSE payload into
+    typed ``Delta`` / ``MessageDelta`` / ``DeltaPayload`` models.
+
+    Shared by the "message" and "thinking" branches of
+    ``parse_retrieval_event``, which receive identically shaped payloads.
+    """
+    if "delta" in data_obj and isinstance(data_obj["delta"], dict):
+        delta_dict = data_obj["delta"]
+
+        # Convert content items to MessageDelta objects
+        if "content" in delta_dict and isinstance(delta_dict["content"], list):
+            parsed_content = []
+            for item in delta_dict["content"]:
+                if isinstance(item, dict):
+                    # Parse payload to DeltaPayload
+                    if "payload" in item and isinstance(item["payload"], dict):
+                        item["payload"] = DeltaPayload(**item["payload"])
+                    parsed_content.append(MessageDelta(**item))
+
+            # Replace with parsed content
+            delta_dict["content"] = parsed_content
+
+        # Create properly typed Delta object
+        data_obj["delta"] = Delta(**delta_dict)
+
+    return data_obj
+
+
+def parse_retrieval_event(raw: dict) -> Optional[AgentEvent]:
+    """
+    Convert a raw SSE event dict into a typed Pydantic model.
+
+    Example raw dict:
+        {
+          "event": "message",
+          "data": "{\"id\": \"msg_partial\", \"object\": \"agent.message.delta\", \"delta\": {...}}"
+        }
+    """
+    event_type = raw.get("event", "unknown")
+
+    # If event_type == "done", we usually return None to signal the SSE stream is finished.
+    if event_type == "done":
+        return None
+
+    # The SSE "data" is JSON-encoded, so parse it
+    data_str = raw.get("data", "")
+    try:
+        data_obj = json.loads(data_str)
+    except json.JSONDecodeError as e:
+        # You can decide whether to raise or return UnknownEvent
+        raise ValueError(f"Could not parse JSON in SSE event data: {e}") from e
+
+    # Now branch on event_type to build the right Pydantic model
+    if event_type == "search_results":
+        return SearchResultsEvent(
+            event=event_type,
+            data=SearchResultsData(**data_obj),
+        )
+    elif event_type == "message":
+        # Parse nested delta structure manually before creating MessageData
+        if "delta" in data_obj and isinstance(data_obj["delta"], dict):
+            delta_dict = data_obj["delta"]
+
+            # Convert content items to MessageDelta objects
+            if "content" in delta_dict and isinstance(
+                delta_dict["content"], list
+            ):
+                parsed_content = []
+                for item in delta_dict["content"]:
+                    if isinstance(item, dict):
+                        # Parse payload to DeltaPayload
+                        if "payload" in item and isinstance(
+                            item["payload"], dict
+                        ):
+                            payload_dict = item["payload"]
+                            item["payload"] = DeltaPayload(**payload_dict)
+                        parsed_content.append(MessageDelta(**item))
+
+                # Replace with parsed content
+                delta_dict["content"] = parsed_content
+
+            # Create properly typed Delta object
+            data_obj["delta"] = Delta(**delta_dict)
+
+        return MessageEvent(
+            event=event_type,
+            data=MessageData(**data_obj),
+        )
+    elif event_type == "citation":
+        return CitationEvent(event=event_type, data=CitationData(**data_obj))
+    elif event_type == "tool_call":
+        return ToolCallEvent(event=event_type, data=ToolCallData(**data_obj))
+    elif event_type == "tool_result":
+        return ToolResultEvent(
+            event=event_type, data=ToolResultData(**data_obj)
+        )
+    elif event_type == "thinking":
+        # Parse nested delta structure manually before creating ThinkingData
+        if "delta" in data_obj and isinstance(data_obj["delta"], dict):
+            delta_dict = data_obj["delta"]
+
+            # Convert content items to MessageDelta objects
+            if "content" in delta_dict and isinstance(
+                delta_dict["content"], list
+            ):
+                parsed_content = []
+                for item in delta_dict["content"]:
+                    if isinstance(item, dict):
+                        # Parse payload to DeltaPayload
+                        if "payload" in item and isinstance(
+                            item["payload"], dict
+                        ):
+                            payload_dict = item["payload"]
+                            item["payload"] = DeltaPayload(**payload_dict)
+                        parsed_content.append(MessageDelta(**item))
+
+                # Replace with parsed content
+                delta_dict["content"] = parsed_content
+
+            # Create properly typed Delta object
+            data_obj["delta"] = Delta(**delta_dict)
+
+        return ThinkingEvent(
+            event=event_type,
+            data=ThinkingData(**data_obj),
+        )
+    elif event_type == "final_answer":
+        return FinalAnswerEvent(
+            event=event_type, data=FinalAnswerData(**data_obj)
+        )
+    else:
+        # Fallback if it doesn't match any known event
+        return UnknownEvent(
+            event=event_type,
+            data=data_obj,
+        )
+
+
+def search_arg_parser(
+    query: str,
+    search_mode: Optional[str | SearchMode] = "custom",
+    search_settings: Optional[dict | SearchSettings] = None,
+) -> dict:
+    if search_mode and not isinstance(search_mode, str):
+        search_mode = search_mode.value
+
+    if search_settings and not isinstance(search_settings, dict):
+        search_settings = search_settings.model_dump()
+
+    data: dict[str, Any] = {
+        "query": query,
+        "search_settings": search_settings,
+    }
+    if search_mode:
+        data["search_mode"] = search_mode
+
+    return data
+
+
+def completion_arg_parser(
+    messages: list[dict | Message],
+    generation_config: Optional[dict | GenerationConfig] = None,
+) -> dict:
+    # FIXME: Needs a proper return type
+    cast_messages: list[Message] = [
+        Message(**msg) if isinstance(msg, dict) else msg for msg in messages
+    ]
+
+    if generation_config and not isinstance(generation_config, dict):
+        generation_config = generation_config.model_dump()
+
+    data: dict[str, Any] = {
+        "messages": [msg.model_dump() for msg in cast_messages],
+        "generation_config": generation_config,
+    }
+    return data
+
+
+def embedding_arg_parser(
+    text: str,
+) -> dict:
+    data: dict[str, Any] = {
+        "text": text,
+    }
+    return data
+
+
+def rag_arg_parser(
+    query: str,
+    rag_generation_config: Optional[dict | GenerationConfig] = None,
+    search_mode: Optional[str | SearchMode] = "custom",
+    search_settings: Optional[dict | SearchSettings] = None,
+    task_prompt: Optional[str] = None,
+    include_title_if_available: Optional[bool] = False,
+    include_web_search: Optional[bool] = False,
+) -> dict:
+    if rag_generation_config and not isinstance(rag_generation_config, dict):
+        rag_generation_config = rag_generation_config.model_dump()
+    if search_settings and not isinstance(search_settings, dict):
+        search_settings = search_settings.model_dump()
+
+    data: dict[str, Any] = {
+        "query": query,
+        "rag_generation_config": rag_generation_config,
+        "search_settings": search_settings,
+        "task_prompt": task_prompt,
+        "include_title_if_available": include_title_if_available,
+        "include_web_search": include_web_search,
+    }
+    if search_mode:
+        data["search_mode"] = search_mode
+    return data
+
+
+def agent_arg_parser(
+    message: Optional[dict | Message] = None,
+    rag_generation_config: Optional[dict | GenerationConfig] = None,
+    research_generation_config: Optional[dict | GenerationConfig] = None,
+    search_mode: Optional[str | SearchMode] = "custom",
+    search_settings: Optional[dict | SearchSettings] = None,
+    task_prompt: Optional[str] = None,
+    include_title_if_available: Optional[bool] = True,
+    conversation_id: Optional[str | uuid.UUID] = None,
+    max_tool_context_length: Optional[int] = None,
+    use_system_context: Optional[bool] = True,
+    rag_tools: Optional[list[str]] = None,
+    research_tools: Optional[list[str]] = None,
+    tools: Optional[list[str]] = None,  # For backward compatibility
+    mode: Optional[str] = "rag",
+    needs_initial_conversation_name: Optional[bool] = None,
+) -> dict:
+    if rag_generation_config and not isinstance(rag_generation_config, dict):
+        rag_generation_config = rag_generation_config.model_dump()
+    if research_generation_config and not isinstance(
+        research_generation_config, dict
+    ):
+        research_generation_config = research_generation_config.model_dump()
+    if search_settings and not isinstance(search_settings, dict):
+        search_settings = search_settings.model_dump()
+
+    data: dict[str, Any] = {
+        "rag_generation_config": rag_generation_config or {},
+        "search_settings": search_settings,
+        "task_prompt": task_prompt,
+        "include_title_if_available": include_title_if_available,
+        "conversation_id": (str(conversation_id) if conversation_id else None),
+        "max_tool_context_length": max_tool_context_length,
+        "use_system_context": use_system_context,
+        "mode": mode,
+    }
+
+    # Handle generation configs based on mode
+    if research_generation_config and mode == "research":
+        data["research_generation_config"] = research_generation_config
+
+    # Handle tool configurations
+    if rag_tools:
+        data["rag_tools"] = rag_tools
+    if research_tools:
+        data["research_tools"] = research_tools
+    if tools:  # Backward compatibility
+        data["tools"] = tools
+
+    if search_mode:
+        data["search_mode"] = search_mode
+
+    if needs_initial_conversation_name:
+        data["needs_initial_conversation_name"] = (
+            needs_initial_conversation_name
+        )
+
+    if message:
+        cast_message: Message = (
+            Message(**message) if isinstance(message, dict) else message
+        )
+        data["message"] = cast_message.model_dump()
+    return data
+
+
+class RetrievalSDK:
+    """SDK for interacting with documents in the v3 API."""
+
+    def __init__(self, client):
+        self.client = client
+
+    def search(
+        self,
+        query: str,
+        search_mode: Optional[str | SearchMode] = "custom",
+        search_settings: Optional[dict | SearchSettings] = None,
+    ) -> WrappedSearchResponse:
+        """Conduct a vector and/or graph search.
+
+        Args:
+            query (str): The query to search for.
+            search_mode (Optional[str | SearchMode]): Pre-configured search modes: "basic", "advanced", or "custom".
+            search_settings (Optional[dict | SearchSettings]): Vector search settings.
+
+        Returns:
+            WrappedSearchResponse
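+
+        Example (illustrative; assumes a configured ``R2RClient`` bound to
+        ``client``, and that ``limit`` is a valid ``SearchSettings`` field):
+            >>> results = client.retrieval.search(
+            ...     query="What is DNA?",
+            ...     search_settings={"limit": 10},
+            ... )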
+        """
+
+        response_dict = self.client._make_request(
+            "POST",
+            "retrieval/search",
+            json=search_arg_parser(
+                query=query,
+                search_mode=search_mode,
+                search_settings=search_settings,
+            ),
+            version="v3",
+        )
+
+        return WrappedSearchResponse(**response_dict)
+
+    def completion(
+        self,
+        messages: list[dict | Message],
+        generation_config: Optional[dict | GenerationConfig] = None,
+    ) -> WrappedLLMChatCompletion:
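+        """Generate a chat completion from a list of messages.
+
+        Args:
+            messages (list[dict | Message]): The messages to complete.
+            generation_config (Optional[dict | GenerationConfig]): Generation settings.
+
+        Returns:
+            WrappedLLMChatCompletion
+
+        Example (illustrative; assumes a configured ``R2RClient`` bound to ``client``):
+            >>> completion = client.retrieval.completion(
+            ...     messages=[{"role": "user", "content": "Hello!"}],
+            ... )
+        """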
+        response_dict = self.client._make_request(
+            "POST",
+            "retrieval/completion",
+            json=completion_arg_parser(messages, generation_config),
+            version="v3",
+        )
+
+        return WrappedLLMChatCompletion(**response_dict)
+
+    def embedding(
+        self,
+        text: str,
+    ) -> WrappedEmbeddingResponse:
+        """Generate an embedding for the given text.
+
+        Args:
+            text (str): The text to embed.
+
+        Returns:
+            WrappedEmbeddingResponse
+        """
+        response_dict = self.client._make_request(
+            "POST",
+            "retrieval/embedding",
+            data=embedding_arg_parser(text),
+            version="v3",
+        )
+
+        return WrappedEmbeddingResponse(**response_dict)
+
+    def rag(
+        self,
+        query: str,
+        rag_generation_config: Optional[dict | GenerationConfig] = None,
+        search_mode: Optional[str | SearchMode] = "custom",
+        search_settings: Optional[dict | SearchSettings] = None,
+        task_prompt: Optional[str] = None,
+        include_title_if_available: Optional[bool] = False,
+        include_web_search: Optional[bool] = False,
+    ) -> (
+        WrappedRAGResponse
+        | Generator[
+            ThinkingEvent
+            | SearchResultsEvent
+            | MessageEvent
+            | CitationEvent
+            | FinalAnswerEvent
+            | ToolCallEvent
+            | ToolResultEvent
+            | UnknownEvent
+            | None,
+            None,
+            None,
+        ]
+    ):
+        """Conducts a Retrieval Augmented Generation (RAG) search with the
+        given query.
+
+        Args:
+            query (str): The query to search for.
+            rag_generation_config (Optional[dict | GenerationConfig]): RAG generation configuration.
+            search_mode (Optional[str | SearchMode]): Pre-configured search modes: "basic", "advanced", or "custom".
+            search_settings (Optional[dict | SearchSettings]): Vector search settings.
+            task_prompt (Optional[str]): Task prompt override.
+            include_title_if_available (Optional[bool]): Include the title if available.
+            include_web_search (Optional[bool]): Include web search results in the response.
+
+        Returns:
+            WrappedRAGResponse | Generator: The RAG response, or a generator
+                of parsed events when streaming is enabled.
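+
+        Example (illustrative; assumes a configured ``R2RClient`` bound to ``client``):
+            >>> response = client.retrieval.rag(query="What is DNA?")
+            >>> # With streaming enabled, parsed events are yielded instead:
+            >>> for event in client.retrieval.rag(
+            ...     query="What is DNA?",
+            ...     rag_generation_config={"stream": True},
+            ... ):
+            ...     print(event)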
+        """
+        data = rag_arg_parser(
+            query=query,
+            rag_generation_config=rag_generation_config,
+            search_mode=search_mode,
+            search_settings=search_settings,
+            task_prompt=task_prompt,
+            include_title_if_available=include_title_if_available,
+            include_web_search=include_web_search,
+        )
+        rag_generation_config = data.get("rag_generation_config")
+        if rag_generation_config and rag_generation_config.get(  # type: ignore
+            "stream", False
+        ):
+            raw_stream = self.client._make_streaming_request(
+                "POST",
+                "retrieval/rag",
+                json=data,
+                version="v3",
+            )
+            # Wrap the raw stream to parse each event
+            return (parse_retrieval_event(event) for event in raw_stream)
+
+        response_dict = self.client._make_request(
+            "POST",
+            "retrieval/rag",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedRAGResponse(**response_dict)
+
+    def agent(
+        self,
+        message: Optional[dict | Message] = None,
+        rag_generation_config: Optional[dict | GenerationConfig] = None,
+        research_generation_config: Optional[dict | GenerationConfig] = None,
+        search_mode: Optional[str | SearchMode] = "custom",
+        search_settings: Optional[dict | SearchSettings] = None,
+        task_prompt: Optional[str] = None,
+        include_title_if_available: Optional[bool] = True,
+        conversation_id: Optional[str | uuid.UUID] = None,
+        max_tool_context_length: Optional[int] = None,
+        use_system_context: Optional[bool] = True,
+        # Tool configurations
+        rag_tools: Optional[list[str]] = None,
+        research_tools: Optional[list[str]] = None,
+        tools: Optional[list[str]] = None,  # For backward compatibility
+        mode: Optional[str] = "rag",
+        needs_initial_conversation_name: Optional[bool] = None,
+    ) -> (
+        WrappedAgentResponse
+        | Generator[
+            ThinkingEvent
+            | SearchResultsEvent
+            | MessageEvent
+            | CitationEvent
+            | FinalAnswerEvent
+            | ToolCallEvent
+            | ToolResultEvent
+            | UnknownEvent
+            | None,
+            None,
+            None,
+        ]
+    ):
+        """Performs a single turn in a conversation with a RAG agent.
+
+        Args:
+            message (Optional[dict | Message]): The message to send to the agent.
+            rag_generation_config (Optional[dict | GenerationConfig]): Configuration for RAG generation in 'rag' mode.
+            research_generation_config (Optional[dict | GenerationConfig]): Configuration for generation in 'research' mode.
+            search_mode (Optional[str | SearchMode]): Pre-configured search modes: "basic", "advanced", or "custom".
+            search_settings (Optional[dict | SearchSettings]): Vector search settings.
+            task_prompt (Optional[str]): Task prompt override.
+            include_title_if_available (Optional[bool]): Include the title if available.
+            conversation_id (Optional[str | uuid.UUID]): ID of the conversation for maintaining context.
+            max_tool_context_length (Optional[int]): Maximum context length for tool replies.
+            use_system_context (Optional[bool]): Whether to use system context in the prompt.
+            rag_tools (Optional[list[str]]): List of tools to enable for RAG mode.
+                Available tools: "search_file_knowledge", "content", "web_search", "web_scrape", "search_file_descriptions".
+            research_tools (Optional[list[str]]): List of tools to enable for Research mode.
+                Available tools: "rag", "reasoning", "critique", "python_executor".
+            tools (Optional[list[str]]): Deprecated. List of tools to execute.
+            mode (Optional[str]): Mode to use for generation: "rag" for standard retrieval or "research" for deep analysis.
+                Defaults to "rag".
+            needs_initial_conversation_name (Optional[bool]): Whether to generate an initial name for a new conversation.
+
+        Returns:
+            WrappedAgentResponse | Generator: The agent response, or a generator
+                of parsed events when streaming is enabled.
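+
+        Example (illustrative; assumes a configured ``R2RClient`` bound to ``client``):
+            >>> response = client.retrieval.agent(
+            ...     message={"role": "user", "content": "Summarize my documents."},
+            ...     mode="rag",
+            ... )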
+        """
+        data = agent_arg_parser(
+            message=message,
+            rag_generation_config=rag_generation_config,
+            research_generation_config=research_generation_config,
+            search_mode=search_mode,
+            search_settings=search_settings,
+            task_prompt=task_prompt,
+            include_title_if_available=include_title_if_available,
+            conversation_id=conversation_id,
+            max_tool_context_length=max_tool_context_length,
+            use_system_context=use_system_context,
+            rag_tools=rag_tools,
+            research_tools=research_tools,
+            tools=tools,
+            mode=mode,
+            needs_initial_conversation_name=needs_initial_conversation_name,
+        )
+
+        # Determine if streaming is enabled
+        is_stream = False
+        if mode != "research":
+            if rag_generation_config:
+                if isinstance(rag_generation_config, dict):
+                    is_stream = rag_generation_config.get(  # type: ignore
+                        "stream", False
+                    )
+                else:
+                    is_stream = rag_generation_config.stream
+        else:
+            if research_generation_config:
+                if isinstance(research_generation_config, dict):
+                    is_stream = research_generation_config.get(  # type: ignore
+                        "stream", False
+                    )
+                else:
+                    is_stream = research_generation_config.stream
+
+        if is_stream:
+            raw_stream = self.client._make_streaming_request(
+                "POST",
+                "retrieval/agent",
+                json=data,
+                version="v3",
+            )
+            return (parse_retrieval_event(event) for event in raw_stream)
+
+        response_dict = self.client._make_request(
+            "POST",
+            "retrieval/agent",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedAgentResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_methods/system.py b/.venv/lib/python3.12/site-packages/sdk/sync_methods/system.py
new file mode 100644
index 00000000..47fbe6c5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_methods/system.py
@@ -0,0 +1,43 @@
+from shared.api.models import (
+    WrappedGenericMessageResponse,
+    WrappedServerStatsResponse,
+    WrappedSettingsResponse,
+)
+
+
+class SystemSDK:
+    def __init__(self, client):
+        self.client = client
+
+    def health(self) -> WrappedGenericMessageResponse:
+        """Check the health of the R2R server."""
+        response_dict = self.client._make_request(
+            "GET", "health", version="v3"
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def settings(self) -> WrappedSettingsResponse:
+        """Get the configuration settings for the R2R server.
+
+        Returns:
+            WrappedSettingsResponse: The server settings.
+        """
+        response_dict = self.client._make_request(
+            "GET", "system/settings", version="v3"
+        )
+
+        return WrappedSettingsResponse(**response_dict)
+
+    def status(self) -> WrappedServerStatsResponse:
+        """Get statistics about the server, including the start time, uptime,
+        CPU usage, and memory usage.
+
+        Returns:
+            WrappedServerStatsResponse: The server statistics.
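+
+        Example (illustrative; assumes a configured ``R2RClient`` bound to ``client``):
+            >>> stats = client.system.status()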
+        """
+        response_dict = self.client._make_request(
+            "GET", "system/status", version="v3"
+        )
+
+        return WrappedServerStatsResponse(**response_dict)
diff --git a/.venv/lib/python3.12/site-packages/sdk/sync_methods/users.py b/.venv/lib/python3.12/site-packages/sdk/sync_methods/users.py
new file mode 100644
index 00000000..3021642f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sdk/sync_methods/users.py
@@ -0,0 +1,587 @@
+from typing import Any, Optional
+from uuid import UUID
+
+from shared.api.models import (
+    WrappedAPIKeyResponse,
+    WrappedAPIKeysResponse,
+    WrappedBooleanResponse,
+    WrappedCollectionsResponse,
+    WrappedGenericMessageResponse,
+    WrappedLimitsResponse,
+    WrappedLoginResponse,
+    WrappedTokenResponse,
+    WrappedUserResponse,
+    WrappedUsersResponse,
+)
+
+
+class UsersSDK:
+    def __init__(self, client):
+        self.client = client
+
+    def create(
+        self,
+        email: str,
+        password: str,
+        name: Optional[str] = None,
+        bio: Optional[str] = None,
+        profile_picture: Optional[str] = None,
+    ) -> WrappedUserResponse:
+        """Register a new user.
+
+        Args:
+            email (str): User's email address
+            password (str): User's password
+            name (Optional[str]): The name for the new user
+            bio (Optional[str]): The bio for the new user
+            profile_picture (Optional[str]): New user profile picture
+
+        Returns:
+            WrappedUserResponse: New user information
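+
+        Example (illustrative; assumes a configured ``R2RClient`` bound to ``client``):
+            >>> new_user = client.users.create(
+            ...     email="jane@example.com",
+            ...     password="a-strong-password",
+            ... )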
+        """
+
+        data: dict = {"email": email, "password": password}
+
+        if name is not None:
+            data["name"] = name
+        if bio is not None:
+            data["bio"] = bio
+        if profile_picture is not None:
+            data["profile_picture"] = profile_picture
+
+        response_dict = self.client._make_request(
+            "POST",
+            "users",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedUserResponse(**response_dict)
+
+    def send_verification_email(
+        self, email: str
+    ) -> WrappedGenericMessageResponse:
+        """Request that a verification email to a user."""
+        response_dict = self.client._make_request(
+            "POST",
+            "users/send-verification-email",
+            json=email,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def delete(self, id: str | UUID, password: str) -> WrappedBooleanResponse:
+        """Delete a specific user. Users can only delete their own account
+        unless they are superusers.
+
+        Args:
+            id (str | UUID): User ID to delete
+            password (str): User's password
+
+        Returns:
+            WrappedBooleanResponse: Deletion result
+        """
+        data: dict[str, Any] = {"password": password}
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"users/{str(id)}",
+            json=data,
+            version="v3",
+        )
+        self.client.access_token = None
+        self.client._refresh_token = None
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def verify_email(
+        self, email: str, verification_code: str
+    ) -> WrappedGenericMessageResponse:
+        """Verify a user's email address.
+
+        Args:
+            email (str): User's email address
+            verification_code (str): Verification code sent to the user's email
+
+        Returns:
+            WrappedGenericMessageResponse: Verification result
+        """
+        data: dict[str, Any] = {
+            "email": email,
+            "verification_code": verification_code,
+        }
+        response_dict = self.client._make_request(
+            "POST",
+            "users/verify-email",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def login(self, email: str, password: str) -> WrappedLoginResponse:
+        """Log in a user.
+
+        Args:
+            email (str): User's email address
+            password (str): User's password
+
+        Returns:
+            WrappedLoginResponse
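+
+        Example (illustrative; assumes a configured ``R2RClient`` bound to ``client``):
+            >>> client.users.login(
+            ...     email="jane@example.com",
+            ...     password="a-strong-password",
+            ... )
+            >>> # The access and refresh tokens are stored on the client for
+            >>> # subsequent authenticated requests.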
+        """
+        if self.client.api_key:
+            raise ValueError(
+                "Cannot log in after setting an API key, please unset your R2R_API_KEY variable or call client.set_api_key(None)"
+            )
+        data: dict[str, Any] = {"username": email, "password": password}
+        response_dict = self.client._make_request(
+            "POST",
+            "users/login",
+            data=data,
+            version="v3",
+        )
+
+        login_response = WrappedLoginResponse(**response_dict)
+        self.client.access_token = login_response.results.access_token.token
+        self.client._refresh_token = login_response.results.refresh_token.token
+
+        user = self.client._make_request(
+            "GET",
+            "users/me",
+            version="v3",
+        )
+
+        user_response = WrappedUserResponse(**user)
+        self.client._user_id = user_response.results.id
+
+        return login_response
+
+    def logout(self) -> WrappedGenericMessageResponse | None:
+        """Log out the current user."""
+        if self.client.access_token:
+            response_dict = self.client._make_request(
+                "POST",
+                "users/logout",
+                version="v3",
+            )
+            self.client.access_token = None
+            self.client._refresh_token = None
+
+            return WrappedGenericMessageResponse(**response_dict)
+
+        self.client.access_token = None
+        self.client._refresh_token = None
+        return None
+
+    def refresh_token(self) -> WrappedTokenResponse:
+        """Refresh the access token using the refresh token.
+
+        Raises:
+            ValueError: If no refresh token is set on the client.
+        """
+        if not self.client._refresh_token:
+            raise ValueError("No refresh token is set; log in first.")
+
+        response_dict = self.client._make_request(
+            "POST",
+            "users/refresh-token",
+            json=self.client._refresh_token,
+            version="v3",
+        )
+
+        self.client.access_token = response_dict["results"]["access_token"][
+            "token"
+        ]
+        self.client._refresh_token = response_dict["results"]["refresh_token"][
+            "token"
+        ]
+
+        return WrappedTokenResponse(**response_dict)
+
+    def change_password(
+        self, current_password: str, new_password: str
+    ) -> WrappedGenericMessageResponse:
+        """Change the user's password.
+
+        Args:
+            current_password (str): User's current password
+            new_password (str): User's new password
+
+        Returns:
+            WrappedGenericMessageResponse: Change password result
+        """
+        data: dict[str, Any] = {
+            "current_password": current_password,
+            "new_password": new_password,
+        }
+        response_dict = self.client._make_request(
+            "POST",
+            "users/change-password",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def request_password_reset(
+        self, email: str
+    ) -> WrappedGenericMessageResponse:
+        """Request a password reset.
+
+        Args:
+            email (str): User's email address
+
+        Returns:
+            WrappedGenericMessageResponse: Password reset request result
+        """
+        response_dict = self.client._make_request(
+            "POST",
+            "users/request-password-reset",
+            json=email,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def reset_password(
+        self, reset_token: str, new_password: str
+    ) -> WrappedGenericMessageResponse:
+        """Reset password using a reset token.
+
+        Args:
+            reset_token (str): Password reset token
+            new_password (str): New password
+
+        Returns:
+            WrappedGenericMessageResponse: Password reset result
+        """
+        data: dict[str, Any] = {
+            "reset_token": reset_token,
+            "new_password": new_password,
+        }
+        response_dict = self.client._make_request(
+            "POST",
+            "users/reset-password",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def list(
+        self,
+        ids: Optional[list[str | UUID]] = None,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedUsersResponse:
+        """List users with pagination and filtering options.
+
+        Args:
+            ids (Optional[list[str | UUID]]): Optional list of user IDs to filter by.
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedUsersResponse: List of users and pagination information
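+
+        Example (illustrative; assumes a client authenticated as a superuser):
+            >>> page = client.users.list(offset=0, limit=50)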
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+        }
+        if ids:
+            params["ids"] = [str(user_id) for user_id in ids]  # type: ignore
+
+        response_dict = self.client._make_request(
+            "GET",
+            "users",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedUsersResponse(**response_dict)
+
+    def retrieve(
+        self,
+        id: str | UUID,
+    ) -> WrappedUserResponse:
+        """Get a specific user.
+
+        Args:
+            id (str | UUID): User ID to retrieve
+
+        Returns:
+            WrappedUserResponse: Detailed user information
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            f"users/{str(id)}",
+            version="v3",
+        )
+
+        return WrappedUserResponse(**response_dict)
+
+    def me(
+        self,
+    ) -> WrappedUserResponse:
+        """Get detailed information about the currently authenticated user.
+
+        Returns:
+            WrappedUserResponse: Detailed user information
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            "users/me",
+            version="v3",
+        )
+
+        return WrappedUserResponse(**response_dict)
+
+    def update(
+        self,
+        id: str | UUID,
+        email: Optional[str] = None,
+        is_superuser: Optional[bool] = None,
+        name: Optional[str] = None,
+        bio: Optional[str] = None,
+        profile_picture: Optional[str] = None,
+        limits_overrides: dict | None = None,
+        metadata: dict[str, str | None] | None = None,
+    ) -> WrappedUserResponse:
+        """Update user information.
+
+        Args:
+            id (str | UUID): User ID to update
+            email (Optional[str]): New email address
+            is_superuser (Optional[bool]): Update superuser status
+            name (Optional[str]): New name
+            bio (Optional[str]): New bio
+            profile_picture (Optional[str]): New profile picture
+            limits_overrides (Optional[dict]): Per-user limit overrides
+            metadata (Optional[dict[str, str | None]]): Metadata to set or clear
+
+        Returns:
+            WrappedUserResponse: Updated user information
+        """
+        data: dict = {}
+        if email is not None:
+            data["email"] = email
+        if is_superuser is not None:
+            data["is_superuser"] = is_superuser
+        if name is not None:
+            data["name"] = name
+        if bio is not None:
+            data["bio"] = bio
+        if profile_picture is not None:
+            data["profile_picture"] = profile_picture
+        if limits_overrides is not None:
+            data["limits_overrides"] = limits_overrides
+        if metadata is not None:
+            data["metadata"] = metadata
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"users/{str(id)}",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedUserResponse(**response_dict)
+
+    def list_collections(
+        self,
+        id: str | UUID,
+        offset: Optional[int] = 0,
+        limit: Optional[int] = 100,
+    ) -> WrappedCollectionsResponse:
+        """Get all collections associated with a specific user.
+
+        Args:
+            id (str | UUID): User ID to get collections for
+            offset (int, optional): Specifies the number of objects to skip. Defaults to 0.
+            limit (int, optional): Specifies a limit on the number of objects to return, ranging between 1 and 100. Defaults to 100.
+
+        Returns:
+            WrappedCollectionsResponse: List of collections and pagination information
+        """
+        params = {
+            "offset": offset,
+            "limit": limit,
+        }
+
+        response_dict = self.client._make_request(
+            "GET",
+            f"users/{str(id)}/collections",
+            params=params,
+            version="v3",
+        )
+
+        return WrappedCollectionsResponse(**response_dict)
+
+    def add_to_collection(
+        self,
+        id: str | UUID,
+        collection_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Add a user to a collection.
+
+        Args:
+            id (str | UUID): User ID to add
+            collection_id (str | UUID): Collection ID to add user to
+
+        Returns:
+            WrappedBooleanResponse: True if the user was added successfully
+        """
+        response_dict = self.client._make_request(
+            "POST",
+            f"users/{str(id)}/collections/{str(collection_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def remove_from_collection(
+        self,
+        id: str | UUID,
+        collection_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Remove a user from a collection.
+
+        Args:
+            id (str | UUID): User ID to remove
+            collection_id (str | UUID): Collection ID to remove user from
+
+        Returns:
+            WrappedBooleanResponse: True if the removal was successful
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"users/{str(id)}/collections/{str(collection_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def create_api_key(
+        self,
+        id: str | UUID,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+    ) -> WrappedAPIKeyResponse:
+        """Create a new API key for the specified user.
+
+        Args:
+            id (str | UUID): User ID to create API key for
+            name (Optional[str]): Name of the API key
+            description (Optional[str]): Description of the API key
+
+        Returns:
+            dict: { "message": "API key created successfully", "api_key": "key_id.raw_api_key" }
+        """
+        data: dict[str, Any] = {}
+        if name:
+            data["name"] = name
+        if description:
+            data["description"] = description
+
+        response_dict = self.client._make_request(
+            "POST",
+            f"users/{str(id)}/api-keys",
+            json=data,
+            version="v3",
+        )
+
+        return WrappedAPIKeyResponse(**response_dict)
+
+    def list_api_keys(
+        self,
+        id: str | UUID,
+    ) -> WrappedAPIKeysResponse:
+        """List all API keys for the specified user.
+
+        Args:
+            id (str | UUID): User ID to list API keys for
+
+        Returns:
+            WrappedAPIKeysResponse
+        """
+        resp_dict = self.client._make_request(
+            "GET",
+            f"users/{str(id)}/api-keys",
+            version="v3",
+        )
+
+        return WrappedAPIKeysResponse(**resp_dict)
+
+    def delete_api_key(
+        self,
+        id: str | UUID,
+        key_id: str | UUID,
+    ) -> WrappedBooleanResponse:
+        """Delete a specific API key for the specified user.
+
+        Args:
+            id (str | UUID): User ID
+            key_id (str | UUID): API key ID to delete
+
+        Returns:
+            dict: { "message": "API key deleted successfully" }
+        """
+        response_dict = self.client._make_request(
+            "DELETE",
+            f"users/{str(id)}/api-keys/{str(key_id)}",
+            version="v3",
+        )
+
+        return WrappedBooleanResponse(**response_dict)
+
+    def get_limits(self) -> WrappedLimitsResponse:
+        """Get the limits for the currently authenticated user.
+
+        Returns:
+            WrappedLimitsResponse
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            f"users/{str(self.client._user_id)}/limits",
+            version="v3",
+        )
+
+        return WrappedLimitsResponse(**response_dict)
+
+    def oauth_google_authorize(self) -> WrappedGenericMessageResponse:
+        """Get Google OAuth 2.0 authorization URL from the server.
+
+        Returns:
+            WrappedGenericMessageResponse
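+
+        Example (illustrative sketch of the flow; the exact payload shape is
+        an assumption):
+            >>> auth = client.users.oauth_google_authorize()
+            >>> # Direct the user to the returned URL; Google then redirects
+            >>> # back with ``code`` and ``state`` for ``oauth_google_callback``.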
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            "users/oauth/google/authorize",
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def oauth_github_authorize(self) -> WrappedGenericMessageResponse:
+        """Get GitHub OAuth 2.0 authorization URL from the server.
+
+        Returns: {"redirect_url": "..."}
+        """
+        response_dict = self.client._make_request(
+            "GET",
+            "users/oauth/github/authorize",
+            version="v3",
+        )
+
+        return WrappedGenericMessageResponse(**response_dict)
+
+    def oauth_google_callback(
+        self, code: str, state: str
+    ) -> WrappedLoginResponse:
+        """Exchange `code` and `state` with the Google OAuth 2.0 callback
+        route."""
+        response_dict = self.client._make_request(
+            "GET",
+            "users/oauth/google/callback",
+            params={"code": code, "state": state},
+            version="v3",
+        )
+
+        return WrappedLoginResponse(**response_dict)
+
+    def oauth_github_callback(
+        self, code: str, state: str
+    ) -> WrappedLoginResponse:
+        """Exchange `code` and `state` with the GitHub OAuth 2.0 callback
+        route."""
+        response_dict = self.client._make_request(
+            "GET",
+            "users/oauth/github/callback",
+            params={"code": code, "state": state},
+            version="v3",
+        )
+
+        return WrappedLoginResponse(**response_dict)