about summary refs log tree commit diff
path: root/.venv/lib/python3.12/site-packages/core/configs/full_azure.toml
diff options
context:
space:
mode:
author  S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit  4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree  ee3dc5af3b6313e921cd920906356f5d4febc4ed  /.venv/lib/python3.12/site-packages/core/configs/full_azure.toml
parent  cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download  gn-ai-master.tar.gz
two versions of R2R are here HEAD master
Diffstat (limited to '.venv/lib/python3.12/site-packages/core/configs/full_azure.toml')
-rw-r--r--  .venv/lib/python3.12/site-packages/core/configs/full_azure.toml  46
1 file changed, 46 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/core/configs/full_azure.toml b/.venv/lib/python3.12/site-packages/core/configs/full_azure.toml
new file mode 100644
index 00000000..c6ebb199
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/core/configs/full_azure.toml
@@ -0,0 +1,46 @@
+[app]
+# LLM used for internal operations, like deriving conversation names
+fast_llm = "azure/gpt-4o-mini"
+
+# LLM used for user-facing output, like RAG replies
+quality_llm = "azure/gpt-4o"
+
+# LLM used for ingesting visual inputs
+vlm = "azure/gpt-4o"
+
+# LLM used for transcription
+audio_lm = "azure/whisper-1"
+
+# Reasoning model, used for `research` agent
+reasoning_llm = "azure/o3-mini"
+# Planning model, used for `research` agent
+planning_llm = "azure/o3-mini"
+
+[embedding]
+base_model = "azure/text-embedding-3-small"
+
+[completion_embedding]
+base_model = "azure/text-embedding-3-small"
+
+[ingestion]
+provider = "unstructured_local"
+strategy = "auto"
+chunking_strategy = "by_title"
+new_after_n_chars = 2_048
+max_characters = 4_096
+combine_under_n_chars = 1_024
+overlap = 1_024
+document_summary_model = "azure/gpt-4o-mini"
+automatic_extraction = true # enable automatic extraction of entities and relations
+
+  [ingestion.extra_parsers]
+    pdf = "zerox"
+
+  [ingestion.chunk_enrichment_settings]
+    generation_config = { model = "azure/gpt-4o-mini" }
+
+[orchestration]
+provider = "hatchet"
+kg_creation_concurrency_limit = 32
+ingestion_concurrency_limit = 4
+kg_concurrency_limit = 8