path: root/R2R/tests/test_llms.py
author    S. Solomon Darnell    2025-03-28 21:52:21 -0500
committer S. Solomon Darnell    2025-03-28 21:52:21 -0500
commit    4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree      ee3dc5af3b6313e921cd920906356f5d4febc4ed    /R2R/tests/test_llms.py
parent    cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download  gn-ai-master.tar.gz
two versions of R2R are here (HEAD, master)
Diffstat (limited to 'R2R/tests/test_llms.py')
-rwxr-xr-x  R2R/tests/test_llms.py  59
1 file changed, 59 insertions, 0 deletions
diff --git a/R2R/tests/test_llms.py b/R2R/tests/test_llms.py
new file mode 100755
index 00000000..666bbff8
--- /dev/null
+++ b/R2R/tests/test_llms.py
@@ -0,0 +1,59 @@
+import pytest
+
+from r2r import LLMConfig
+from r2r.base.abstractions.llm import GenerationConfig
+from r2r.providers.llms import LiteLLM
+
+
+@pytest.fixture
+def lite_llm():
+    config = LLMConfig(provider="litellm")
+    return LiteLLM(config)
+
+
+@pytest.mark.parametrize("llm_fixture", ["lite_llm"])
+def test_get_completion_ollama(request, llm_fixture):
+    llm = request.getfixturevalue(llm_fixture)
+
+    messages = [
+        {
+            "role": "user",
+            "content": "This is a test, return only the word `True`",
+        }
+    ]
+    generation_config = GenerationConfig(
+        model="ollama/llama2",
+        temperature=0.0,
+        top_p=0.9,
+        max_tokens_to_sample=50,
+        stream=False,
+    )
+
+    completion = llm.get_completion(messages, generation_config)
+    # assert isinstance(completion, LLMChatCompletion)
+    assert completion.choices[0].message.role == "assistant"
+    assert completion.choices[0].message.content.strip() == "True"
+
+
+@pytest.mark.parametrize("llm_fixture", ["lite_llm"])
+def test_get_completion_openai(request, llm_fixture):
+    llm = request.getfixturevalue(llm_fixture)
+
+    messages = [
+        {
+            "role": "user",
+            "content": "This is a test, return only the word `True`",
+        }
+    ]
+    generation_config = GenerationConfig(
+        model="gpt-3.5-turbo",
+        temperature=0.0,
+        top_p=0.9,
+        max_tokens_to_sample=50,
+        stream=False,
+    )
+
+    completion = llm.get_completion(messages, generation_config)
+    # assert isinstance(completion, LLMChatCompletion)
+    assert completion.choices[0].message.role == "assistant"
+    assert completion.choices[0].message.content.strip() == "True"
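
Note: both tests call live backends (a local Ollama server for ollama/llama2 and the OpenAI API for gpt-3.5-turbo), so they fail when those services are unreachable. A minimal sketch of guarding them with pytest skip markers follows; the environment variable names (OPENAI_API_KEY, OLLAMA_HOST) are assumptions for illustration, not part of R2R itself.

import os

import pytest

# Assumed environment variables; adjust to however the deployment exposes
# the OpenAI credentials and the Ollama endpoint.
requires_openai = pytest.mark.skipif(
    not os.environ.get("OPENAI_API_KEY"),
    reason="OPENAI_API_KEY is not set",
)
requires_ollama = pytest.mark.skipif(
    not os.environ.get("OLLAMA_HOST"),
    reason="OLLAMA_HOST is not set",
)


@requires_ollama
@pytest.mark.parametrize("llm_fixture", ["lite_llm"])
def test_get_completion_ollama(request, llm_fixture):
    ...  # body as in the diff above


@requires_openai
@pytest.mark.parametrize("llm_fixture", ["lite_llm"])
def test_get_completion_openai(request, llm_fixture):
    ...  # body as in the diff above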