Diffstat (limited to 'R2R/tests/test_llms.py')
-rwxr-xr-x  R2R/tests/test_llms.py  59
1 file changed, 59 insertions, 0 deletions
diff --git a/R2R/tests/test_llms.py b/R2R/tests/test_llms.py
new file mode 100755
index 00000000..666bbff8
--- /dev/null
+++ b/R2R/tests/test_llms.py
@@ -0,0 +1,59 @@
+import pytest
+
+from r2r import LLMConfig
+from r2r.base.abstractions.llm import GenerationConfig
+from r2r.providers.llms import LiteLLM
+
+
+@pytest.fixture
+def lite_llm():
+    config = LLMConfig(provider="litellm")
+    return LiteLLM(config)
+
+
+@pytest.mark.parametrize("llm_fixture", ["lite_llm"])
+def test_get_completion_ollama(request, llm_fixture):
+    llm = request.getfixturevalue(llm_fixture)
+
+    messages = [
+        {
+            "role": "user",
+            "content": "This is a test, return only the word `True`",
+        }
+    ]
+    generation_config = GenerationConfig(
+        model="ollama/llama2",
+        temperature=0.0,
+        top_p=0.9,
+        max_tokens_to_sample=50,
+        stream=False,
+    )
+
+    completion = llm.get_completion(messages, generation_config)
+    # assert isinstance(completion, LLMChatCompletion)
+    assert completion.choices[0].message.role == "assistant"
+    assert completion.choices[0].message.content.strip() == "True"
+
+
+@pytest.mark.parametrize("llm_fixture", ["lite_llm"])
+def test_get_completion_openai(request, llm_fixture):
+    llm = request.getfixturevalue(llm_fixture)
+
+    messages = [
+        {
+            "role": "user",
+            "content": "This is a test, return only the word `True`",
+        }
+    ]
+    generation_config = GenerationConfig(
+        model="gpt-3.5-turbo",
+        temperature=0.0,
+        top_p=0.9,
+        max_tokens_to_sample=50,
+        stream=False,
+    )
+
+    completion = llm.get_completion(messages, generation_config)
+    # assert isinstance(completion, LLMChatCompletion)
+    assert completion.choices[0].message.role == "assistant"
+    assert completion.choices[0].message.content.strip() == "True"
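
A note for reviewers: both tests hit live endpoints (a local Ollama server for ollama/llama2 and the OpenAI API for gpt-3.5-turbo), so they will fail in CI environments where those backends are unavailable, and the two test bodies differ only in the model name. Below is a minimal sketch of how the same parametrized pattern could collapse the duplication and opt in to live runs via an environment variable. The R2R_LIVE_LLM_TESTS variable and the single merged test are assumptions for illustration, not part of this diff; the LLMConfig, LiteLLM, GenerationConfig, and get_completion usage is taken directly from the change above.

import os

import pytest

from r2r import LLMConfig
from r2r.base.abstractions.llm import GenerationConfig
from r2r.providers.llms import LiteLLM

# Hypothetical opt-in gate (not part of this diff): skip live-endpoint
# tests unless the caller explicitly enables them.
requires_live_llm = pytest.mark.skipif(
    os.environ.get("R2R_LIVE_LLM_TESTS") != "1",
    reason="set R2R_LIVE_LLM_TESTS=1 to run tests against live LLM endpoints",
)


@pytest.fixture
def lite_llm():
    # Same construction as in the diff above.
    return LiteLLM(LLMConfig(provider="litellm"))


@requires_live_llm
@pytest.mark.parametrize(
    "model",
    ["ollama/llama2", "gpt-3.5-turbo"],  # the two models exercised by this diff
)
def test_get_completion_returns_true(lite_llm, model):
    messages = [
        {
            "role": "user",
            "content": "This is a test, return only the word `True`",
        }
    ]
    generation_config = GenerationConfig(
        model=model,
        temperature=0.0,
        top_p=0.9,
        max_tokens_to_sample=50,
        stream=False,
    )
    completion = lite_llm.get_completion(messages, generation_config)
    assert completion.choices[0].message.role == "assistant"
    assert completion.choices[0].message.content.strip() == "True"

Parametrizing on the model name rather than on the fixture name also removes the copy-paste between test_get_completion_ollama and test_get_completion_openai, while keeping one test ID per backend in the pytest report.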