aboutsummaryrefslogtreecommitdiff
path: root/R2R/r2r/base/abstractions/llm.py
blob: 3178d8dc3cd940a2bbc1d8ee6bfd930c4a3cf4b8 (about) (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
"""Abstractions for the LLM model."""

from typing import TYPE_CHECKING, ClassVar, Optional

from openai.types.chat import ChatCompletion, ChatCompletionChunk
from pydantic import BaseModel, Field

if TYPE_CHECKING:
    from .search import AggregateSearchResult

# Public aliases so the rest of the codebase can refer to completion types
# without importing from `openai.types.chat` directly.
LLMChatCompletion = ChatCompletion
LLMChatCompletionChunk = ChatCompletionChunk


class RAGCompletion:
    """Bundle an LLM chat completion with the search results that grounded it.

    This is the composite result object for a RAG (retrieval-augmented
    generation) call: the raw completion returned by the model plus the
    aggregate search results that were supplied as context.

    Attributes:
        completion: The chat completion produced by the LLM.
        search_results: The aggregated search results used to build the
            prompt context for ``completion``.
    """

    completion: LLMChatCompletion
    search_results: "AggregateSearchResult"

    def __init__(
        self,
        completion: LLMChatCompletion,
        search_results: "AggregateSearchResult",
    ):
        self.completion = completion
        self.search_results = search_results

    def __repr__(self) -> str:
        # Mirror the constructor arguments for easier debugging/logging.
        return (
            f"{type(self).__name__}(completion={self.completion!r}, "
            f"search_results={self.search_results!r})"
        )


class GenerationConfig(BaseModel):
    """Configuration for an LLM generation request.

    Field defaults are looked up lazily (via ``default_factory``) from the
    class-level ``_defaults`` dict, so calling :meth:`set_default` changes
    the defaults used by every *subsequently constructed* instance without
    affecting instances that already exist.
    """

    # Single source of truth for field defaults. ClassVar + leading
    # underscore keeps pydantic from treating it as a model field.
    _defaults: ClassVar[dict] = {
        "model": "gpt-4o",
        "temperature": 0.1,
        "top_p": 1.0,
        "top_k": 100,
        "max_tokens_to_sample": 1024,
        "stream": False,
        "functions": None,
        "skip_special_tokens": False,
        "stop_token": None,
        "num_beams": 1,
        "do_sample": True,
        "generate_with_chat": False,
        "add_generation_kwargs": None,
        "api_base": None,
    }

    # Each field uses a lambda default_factory (not a plain default) so the
    # value in _defaults is read at instantiation time, honoring any
    # set_default() calls made after class creation.
    model: str = Field(
        default_factory=lambda: GenerationConfig._defaults["model"]
    )
    temperature: float = Field(
        default_factory=lambda: GenerationConfig._defaults["temperature"]
    )
    top_p: float = Field(
        default_factory=lambda: GenerationConfig._defaults["top_p"]
    )
    top_k: int = Field(
        default_factory=lambda: GenerationConfig._defaults["top_k"]
    )
    max_tokens_to_sample: int = Field(
        default_factory=lambda: GenerationConfig._defaults[
            "max_tokens_to_sample"
        ]
    )
    stream: bool = Field(
        default_factory=lambda: GenerationConfig._defaults["stream"]
    )
    functions: Optional[list[dict]] = Field(
        default_factory=lambda: GenerationConfig._defaults["functions"]
    )
    skip_special_tokens: bool = Field(
        default_factory=lambda: GenerationConfig._defaults[
            "skip_special_tokens"
        ]
    )
    stop_token: Optional[str] = Field(
        default_factory=lambda: GenerationConfig._defaults["stop_token"]
    )
    num_beams: int = Field(
        default_factory=lambda: GenerationConfig._defaults["num_beams"]
    )
    do_sample: bool = Field(
        default_factory=lambda: GenerationConfig._defaults["do_sample"]
    )
    generate_with_chat: bool = Field(
        default_factory=lambda: GenerationConfig._defaults[
            "generate_with_chat"
        ]
    )
    add_generation_kwargs: Optional[dict] = Field(
        default_factory=lambda: GenerationConfig._defaults[
            "add_generation_kwargs"
        ]
    )
    api_base: Optional[str] = Field(
        default_factory=lambda: GenerationConfig._defaults["api_base"]
    )

    @classmethod
    def set_default(cls, **kwargs):
        """Override class-wide defaults for future instances.

        Args:
            **kwargs: default names/values to replace in ``_defaults``.

        Raises:
            AttributeError: if a key is not a known default name.
        """
        for key, value in kwargs.items():
            if key in cls._defaults:
                cls._defaults[key] = value
            else:
                raise AttributeError(
                    f"No default attribute '{key}' in GenerationConfig"
                )

    def __init__(self, **data):
        """Construct a config; an explicit ``model=None`` falls back to
        the class default instead of failing ``str`` validation."""
        # Pop "model" so that model=None is dropped entirely (letting the
        # default_factory supply the current default) rather than being
        # passed to pydantic as None.
        model = data.pop("model", None)
        if model is not None:
            super().__init__(model=model, **data)
        else:
            super().__init__(**data)