import os
from openai import OpenAI
import anthropic
from utils.errors import APIError
from typing import List, Dict, Generator, Optional


class PromptManager:
    def __init__(self, prompts: Dict[str, str]):
        self.prompts = prompts
        self.limit = os.getenv("DEMO_WORD_LIMIT")

    def add_limit(self, prompt: str) -> str:
        """
        Add word limit to the prompt if specified in the environment variables.
        """
        if self.limit:
            prompt += f" Keep your responses very short and simple, no more than {self.limit} words."
        return prompt

    def get_system_prompt(self, key: str) -> str:
        """
        Retrieve and limit a system prompt by its key.
        """
        prompt = self.prompts[key]
        return self.add_limit(prompt)

    def get_problem_requirements_prompt(
        self, type: str, difficulty: Optional[str] = None, topic: Optional[str] = None, requirements: Optional[str] = None
    ) -> str:
        """
        Create a problem requirements prompt with optional parameters.
        """
        prompt = f"Create a {type} problem. Difficulty: {difficulty}. Topic: {topic}. Additional requirements: {requirements}."
        return self.add_limit(prompt)
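
# A minimal usage sketch for PromptManager (illustrative only; the prompt key
# and the DEMO_WORD_LIMIT value below are assumptions, not fixed contracts):
#
#     os.environ["DEMO_WORD_LIMIT"] = "50"
#     pm = PromptManager({"coding_interviewer_prompt": "You are an interviewer."})
#     pm.get_system_prompt("coding_interviewer_prompt")
#     # -> "You are an interviewer. Keep your responses very short and simple,
#     #     no more than 50 words."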


class LLMManager:
    def __init__(self, config, prompts: Dict[str, str]):
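        # `config` is expected to expose config.llm.type, config.llm.key,
        # config.llm.url and config.llm.name (inferred from the usage below).
        # A minimal stand-in for local experiments might look like this
        # (hypothetical, not part of the app):
        #
        #     from types import SimpleNamespace
        #     config = SimpleNamespace(llm=SimpleNamespace(
        #         type="OPENAI_API", key="sk-local", url=None, name="gpt-4o-mini"))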
        self.config = config
        self.llm_type = config.llm.type
        if self.llm_type == "ANTHROPIC_API":
            self.client = anthropic.Anthropic(api_key=config.llm.key)
        else:
            # all other API types are expected to support the OpenAI-compatible format
            self.client = OpenAI(base_url=config.llm.url, api_key=config.llm.key)

        self.prompt_manager = PromptManager(prompts)

        self.status = self.test_llm(stream=False)
        self.streaming = self.test_llm(stream=True) if self.status else False

    def get_text(self, messages: List[Dict[str, str]], stream: Optional[bool] = None) -> Generator[str, None, None]:
        """
        Generate text from the LLM, optionally streaming the response.
        """
        if stream is None:
            stream = self.streaming
        try:
            if self.llm_type == "ANTHROPIC_API":
                source = self._get_text_anthropic(messages, stream)
            else:
                # any other API type is routed through the OpenAI-compatible
                # client, mirroring the client selection in __init__
                source = self._get_text_openai(messages, stream)
            # iterate here so that errors raised while generating (not only
            # while creating the generator) are wrapped as APIError
            yield from source
        except Exception as e:
            raise APIError(f"LLM Get Text Error: Unexpected error: {e}")

    def _get_text_openai(self, messages: List[Dict[str, str]], stream: bool) -> Generator[str, None, None]:
        if not stream:
            response = self.client.chat.completions.create(model=self.config.llm.name, messages=messages, temperature=1, max_tokens=2000)
            yield response.choices[0].message.content.strip()
        else:
            response = self.client.chat.completions.create(
                model=self.config.llm.name, messages=messages, temperature=1, stream=True, max_tokens=2000
            )
            for chunk in response:
                if chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content
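        # Note: with stream=True the endpoint yields delta chunks; chunks with
        # empty content (e.g. the initial role-only delta) are skipped above.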

    def _get_text_anthropic(self, messages: List[Dict[str, str]], stream: bool) -> Generator[str, None, None]:
        # The messages are converted to the Anthropic format on every call.
        # Building them in that format from the start would avoid the repeated
        # conversion, but it would duplicate code, so the conversion lives here.
        system_message = None
        consolidated_messages = []

        for message in messages:
            if message["role"] == "system":
                if system_message is None:
                    system_message = message["content"]
                else:
                    system_message += "\n" + message["content"]
            else:
                if consolidated_messages and consolidated_messages[-1]["role"] == message["role"]:
                    consolidated_messages[-1]["content"] += "\n" + message["content"]
                else:
                    consolidated_messages.append(message.copy())
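        # For example (illustrative), the OpenAI-style history
        #     [{"role": "system", "content": "S"},
        #      {"role": "user", "content": "Hi!"},
        #      {"role": "user", "content": "Ping!"}]
        # becomes system_message = "S" plus a single user turn "Hi!\nPing!",
        # since the Anthropic API takes the system prompt as a separate
        # parameter and expects strictly alternating user/assistant turns.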

        if not stream:
            response = self.client.messages.create(
                model=self.config.llm.name, max_tokens=2000, temperature=1, system=system_message, messages=consolidated_messages
            )
            yield response.content[0].text
        else:
            with self.client.messages.stream(
                model=self.config.llm.name, max_tokens=2000, temperature=1, system=system_message, messages=consolidated_messages
            ) as response_stream:
                yield from response_stream.text_stream

    def test_llm(self, stream=False) -> bool:
        """
        Test the LLM connection with or without streaming.
        """
        try:
            list(
                self.get_text(
                    [
                        {"role": "system", "content": "You just help me test the connection."},
                        {"role": "user", "content": "Hi!"},
                        {"role": "user", "content": "Ping!"},
                    ],
                    stream=stream,
                )
            )
            return True
        except Exception:
            return False

    def init_bot(self, problem: str, interview_type: str = "coding") -> List[Dict[str, str]]:
        """
        Initialize the bot with a system prompt and problem description.
        """
        system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_interviewer_prompt")
        return [{"role": "system", "content": f"{system_prompt}\nThe candidate is solving the following problem:\n {problem}"}]

    def get_problem_prepare_messages(self, requirements: str, difficulty: str, topic: str, interview_type: str) -> List[Dict[str, str]]:
        """
        Prepare messages for generating a problem based on given requirements.
        """
        system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_problem_generation_prompt")
        full_prompt = self.prompt_manager.get_problem_requirements_prompt(interview_type, difficulty, topic, requirements)
        return [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": full_prompt},
        ]

    def get_problem(self, requirements: str, difficulty: str, topic: str, interview_type: str) -> Generator[str, None, None]:
        """
        Get a problem from the LLM based on the given requirements, difficulty, and topic.
        """
        messages = self.get_problem_prepare_messages(requirements, difficulty, topic, interview_type)
        problem = ""
        for text in self.get_text(messages):
            problem += text
            yield problem

    def update_chat_history(
        self, code: str, previous_code: str, chat_history: List[Dict[str, str]], chat_display: List[List[Optional[str]]]
    ) -> List[Dict[str, str]]:
        """
        Update chat history with the latest user message and code.
        """
        message = chat_display[-1][0] or ""
        if code != previous_code:
            message += "\nMY NOTES AND CODE:\n" + code
        chat_history.append({"role": "user", "content": message})
        return chat_history

    def end_interview_prepare_messages(
        self, problem_description: str, chat_history: List[Dict[str, str]], interview_type: str
    ) -> List[Dict[str, str]]:
        """
        Prepare messages to end the interview and generate feedback.
        """
        transcript = [f"{message['role'].capitalize()}: {message['content']}" for message in chat_history[1:]]
        system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_grading_feedback_prompt")
        return [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": f"The original problem to solve: {problem_description}"},
            {"role": "user", "content": "\n\n".join(transcript)},
            {"role": "user", "content": "Grade the interview based on the transcript provided and give feedback."},
        ]

    def end_interview(
        self, problem_description: str, chat_history: List[Dict[str, str]], interview_type: str = "coding"
    ) -> Generator[str, None, None]:
        """
        End the interview and get feedback from the LLM.
        """
        if len(chat_history) <= 2:
            yield "No interview history available"
            return
        messages = self.end_interview_prepare_messages(problem_description, chat_history, interview_type)
        feedback = ""
        for text in self.get_text(messages):
            feedback += text
            yield feedback
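
# A minimal end-to-end sketch (illustrative; `load_config` and `load_prompts`
# are hypothetical helpers standing in for however the app builds these):
#
#     config = load_config()
#     prompts = load_prompts()
#     llm = LLMManager(config, prompts)
#     if llm.status:
#         for partial in llm.get_problem("", "easy", "arrays", "coding"):
#             print(partial)  # each yield is the full problem text so far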