import os

from openai import OpenAI

from utils.errors import APIError
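# NOTE: APIError is assumed to be a project-local exception that accepts an
# optional `details` keyword argument, i.e. APIError("message", details="...").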


class LLMManager:
    """Manages chat interactions with an OpenAI-compatible LLM for mock coding interviews."""

    def __init__(self, config, prompts):
        self.config = config
        self.client = OpenAI(base_url=config.llm.url, api_key=config.llm.key)
        self.prompts = prompts

    def test_connection(self):
        """Send a minimal request to verify that the LLM endpoint is reachable."""
        try:
            response = self.client.chat.completions.create(
                model=self.config.llm.name,
                messages=[
                    {"role": "system", "content": "You are only here to help test the connection."},
                    {"role": "user", "content": "Ping!"},
                ],
            )
            if not response.choices:
                raise APIError("LLM Test Connection Error", details="No choices in response")
            return response.choices[0].message.content.strip()
        except APIError:
            # Re-raise our own errors as-is so they are not double-wrapped below.
            raise
        except Exception as e:
            raise APIError(f"LLM Test Connection Error: Unexpected error: {e}")

    def init_bot(self, problem=""):
        """Build the initial chat history for the interviewer bot, seeded with the problem."""
        system_prompt = self.prompts["coding_interviewer_prompt"]
        if os.getenv("IS_DEMO"):
            system_prompt += " Keep your responses very short and simple, no more than 100 words."

        return [
            {"role": "system", "content": system_prompt},
            {"role": "system", "content": f"The candidate is solving the following problem: {problem}"},
        ]

    def get_problem(self, requirements, difficulty, topic):
        """Generate an interview problem and the initial chat history for discussing it."""
        full_prompt = (
            f"Create a {difficulty} {topic} coding problem. "
            f"Additional requirements: {requirements}. "
            "The problem should be clearly stated, well-formatted, and solvable within 30 minutes. "
            "Ensure the problem varies each time to provide a wide range of challenges."
        )

        if os.getenv("IS_DEMO"):
            full_prompt += " Keep your response very short and simple, no more than 200 words."

        try:
            response = self.client.chat.completions.create(
                model=self.config.llm.name,
                messages=[
                    {"role": "system", "content": self.prompts["problem_generation_prompt"]},
                    {"role": "user", "content": full_prompt},
                ],
                temperature=1.0,  # high temperature to keep generated problems varied
            )
            if not response.choices:
                raise APIError("LLM Problem Generation Error", details="No choices in response")
            question = response.choices[0].message.content.strip()
        except APIError:
            raise
        except Exception as e:
            raise APIError(f"LLM Problem Generation Error: Unexpected error: {e}")

        chat_history = self.init_bot(question)
        return question, chat_history

    def send_request(self, code, previous_code, message, chat_history, chat_display):
        """Forward the candidate's message (and code, if it changed) to the LLM and update the UI state."""
        # Only resend the code when it has changed since the last request.
        if code != previous_code:
            chat_history.append({"role": "user", "content": f"My latest code:\n{code}"})
        chat_history.append({"role": "user", "content": message})

        try:
            response = self.client.chat.completions.create(model=self.config.llm.name, messages=chat_history)
            if not response.choices:
                raise APIError("LLM Send Request Error", details="No choices in response")
            reply = response.choices[0].message.content.strip()
        except APIError:
            raise
        except Exception as e:
            raise APIError(f"LLM Send Request Error: Unexpected error: {e}")

        chat_history.append({"role": "assistant", "content": reply})

        # chat_display holds [user_message, assistant_reply] pairs; the caller is
        # expected to have already appended [message, None], so fill in the reply.
        if chat_display:
            chat_display[-1][1] = reply
        else:
            chat_display.append([message, reply])

        return chat_history, chat_display, "", code

    def end_interview(self, problem_description, chat_history):
        """Ask the LLM to grade the interview transcript and return its feedback."""
        # The first two messages are the system prompts from init_bot; anything
        # shorter means no actual conversation took place.
        if not chat_history or len(chat_history) <= 2:
            return "No interview content available to review."

        transcript = [f"{message['role'].capitalize()}: {message['content']}" for message in chat_history[1:]]

        system_prompt = self.prompts["grading_feedback_prompt"]
        if os.getenv("IS_DEMO"):
            system_prompt += " Keep your response very short and simple, no more than 200 words."

        try:
            response = self.client.chat.completions.create(
                model=self.config.llm.name,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": f"The original problem to solve: {problem_description}"},
                    {"role": "user", "content": "\n\n".join(transcript)},
                    {"role": "user", "content": "Grade the interview based on the transcript provided and give feedback."},
                ],
                temperature=0.5,  # lower temperature for more consistent grading
            )
            if not response.choices:
                raise APIError("LLM End Interview Error", details="No choices in response")
            feedback = response.choices[0].message.content.strip()
        except APIError:
            raise
        except Exception as e:
            raise APIError(f"LLM End Interview Error: Unexpected error: {e}")

        return feedback
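

# --- Usage sketch (illustrative only) ---
# A minimal, hypothetical driver showing how the methods above fit together.
# The SimpleNamespace-based config, the environment variable names, the model
# name, and the inline prompt strings are all assumptions for illustration;
# real config and prompt loading live elsewhere in the project.
if __name__ == "__main__":
    from types import SimpleNamespace

    config = SimpleNamespace(
        llm=SimpleNamespace(
            url=os.getenv("LLM_URL", "https://api.openai.com/v1"),  # assumed env var
            key=os.getenv("LLM_KEY", ""),  # assumed env var
            name="gpt-4o-mini",  # assumed model name
        )
    )
    prompts = {
        "coding_interviewer_prompt": "You are a coding interviewer.",
        "problem_generation_prompt": "You generate coding interview problems.",
        "grading_feedback_prompt": "You grade interviews and give constructive feedback.",
    }

    manager = LLMManager(config, prompts)
    print(manager.test_connection())

    # Generate a problem, ask one clarifying question, then end the interview.
    question, history = manager.get_problem("use only the standard library", "easy", "arrays")
    message = "Could you clarify the expected input format?"
    history, display, _, code = manager.send_request(
        code="",
        previous_code="",
        message=message,
        chat_history=history,
        chat_display=[[message, None]],  # the UI appends [message, None] before the call
    )
    print(display[-1][1])
    print(manager.end_interview(question, history))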