import os
from openai import OpenAI
from utils.errors import APIError
class LLMManager:
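    """Manage chat interactions with an OpenAI-compatible LLM for mock coding interviews."""
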
    def __init__(self, config, prompts):
        self.config = config
        self.client = OpenAI(base_url=config.llm.url, api_key=config.llm.key)
        self.prompts = prompts
        # os.getenv returns the raw string (or None), so any non-empty value,
        # including "0" or "false", enables demo mode.
        self.is_demo = os.getenv("IS_DEMO")
        self.demo_word_limit = os.getenv("DEMO_WORD_LIMIT")
    def test_connection(self):
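        """Send a minimal chat request to verify the LLM endpoint is reachable."""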
        try:
            response = self.client.chat.completions.create(
                model=self.config.llm.name,
                messages=[
                    {"role": "system", "content": "You are helping to test the API connection."},
                    {"role": "user", "content": "Ping!"},
                ],
            )
            if not response.choices:
                raise APIError("LLM Test Connection Error", details="No choices in response")
            return response.choices[0].message.content.strip()
        except APIError:
            # Re-raise our own error unchanged rather than wrapping it below.
            raise
        except Exception as e:
            raise APIError(f"LLM Test Connection Error: Unexpected error: {e}")
def init_bot(self, problem=""):
        system_prompt = self.prompts["coding_interviewer_prompt"]
        if self.is_demo:
            system_prompt += f" Keep your responses very short and simple, no more than {self.demo_word_limit} words."
        return [
            {"role": "system", "content": system_prompt},
            {"role": "system", "content": f"The candidate is solving the following problem: {problem}"},
        ]
    def get_problem(self, requirements, difficulty, topic):
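        """Generate an interview problem and return it with a freshly initialized chat history."""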
        full_prompt = (
            f"Create a {difficulty} {topic} coding problem. "
            f"Additional requirements: {requirements}. "
            "The problem should be clearly stated, well-formatted, and solvable within 30 minutes. "
            "Ensure the problem varies each time to provide a wide range of challenges."
        )
        if self.is_demo:
            full_prompt += f" Keep your response very short and simple, no more than {self.demo_word_limit} words."
        try:
            response = self.client.chat.completions.create(
                model=self.config.llm.name,
                messages=[
                    {"role": "system", "content": self.prompts["problem_generation_prompt"]},
                    {"role": "user", "content": full_prompt},
                ],
                temperature=1.0,  # high temperature for more varied problems
            )
            if not response.choices:
                raise APIError("LLM Problem Generation Error", details="No choices in response")
            question = response.choices[0].message.content.strip()
        except APIError:
            raise
        except Exception as e:
            raise APIError(f"LLM Problem Generation Error: Unexpected error: {e}")
        chat_history = self.init_bot(question)
        return question, chat_history
    def send_request(self, code, previous_code, message, chat_history, chat_display):
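        """Forward the candidate's message (and code, if changed) to the LLM and update the chat display."""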
        # Only resend the code if it changed since the last request.
        if code != previous_code:
            chat_history.append({"role": "user", "content": f"My latest code:\n{code}"})
        chat_history.append({"role": "user", "content": message})
        try:
            response = self.client.chat.completions.create(model=self.config.llm.name, messages=chat_history)
            if not response.choices:
                raise APIError("LLM Send Request Error", details="No choices in response")
            reply = response.choices[0].message.content.strip()
        except APIError:
            raise
        except Exception as e:
            raise APIError(f"LLM Send Request Error: Unexpected error: {e}")
        chat_history.append({"role": "assistant", "content": reply})
        if chat_display:
            # Assumes the UI appended [message, None] before calling; fill in the reply.
            chat_display[-1][1] = reply
        else:
            chat_display.append([message, reply])
        return chat_history, chat_display, "", code
    def end_interview(self, problem_description, chat_history):
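        """Grade the completed interview; yields feedback text, incrementally when streaming is enabled."""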
if not chat_history or len(chat_history) <= 2:
yield "No interview content available to review."
transcript = [f"{message['role'].capitalize()}: {message['content']}" for message in chat_history[1:]]
system_prompt = self.prompts["grading_feedback_prompt"]
if self.is_demo:
system_prompt += f" Keep your response very short and simple, no more than {self.demo_word_limit} words."
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": f"The original problem to solve: {problem_description}"},
{"role": "user", "content": "\n\n".join(transcript)},
{"role": "user", "content": "Grade the interview based on the transcript provided and give feedback."},
]
if os.getenv("STREAMING", False):
try:
response = self.client.chat.completions.create(
model=self.config.llm.name,
messages=messages,
temperature=0.5,
stream=True,
)
except Exception as e:
raise APIError(f"LLM End Interview Error: Unexpected error: {e}")
feedback = ""
for chunk in response:
if chunk.choices[0].delta.content:
feedback += chunk.choices[0].delta.content
yield feedback
# else:
# response = self.client.chat.completions.create(
# model=self.config.llm.name,
# messages=messages,
# temperature=0.5,
# )
# feedback = response.choices[0].message.content.strip()
# return feedback
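

if __name__ == "__main__":
    # Minimal usage sketch: it assumes a config object exposing
    # config.llm.url / config.llm.key / config.llm.name and a prompts dict
    # with the keys used above; adjust to your actual setup. The LLM_URL,
    # LLM_KEY, and LLM_NAME environment variables here are illustrative.
    from types import SimpleNamespace

    config = SimpleNamespace(
        llm=SimpleNamespace(
            url=os.getenv("LLM_URL", "https://api.openai.com/v1"),
            key=os.getenv("LLM_KEY"),
            name=os.getenv("LLM_NAME", "gpt-4o-mini"),
        )
    )
    prompts = {
        "coding_interviewer_prompt": "You are a friendly coding interviewer.",
        "problem_generation_prompt": "You generate coding interview problems.",
        "grading_feedback_prompt": "You grade coding interviews.",
    }

    manager = LLMManager(config, prompts)
    print(manager.test_connection())
    question, history = manager.get_problem("use only built-ins", "easy", "arrays")
    print(question)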