File size: 3,331 Bytes
c8e8be4
 
a10bc68
c8e8be4
 
855dfb9
a10bc68
c8e8be4
a10bc68
891f3b9
c8e8be4
 
 
 
 
a10bc68
c8e8be4
 
 
 
 
a10bc68
c8e8be4
a10bc68
c8e8be4
a10bc68
c8e8be4
 
 
 
68e9ce7
c8e8be4
 
 
 
f6e34f2
 
a10bc68
 
 
 
 
 
 
 
 
 
 
 
 
68e9ce7
a10bc68
 
 
 
 
 
f6e34f2
 
 
 
a10bc68
f6e34f2
 
 
 
 
 
 
 
a10bc68
f6e34f2
 
 
 
 
87ae702
 
855dfb9
 
 
 
87ae702
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
import json

from dotenv import load_dotenv
from openai import OpenAI

from audio import numpy_audio_to_bytes
from prompts import coding_interviewer_prompt, grading_feedback_prompt

# Load OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()
# TODO: don't use my key
# Module-wide OpenAI client; picks up the API key from the environment.
# NOTE(review): shared as the default `client=` argument across all helpers below.
client = OpenAI()


def init_bot(problem=""):
    """Create the initial chat history for an interview session.

    Args:
        problem: Statement of the coding problem the candidate will solve.

    Returns:
        A list of two system messages: the interviewer persona prompt and
        the problem statement for this session.
    """
    return [
        {"role": "system", "content": coding_interviewer_prompt},
        {"role": "system", "content": f"The candidate is solving the following problem: {problem}"},
    ]


def get_problem(requirements, difficulty, topic, model, client=client):
    """Generate a coding-interview problem and seed a fresh chat history.

    Args:
        requirements: Free-form extra constraints for the problem.
        difficulty: Difficulty label (e.g. "easy", "medium", "hard").
        topic: Problem topic (e.g. "arrays", "graphs").
        model: Chat-completion model name to use.
        client: OpenAI client; defaults to the module-level instance.

    Returns:
        Tuple of (problem text, initial chat history for the interviewer bot).
    """
    prompt_system = "You are ChatGPT acting as a coding round interviewer for a big-tech company. "
    # Fixed dangling "problem in." — leftover of a removed language placeholder.
    full_prompt = (
        f"Generate a {difficulty} {topic} problem. "
        f"Follow additional requirements: {requirements}. "
        "The problem should be solvable within 30 minutes."
    )
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": prompt_system},
            {"role": "user", "content": full_prompt},
        ],
        temperature=1.5,  # deliberately high for variety between generated problems
    )
    question = response.choices[0].message.content.strip()
    chat_history = init_bot(question)
    return question, chat_history


def end_interview(chat_history, model, client=client):
    """Grade a finished interview and return the model's written feedback.

    The first chat message (interviewer system prompt) is excluded from the
    transcript; the rest are flattened into "Role: content" lines.

    Args:
        chat_history: Full message list accumulated during the interview.
        model: Chat-completion model name to use for grading.
        client: OpenAI client; defaults to the module-level instance.

    Returns:
        The grader's feedback text, stripped of surrounding whitespace.
    """
    transcript = [
        f"{msg['role'].capitalize()}: {msg['content']}" for msg in chat_history[1:]
    ]
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": grading_feedback_prompt},
            {"role": "user", "content": "Interview transcript:" + "\n\n".join(transcript)},
            {"role": "user", "content": "Grade the interview based on the transcript provided and give a feedback."},
        ],
        temperature=0.5,
    )
    return response.choices[0].message.content.strip()


def send_request(code, previous_code, message, chat_history, chat_display, model, client=client):
    """Send the candidate's message (and any code changes) to the interviewer bot.

    The code editor contents are only appended to the history when they
    changed since the last call, to keep the context compact.

    Args:
        code: Current contents of the candidate's code editor.
        previous_code: Editor contents at the time of the previous request.
        message: The candidate's chat message.
        chat_history: Mutable message list sent to the model (JSON-mode).
        chat_display: Mutable list of [user_message, bot_reply] pairs for the UI.
        model: Chat-completion model name to use.
        client: OpenAI client; defaults to the module-level instance.

    Returns:
        Tuple of (chat_history, chat_display, "" to clear the input box, code
        to remember as the new previous_code).
    """
    if code != previous_code:
        chat_history.append({"role": "user", "content": f"My latest code: {code}"})
    chat_history.append({"role": "user", "content": message})

    response = client.chat.completions.create(model=model, response_format={"type": "json_object"}, messages=chat_history)

    json_reply = response.choices[0].message.content.strip()

    # The model may return malformed JSON or valid JSON missing the expected
    # key — handle both instead of letting a KeyError propagate to the UI.
    try:
        reply = json.loads(json_reply)["reply_to_candidate"]
    except (json.JSONDecodeError, KeyError) as e:
        print("Failed to parse model reply:", str(e))
        reply = "There was an error processing your request."

    # Keep the raw JSON in the history so the model sees its own prior output.
    chat_history.append({"role": "assistant", "content": json_reply})
    chat_display.append([message, str(reply)])

    return chat_history, chat_display, "", code


def transcribe_audio(audio, client=client):
    """Transcribe recorded candidate audio via Whisper.

    Args:
        audio: Tuple whose second element is the numpy sample array
               (Gradio-style (sample_rate, data) — TODO confirm).
        client: OpenAI client; defaults to the module-level instance.

    Returns:
        The transcription as plain text.
    """
    wav_bytes = numpy_audio_to_bytes(audio[1])
    return client.audio.transcriptions.create(
        model="whisper-1",
        file=("temp.wav", wav_bytes, "audio/wav"),
        response_format="text",
    )


def text_to_speech(text, client=client):
    """Synthesize speech for *text* with the tts-1 "alloy" voice.

    Args:
        text: Text to read aloud.
        client: OpenAI client; defaults to the module-level instance.

    Returns:
        Raw audio bytes from the speech endpoint.
    """
    speech = client.audio.speech.create(model="tts-1", voice="alloy", input=text)
    return speech.content


def read_last_message(chat_display):
    """Speak the bot's most recent reply from the chat display.

    Args:
        chat_display: List of [user_message, bot_reply] pairs.

    Returns:
        Audio bytes for the last bot reply.
    """
    _, bot_reply = chat_display[-1]
    return text_to_speech(bot_reply)