IliaLarchenko committed
Commit 1a47458 • Parent(s): f6e34f2
Added feedback agent
app.py
CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 
-from llm import get_problem, send_request
+from llm import end_interview, get_problem, send_request
 
 with gr.Blocks() as demo:
     gr.Markdown("Your coding interview practice AI assistant!")
@@ -39,6 +39,6 @@ with gr.Blocks() as demo:
     answer_btn.click(
         fn=send_request, inputs=[code, previous_code, message, chat_history, chat], outputs=[chat_history, chat, message, previous_code]
     )
-
+    end_btn.click(fn=end_interview, inputs=chat_history, outputs=feedback)
 
 demo.launch()
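The handler added above references end_btn, feedback, and chat_history, which are defined earlier in app.py but are outside this diff. A minimal, hypothetical sketch of how those components might be declared (only the names come from the diff; the component types and labels are assumptions):

from llm import end_interview
import gradio as gr

with gr.Blocks() as demo:
    chat_history = gr.State([])                  # assumed: OpenAI-style message list kept in session state
    chat = gr.Chatbot(label="Chat")              # assumed chat widget
    end_btn = gr.Button("Finish the interview")  # assumed button label
    feedback = gr.Markdown()                     # assumed: feedback is rendered as markdown

    # the wiring added in this commit: grade the interview when the button is clicked
    end_btn.click(fn=end_interview, inputs=chat_history, outputs=feedback)

demo.launch()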
llm.py
CHANGED
@@ -83,3 +83,40 @@ def send_request(code, previous_code, message, chat_history, chat_display, clien
     chat_display.append([message, str(reply)])
 
     return chat_history, chat_display, "", code
+
+
+def end_interview(chat_history, client=client):
+    prompt_system = (
+        "You are ChatGPT acting as a grader of the coding round interview for a big-tech company. "
+        "Below you will see the transcript of an interview with a candidate. "
+        "The candidate will send their current code with every message; you can ignore it if it didn't change. "
+        "Provide very detailed feedback using all the notes and the full interview transcript. "
+        "Take into account all issues and mistakes made during the interview. "
+        "Provide as many details as possible, including: overall feedback, all mistakes, improvement opportunities, "
+        "communication issues, missed edge cases, and any other valuable feedback. "
+        "Use examples and code snippets when necessary. "
+        "If the candidate didn't provide a solution or it was not optimal, provide the correct, most optimal one. "
+        "Return the results in nicely formatted markdown."
+    )
+
+    transcript = []
+    for message in chat_history[1:]:
+        if message["role"] == "assistant":
+            transcript.append(f"Interviewer: {message['content']}")
+        elif message["role"] == "user":
+            transcript.append(f"Candidate: {message['content']}")
+        else:
+            transcript.append(f"{message['role']}: {message['content']}")
+
+    response = client.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=[
+            {"role": "system", "content": prompt_system},
+            {"role": "user", "content": "Interview transcript:" + "\n\n".join(transcript)},
+            {"role": "user", "content": "Grade the interview based on the transcript provided and give feedback."},
+        ],
+    )
+
+    feedback = response.choices[0].message.content.strip()
+
+    return feedback
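As a quick usage check, end_interview can be called directly with a transcript in the same shape send_request maintains. A hedged sketch follows (the sample messages are illustrative; the first entry is skipped because the function iterates over chat_history[1:], presumably because index 0 holds the system/problem prompt):

from llm import end_interview

# Illustrative transcript; in the app it is accumulated by send_request.
chat_history = [
    {"role": "system", "content": "<problem statement prompt>"},  # skipped via chat_history[1:]
    {"role": "user", "content": "I will use a hash map.\n\nCurrent code:\n..."},
    {"role": "assistant", "content": "Sounds reasonable. What is the time complexity?"},
    {"role": "user", "content": "O(n) time and O(n) space."},
]

feedback_md = end_interview(chat_history)  # uses the module-level OpenAI client defined in llm.py
print(feedback_md)  # markdown feedback: mistakes, missed edge cases, optimal solution, etc.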