Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
File size: 5,774 Bytes
df25732 87ae702 1b279d5 a10bc68 c57cd9a 87ae702 1b279d5 c57cd9a 1b279d5 c57cd9a df25732 891f3b9 1b279d5 df25732 a10bc68 1b279d5 df25732 a10bc68 1b279d5 c57cd9a a10bc68 f6e34f2 891f3b9 a10bc68 c57cd9a df25732 1b279d5 a10bc68 df25732 87ae702 1b279d5 87ae702 1b279d5 87ae702 df25732 1b279d5 a10bc68 1b279d5 c57cd9a 1b279d5 a10bc68 f6e34f2 c57cd9a 1b279d5 5f54ec7 1b279d5 c8e8be4 87ae702 1b279d5 f913a0b 87ae702 1b279d5 87ae702 1b279d5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 |
import gradio as gr
from llm import end_interview, get_problem, read_last_message, send_request, transcribe_audio
from options import fixed_messages, models, topics_list
# Shared keyword arguments for every gr.Audio recorder widget in the app:
# a microphone-only, non-editable recorder that yields numpy data and hides
# the waveform controls / share button.
default_audio_params = dict(
    label="Record answer",
    sources=["microphone"],
    type="numpy",
    waveform_options={"show_controls": False},
    editable=False,
    container=False,
    show_share_button=False,
)
def hide_settings():
    """Switch the UI from setup mode into interview mode.

    Returns replacement components that collapse the settings accordion,
    disable the problem-generation button, open the solution accordion,
    and enable both the finish button and the audio recorder.
    """
    return (
        gr.Accordion("Settings", open=False),
        gr.Button("Generate a problem", interactive=False),
        gr.Accordion("Solution", open=True),
        gr.Button("Finish the interview", interactive=True),
        gr.Audio(interactive=True, **default_audio_params),
    )
def add_interviewer_message(message):
    """Build a Gradio callback that posts *message* as an interviewer turn.

    The returned function appends a ``(None, message)`` pair to the chat
    history (mutating it in place — ``None`` marks the user side as empty,
    so the text renders as a bot message) and returns the updated history.
    """

    def append_message(chat_log):
        chat_log.append((None, message))
        return chat_log

    return append_message
def hide_solution():
    """Lock down the workspace once the interview is finished.

    Returns replacement components that collapse the solution and problem
    accordions and disable both the finish button and the audio recorder,
    so no further answers can be submitted.
    """
    return (
        gr.Accordion("Solution", open=False),
        gr.Button("Finish the interview", interactive=False),
        gr.Accordion("Problem statement", open=False),
        gr.Audio(interactive=False, **default_audio_params),
    )
# Top-level UI: one "Coding" tab (settings -> problem -> solution/chat ->
# feedback) plus the event wiring that connects the components to the LLM
# helpers imported from llm.py.
with gr.Blocks() as demo:
    gr.Markdown("Your coding interview practice AI assistant!")
    # TODO: add other types of interviews (e.g. system design, ML design, behavioral, etc.)
    with gr.Tab("Coding") as coding_tab:
        # Per-session state: LLM conversation transcript, the code snapshot
        # from the previous request, and client placeholders (not wired to
        # any event below — presumably reserved for later use; verify).
        chat_history = gr.State([])
        previous_code = gr.State("")
        client = gr.State(None)
        client_started = gr.State(False)
        with gr.Accordion("Settings") as init_acc:
            with gr.Row():
                with gr.Column():
                    gr.Markdown("##### Problem settings")
                    with gr.Row():
                        gr.Markdown("Difficulty")
                        difficulty_select = gr.Dropdown(
                            label="Select difficulty",
                            choices=["Easy", "Medium", "Hard"],
                            value="Medium",
                            container=False,
                            allow_custom_value=True,
                        )
                    with gr.Row():
                        gr.Markdown("Topic (can type custom value)")
                        topic_select = gr.Dropdown(
                            label="Select topic", choices=topics_list, value="Arrays", container=False, allow_custom_value=True
                        )
                    gr.Markdown("##### Assistant settings")
                    with gr.Row():
                        gr.Markdown("Select LLM model to use")
                        model_select = gr.Dropdown(label="Select model", choices=models, value="gpt-3.5-turbo", container=False)
                with gr.Column(scale=2):
                    requirements = gr.Textbox(label="Requirements", placeholder="Specify additional requirements", lines=5)
                    start_btn = gr.Button("Generate a problem")
            # TODO: select LLM model
        with gr.Accordion("Problem statement", open=True) as problem_acc:
            description = gr.Markdown()
        with gr.Accordion("Solution", open=False) as solution_acc:
            with gr.Row() as content:
                with gr.Column(scale=2):
                    code = gr.Code(
                        label="Please write your code here. Only Python linting is available for now.", language="python", lines=35
                    )
                with gr.Column(scale=1):
                    end_btn = gr.Button("Finish the interview", interactive=False)
                    chat = gr.Chatbot(label="Chat", show_label=False, show_share_button=False)
                    audio_input = gr.Audio(interactive=False, **default_audio_params)
                    # Hidden helpers: autoplaying TTS output for the last reply
                    # and the textbox that receives transcribed speech.
                    audio_output = gr.Audio(label="Play audio", autoplay=True, visible=False)
                    message = gr.Textbox(label="Message", lines=3, visible=False)
        with gr.Accordion("Feedback", open=True) as feedback_acc:
            feedback = gr.Markdown()
    with gr.Tab("Instruction") as instruction_tab:
        pass

    # Greet the candidate when the Coding tab is selected.
    coding_tab.select(fn=add_interviewer_message(fixed_messages["intro"]), inputs=[chat], outputs=[chat])

    # Start flow: post the "start" message, generate the problem from the
    # chosen settings, then collapse settings and unlock the workspace.
    start_btn.click(fn=add_interviewer_message(fixed_messages["start"]), inputs=[chat], outputs=[chat]).then(
        fn=get_problem,
        inputs=[requirements, difficulty_select, topic_select, model_select],
        outputs=[description, chat_history],
        scroll_to_output=True,
    ).then(fn=hide_settings, inputs=None, outputs=[init_acc, start_btn, solution_acc, end_btn, audio_input])

    # Typed messages go straight to the LLM.
    message.submit(
        fn=send_request,
        inputs=[code, previous_code, message, chat_history, chat, model_select],
        outputs=[chat_history, chat, message, previous_code],
    )

    # End flow: post the "end" message, produce feedback, then lock the UI.
    end_btn.click(
        fn=add_interviewer_message(fixed_messages["end"]),
        inputs=[chat],
        outputs=[chat],
    ).then(
        fn=end_interview, inputs=[description, chat_history, model_select], outputs=feedback
    ).then(fn=hide_solution, inputs=None, outputs=[solution_acc, end_btn, problem_acc, audio_input])

    # Voice flow: transcribe the recording into `message`, clear the recorder
    # (lambda returning None resets the component), then send the request.
    audio_input.stop_recording(fn=transcribe_audio, inputs=[audio_input], outputs=[message]).then(
        fn=lambda: None, inputs=None, outputs=[audio_input]
    ).then(
        fn=send_request,
        inputs=[code, previous_code, message, chat_history, chat, model_select],
        outputs=[chat_history, chat, message, previous_code],
    )

    # Whenever the chat updates, read the latest message aloud; clear the
    # audio player once playback stops.
    chat.change(fn=read_last_message, inputs=[chat], outputs=[audio_output])
    audio_output.stop(fn=lambda: None, inputs=None, outputs=[audio_output])

demo.launch(show_api=False)
|