|
import streamlit as st |
|
from groq import Groq |
|
from langchain.prompts import PromptTemplate |
|
from langchain.chains import LLMChain |
|
import os |
|
from langchain_groq import ChatGroq |
|
|
|
import pandas as pd |
|
from langchain.schema import (AIMessage, HumanMessage, SystemMessage) |
|
from langchain.prompts.chat import ( |
|
ChatPromptTemplate, |
|
SystemMessagePromptTemplate, |
|
HumanMessagePromptTemplate |
|
) |
|
from langchain.memory import ConversationBufferMemory |
|
from langchain.memory import ConversationBufferWindowMemory |
|
import json,time,random |
|
from templates import choose_template, extract_template, warmup_feedback_template, warm_up_question_template |
|
from utils import select_questions |
|
|
|
st.set_page_config(page_icon='rex.png', layout='wide')

st.title("Warm Up Round : Getting Comfortable with the Interview")

category = st.selectbox("Which type of questions do you want to practice?",
                        ['Technical', 'Behavioural', 'Culture Fit', 'STAR'], index=None)

# st.stop() never returns, so the original `while category is None:` loop could
# only ever run its body once per script run — a plain `if` guard states what
# actually happens. Streamlit reruns the whole script when the user selects.
if category is None:
    st.info('Please select question category')
    st.stop()

# Past the guard, `category` is guaranteed to be set.
data = select_questions(category=category)
|
|
|
|
|
# Guard clauses: everything below needs an API key, a parsed resume, and a job
# description. Use .get() so a user who opens this page before completing the
# setup page sees the info message instead of an AttributeError/KeyError on a
# session key that was never created.
if not st.session_state.get("groq_key"):
    st.info("Please add your API key to continue")
    st.stop()

if not st.session_state.get("Resume Info"):
    st.info("Please upload your Resume")
    st.stop()

if not st.session_state.get("Job Description"):
    st.info("Please add your job description")
    st.stop()
|
|
|
|
|
|
|
# LLM client. Prefer the key the user supplied in this session (the guard above
# requires one); fall back to the environment for local development. The
# original read only GROQ_API_KEY and silently ignored the session key it had
# just validated.
client = ChatGroq(
    groq_api_key=st.session_state.get("groq_key") or os.getenv('GROQ_API_KEY'),
    model_name="mixtral-8x7b-32768",
)

# NOTE(review): this memory object is rebuilt from scratch on every Streamlit
# rerun, so it only holds the save_context() calls made during the current run;
# cross-run history is kept in st.session_state['history'] — verify that is
# intentional.
memory = ConversationBufferMemory(
    memory_key="history",
    return_messages=True,
)
|
|
|
|
|
def _build_chain(system_template):
    """Build an LLMChain pairing `system_template` with a pass-through '{text}' human slot.

    All four warm-up chains share exactly this structure; only the system
    template differs, so one helper replaces four copy-pasted setups.
    """
    system_message = SystemMessagePromptTemplate.from_template(system_template)
    human_message = HumanMessagePromptTemplate.from_template("{text}")
    chat_prompt = ChatPromptTemplate.from_messages([system_message, human_message])
    return LLMChain(llm=client, prompt=chat_prompt)


# Summarise already-asked questions.
extract_chain = _build_chain(extract_template)
# Pick the next question from the question bank.
choose_chain = _build_chain(choose_template)
# Phrase the chosen question conversationally.
question_chain = _build_chain(warm_up_question_template)
# Grade the candidate's answer.
feedback_chain = _build_chain(warmup_feedback_template)
|
|
|
# One-time initialisation of the per-session conversation state.
_STATE_DEFAULTS = {
    "warmup_message": [],   # full chat transcript replayed on each rerun
    "action": "Next",       # next step: "Next", "Repeat" or "Feedback"
    "history": [],          # saved LangChain message-history snippets
    "questions": [],        # questions asked so far
}
for _key, _default in _STATE_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
|
|
|
# Replay the stored transcript so the conversation survives Streamlit reruns.
for past in st.session_state.warmup_message:
    if past['role'] == "user":
        speaker, icon = "user", "user.png"
    else:
        speaker, icon = "assistant", "rex.png"
    with st.chat_message(speaker, avatar=icon):
        st.markdown(f"{past['content']}")
|
|
|
# Capture the user's message, echo it in the chat, and persist it.
inp = st.chat_input("Type here")
if inp:
    with st.chat_message("user", avatar='user.png'):
        st.markdown(inp)
    st.session_state['warmup_message'].append({'role': 'user', 'content': inp})

# Concrete question text for the feedback chain; assigned in the question branch.
question = None
|
|
|
# After feedback was delivered, ask whether to move on or retry the question.
if st.session_state.warmup_message != [] and st.session_state.warmup_message[-1]['role'] == "feedback":
    option = st.radio(label="Which question would you like to do?", options=["Next", "Repeat"], index=None)
    # st.radio returns None until the user picks. A widget's value can only
    # change on a rerun, so the original `while option is None: pass` busy-wait
    # span forever and froze the app; stop this run and let Streamlit rerun on
    # selection instead.
    if option is None:
        st.stop()
    st.session_state.action = option
|
|
|
# Ask a new (or repeated) question when the conversation is at a boundary:
# either nothing has been asked yet, or feedback on the last answer was given.
# The original condition `action == "Next" or "Repeat" and (...)` was a
# precedence bug — `or "Repeat"` is always truthy, so the action check never
# took effect; `in ("Next", "Repeat")` is the intended test.
if st.session_state.action in ("Next", "Repeat") and (
        st.session_state.warmup_message == [] or st.session_state.warmup_message[-1]['role'] == "feedback"):

    # Summarise previously asked questions so the chooser avoids repeats.
    if st.session_state.questions != []:
        extracts = extract_chain.run(history=st.session_state.questions, text="")
    else:
        extracts = "No previous Questions"

    # Pick the next question, then have the interviewer phrase it.
    chosen_q = choose_chain.run(action=st.session_state.action, questions=extracts, data=data, text="",
                                details=st.session_state["Resume Info"],
                                description=st.session_state['Job Description'])
    response = question_chain.run(question=chosen_q, history=st.session_state.history[-2:], text=inp,
                                  details=st.session_state["Resume Info"])

    with st.chat_message("assistant", avatar='rex.png'):
        # Word-by-word "typing" animation with a trailing cursor glyph.
        message_placeholder = st.empty()
        full_response = ""
        for chunk in response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            message_placeholder.markdown(full_response + "β")
        message_placeholder.markdown(full_response)

    st.session_state.action = "Feedback"
    st.session_state['warmup_message'].append({'role': 'interviewer', 'content': response})
    memory.save_context({"input": ""}, {"output": response})
    st.session_state['history'].append(memory.buffer_as_messages[-2:])
    st.session_state['questions'].append({'Question': response})
    # NOTE(review): this module-level variable does not survive the rerun that
    # follows st.stop(), so the feedback branch below always sees question=None;
    # confirm the feedback template relies on `asked` instead.
    question = chosen_q
    st.stop()
|
|
|
# Grade the answer the user just typed: runs when the latest transcript entry
# is the user's message and the question branch has armed the "Feedback" step.
if st.session_state.warmup_message[-1]['role'] == "user" and st.session_state.action == "Feedback":
    # NOTE(review): `question` is always None here — it is only assigned inside
    # the question branch above, which ends with st.stop(), and module-level
    # variables do not survive a Streamlit rerun. The question text actually
    # reaches the chain via `asked` (the interviewer's last transcript entry);
    # verify the template tolerates question=None.
    feedback = feedback_chain.run(question=question, response=inp, history=st.session_state.history[-2:], text=inp,asked=st.session_state.warmup_message[-2]['content'])

    with st.chat_message("assistant", avatar='rex.png'):
        # Word-by-word "typing" animation with a trailing cursor glyph.
        message_placeholder = st.empty()
        full_response = ""
        for chunk in feedback.split():
            full_response += chunk + " "
            time.sleep(0.05)
            message_placeholder.markdown(full_response + "β")
        message_placeholder.markdown(full_response)

    # Persist the feedback turn; the next rerun's radio (above) then offers
    # Next/Repeat because the transcript now ends with a 'feedback' entry.
    st.session_state['warmup_message'].append({'role': 'feedback', 'content': feedback})
    memory.save_context({"input": inp}, {"output": feedback})
    st.session_state['history'].append(memory.buffer_as_messages[-2:])

# Clicking the button triggers a rerun, advancing the conversation state machine.
st.button("Continue")
|
|
|
|