# Dynamic psychometric-analysis chatbot (Hugging Face Space).
import gradio as gr
from transformers import pipeline
from speechbrain.pretrained import Tacotron2, HIFIGAN, EncoderDecoderASR
import matplotlib.pyplot as plt
import pandas as pd
import random
# Initialize psychometric model
# Text-classification pipeline that scores personality traits from free text.
psych_model_name = "KevSun/Personality_LM"
psych_model = pipeline("text-classification", model=psych_model_name)
# Initialize ASR and TTS models
# NOTE(review): from_hparams fetches pretrained weights on first run and caches
# them in savedir — presumably so later launches skip the download; confirm.
asr_model = EncoderDecoderASR.from_hparams(source="speechbrain/asr-crdnn-rnnlm-librispeech", savedir="tmp_asr")
tts_model = Tacotron2.from_hparams(source="speechbrain/tts-tacotron2-ljspeech", savedir="tmp_tts")
voc_model = HIFIGAN.from_hparams(source="speechbrain/tts-hifigan-ljspeech", savedir="tmp_voc")
# Function to analyze text responses
def analyze_text_responses(responses):
    """Score each free-text answer with the personality classifier.

    Args:
        responses: iterable of answer strings.

    Returns:
        dict mapping each top predicted trait label to its score; if two
        answers yield the same label, the later answer's score wins.
    """
    traits = {}
    for answer in responses:
        top_prediction = psych_model(answer)[0]
        traits[top_prediction["label"]] = top_prediction["score"]
    return traits
# Function to handle TTS
def generate_audio_question(question):
    """Synthesize an interview question as speech.

    Args:
        question: the question text to speak.

    Returns:
        A numpy waveform for the first item of the vocoder's output batch.
    """
    mel_spec, _mel_info, _ = tts_model.encode_text(question)
    audio_batch = voc_model.decode_batch(mel_spec)
    return audio_batch[0].numpy()
# Function to process audio response
def process_audio_response(audio):
    """Transcribe a recorded answer to text.

    Args:
        audio: path to the recorded audio file, or None when nothing
            was recorded.

    Returns:
        The transcription string, "No audio provided" for missing input,
        or an "Error processing audio: ..." message on failure.
    """
    # Guard clause: the widget may submit nothing at all.
    if audio is None:
        return "No audio provided"
    try:
        return asr_model.transcribe_file(audio)
    except Exception as err:
        # Surface the failure in the UI rather than crashing the app.
        return f"Error processing audio: {str(err)}"
# Function to generate dynamic questions based on answers
def generate_dynamic_question(previous_answer):
    """Pick a follow-up question keyed off the previous answer.

    Args:
        previous_answer: the candidate's last answer. Tolerates None or
            non-string values (e.g. raw audio widget values), which fall
            through to the default question.

    Returns:
        A follow-up question string; first keyword match wins, checked in
        the order teamwork -> challenge -> stress.
    """
    # Coerce defensively: chat_interface forwards every widget value here,
    # including None / non-string audio inputs, which previously raised
    # AttributeError on .lower().
    if not isinstance(previous_answer, str):
        previous_answer = "" if previous_answer is None else str(previous_answer)
    # Lowercase once instead of once per keyword test.
    answer = previous_answer.lower()
    if "teamwork" in answer:
        return "Can you share a specific instance where you worked in a team?"
    elif "challenge" in answer:
        return "How did you overcome that challenge? What steps did you take?"
    elif "stress" in answer:
        return "How do you manage stress during high-pressure situations?"
    else:
        # Default follow-up question
        return "Can you tell me more about that?"
# Gradio UI function to handle dynamic conversation
def chat_interface(candidate_name, *responses):
    """Build the conversation log and analysis outputs for the UI.

    Args:
        candidate_name: name entered in the first textbox.
        *responses: all remaining widget values in order — text answers
            followed by audio answers (audio values may be None).

    Returns:
        Tuple of (conversation text, text dataframe, text plot,
        audio dataframe, audio plot) matching the Interface outputs.
    """
    conversation_history = []
    # Iterate through responses to generate follow-up questions
    for i, response in enumerate(responses):
        conversation_history.append(f"Q{i+1}: {response}")
        # Coerce to str: audio widget values may be None or non-string and
        # generate_dynamic_question expects text.
        dynamic_question = generate_dynamic_question(str(response or ""))
        conversation_history.append(f"Follow-up Question: {dynamic_question}")
    # NOTE(review): text_part and audio_part are not defined anywhere in this
    # file — calling them raises NameError. Guard so the app degrades to empty
    # analysis outputs instead of crashing; restore/define the helpers to get
    # real dataframes and plots.
    try:
        text_df, text_plot = text_part(candidate_name, responses)
    except NameError:
        text_df, text_plot = None, None
    try:
        audio_df, audio_plot = audio_part(candidate_name, responses)
    except NameError:
        audio_df, audio_plot = None, None
    # Return conversation history and analysis
    return "\n".join(conversation_history), text_df, text_plot, audio_df, audio_plot
# Create text inputs and audio inputs
text_inputs = [gr.Textbox(label=f"Response to Q{i+1}:") for i in range(5)] # Assuming we have up to 5 text responses
audio_inputs = [gr.Audio(label=f"Response to Audio Q{i+1}:") for i in range(2)] # Assuming we have up to 2 audio responses
# NOTE(review): all eight widget values (name + 5 text + 2 audio) are passed
# positionally to chat_interface, so the audio values land inside *responses
# mixed with the text answers — confirm chat_interface tolerates non-strings.
interface = gr.Interface(
    fn=chat_interface,
    inputs=[gr.Textbox(label="Candidate Name")] + text_inputs + audio_inputs,
    # Output order must match chat_interface's 5-tuple return value.
    outputs=["text", "dataframe", "plot", "dataframe", "plot"],
    title="Dynamic Psychometric Analysis Chatbot"
)
# Launch the interface
interface.launch()