Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,90 +1,85 @@
|
|
1 |
-
# Install necessary libraries
|
2 |
-
|
3 |
-
import os
|
4 |
-
import whisper
|
5 |
-
from gtts import gTTS
|
6 |
-
import gradio as gr
|
7 |
import streamlit as st
|
|
|
|
|
|
|
|
|
|
|
8 |
from transformers import pipeline
|
9 |
-
from
|
10 |
-
|
11 |
-
|
12 |
-
# Load career guidance dataset from Hugging Face
|
13 |
-
career_guidance_dataset = load_dataset("mb7419/career-guidance-reddit")
|
14 |
|
15 |
-
#
|
16 |
-
|
|
|
17 |
|
18 |
-
#
|
19 |
-
|
20 |
-
|
21 |
-
llama_pipeline = pipeline("text-generation", model="tgkamal/student_career_path-llama")
|
22 |
|
23 |
-
#
|
24 |
-
|
25 |
-
tts = gTTS(text=text, lang='en')
|
26 |
-
response_audio_path = "response.mp3"
|
27 |
-
tts.save(response_audio_path)
|
28 |
-
return response_audio_path
|
29 |
|
30 |
-
# Function to
|
31 |
-
def
|
32 |
-
|
33 |
-
|
|
|
|
|
|
|
34 |
|
35 |
-
# Function to
|
36 |
-
def
|
37 |
-
|
38 |
-
response =
|
39 |
-
return response[0][
|
40 |
|
41 |
-
# Function to
|
42 |
-
def
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
"Do you enjoy research-oriented work or practical applications?",
|
49 |
-
# Add more relevant questions here up to 20
|
50 |
-
]
|
51 |
-
return questions
|
52 |
|
53 |
-
#
|
54 |
-
def career_chatbot(audio_path):
|
55 |
-
# Step 1: Transcribe audio to text
|
56 |
-
user_input = transcribe_audio(audio_path)
|
57 |
-
st.write(f"User said: {user_input}")
|
58 |
-
|
59 |
-
# Step 2: Generate a career counseling response based on quiz results
|
60 |
-
response_text = get_counseling_response(user_input)
|
61 |
-
st.write(f"Career Counseling Bot says: {response_text}")
|
62 |
-
|
63 |
-
# Step 3: Convert response text to audio
|
64 |
-
response_audio_path = text_to_speech(response_text)
|
65 |
-
|
66 |
-
# Return text and audio response paths
|
67 |
-
return response_text, response_audio_path
|
68 |
-
|
69 |
-
# Streamlit and Gradio Interface Integration
|
70 |
st.title("Career Counseling Chatbot")
|
71 |
-
st.write("
|
|
|
|
|
|
|
72 |
|
73 |
-
#
|
74 |
-
|
75 |
-
|
76 |
-
|
|
|
|
|
77 |
|
78 |
-
#
|
79 |
-
|
80 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
81 |
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
|
|
88 |
|
89 |
-
|
90 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Standard library
import io
import os

# Third-party
import gradio as gr
import openai
import requests
import soundfile as sf
import streamlit as st
import torch
from gtts import gTTS
from transformers import WhisperForConditionalGeneration, WhisperProcessor, pipeline
|
|
|
|
|
|
# Setting up API keys.
# SECURITY: real OpenAI/Groq secrets were previously committed here in plain
# text. Those keys are compromised and must be revoked/rotated with the
# providers; keys are now read from the environment (never hard-code them).
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
groq_api_key = os.environ.get("GROQ_API_KEY", "")
14 |
|
# Speech-to-text: Whisper checkpoint used to transcribe uploaded audio.
# Model and processor must come from the same checkpoint, so name it once.
_WHISPER_CHECKPOINT = "openai/whisper-large"
whisper_model = WhisperForConditionalGeneration.from_pretrained(_WHISPER_CHECKPOINT)
whisper_processor = WhisperProcessor.from_pretrained(_WHISPER_CHECKPOINT)
|
|
18 |
|
# Load the text-generation model used to produce chatbot replies.
# NOTE(review): "llama3.2-3b-disease-symptoms" is a medical-symptoms
# fine-tune; confirm this is the intended model for a *career counseling* bot.
# device=0 hard-required a CUDA GPU and crashed on CPU-only hosts (e.g. a free
# Space); pick the device based on what is actually available.
_LLAMA_DEVICE = 0 if torch.cuda.is_available() else -1
llama_model = pipeline(
    "text-generation",
    model="sujal011/llama3.2-3b-disease-symptoms",
    device=_LLAMA_DEVICE,
)
|
|
|
|
|
|
|
|
21 |
|
22 |
# Function to convert voice input to text
def audio_to_text(audio_file):
    """Transcribe an audio file to text with Whisper.

    Parameters
    ----------
    audio_file : str or file-like object
        Anything accepted by ``soundfile.read`` (path or open file).

    Returns
    -------
    str
        The decoded transcription of the audio.
    """
    audio_input, sample_rate = sf.read(audio_file)
    # Whisper expects mono input; average the channels if the file is stereo.
    if audio_input.ndim > 1:
        audio_input = audio_input.mean(axis=1)
    # Pass the file's true sampling rate so the feature extractor can
    # validate it (Whisper is trained on 16 kHz audio) instead of silently
    # assuming the default.
    input_features = whisper_processor(
        audio_input, sampling_rate=sample_rate, return_tensors="pt"
    ).input_features
    generated_ids = whisper_model.generate(input_features)
    transcription = whisper_processor.batch_decode(
        generated_ids, skip_special_tokens=True
    )[0]
    return transcription
29 |
|
30 |
# Function to handle user input
def handle_user_input(user_text):
    """Generate a chatbot reply for ``user_text`` via the LLaMA pipeline.

    Returns the pipeline's generated string (which, per the transformers
    text-generation pipeline default, includes the prompt text).
    """
    # max_length counted the prompt tokens too, so long inputs left no room
    # for a reply; max_new_tokens bounds only the generated continuation.
    response = llama_model(user_text, max_new_tokens=150)
    return response[0]['generated_text']
35 |
|
36 |
# Function to generate audio from text
def text_to_audio(text):
    """Synthesize ``text`` to speech and return an in-memory MP3 buffer.

    Returns
    -------
    io.BytesIO
        MP3 bytes, rewound to position 0 so it can be fed to ``st.audio``.
    """
    tts = gTTS(text)
    audio_bytes = io.BytesIO()
    # BUG FIX: gTTS.save() expects a *filename* and raises when given a file
    # object; write_to_fp() is the API for streaming into a buffer.
    tts.write_to_fp(audio_bytes)
    audio_bytes.seek(0)
    return audio_bytes
|
|
|
|
|
|
|
|
43 |
|
44 |
# Streamlit UI setup
st.title("Career Counseling Chatbot")
st.write("Answer questions to receive personalized career advice.")

# Input method selection
input_method = st.radio("Choose your input method:", ("Text", "Voice"))

# Collect educational background, skills, and aspirations
st.subheader("Tell us about your educational background and skills.")
education_level = st.selectbox("Education Level", ["Intermediate", "Undergraduate", "Graduate"])
technical_skills = st.text_area("Technical Skills", "E.g., Python, Machine Learning, etc.")
soft_skills = st.text_area("Soft Skills", "E.g., Communication, Leadership, etc.")
future_aspirations = st.text_area("Future Career Interests", "E.g., Data Scientist, Software Developer, etc.")

# Conduct quiz
st.subheader("Career Aptitude Quiz")
quiz_questions = [
    "Question 1: Your interest in scientific research?",
    "Question 2: Your comfort with technology?",
    # Add more questions as needed
]
quiz_responses = []
for question in quiz_questions:
    response = st.radio(question, ["High", "Moderate", "Low"])
    quiz_responses.append(response)


def _build_prompt(query):
    """Fold the user's profile and quiz answers into the model prompt.

    BUG FIX: the profile fields and quiz responses were collected but never
    passed to the model, so the advice could not be personalized.
    """
    quiz_summary = "; ".join(
        f"{q} {a}" for q, a in zip(quiz_questions, quiz_responses)
    )
    return (
        f"Education: {education_level}. "
        f"Technical skills: {technical_skills}. "
        f"Soft skills: {soft_skills}. "
        f"Aspirations: {future_aspirations}. "
        f"Quiz answers: {quiz_summary}. "
        f"Question: {query}"
    )


# Process input and provide career suggestion
if input_method == "Text":
    user_input = st.text_input("Enter your query:")
    if user_input:
        response = handle_user_input(_build_prompt(user_input))
        st.write("Chatbot:", response)
        st.audio(text_to_audio(response), format="audio/mp3")

elif input_method == "Voice":
    user_audio = st.file_uploader("Upload your voice query:", type=["wav", "mp3"])
    if user_audio:
        transcription = audio_to_text(user_audio)
        response = handle_user_input(_build_prompt(transcription))
        st.write("You said:", transcription)
        st.write("Chatbot:", response)
        st.audio(text_to_audio(response), format="audio/mp3")