# Source: Hugging Face Space app.py by tahirsher — commit 0f85f14 (verified)
# Standard library
import io
import os

# Third-party
import gradio as gr
import openai
import requests
import soundfile as sf
import streamlit as st
from gtts import gTTS
from transformers import pipeline
from transformers import WhisperForConditionalGeneration, WhisperProcessor
# Setting up API keys.
# SECURITY FIX: the previous revision embedded live secret keys directly in
# source control. Those keys must be treated as compromised and rotated.
# Keys are now read from the environment (empty string if unset).
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
groq_api_key = os.environ.get("GROQ_API_KEY", "")
# Load Whisper model for voice-to-text.
# Full "whisper-large" seq2seq checkpoint plus its processor (feature
# extractor + tokenizer); downloaded from the Hugging Face hub on first run.
whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")
whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-large")
# Load LLaMA model for language processing.
# NOTE(review): device=0 pins the pipeline to the first GPU and will fail on
# CPU-only hosts — confirm a GPU is available, or fall back to device=-1.
llama_model = pipeline("text-generation", model="sujal011/llama3.2-3b-disease-symptoms", device=0)
# Function to convert voice input to text
def audio_to_text(audio_file):
    """Transcribe an uploaded audio clip to text with Whisper.

    Parameters
    ----------
    audio_file : file-like or str
        Any audio source accepted by ``soundfile.read`` (wav/flac; plain
        mp3 support depends on the installed libsndfile — TODO confirm).

    Returns
    -------
    str
        Transcription of the clip.
    """
    # BUG FIX: keep the sample rate instead of discarding it — the Whisper
    # feature extractor needs it to resample the clip to the 16 kHz the
    # model expects; otherwise 44.1/48 kHz uploads are mis-transcribed.
    audio_input, sampling_rate = sf.read(audio_file)
    input_features = whisper_processor(
        audio_input, sampling_rate=sampling_rate, return_tensors="pt"
    ).input_features
    generated_ids = whisper_model.generate(input_features)
    # batch_decode returns one string per batch item; we sent a single clip.
    transcription = whisper_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return transcription
# Function to handle user input
def handle_user_input(user_text):
    """Run the LLaMA text-generation pipeline on *user_text*.

    Returns the ``generated_text`` of the first (and only) candidate
    produced by the pipeline, capped at 150 tokens total.
    """
    candidates = llama_model(user_text, max_length=150)
    first = candidates[0]
    return first["generated_text"]
# Function to generate audio from text
def text_to_audio(text):
    """Synthesize *text* to spoken MP3 audio via Google TTS.

    Parameters
    ----------
    text : str
        The text to speak.

    Returns
    -------
    io.BytesIO
        An in-memory, rewound buffer of MP3 bytes, suitable for
        ``st.audio(..., format="audio/mp3")``.
    """
    tts = gTTS(text)
    audio_bytes = io.BytesIO()
    # BUG FIX: gTTS.save() expects a *filename* and fails on a file-like
    # object; write_to_fp() is the documented in-memory API.
    tts.write_to_fp(audio_bytes)
    audio_bytes.seek(0)  # rewind so the consumer reads from the start
    return audio_bytes
# --- Streamlit page: header and input-mode selection ---
st.title("Career Counseling Chatbot")
st.write("Answer questions to receive personalized career advice.")

input_method = st.radio("Choose your input method:", ("Text", "Voice"))

# --- Profile: education, skills and aspirations ---
st.subheader("Tell us about your educational background and skills.")
education_level = st.selectbox("Education Level", ["Intermediate", "Undergraduate", "Graduate"])
technical_skills = st.text_area("Technical Skills", "E.g., Python, Machine Learning, etc.")
soft_skills = st.text_area("Soft Skills", "E.g., Communication, Leadership, etc.")
future_aspirations = st.text_area("Future Career Interests", "E.g., Data Scientist, Software Developer, etc.")

# --- Short aptitude quiz ---
st.subheader("Career Aptitude Quiz")
quiz_questions = [
    "Question 1: Your interest in scientific research?",
    "Question 2: Your comfort with technology?",
    # Add more questions as needed
]
# One radio widget per question; answers are collected in display order.
quiz_responses = [st.radio(question, ["High", "Moderate", "Low"]) for question in quiz_questions]
# --- Process input and provide career suggestion ---
def _show_reply(reply):
    """Display the chatbot's reply and play its spoken rendition."""
    st.write("Chatbot:", reply)
    st.audio(text_to_audio(reply), format="audio/mp3")

if input_method == "Text":
    typed_query = st.text_input("Enter your query:")
    if typed_query:
        _show_reply(handle_user_input(typed_query))
elif input_method == "Voice":
    uploaded_clip = st.file_uploader("Upload your voice query:", type=["wav", "mp3"])
    if uploaded_clip:
        spoken_text = audio_to_text(uploaded_clip)
        reply = handle_user_input(spoken_text)
        st.write("You said:", spoken_text)
        _show_reply(reply)