tahirsher committed (verified)
Commit 0f85f14 · 1 Parent(s): 4840534

Update app.py

Files changed (1):
  1. app.py +71 -76
app.py CHANGED
@@ -1,90 +1,85 @@
- # Install necessary libraries
-
- import os
- import whisper
- from gtts import gTTS
- import gradio as gr
  import streamlit as st
  from transformers import pipeline
- from groq import Groq
- from datasets import load_dataset
-
- # Load career guidance dataset from Hugging Face
- career_guidance_dataset = load_dataset("mb7419/career-guidance-reddit")
-
- # Initialize Whisper model
- whisper_model = whisper.load_model("large")
-
- # Initialize Groq client and LLaMA model
- GROQ_API_KEY = "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"
- client = Groq(api_key=GROQ_API_KEY)
- llama_pipeline = pipeline("text-generation", model="tgkamal/student_career_path-llama")
-
- # Function to convert text to speech
- def text_to_speech(text):
-     tts = gTTS(text=text, lang='en')
-     response_audio_path = "response.mp3"
-     tts.save(response_audio_path)
-     return response_audio_path
-
- # Function to transcribe audio to text using Whisper
- def transcribe_audio(audio_path):
-     result = whisper_model.transcribe(audio_path)
-     return result["text"]
-
- # Function to generate career counseling response using LLaMA model
- def get_counseling_response(user_input):
-     messages = [{"role": "user", "content": user_input}]
-     response = llama_pipeline(messages)
-     return response[0]["generated_text"]
-
- # Function to ask quiz questions based on the user's background
- def generate_quiz(education_level, skills):
-     # Sample questions based on educational background and skills
-     questions = [
-         f"What is your experience level in {skills[0]}?",
-         f"How would you rate your proficiency in {skills[1]}?",
-         "Which field do you prefer working in: technology, business, or academia?",
-         "Do you enjoy research-oriented work or practical applications?",
-         # Add more relevant questions here up to 20
-     ]
-     return questions
-
- # Main function to handle chatbot interaction
- def career_chatbot(audio_path):
-     # Step 1: Transcribe audio to text
-     user_input = transcribe_audio(audio_path)
-     st.write(f"User said: {user_input}")
-
-     # Step 2: Generate a career counseling response based on quiz results
-     response_text = get_counseling_response(user_input)
-     st.write(f"Career Counseling Bot says: {response_text}")
-
-     # Step 3: Convert response text to audio
-     response_audio_path = text_to_speech(response_text)
-
-     # Return text and audio response paths
-     return response_text, response_audio_path
-
- # Streamlit and Gradio Interface Integration
  st.title("Career Counseling Chatbot")
- st.write("Ask career-related questions and receive guidance based on your background.")
-
- # Audio input using Gradio in Streamlit
- with gr.Blocks() as gr_interface:
-     gr.Audio(type="filepath", label="Speak to the Career Bot")
-     gr.outputs=["text", gr.Audio(type="filepath")]
-
- # Set up Streamlit's interactive components for quiz
- education_level = st.selectbox("Select your education level:", ["Intermediate", "Undergraduate", "Graduate"])
- skills = st.multiselect("Select your technical skills:", ["Python", "Machine Learning", "Data Analysis", "Web Development"])
-
- if st.button("Start Quiz"):
-     quiz_questions = generate_quiz(education_level, skills)
-     user_responses = {}
-     for question in quiz_questions:
-         user_responses[question] = st.text_input(question)
-     st.write("Thank you for answering the questions!")
-
- # Connect Streamlit and Gradio to launch the chatbot
- gr_interface.launch(share=True)
  import streamlit as st
+ import gradio as gr
+ import requests
+ import openai
+ from gtts import gTTS
+ import soundfile as sf
  from transformers import pipeline
+ from transformers import WhisperForConditionalGeneration, WhisperProcessor
+ import io
+
+ # Setting up API keys
+ openai.api_key = "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"
+ groq_api_key = "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"
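Note: both keys are committed in plain text, and neither `openai` nor `groq_api_key` is actually used later in the file. A common alternative is to read them from the environment; the variable names below are assumptions, not part of this commit.

    import os

    # Assumed environment variable names; set these as Space secrets
    # instead of hardcoding the keys in app.py.
    openai.api_key = os.environ.get("OPENAI_API_KEY", "")
    groq_api_key = os.environ.get("GROQ_API_KEY", "")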
+
+ # Load Whisper model for voice-to-text
+ whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")
+ whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-large")
+
+ # Load LLaMA model for language processing
+ llama_model = pipeline("text-generation", model="sujal011/llama3.2-3b-disease-symptoms", device=0)
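Note: `device=0` pins the pipeline to the first GPU and will fail on a CPU-only Space. A minimal sketch of a guarded alternative; the fallback logic is an assumption, not part of this commit.

    import torch
    from transformers import pipeline

    # Use the GPU only when CUDA is actually available, otherwise fall back to CPU.
    device = 0 if torch.cuda.is_available() else -1
    llama_model = pipeline(
        "text-generation",
        model="sujal011/llama3.2-3b-disease-symptoms",  # model id taken from the diff
        device=device,
    )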
 
 
 
 
+ # Function to convert voice input to text
+ def audio_to_text(audio_file):
+     audio_input, _ = sf.read(audio_file)
+     input_features = whisper_processor(audio_input, return_tensors="pt").input_features
+     generated_ids = whisper_model.generate(input_features)
+     transcription = whisper_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+     return transcription
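Note: the Whisper feature extractor expects 16 kHz mono audio and a `sampling_rate` argument, while uploaded WAV/MP3 files may use other rates. A minimal sketch of a resampling variant, assuming `librosa` as an extra dependency:

    import librosa

    def audio_to_text_16k(audio_file):
        # librosa resamples to 16 kHz, which matches Whisper's expected input rate
        audio_input, sr = librosa.load(audio_file, sr=16000)
        input_features = whisper_processor(
            audio_input, sampling_rate=sr, return_tensors="pt"
        ).input_features
        generated_ids = whisper_model.generate(input_features)
        return whisper_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]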
+ # Function to handle user input
+ def handle_user_input(user_text):
+     # Call LLaMA model to process input and generate response
+     response = llama_model(user_text, max_length=150)
+     return response[0]['generated_text']
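Note: by default the text-generation pipeline returns the prompt plus the continuation, so the reply will start by echoing the user's query, and `max_length=150` counts prompt tokens as well. A sketch of a variant that returns only newly generated text; the parameter choices are assumptions.

    def handle_user_input(user_text):
        # return_full_text=False strips the prompt from the output;
        # max_new_tokens bounds only the generated continuation
        response = llama_model(user_text, max_new_tokens=150, return_full_text=False)
        return response[0]["generated_text"]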
+ # Function to generate audio from text
+ def text_to_audio(text):
+     tts = gTTS(text)
+     audio_bytes = io.BytesIO()
+     tts.save(audio_bytes)
+     audio_bytes.seek(0)
+     return audio_bytes
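Note: `gTTS.save()` expects a filename and will not work with a `BytesIO` buffer; the library's in-memory path is `write_to_fp()`. A minimal corrected sketch:

    def text_to_audio(text):
        tts = gTTS(text)
        audio_bytes = io.BytesIO()
        tts.write_to_fp(audio_bytes)  # write the MP3 data into the in-memory buffer
        audio_bytes.seek(0)
        return audio_bytes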
 
 
 
 
+
+ # Streamlit UI setup
  st.title("Career Counseling Chatbot")
+ st.write("Answer questions to receive personalized career advice.")
+
+ # Input method selection
+ input_method = st.radio("Choose your input method:", ("Text", "Voice"))
+
+ # Collect educational background, skills, and aspirations
+ st.subheader("Tell us about your educational background and skills.")
+ education_level = st.selectbox("Education Level", ["Intermediate", "Undergraduate", "Graduate"])
+ technical_skills = st.text_area("Technical Skills", "E.g., Python, Machine Learning, etc.")
+ soft_skills = st.text_area("Soft Skills", "E.g., Communication, Leadership, etc.")
+ future_aspirations = st.text_area("Future Career Interests", "E.g., Data Scientist, Software Developer, etc.")
+
+ # Conduct quiz
+ st.subheader("Career Aptitude Quiz")
+ quiz_questions = [
+     "Question 1: Your interest in scientific research?",
+     "Question 2: Your comfort with technology?",
+     # Add more questions as needed
+ ]
+ quiz_responses = []
+ for question in quiz_questions:
+     response = st.radio(question, ["High", "Moderate", "Low"])
+     quiz_responses.append(response)
+
+ # Process input and provide career suggestion
+ if input_method == "Text":
+     user_input = st.text_input("Enter your query:")
+     if user_input:
+         response = handle_user_input(user_input)
+         st.write("Chatbot:", response)
+         st.audio(text_to_audio(response), format="audio/mp3")
+
+ elif input_method == "Voice":
+     user_audio = st.file_uploader("Upload your voice query:", type=["wav", "mp3"])
+     if user_audio:
+         transcription = audio_to_text(user_audio)
+         response = handle_user_input(transcription)
+         st.write("You said:", transcription)
+         st.write("Chatbot:", response)
+         st.audio(text_to_audio(response), format="audio/mp3")
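Note: the profile fields and `quiz_responses` are collected but never reach the model, so the advice is generated from the raw query alone. A sketch of one way to fold them into the prompt; `build_profile_prompt` is a hypothetical helper, not part of this commit.

    def build_profile_prompt(query):
        # Hypothetical helper: prepend the collected background and quiz answers
        # so the generated advice can take them into account.
        quiz_summary = "; ".join(
            f"{q} -> {a}" for q, a in zip(quiz_questions, quiz_responses)
        )
        return (
            f"Education: {education_level}\n"
            f"Technical skills: {technical_skills}\n"
            f"Soft skills: {soft_skills}\n"
            f"Aspirations: {future_aspirations}\n"
            f"Quiz: {quiz_summary}\n"
            f"Question: {query}\n"
            "Career advice:"
        )

    # Usage: response = handle_user_input(build_profile_prompt(user_input))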