# SpotifyProject / app.py
import streamlit as st
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
import joblib
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

# Check that scikit-learn is installed (it is already imported above for
# cosine_similarity, so this mainly reports the status in the Streamlit UI)
try:
    import sklearn
    st.write("scikit-learn is installed.")
except ImportError:
    st.error("scikit-learn is not installed.")

# Load the emotion prediction model
emotion_model = load_model('lstm_model.h5')

# Load the KNN recommender model
try:
    recommender_model = joblib.load('knn_model.pkl')
except Exception as e:
    st.error(f"Error loading KNN model: {e}")

# Load the tokenizer (ensure it's the one used during training)
tokenizer = joblib.load('tokenizer.pkl')

# Load the dataset and drop columns that are not used as features
df = pd.read_csv('df1.csv')
df = df.drop(['Unnamed: 0', 'lyrics_filename', 'analysis_url', 'track_href',
              'type', 'id', 'uri', 'mood'], axis=1)
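# Assumption: after the drop above, the rows and remaining columns of df line up
# with the data the KNN model was fit on, since recommendations are looked up by
# positional index via df.iloc further down.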

# Set up the title of the app
st.title('Emotion and Audio Feature-based Song Recommendation System')
# Input field for lyrics
st.header('Enter Song Lyrics')
lyrics = st.text_area("Input the lyrics of the song here:")
# Input fields for audio features
st.header('Enter Audio Features')
audio_features = []
# Audio feature columns to collect from the user
audio_feature_columns = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness',
                         'instrumentalness', 'liveness', 'valence', 'tempo']
for feature_name in audio_feature_columns:
    feature = st.number_input(f"Enter value for {feature_name}:", step=0.01)
    audio_features.append(feature)
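
# For reference when entering values: Spotify audio features such as danceability,
# energy, speechiness, acousticness, instrumentalness, liveness and valence lie in
# [0, 1]; key is an integer 0-11, mode is 0 or 1, loudness is in dB (usually
# negative), and tempo is in beats per minute.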

# Predict and Recommend button
if st.button('Predict Emotion and Recommend Songs'):
    # st.number_input always returns a number (and 0.0 is a valid feature value),
    # so only the lyrics field needs to be checked
    if lyrics.strip():
        # Process the lyrics
        sequence = tokenizer.texts_to_sequences([lyrics])
        padded_sequence = pad_sequences(sequence, maxlen=50)  # maxlen must match the sequence length used in training
        emotion = emotion_model.predict(padded_sequence).flatten()
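        # Assumption: the LSTM returns a vector of emotion scores for the single
        # input sequence. With the label encoder used during training (hypothetical
        # 'label_encoder', not loaded in this app), a readable label could be
        # obtained via label_encoder.inverse_transform([np.argmax(emotion)])[0].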

        # Combine emotion and audio features for recommendation
        combined_features = np.concatenate([emotion, audio_features])
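        # Assumption: the KNN model was fit on vectors with this same layout
        # (emotion scores followed by the 11 audio features); a length mismatch
        # would cause kneighbors() below to raise a ValueError.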

        # Generate recommendations using the KNN model
        knn_distances, knn_indices = recommender_model.kneighbors([combined_features], n_neighbors=5)
        knn_recommended_songs = df.iloc[knn_indices.flatten()]
st.write("Emotion Detected:", emotion[0])
st.header('Recommended Songs (KNN)')
for _, song in knn_recommended_songs.iterrows():
st.write(song)
    else:
        st.error("Please enter the song lyrics before predicting.")