import streamlit as st
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
import joblib
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
# Load the emotion prediction model
emotion_model = load_model('lstm_model.h5')
# Load the tokenizer (ensure it's the one used during training)
tokenizer = joblib.load('tokenizer.pkl')
# Load the dataset and drop columns that are not used as features
df = pd.read_csv('df1.csv')
df = df.drop(['Unnamed: 0', 'lyrics_filename', 'analysis_url', 'track_href', 'type', 'id', 'uri'], axis=1)
# Load the serialized content-based recommendation function.
# joblib.load returns the callable itself, so it is called directly later on
# rather than as an attribute of a module.
recommend_cont = joblib.load('recommendation_cont_function.joblib')
# Load the serialized hybrid recommendation function
hybrid_recommendation = joblib.load('hybrid_recommendation_function.joblib')
# Preprocess features for the content-based recommender
audio_features = df[['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',
                     'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo',
                     'duration_ms', 'time_signature']]
mood_cats = df[['mood_cats']]
# Standardize the audio features so no single feature dominates the similarity
scaler = StandardScaler()
audio_features_scaled = scaler.fit_transform(audio_features)
audio_features_df = pd.DataFrame(audio_features_scaled, columns=audio_features.columns, index=df.index)
# mood_cats is already a DataFrame, so it can be concatenated directly
combined_features_content = pd.concat([mood_cats, audio_features_df], axis=1)
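# NOTE: the serialized recommend_cont function loaded above is opaque here. The
# sketch below is a hypothetical illustration (not the original implementation)
# of how a content-based recommender could rank tracks: one-hot encode the
# categorical mood_cats column, join it with the scaled audio features, and
# score every track by cosine similarity to the query track. The function is
# only defined, not called, so it does not change the app's behavior.
from sklearn.metrics.pairwise import cosine_similarity

def recommend_cont_sketch(song_index, top_n=5):
    # Build a fully numeric feature matrix (cosine similarity needs numbers)
    numeric_features = pd.concat(
        [pd.get_dummies(df['mood_cats'], prefix='mood'), audio_features_df], axis=1
    )
    # Similarity of the query track to every track in the dataset
    similarities = cosine_similarity(
        numeric_features.iloc[[song_index]], numeric_features
    ).flatten()
    # Rank by similarity, excluding the query track itself
    ranked = [i for i in np.argsort(similarities)[::-1] if i != song_index][:top_n]
    return df.iloc[ranked]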
# Set up the title of the app
st.title('Emotion and Audio Feature-based Song Recommendation System')
# Use the first track in the dataset (index 0) as the query song
query_data = df.iloc[0]
# Tokenize and pad the query song's lyrics (maxlen must match the value used in training)
sequence = tokenizer.texts_to_sequences([query_data['lyrics']])
padded_sequence = pad_sequences(sequence, maxlen=50)
# Predict emotion class probabilities from the lyrics
emotion = emotion_model.predict(padded_sequence).flatten()
# Combine the emotion probabilities with the query song's raw audio features.
# Note: this combined vector is built here, but the serialized hybrid_recommendation
# function below is only given the song index, so it presumably rebuilds a similar
# representation internally.
combined_features_hybrid = np.concatenate([emotion, query_data[audio_features.columns].values])
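# NOTE: hybrid_recommendation is loaded from joblib and its internals are not
# shown in this file. As a hypothetical illustration only, a hybrid recommender
# could build the same emotion-plus-audio representation for every track and
# rank tracks by cosine similarity to the query vector assembled above
# (e.g. hybrid_recommendation_sketch(combined_features_hybrid)). It assumes the
# dataset has a 'lyrics' column, as used for the query song above.
from sklearn.metrics.pairwise import cosine_similarity

def hybrid_recommendation_sketch(query_vector, top_n=5, song_index=0):
    # Emotion probabilities for every track's lyrics
    all_sequences = pad_sequences(tokenizer.texts_to_sequences(df['lyrics']), maxlen=50)
    all_emotions = emotion_model.predict(all_sequences)
    # Stack each track's emotion probabilities with its raw audio features,
    # mirroring how the query vector was built
    all_vectors = np.hstack([all_emotions, df[audio_features.columns].values])
    similarities = cosine_similarity(query_vector.reshape(1, -1), all_vectors).flatten()
    ranked = [i for i in np.argsort(similarities)[::-1] if i != song_index][:top_n]
    return df.iloc[ranked]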
# Generate recommendations from both serialized functions
content_recs = recommend_cont(song_index=0)
hybrid_recs = hybrid_recommendation(song_index=0)
# emotion holds class probabilities; emotion[0] would only be the probability of the
# first class. Without the label mapping from training, report the most likely class index.
st.write('Emotion Detected (most likely class index):', int(np.argmax(emotion)))
st.header('Recommended Songs (Content-Based)')
st.write(content_recs)
st.header('Recommended Songs (Hybrid)')
st.write(hybrid_recs)
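# To run the app locally (assuming this file is saved as app.py, the default
# entry point for a Streamlit Space):
#   streamlit run app.py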