import streamlit as st
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
import joblib
import pandas as pd
import numpy as np

# Check that scikit-learn is available (required to unpickle the KNN model)
try:
    import sklearn
    st.write("scikit-learn is installed.")
except ImportError:
    st.error("scikit-learn is not installed.")

# Load your emotion prediction model
emotion_model = load_model('lstm_model.h5')
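# Assumption: lstm_model.h5 is a Keras text classifier that takes padded token sequences
# of length 50 (see maxlen below) and outputs a vector of emotion scores.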

# Load the KNN recommender model
try:
    recommender_model = joblib.load('knn_model.pkl')
except Exception as e:
    st.error(f"Error loading KNN model: {e}")
    st.stop()  # Halt the app here; the recommender is required further down

# Load the tokenizer (must be the same tokenizer fitted on the training lyrics)
tokenizer = joblib.load('tokenizer.pkl')

# Load the dataset and preprocess
df = pd.read_csv('df1.csv')
df = df.drop(['Unnamed: 0', 'lyrics_filename', 'analysis_url', 'track_href', "type", "id", "uri", 'mood'], axis=1)
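# NOTE: df rows are assumed to be in the same order as the samples the KNN model was
# fitted on, so the indices returned by kneighbors() below map directly via df.iloc.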

# Set up the title of the app
st.title('Emotion and Audio Feature-based Song Recommendation System')

# Input field for lyrics
st.header('Enter Song Lyrics')
lyrics = st.text_area("Input the lyrics of the song here:")

# Input fields for audio features
st.header('Enter Audio Features')
audio_features = []

# Spotify-style audio feature columns collected from the user
audio_feature_columns = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness',
                         'instrumentalness', 'liveness', 'valence', 'tempo']
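# NOTE: the order of these features is assumed to match the column order (after the
# emotion scores) that the KNN model was trained on.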

for feature_name in audio_feature_columns:
    feature = st.number_input(f"Enter value for {feature_name}:", step=0.01)
    audio_features.append(feature)

# Predict and Recommend button
if st.button('Predict Emotion and Recommend Songs'):
    # st.number_input always returns a value (and 0 is valid for features such as key or mode),
    # so only the lyrics field needs to be validated here.
    if lyrics.strip():
        # Process the lyrics
        sequence = tokenizer.texts_to_sequences([lyrics])
        padded_sequence = pad_sequences(sequence, maxlen=50)  # maxlen must match the sequence length used during training
        emotion = emotion_model.predict(padded_sequence).flatten()

        # Combine emotion and audio features for recommendation
        combined_features = np.concatenate([emotion, audio_features])
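        # combined_features = [emotion scores..., 11 audio features]; its length must equal
        # the n_features the KNN model was fitted with (e.g. 4 emotion classes -> 15 values).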

        # Generate recommendations using the KNN model
        knn_distances, knn_indices = recommender_model.kneighbors([combined_features], n_neighbors=5)
        knn_recommended_songs = df.iloc[knn_indices.flatten()]

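        # emotion is the raw score vector, so emotion[0] shows only the first score;
        # for a multi-class model, mapping np.argmax(emotion) to a label name would be clearer.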
        st.write("Emotion Detected:", emotion[0])
        st.header('Recommended Songs (KNN)')
        for _, song in knn_recommended_songs.iterrows():
            st.write(song)

    else:
        st.error("Please enter the song lyrics before predicting.")