|
import os |
|
import streamlit as st |
|
import numpy as np |
|
from tensorflow.keras.models import load_model |
|
from tensorflow.keras.preprocessing.sequence import pad_sequences |
|
from tensorflow.keras.preprocessing.text import one_hot |
|
import pickle |
|
import emoji |
|
|
|
|
|
# Hide all CUDA devices so TensorFlow runs CPU-only (e.g. on a GPU-less host).
# Must be set before TensorFlow initializes its device list, which is why it
# sits at the top of the script, ahead of any model loading.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'


# Page title. NOTE(review): the trailing 'π¨' glyph looks like a mojibake'd
# emoji — confirm the intended character survives this file's encoding.
st.title('Unveiling Sentiment: A Deep Dive into Sentiment Analysis π¨')
|
|
|
|
|
# Cache heavyweight resources across Streamlit script reruns. `st.cache_resource`
# exists from Streamlit 1.18; on older versions fall back to a no-op decorator,
# which degrades gracefully to the original load-per-call behavior.
_cache_resource = getattr(st, 'cache_resource', None) or (lambda f: f)


@_cache_resource
def _load_sentiment_model(model_path):
    """Load (and cache) the trained Keras sentiment model from `model_path`."""
    return load_model(model_path)


@_cache_resource
def _load_one_hot_info(info_path):
    """Load (and cache) the pickled encoding metadata: 'vocab_size' and 'max_len'."""
    # NOTE(review): pickle on a local artifact shipped with the app — fine as
    # long as the file is trusted; never load untrusted pickles.
    with open(info_path, 'rb') as handle:
        return pickle.load(handle)


def predict_sentiment(custom_data):
    """Classify each text in `custom_data` as Positive/Neutral/Negative.

    Args:
        custom_data: iterable of raw text strings (one tweet per element).

    Returns:
        A list of `(label, emoji, probabilities)` tuples — one per input text,
        where `probabilities` maps each label to a float rounded to 4 places —
        or None if a required artifact is missing or prediction fails (the
        error is shown in the UI via `st.error`).
    """
    try:
        model_path = 'sentiment_analysis_model.h5'
        if not os.path.exists(model_path):
            st.error(f"Model file not found: {model_path}")
            return None
        # Previously the model was re-loaded from disk on every call; the
        # cached loader makes repeat predictions cheap.
        model = _load_sentiment_model(model_path)

        one_hot_info_path = 'one_hot_info_1.pkl'
        if not os.path.exists(one_hot_info_path):
            st.error(f"One-hot info file not found: {one_hot_info_path}")
            return None
        one_hot_info = _load_one_hot_info(one_hot_info_path)

        vocab_size = one_hot_info['vocab_size']
        max_len = one_hot_info['max_len']

        # Label order must match the model's output-class order.
        # NOTE(review): the emoji values all render as 'π' here — they look
        # mojibake'd; confirm the intended glyphs against the original file.
        labels_with_emojis = {
            'Positive': 'π',
            'Neutral': 'π',
            'Negative': 'π'
        }
        label_names = list(labels_with_emojis.keys())

        # Hash-encode each text with the training-time vocab size, then
        # left-pad ('pre') to the training-time sequence length.
        one_hot_texts = [one_hot(text, vocab_size) for text in custom_data]
        padded_texts = pad_sequences(one_hot_texts, padding='pre', maxlen=max_len)

        # pad_sequences already returns an ndarray — no extra np.array() copy.
        predictions = model.predict(padded_texts)

        predicted_sentiments = []
        for prediction in predictions:
            class_idx = int(np.argmax(prediction))
            sentiment_label = label_names[class_idx]
            sentiment_emoji = labels_with_emojis[sentiment_label]
            # Cast to plain float so callers/formatters see Python floats,
            # not numpy scalars (round() alone preserves np.float32).
            sentiment_probabilities = {
                label: round(float(prob), 4)
                for label, prob in zip(label_names, prediction)
            }
            predicted_sentiments.append(
                (sentiment_label, sentiment_emoji, sentiment_probabilities)
            )

        return predicted_sentiments

    except Exception as e:
        # UI boundary: surface any unexpected failure to the user rather than
        # letting the Streamlit script crash.
        st.error(f"Error during prediction: {e}")
        return None
|
|
|
|
|
# Free-form input: one or more tweets, one per line.
user_input = st.text_area("Please enter the tweet you'd like analyzed π")

if st.button('Analyze'):
    if not user_input.strip():
        # Guard clause: nothing to analyze.
        st.write("Please enter tweet(s) to analyze.")
    else:
        # Replace emoji with their text aliases so the model sees plain text,
        # then treat each line as a separate tweet.
        demojized = emoji.demojize(user_input)
        results = predict_sentiment(demojized.split('\n'))

        # predict_sentiment returns None on failure (it reports its own error).
        if results is not None:
            st.write("## Predicted Sentiments:")
            for idx, (label_text, label_emoji, probs) in enumerate(results, start=1):
                st.write(f"Tweet {idx}: {label_text} {label_emoji}")
                st.write("Probabilities:")
                for label, prob in probs.items():
                    st.write(f"{label}: {prob:.4f}")


# Decorative effects fired on every script rerun.
st.snow()
st.balloons()
|
|