devaprobs committed on
Commit
843eeea
β€’
1 Parent(s): ed287a7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -26
app.py CHANGED
@@ -8,21 +8,9 @@ classifier = pipeline("text-classification", model=model_name)
8
  # Configure the Streamlit page
9
  st.set_page_config(page_title="Amharic Hate Speech Detector", page_icon="πŸ•΅οΈβ€β™‚οΈ", layout="centered")
10
 
11
- # Add CSS to manage the background color change
12
- def set_background(color):
13
- st.markdown(
14
- f"""
15
- <style>
16
- body {{
17
- background-color: {color};
18
- }}
19
- </style>
20
- """,
21
- unsafe_allow_html=True
22
- )
23
-
24
  # Set default background color
25
  default_bg_color = "#f0f2f6"
 
26
 
27
  # Add a stylish header with a logo
28
  st.markdown(
@@ -50,25 +38,26 @@ if st.button("Analyze Text πŸš€"):
50
  if label == "LABEL_0":
51
  prediction = "Normal Text 🟒"
52
  color = "#28a745"
53
- set_background(default_bg_color) # Reset background for normal text
54
  else:
55
  prediction = "Hate Speech Detected πŸ”΄"
56
  color = "#dc3545"
57
- set_background("#FFBABA") # Set red background for hate speech
58
 
59
- # Play alarm sound and display warning
60
  st.warning("⚠️ Warning: Hate Speech Detected! ⚠️")
61
- st.markdown(
62
- """
63
- <audio autoplay="true" loop="false">
64
- <source src="beep-warning-6387.mp3" type="audio/mp3">
65
- Your browser does not support the audio element.
66
- </audio>
67
- """,
68
- unsafe_allow_html=True
69
- )
 
 
70
 
71
  # Display the result with styled message
72
  st.markdown(f"<h2 style='text-align: center; color: {color};'>{prediction}</h2>", unsafe_allow_html=True)
73
  else:
74
- st.warning("Please enter some text to analyze.")
 
8
  # Configure the Streamlit page
9
  st.set_page_config(page_title="Amharic Hate Speech Detector", page_icon="πŸ•΅οΈβ€β™‚οΈ", layout="centered")
10
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  # Set default background color
12
  default_bg_color = "#f0f2f6"
13
+ bg_color = default_bg_color
14
 
15
  # Add a stylish header with a logo
16
  st.markdown(
 
38
  if label == "LABEL_0":
39
  prediction = "Normal Text 🟒"
40
  color = "#28a745"
 
41
  else:
42
  prediction = "Hate Speech Detected πŸ”΄"
43
  color = "#dc3545"
44
+ bg_color = "#FFBABA" # Update background to a red color for hate speech
45
 
46
+ # Display warning
47
  st.warning("⚠️ Warning: Hate Speech Detected! ⚠️")
48
+
49
+ # Apply the background color dynamically
50
+ st.markdown(
51
+ f"""
52
+ <style>
53
+ body {{ background-color: {bg_color}; }}
54
+ .stAlert {{ text-align: center; }}
55
+ </style>
56
+ """,
57
+ unsafe_allow_html=True
58
+ )
59
 
60
  # Display the result with styled message
61
  st.markdown(f"<h2 style='text-align: center; color: {color};'>{prediction}</h2>", unsafe_allow_html=True)
62
  else:
63
+ st.warning("Please enter some text to analyze.")