devaprobs committed on
Commit
2558d2c
β€’
1 Parent(s): 94cd610

create app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -0
app.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Streamlit app for Amharic hate-speech detection.

Loads a pre-trained text-classification model from the Hugging Face Hub and
classifies user-supplied Amharic text as normal text or hate speech.
"""

import streamlit as st
from transformers import pipeline

# Model repository on the Hugging Face Hub.
model_name = "devaprobs/amharic-hate-speech-detection"


@st.cache_resource
def _load_classifier():
    """Build the text-classification pipeline once and reuse it across reruns.

    Without caching, Streamlit re-executes the whole script on every user
    interaction, which would reload the full model each time.
    """
    return pipeline("text-classification", model=model_name)


classifier = _load_classifier()

# Configure the Streamlit page
st.set_page_config(page_title="Amharic Hate Speech Detector", page_icon="πŸ•΅οΈβ€β™‚οΈ", layout="centered")
st.markdown("<style> body { background-color: #f0f2f6; } </style>", unsafe_allow_html=True)

# Add a stylish header with a logo
st.markdown(
    """
    <div style="text-align:center">
        <h1 style="color:#1F618D;">Amharic Hate Speech Detector πŸ•΅οΈβ€β™‚οΈ</h1>
        <p style="font-size:20px; color:#555;">Type an Amharic sentence and let our model analyze it!</p>
    </div>
    """,
    unsafe_allow_html=True
)

# Input text box for user to enter Amharic text
user_input = st.text_area("Enter Amharic text here:", height=150, placeholder="ምሳሌ: αŠ₯αŠ“αŠ•α‰° αŠ₯αŠ•α‹°αˆ›αŠ•αˆ αŠ α‹­αˆ†αŠ‘αˆ...")

# Submit button for classification
if st.button("Analyze Text πŸš€"):
    # Reject empty and whitespace-only input (the original truthiness check
    # would send a string of spaces to the model).
    if user_input and user_input.strip():
        # Get the classification result; the pipeline returns a list of
        # {"label": ..., "score": ...} dicts, one per input.
        result = classifier(user_input)
        label = result[0]['label']
        score = result[0]['score']

        # Map the raw model label to an understandable output.
        # NOTE(review): LABEL_0 == normal is assumed from the original code;
        # verify against the model card's id2label mapping.
        if label == "LABEL_0":
            prediction = "Normal Text 🟒"
            color = "#28a745"
        else:
            prediction = "Hate Speech πŸ”΄"
            color = "#dc3545"

        # Display the result with a styled message and the confidence score.
        st.markdown(f"<h2 style='text-align: center; color: {color};'>{prediction}</h2>", unsafe_allow_html=True)
        st.write(f"Confidence: {score * 100:.2f}%")
    else:
        st.warning("Please enter some text to analyze.")