Update app.py
app.py CHANGED
@@ -7,21 +7,24 @@ Original file is located at
 https://colab.research.google.com/drive/148du8431_JkTaH-totdocC2aUXzOWimL
 """
 
-
+import gradio as gr
 from transformers import BertTokenizer, TFBertForSequenceClassification
 import tensorflow as tf
+from transformers import pipeline
 
 # Load tokenizer
-tokenizer = BertTokenizer.from_pretrained("nlpaueb/bert-base-greek-uncased-v1")
+#tokenizer = BertTokenizer.from_pretrained("nlpaueb/bert-base-greek-uncased-v1")
 
 # Load model
-model = TFBertForSequenceClassification.from_pretrained('https://huggingface.co/spaces/Kleo/Sarcasm/tree/main')
+#model = TFBertForSequenceClassification.from_pretrained('https://huggingface.co/spaces/Kleo/Sarcasm/tree/main')
 
+pipeline= pipeline(task="sequence-classification", model="https://huggingface.co/spaces/Kleo/Sarcasm/blob/main/fine-tuned-bert-gr.h5")
 def check_sarcasm(sentence):
-    tf_batch = tokenizer(sentence, max_length=128, padding=True, truncation=True, return_tensors='tf')
-    tf_outputs = model(tf_batch.input_ids, tf_batch.token_type_ids)
-    tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
-    pred_label = tf.argmax(tf_predictions, axis=1)
+    #tf_batch = tokenizer(sentence, max_length=128, padding=True, truncation=True, return_tensors='tf')
+    #tf_outputs = model(tf_batch.input_ids, tf_batch.token_type_ids)
+    #tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
+    #pred_label = tf.argmax(tf_predictions, axis=1)
+    pred_label=pipeline(sentence)
 
     if pred_label == 1:
         return "Sarcastic"
@@ -33,18 +36,6 @@ sentence = "Μεξικό: 25 νεκροί από την πτώση λεωφορ
 result = check_sarcasm(sentence)
 print(result)
 
-import gradio as gr
-
-def check_sarcasm(sentence):
-    tf_batch = tokenizer(sentence, max_length=128, padding=True, truncation=True, return_tensors='tf')
-    tf_outputs = model(tf_batch.input_ids, tf_batch.token_type_ids)
-    tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
-    pred_label = tf.argmax(tf_predictions, axis=1)
-
-    if pred_label == 1:
-        return "Sarcastic"
-    else:
-        return "Not sarcastic"
 
 # Create a Gradio interface
 iface = gr.Interface(
@@ -53,7 +44,7 @@ iface = gr.Interface(
     outputs="text",
     title="Sarcasm Detection",
     server_name="0.0.0.0",
-    description="Enter a headline and check if it's sarcastic."
+    description="Enter a headline from the Greek news and check if it's sarcastic."
)
 
 # Launch the interface
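A note on the new pipeline call: "sequence-classification" is not a registered transformers pipeline task (the sequence-classification pipeline is exposed as "text-classification", with "sentiment-analysis" as an alias), and the model argument expects a Hub repo id or a local path rather than a Space blob URL, so the call above would fail at startup. Reassigning the name pipeline also shadows the imported factory, and a text-classification pipeline returns a list of {"label", "score"} dicts rather than an integer, so pred_label == 1 never matches. A minimal sketch of the intended approach, assuming the fine-tuned Greek BERT is published as an ordinary model repo (the id "Kleo/sarcasm-greek-bert" below is a placeholder, not a real repo):

from transformers import pipeline

# Placeholder repo id; substitute the real Hub model id or a local directory
# that contains config.json plus the fine-tuned weights.
MODEL_ID = "Kleo/sarcasm-greek-bert"

# "text-classification" is the registered pipeline task for sequence classification.
sarcasm_classifier = pipeline(task="text-classification", model=MODEL_ID)

def check_sarcasm(sentence):
    # The pipeline returns one dict per input, e.g. [{"label": "LABEL_1", "score": 0.93}].
    # Label names depend on the model's id2label config; LABEL_1 is the default for class 1.
    prediction = sarcasm_classifier(sentence)[0]
    return "Sarcastic" if prediction["label"] == "LABEL_1" else "Not sarcastic"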
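If only the standalone fine-tuned-bert-gr.h5 file in the Space is available, neither from_pretrained nor pipeline can consume that URL directly. One hedged alternative, assuming the file holds Keras weights saved with save_weights() from a two-label TFBertForSequenceClassification head on top of nlpaueb/bert-base-greek-uncased-v1, is to download it with huggingface_hub and load the weights into a freshly built model. This also sidesteps a subtle issue in the commented-out code, where token_type_ids was passed as the model's second positional argument (that slot is attention_mask in the TF call signature):

import tensorflow as tf
from huggingface_hub import hf_hub_download
from transformers import BertTokenizer, TFBertForSequenceClassification

# Assumption: the .h5 file contains weights saved via model.save_weights()
# from the same architecture that is rebuilt below (num_labels=2 assumed).
weights_path = hf_hub_download(
    repo_id="Kleo/Sarcasm", filename="fine-tuned-bert-gr.h5", repo_type="space"
)

tokenizer = BertTokenizer.from_pretrained("nlpaueb/bert-base-greek-uncased-v1")
model = TFBertForSequenceClassification.from_pretrained(
    "nlpaueb/bert-base-greek-uncased-v1", num_labels=2
)
model.load_weights(weights_path)

def check_sarcasm(sentence):
    tf_batch = tokenizer(sentence, max_length=128, padding=True,
                         truncation=True, return_tensors="tf")
    # Passing the whole encoding lets the model pick up input_ids,
    # attention_mask and token_type_ids by name.
    tf_outputs = model(tf_batch)
    tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
    pred_label = int(tf.argmax(tf_predictions, axis=1)[0])
    return "Sarcastic" if pred_label == 1 else "Not sarcastic"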
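For the Gradio block, the diff elides the fn= and inputs= lines, so those are assumptions below, and gr.Interface does not accept a server_name keyword; the bind address is an option of launch(). A sketch consistent with the lines that are shown:

import gradio as gr

# Create a Gradio interface around check_sarcasm defined above.
# fn= and inputs= are assumed; the diff does not show those two lines.
iface = gr.Interface(
    fn=check_sarcasm,
    inputs="text",
    outputs="text",
    title="Sarcasm Detection",
    description="Enter a headline from the Greek news and check if it's sarcastic.",
)

# Launch the interface; server_name is a launch() option and binds the app
# to all network interfaces, which is what a hosted Space expects.
iface.launch(server_name="0.0.0.0")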