# -*- coding: utf-8 -*-
"""Greek-headline sarcasm detector.

Loads a fine-tuned Greek BERT sequence classifier and exposes it through a
Gradio text-in / text-out interface. Originally exported from Colab
(Application.ipynb).
"""

from transformers import BertTokenizer, TFBertForSequenceClassification
import tensorflow as tf
import gradio as gr

# Tokenizer for Greek uncased BERT (must match the checkpoint the classifier
# was fine-tuned from).
tokenizer = BertTokenizer.from_pretrained("nlpaueb/bert-base-greek-uncased-v1")

# Fine-tuned classifier. `from_pretrained` expects a Hub repo id or a local
# path, NOT a web URL — the original passed the Spaces tree URL, which fails.
# NOTE(review): "Kleo/Sarcasm" inferred from that URL — confirm the exact
# repo id of the model checkpoint.
model = TFBertForSequenceClassification.from_pretrained("Kleo/Sarcasm")


def check_sarcasm(sentence):
    """Classify a (Greek) headline as sarcastic or not.

    Parameters
    ----------
    sentence : str
        Headline text to classify.

    Returns
    -------
    str
        "Sarcastic" if the model predicts label 1, otherwise "Not sarcastic".
    """
    tf_batch = tokenizer(
        sentence,
        max_length=128,
        padding=True,
        truncation=True,
        return_tensors="tf",
    )
    # Pass inputs by keyword: the model's second positional parameter is
    # attention_mask, so the original positional call fed token_type_ids
    # into the attention mask slot.
    tf_outputs = model(
        input_ids=tf_batch["input_ids"],
        attention_mask=tf_batch["attention_mask"],
        token_type_ids=tf_batch["token_type_ids"],
    )
    tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
    # Single-sentence batch → extract the scalar label explicitly instead of
    # comparing a rank-1 tensor to an int.
    pred_label = int(tf.argmax(tf_predictions, axis=1)[0])
    return "Sarcastic" if pred_label == 1 else "Not sarcastic"


# Gradio UI. server_name is a launch() option, not an Interface() argument —
# passing it to Interface() raises TypeError on current Gradio releases.
iface = gr.Interface(
    fn=check_sarcasm,
    inputs="text",
    outputs="text",
    title="Sarcasm Detection",
    description="Enter a headline and check if it's sarcastic.",
)


if __name__ == "__main__":
    # Example usage (runs only when executed as a script, not on import).
    sentence = "Μεξικό: 25 νεκροί από την πτώση λεωφορείου στον γκρεμό"
    print(check_sarcasm(sentence))

    # Bind to all interfaces so the app is reachable inside a container/VM.
    iface.launch(server_name="0.0.0.0")