# HuggingFace Space: SST-2 sentiment-analysis chatbot — fine-tunes
# bert-base-uncased on GLUE SST-2 and serves it via a Gradio ChatInterface.
import datasets
from datasets import load_dataset

import gradio as gr

import transformers
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
    pipeline,
)
# Binary sentiment classifier: 2 classes (0 = negative, 1 = positive).
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# Load the full GLUE SST-2 DatasetDict ONCE (train / validation / test splits).
# The original loaded the dataset twice and left `encoded_dataset` — which the
# Trainer below consumes — undefined (NameError).
encoded_dataset = load_dataset("glue", "sst2")


def encode(examples):
    """Tokenize a batch of SST-2 examples.

    SST-2 is a single-sentence task: each example has one "sentence" column
    (there is no sentence pair, so no second argument to the tokenizer).
    """
    return tokenizer(examples["sentence"], truncation=True, padding="max_length")


encoded_dataset = encoded_dataset.map(encode, batched=True)
# Trainer expects the target column to be named "labels".
encoded_dataset = encoded_dataset.map(lambda examples: {"labels": examples["label"]}, batched=True)
# Training configuration: 3 epochs, batch size 8 per device, evaluate once
# per epoch.
training_args = TrainingArguments(
    output_dir="./results",
    logging_dir="./logs",
    per_device_train_batch_size=8,
    num_train_epochs=3,
    # NOTE(review): `evaluation_strategy` was renamed to `eval_strategy` and
    # the old name is rejected by recent transformers releases.
    eval_strategy="epoch",
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=encoded_dataset["train"],
    # GLUE SST-2's "test" split is unlabeled (label = -1), so evaluation must
    # run on the labeled "validation" split.
    eval_dataset=encoded_dataset["validation"],
)
import os

# Fine-tune only on the first run; afterwards reuse the checkpoint on disk so
# restarts of the Space skip training entirely.
if not os.path.exists("./fine_tuned_model"):
    trainer.train()
    # Save the fine-tuned model and tokenizer for subsequent runs.
    model.save_pretrained("./fine_tuned_model")
    tokenizer.save_pretrained("./fine_tuned_model")
else:
    # Reload the fine-tuned model. Use the Auto* classes imported at the top
    # of the file — the original called BertForSequenceClassification /
    # BertTokenizer, which were never imported (NameError on warm start).
    model = AutoModelForSequenceClassification.from_pretrained("./fine_tuned_model")
    tokenizer = AutoTokenizer.from_pretrained("./fine_tuned_model")
# Inference pipeline over the (freshly trained or reloaded) model.
sentiment_analysis = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)


def generate_response(message, history=None):
    """Classify a chat message and report the predicted label and score.

    gr.ChatInterface invokes its fn as fn(message, history); the original
    single-parameter signature raised a TypeError on every chat turn. The
    conversation history is accepted but unused — each message is classified
    independently.
    """
    result = sentiment_analysis(message)[0]
    return f"Label: {result['label']}, Score: {result['score']}"


gr.ChatInterface(fn=generate_response).launch()