|
|
|
|
|
|
|
|
|
import torch |
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification |
|
|
|
|
|
# Two-stage classification pipeline:
#   stage 1: a fine-tuned binary classifier decides whether the input text
#            is environment-related,
#   stage 2: an SDG (Sustainable Development Goal) classifier labels the
#            texts that pass stage 1.

# Stage-1 model: binary relevance classifier.
tokenizer1 = AutoTokenizer.from_pretrained("Emma0123/fine_tuned_model")
model1 = AutoModelForSequenceClassification.from_pretrained("Emma0123/fine_tuned_model")
model1.eval()  # disable dropout etc. — we only run inference below

# Stage-2 model: RoBERTa fine-tuned for SDG classification.
tokenizer2 = AutoTokenizer.from_pretrained("jonas/roberta-base-finetuned-sdg")
model2 = AutoModelForSequenceClassification.from_pretrained("jonas/roberta-base-finetuned-sdg")
model2.eval()

# Text to classify, read from stdin.
input_text = input()

# Inference only: skip autograd bookkeeping to save time and memory.
with torch.no_grad():
    inputs = tokenizer1(input_text, return_tensors="pt", truncation=True)
    outputs = model1(**inputs)
    # argmax over the class dimension; .item() converts the 0-d tensor
    # to a plain Python int.
    predictions = torch.argmax(outputs.logits, dim=1).item()

    # NOTE(review): label 1 is assumed to mean "environment-related" —
    # confirm against the stage-1 model's id2label mapping.
    if predictions == 1:
        inputs2 = tokenizer2(input_text, return_tensors="pt", truncation=True)
        outputs2 = model2(**inputs2)
        predictions2 = torch.argmax(outputs2.logits, dim=1).item()
        print("Second model prediction:", predictions2)
    else:
        print("This content is unrelated to Environment.")