import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Two-stage text classification pipeline:
#   Stage 1: the first model acts as a binary gate (presumably "relevant to
#            environment" vs. not — inferred from the final message; confirm
#            against the model card).
#   Stage 2: only when stage 1 predicts class 1, the second model (a RoBERTa
#            checkpoint fine-tuned for SDG classification, per its repo name)
#            labels the text.

def _predict_class(tokenizer, model, text):
    """Return the argmax class index predicted by `model` for `text`.

    Tokenizes with truncation to the model's max length, runs a single
    forward pass, and reduces the logits to an int class id.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    # Inference only: disable autograd so no computation graph is built,
    # saving memory and time on the forward pass.
    with torch.no_grad():
        logits = model(**inputs).logits
    return torch.argmax(logits, dim=1).item()

# Load the first (gating) model.
tokenizer1 = AutoTokenizer.from_pretrained("Emma0123/fine_tuned_model")
model1 = AutoModelForSequenceClassification.from_pretrained("Emma0123/fine_tuned_model")

# Load the second (SDG topic) model.
tokenizer2 = AutoTokenizer.from_pretrained("jonas/roberta-base-finetuned-sdg")
model2 = AutoModelForSequenceClassification.from_pretrained("jonas/roberta-base-finetuned-sdg")

# Read the text to classify from stdin.
input_text = input()

# Stage 1: run the gating model.
predictions = _predict_class(tokenizer1, model1, input_text)

# Stage 2: only invoke the second model when the gate fires (class 1).
if predictions == 1:
    predictions2 = _predict_class(tokenizer2, model2, input_text)
    print("Second model prediction:", predictions2)
else:
    print("This content is unrelated to Environment.")