from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import gradio as gr

# Load Mistral-7B-Instruct in half precision; device_map="auto" places the
# model on the available GPU(s) automatically.
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.1",
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")


def information_or_intent(message):
    # Ask the model to label the message with one of three categories and to
    # justify the label in Spanish.
    text = (
        "You are an assistant helping people with suicidal thoughts and their "
        "family and friends. Given a sentence, classify it into one of three "
        "categories: suicide intent, if the sentence belongs to a person who "
        "has suicidal intent or suicidal thoughts; information, if the "
        "sentence belongs to a person looking for information about suicide "
        "or concerned about a relative; or depression, if the sentence "
        "belongs to a person who is depressed or has negative thoughts. "
        f"Classify the sentence with just one word: {message}. Then give an "
        "explanation in Spanish."
    )
    messages = [{"role": "user", "content": text}]
    encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt")
    model_inputs = encodeds.to(model.device)
    generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True)
    decoded = tokenizer.batch_decode(generated_ids)
    # Keep only the model's answer, i.e. everything after the last [/INST] tag.
    reply = decoded[0]
    return reply[reply.rfind("[/INST]") + len("[/INST]"):].strip()


# Example inputs (Spanish): "I don't want to go on living, what can I do?" and
# "How can I help a friend who wants to take his own life?"
example1 = "No quiero seguir viviendo ¿qué puedo hacer?"
example2 = "¿Cómo puedo ayudar a un amigo que quiere quitarse la vida?"

iface = gr.Interface(
    fn=information_or_intent,
    inputs="text",
    outputs="text",
    examples=[example1, example2],
)
iface.launch(share=False)
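
# A quick way to smoke-test the classifier without the browser UI (a sketch,
# not part of the original script): comment out the launch() call above and
# call the function directly. Because do_sample=True, the exact wording of
# the Spanish explanation will vary between runs.
#
#   print(information_or_intent(example2))
#   # expected shape of the reply: a one-word category (e.g. "information")
#   # followed by a short explanation in Spanish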