# Backed4 / app.py
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the model and tokenizer
model_name = "cognitivecomputations/dolphin-2.9.3-mistral-nemo-12b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
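
# Note: a 12B-parameter model loaded in full float32 needs far more RAM than a
# typical CPU Space provides. A common alternative (an assumption here, not part
# of the original app) is to load in a reduced dtype and let accelerate place the
# weights automatically:
# model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")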
# Define the prediction function
def predict(input_text):
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(**inputs)
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text
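
# generate() with no arguments stops at the model's default max_length, which is
# quite short. Illustrative sampling settings (assumed values, not from the
# original app) for longer output would look like:
# outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7)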
# Create the Gradio interface
iface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=2, placeholder="Enter your text here..."),
    outputs="text",
    title="Dolphin-2.9.3-Mistral-Nemo-12b Text Generation",
    description="Generate text using the Dolphin-2.9.3-Mistral-Nemo-12b model from Hugging Face."
)
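
# A 12B model can take several seconds per request; enabling Gradio's request
# queue (optional, assuming a recent Gradio release) helps avoid client timeouts:
# iface.queue()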
# Launch the interface
iface.launch()