# space/app.py — Streamlit text-generation demo (Hugging Face Space)
# Author: mostafaHaydar (commit 8839161)
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face Hub id of the causal-LM checkpoint used by this demo.
MODEL_NAME = "mostafaHaydar/model_test"


@st.cache_resource
def load_model():
    """Load the tokenizer and model once.

    Decorated with ``st.cache_resource`` so Streamlit reuses the loaded
    objects across script reruns instead of re-downloading/re-loading
    the checkpoint on every user interaction.

    Returns:
        tuple: (tokenizer, model) for ``MODEL_NAME``.
    """
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    return tokenizer, model


# Module-level names the rest of the script uses.
tokenizer, model = load_model()
# Set up the Streamlit app page.
st.title("Text Generation with LLaMA 3")

# Text input from the user.
prompt = st.text_area("Enter your prompt:")

# Generate text only when the user clicks the button.
if st.button("Generate"):
    if prompt:
        # Tokenize the prompt and generate a continuation
        # (greedy decoding, capped at 150 tokens total).
        inputs = tokenizer(prompt, return_tensors="pt")
        output = model.generate(**inputs, max_length=150)  # Adjust max_length as needed
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

        # Display the generated text.
        st.subheader("Generated Text:")
        st.write(generated_text)
    else:
        # Empty prompt: ask the user for input instead of calling the model.
        st.warning("Please enter a prompt to generate text.")