mostafaHaydar commited on
Commit
8839161
1 Parent(s): db464b9

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -0
app.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hub ID of the causal language model to serve.
# (Previously this was only embedded in an unused `pipeline(...)` call while
# `model_name` itself was never defined — a NameError on startup.)
model_name = "mostafaHaydar/model_test"

# Load the tokenizer and model once at startup (downloads from the Hub on
# first run). The redundant, never-used text-generation pipeline was removed.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Set up the Streamlit app.
st.title("Text Generation with LLaMA 3")

# Text input from the user.
prompt = st.text_area("Enter your prompt:")

# Generate text when the user clicks the button.
if st.button("Generate"):
    if prompt:
        # Tokenize the prompt and generate a continuation.
        inputs = tokenizer(prompt, return_tensors="pt")
        output = model.generate(**inputs, max_length=150)  # Adjust max_length as needed
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

        # Display the generated text.
        st.subheader("Generated Text:")
        st.write(generated_text)
    else:
        st.warning("Please enter a prompt to generate text.")