amanm10000 committed
Commit e968230
1 Parent(s): 3197fd5

Update app.py

Files changed (1)
  1. app.py +28 -8
app.py CHANGED
@@ -1,13 +1,33 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("gpt2")
-model = AutoModelForCausalLM.from_pretrained("gpt2")
+# Load the tokenizer and model
+model_name = "meta-llama/Llama-3.2-1B-Instruct"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
 
-def generate(text):
-    inputs = tokenizer.encode(text, return_tensors='pt')
-    outputs = model.generate(inputs, max_length=100, do_sample=True)
-    generated = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return generated
+# Define the generation function
+def generate_response(prompt):
+    inputs = tokenizer.encode(prompt, return_tensors="pt")
+    outputs = model.generate(
+        inputs,
+        max_length=512,
+        num_return_sequences=1,
+        do_sample=True,
+        temperature=0.7,
+    )
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response
 
-gr.Interface(fn=generate, inputs="text", outputs="text").launch()
+# Create the Gradio interface
+interface = gr.Interface(
+    fn=generate_response,
+    inputs=gr.Textbox(lines=5, placeholder="Enter your prompt here..."),
+    outputs=gr.Textbox(label="Generated Response"),
+    title="Llama-3.2-1B-Instruct Model",
+    description="A simple interface to interact with the Llama-3.2-1B-Instruct model.",
+)
+
+# Launch the app
+if __name__ == "__main__":
+    interface.launch()
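
A note on the new generation path: Llama-3.2-1B-Instruct is a chat-tuned model, so the plain tokenizer.encode(prompt, ...) call above bypasses its chat template, and max_length=512 bounds the prompt and completion together. Below is a minimal variant of generate_response that applies the chat template and limits only the newly generated tokens; it is a sketch against the model and tokenizer loaded above, not part of this commit.

def generate_response_chat(prompt):
    # Hypothetical variant, not in the commit: wrap the prompt in the
    # model's chat template and add the assistant generation prompt.
    input_ids = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        add_generation_prompt=True,
        return_tensors="pt",
    )
    outputs = model.generate(
        input_ids,
        max_new_tokens=256,  # bound only the completion, not prompt + completion
        do_sample=True,
        temperature=0.7,
    )
    # Decode only the newly generated tokens so the reply is not
    # prefixed with an echo of the prompt.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)

Slicing outputs[0] past input_ids.shape[-1] keeps the decoded text to the model's reply instead of repeating the templated prompt.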
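
Separately, meta-llama/Llama-3.2-1B-Instruct is a gated repository, so from_pretrained can only download the weights after the license has been accepted on the model page and the environment is authenticated. A sketch, assuming a valid access token is available (in a Space, the usual route is an HF_TOKEN secret, which from_pretrained picks up automatically):

from huggingface_hub import login

login(token="hf_...")  # placeholder token, shown for illustration only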