lmastm committed
Commit 4091234 · verified · 1 Parent(s): a8d5234

Create app.py

Files changed (1)
  1. app.py +20 -0
app.py ADDED
@@ -0,0 +1,20 @@
+ import streamlit as st
+ import torch
+ from transformers import pipeline
+
+ pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")
+
+ text = st.text_area("Enter a sentence !!!")
+
+ if text:
+     # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
+     messages = [
+         {
+             "role": "system",
+             "content": "You are a friendly chatbot who always responds in the style of a pirate",
+         },
+         {"role": "user", "content": text},
+     ]
+     prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     out = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+     st.json(out[0]["generated_text"])
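
A quick way to sanity-check the prompt outside Streamlit is to render the chat template directly with the tokenizer. The sketch below is not part of the commit; it assumes the same TinyLlama checkpoint and a transformers version recent enough to include chat-template support, and the example messages are illustrative only.

# Sketch only (not part of app.py): preview the string that apply_chat_template
# builds before it is passed to the text-generation pipeline.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
messages = [
    {"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate"},
    {"role": "user", "content": "Ahoy, how are you?"},  # illustrative user input
]
# add_generation_prompt=True appends the assistant turn marker so the model
# continues the conversation as the assistant.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)

The app itself can be launched locally with "streamlit run app.py", provided streamlit, torch, and transformers are installed.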