Omnibus committed
Commit: eafe5d9
Parent: 0a21ad9

Create app.py

Files changed (1)
  app.py  +54 -0
app.py ADDED
@@ -0,0 +1,54 @@
import gradio as gr
import transformers
import torch


def fmt_prompt(prompt: str) -> str:
    # Wrap the raw prompt in the instruction/response template the SFT model expects.
    return f"""[Instructions]:\n{prompt}\n\n[Response]:"""


model_name = "abacaj/starcoderbase-1b-sft"
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)

# Load the model on GPU when available, otherwise fall back to CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model = (
    transformers.AutoModelForCausalLM.from_pretrained(
        model_name,
    )
    .to(device)
    .eval()
)


def chat_fn(prompt):
    # Example prompt: "Write a python function to sort the following array in
    # ascending order, don't use any built in sorting methods: [9,2,8,1,5]"
    prompt_input = fmt_prompt(prompt)
    inputs = tokenizer(prompt_input, return_tensors="pt").to(model.device)
    # Remember where the prompt ends so only newly generated tokens are decoded.
    input_ids_cutoff = inputs.input_ids.size(dim=1)

    with torch.no_grad():
        generated_ids = model.generate(
            **inputs,
            use_cache=True,
            max_new_tokens=512,
            temperature=0.2,
            top_p=0.95,
            do_sample=True,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
        )

    # Decode only the newly generated tokens, dropping the echoed prompt and special tokens.
    completion = tokenizer.decode(
        generated_ids[0][input_ids_cutoff:],
        skip_special_tokens=True,
    )

    print(completion)
    return completion


with gr.Blocks() as app:
    inp = gr.Textbox(label="Prompt")
    outp = gr.Textbox(label="Completion")
    btn = gr.Button("Generate")

    btn.click(chat_fn, inp, outp)

app.launch()