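# app.py: Gradio chat demo for the abacaj/starcoderbase-1b-sft model, served on CPU.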
import gradio as gr
import transformers
import torch
import time
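

# Wrap the user message in the [Instructions] / [Response] template used by this
# demo (presumably the prompt format the SFT checkpoint was trained with).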
def fmt_prompt(prompt: str) -> str:
    return f"""[Instructions]:\n{prompt}\n\n[Response]:"""
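

# Run on CPU by default; the commented-out line below targets the first CUDA GPU instead.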
# device = "cuda:0"
device = "cpu"

model_name = "abacaj/starcoderbase-1b-sft"
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
model = (
    transformers.AutoModelForCausalLM.from_pretrained(
        model_name,
    )
    .to(device)
    .eval()
)
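

# Chat callback: format the message, run generation, and append the
# (user, assistant) pair to the Gradio chat history; returning "" clears the textbox.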
def respond(message, chat_history):
    # prompt = "Write a python function to sort the following array in ascending order, don't use any built in sorting methods: [9,2,8,1,5]"
    prompt_input = fmt_prompt(message)
    inputs = tokenizer(prompt_input, return_tensors="pt").to(model.device)
    input_ids_cutoff = inputs.input_ids.size(dim=1)
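    # Sample up to 1024 new tokens with low temperature and nucleus sampling;
    # gradients are disabled since this is inference only.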
    with torch.no_grad():
        generated_ids = model.generate(
            **inputs,
            use_cache=True,
            max_new_tokens=1024,
            temperature=0.2,
            top_p=0.95,
            do_sample=True,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
        )
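    # Decode only the newly generated tokens (everything after the prompt).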
    completion = tokenizer.decode(
        generated_ids[0][input_ids_cutoff:],
        skip_special_tokens=True,
    )
    chat_history.append((message, completion))
    time.sleep(2)
    return "", chat_history
with gr.Blocks() as app:
    gr.Markdown(
        """<h1 style="text-align: center;">Starcoder 1b-sft Demo</h1><br>"""
        """<h3 style="text-align: center;"><a href="https://huggingface.co/abacaj/starcoderbase-1b-sft">https://huggingface.co/abacaj/starcoderbase-1b-sft</a></h3>"""
    )
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Input")
    with gr.Row():
        sub_btn = gr.Button("Submit")
        clear = gr.ClearButton([msg, chatbot])
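    # Both the Submit button and pressing Enter in the textbox call respond().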
    sub_btn.click(respond, [msg, chatbot], [msg, chatbot])
    msg.submit(respond, [msg, chatbot], [msg, chatbot])

app.launch()