import gradio as gr
import transformers
import torch


def fmt_prompt(prompt: str) -> str:
    """Wrap a raw user prompt in the instruction template the SFT model was trained on."""
    return f"""[Instructions]:\n{prompt}\n\n[Response]:"""


model_name = "abacaj/starcoderbase-1b-sft"

tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)

# Fall back to CPU when no GPU is present so the demo still runs (slowly).
device = "cuda:0" if torch.cuda.is_available() else "cpu"

model = (
    transformers.AutoModelForCausalLM.from_pretrained(
        model_name,
    )
    .to(device)
    .eval()
)


def chat_fn(prompt: str) -> str:
    """Generate a completion for *prompt* and return only the newly generated text.

    The prompt is wrapped in the model's instruction template, tokenized, and
    passed to ``model.generate``; tokens belonging to the input are sliced off
    before decoding so the caller sees just the model's response.
    """
    prompt_input = fmt_prompt(prompt)
    inputs = tokenizer(prompt_input, return_tensors="pt").to(model.device)
    # Length of the encoded prompt — used below to strip it from the output.
    input_ids_cutoff = inputs.input_ids.size(dim=1)

    # StarCoder tokenizers often define no pad token; generate() then receives
    # pad_token_id=None and complains. Use EOS as the padding token instead.
    pad_id = (
        tokenizer.pad_token_id
        if tokenizer.pad_token_id is not None
        else tokenizer.eos_token_id
    )

    with torch.no_grad():
        generated_ids = model.generate(
            **inputs,
            use_cache=True,
            max_new_tokens=512,
            temperature=0.2,
            top_p=0.95,
            do_sample=True,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=pad_id,
        )

    # Decode only the tokens produced after the prompt.
    completion = tokenizer.decode(
        generated_ids[0][input_ids_cutoff:],
        skip_special_tokens=True,
    )

    print(completion)
    return completion


with gr.Blocks() as app:
    inp = gr.Textbox()
    outp = gr.Textbox()
    btn = gr.Button()
    btn.click(chat_fn, inp, outp)

app.launch()