"""Gradio demo for Phi-2 fine-tuned with QLoRA on the OpenAssistant dataset.

Requires: transformers, peft, gradio (and a checkpoint folder containing the
trained LoRA adapter, here assumed to be ./ckpts).
"""
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "microsoft/phi-2"

# Load the base model; trust_remote_code is required because Phi-2 ships
# custom modeling code on the Hub.
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)

# The KV cache is disabled during QLoRA training; re-enable it for inference
# so generation is not needlessly slow.
model.config.use_cache = True

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Phi-2's tokenizer defines no pad token, so reuse the EOS token.
tokenizer.pad_token = tokenizer.eos_token

# Attach the QLoRA adapter weights saved during fine-tuning (requires peft).
peft_model_folder = "./ckpts"
model.load_adapter(peft_model_folder)

# Build the generation pipeline once at startup instead of on every request.
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer)


def generate_text(input_text, max_new_tokens):
    # Wrap the question in the same [INST] ... [/INST] template used during
    # fine-tuning. max_new_tokens (rather than max_length) keeps the prompt
    # from eating into the generation budget.
    result = pipe(f"[INST] {input_text} [/INST]", max_new_tokens=max_new_tokens)
    # generated_text contains the prompt followed by the model's answer.
    return result[0]["generated_text"]


# Create a Gradio interface.
title = "Phi2-QLoRA"
description = (
    "A simple Gradio interface demoing the Phi-2 model fine-tuned on the "
    "OpenAssistant dataset with QLoRA."
)
examples = [
    ["What is a Large Language Model?"],
    ["Why is Python the most popular language?"],
    ["How do you cook rice?"],
]

demo = gr.Interface(
    generate_text,
    inputs=[
        gr.TextArea(label="Enter Question"),
        gr.Slider(1, 200, value=10, step=1, label="Max New Tokens"),
    ],
    outputs=[
        gr.Textbox(label="Response from Phi2 Model:"),
    ],
    title=title,
    description=description,
    examples=examples,
    cache_examples=False,
)

demo.launch()
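
# Usage (a minimal sketch; the filename app.py and the ./ckpts adapter path
# are assumptions, not fixed by this script):
#
#   pip install transformers peft gradio
#   python app.py
#
# Gradio then serves the demo locally, by default at http://127.0.0.1:7860.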