import gradio as gr
from transformers import AutoTokenizer, pipeline
|

# Load the tokenizer and build a text-generation pipeline for the merged CodeLlama model.
model_id = "nlux/CodeLlama-7b-hf_merge"
tokenizer = AutoTokenizer.from_pretrained(model_id)
pipe = pipeline("text-generation", model=model_id, tokenizer=tokenizer)


def predict(prompt):
    # Generate a completion for the prompt; sampling keeps the output varied.
    outputs = pipe(
        prompt, max_new_tokens=256, do_sample=True, temperature=0.7,
        top_k=50, top_p=0.95,
        eos_token_id=pipe.tokenizer.eos_token_id,
        pad_token_id=pipe.tokenizer.pad_token_id,
    )
    # The pipeline returns a list of dicts; take the first generation.
    output = outputs[0]["generated_text"].strip()
    print(f"Generated Answer:\n{output}")
    return output
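
# Optional local sanity check with an illustrative prompt (uncomment to run once before launching the UI):
# print(predict("def fibonacci(n):"))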

# Wrap the generator in a simple text-in / text-out Gradio interface.
iface = gr.Interface(fn=predict, inputs="text", outputs="text")
iface.launch(share=True)
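
# Minimal client-side sketch (assumes the gradio_client package is installed and the app above is
# running; the local URL and prompt are placeholders):
# from gradio_client import Client
# client = Client("http://127.0.0.1:7860")
# print(client.predict("def fibonacci(n):", api_name="/predict"))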