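"""Gradio chat demo for the EvaByte-SFT byte-level language model.

Loads evabyte/EvaByte-SFT with trust_remote_code and serves a streaming
gr.ChatInterface; generation parameters are exposed as additional inputs.
"""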
import os
import time
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

HF_TOKEN = os.environ.get("HF_TOKEN", None)
MODEL = "evabyte/EvaByte-SFT"
MODEL_BASE = "evabyte/EvaByte"
TITLE = "<h1><center>EvaByte</center></h1>"
PLACEHOLDER = """
<center>
<p>Hi! How can I help you today?</p>
</center>
"""
CSS = """
.duplicate-button {
margin: auto !important;
color: white !important;
background: black !important;
border-radius: 100vh !important;
}
h3 {
text-align: center;
}
"""
device = "cuda" # for GPU usage or "cpu" for CPU usage
tokenizer = AutoTokenizer.from_pretrained(MODEL, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
MODEL,
torch_dtype=torch.bfloat16,
device_map="auto",
trust_remote_code=True).eval().to(device)

@spaces.GPU()
def stream_chat(
    message: str,
    history: list,
    system_prompt: str,
    temperature: float = 0.8,
    max_new_tokens: int = 512,
    top_p: float = 1.0,
):
    print(f'message: {message}')
    print(f'history: {history}')

    # Rebuild the full conversation: system prompt, prior turns, then the new message.
    conversation = [
        {"role": "system", "content": system_prompt}
    ]
    for prompt, answer in history:
        conversation.extend([
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": answer},
        ])
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    ).to(device)

    # EvaByte's custom multi-byte decoding; greedy when temperature is 0.
    gen_out = model.multi_byte_generate(
        input_ids=input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=temperature > 0,
        top_p=top_p,
        temperature=temperature,
    )
    response = tokenizer.decode(
        gen_out[0][input_ids.shape[1]:],
        skip_special_tokens=False,
        clean_up_tokenization_spaces=False,
    )

    # Simulate streaming by yielding the decoded text one character at a time.
    for i in range(len(response)):
        time.sleep(0.02)
        yield response[: i + 1]
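
# Gradio UI: a ChatInterface backed by stream_chat, with the system prompt and
# sampling parameters exposed in a collapsible "Parameters" accordion.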
chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)
with gr.Blocks(css=CSS, theme="soft") as demo:
    gr.HTML(TITLE)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
    gr.ChatInterface(
        fn=stream_chat,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Textbox(
                value="You are a helpful assistant.",
                label="System Prompt",
                lines=5,
                render=False,
            ),
            gr.Slider(
                minimum=0,
                maximum=1,
                step=0.1,
                value=0.8,
                label="Temperature",
                render=False,
            ),
            gr.Slider(
                minimum=128,
                maximum=8192,
                step=1,
                value=512,
                label="Max new tokens",
                render=False,
            ),
            gr.Slider(
                minimum=0.0,
                maximum=1.0,
                step=0.1,
                value=1.0,
                label="top_p",
                render=False,
            ),
        ],
        examples=[
            ["Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."],
            ["What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."],
            ["Tell me a random fun fact about the Roman Empire."],
            ["Show me a code snippet of a website's sticky header in CSS and JavaScript."],
        ],
        cache_examples=False,
    )

if __name__ == "__main__":
    demo.launch()