import gradio as gr
import os, gc, copy, torch
from datetime import datetime
from huggingface_hub import hf_hub_download
from pynvml import *
nvmlInit()
gpu_h = nvmlDeviceGetHandleByIndex(0)
ctx_limit = 1024
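# Prompts are truncated to the last ctx_limit tokens before being fed to the model (the demo is limited to ctxlen 1024).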
title = "RWKV-4-Raven-7B-v9-Eng99%-Other1%-20230412-ctx8192"
os.environ["RWKV_JIT_ON"] = '1'
os.environ["RWKV_CUDA_ON"] = '1' # if '1' then use CUDA kernel for seq mode (much faster)
from rwkv.model import RWKV
model_path = hf_hub_download(repo_id="BlinkDL/rwkv-4-raven", filename=f"{title}.pth")
model = RWKV(model=model_path, strategy='cuda fp16i8 *8 -> cuda fp16')
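# Strategy note (per the rwkv package's strategy syntax): 'cuda fp16i8 *8 -> cuda fp16' runs the first 8 layers
# int8-quantized on the GPU and the remaining layers in fp16, trading a little speed for lower VRAM use.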
from rwkv.utils import PIPELINE, PIPELINE_ARGS
pipeline = PIPELINE(model, "20B_tokenizer.json")
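# PIPELINE pairs the model with the 20B_tokenizer.json vocabulary (assumed here to be the GPT-NeoX-style
# tokenizer used by the RWKV-4 Raven models) and provides encode/decode plus sample_logits helpers.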
def generate_prompt(instruction, input=None):
    if input:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

# Instruction:
{instruction}

# Input:
{input}

# Response:
"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

# Instruction:
{instruction}

# Response:
"""
def evaluate(
    instruction,
    input=None,
    token_count=200,
    temperature=1.0,
    top_p=0.7,
    presencePenalty=0.1,
    countPenalty=0.1,
):
    args = PIPELINE_ARGS(temperature=max(0.2, float(temperature)), top_p=float(top_p),
                         alpha_frequency=countPenalty,
                         alpha_presence=presencePenalty,
                         token_ban=[],    # ban the generation of some tokens
                         token_stop=[0])  # stop generation whenever you see any token here
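    # alpha_presence / alpha_frequency implement a simple repetition penalty: every token that has already
    # been generated gets its logit lowered before sampling (see the occurrence loop below).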
    instruction = instruction.strip()
    input = input.strip() if input else ''
    ctx = generate_prompt(instruction, input)
    gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
    print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')

    all_tokens = []
    out_last = 0
    out_str = ''
    occurrence = {}
    state = None
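    # Generation loop: feed the full (truncated) prompt on the first step, then one sampled token at a time.
    # Text is only emitted once the pending tokens decode cleanly (no U+FFFD replacement character), so
    # multi-byte characters are never shown half-finished.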
    for i in range(int(token_count)):
        out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
        for n in occurrence:
            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)

        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
        if token in args.token_stop:
            break
        all_tokens += [token]
        if token not in occurrence:
            occurrence[token] = 1
        else:
            occurrence[token] += 1

        tmp = pipeline.decode(all_tokens[out_last:])
        if '\ufffd' not in tmp:
            out_str += tmp
            yield out_str.strip()
            out_last = i + 1

    gc.collect()
    torch.cuda.empty_cache()
    yield out_str.strip()
examples = [
    ["Tell me about ravens.", "", 150, 1.0, 0.5, 0.4, 0.4],
    ["Write a python function to mine 1 BTC, with details and comments.", "", 150, 1.0, 0.5, 0.2, 0.2],
    ["Write a song about ravens.", "", 150, 1.0, 0.5, 0.4, 0.4],
    ["Explain the following metaphor: Life is like cats.", "", 150, 1.0, 0.5, 0.4, 0.4],
    ["Write a story using the following information", "A man named Alex chops a tree down", 150, 1.0, 0.5, 0.4, 0.4],
    ["Generate a list of adjectives that describe a person as brave.", "", 150, 1.0, 0.5, 0.4, 0.4],
    ["You have $100, and your goal is to turn that into as much money as possible with AI and Machine Learning. Please respond with a detailed plan.", "", 150, 1.0, 0.5, 0.4, 0.4],
]
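# Each example row follows the Dataset component order used below:
# [instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty].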
chat_intro = '''The following is a coherent verbose detailed conversation between an AI girl named <|bot|> and <|user|>. One day, they meet at a café.

Note the following important facts about <|bot|>:
1. <|bot|> is very intelligent, creative and friendly.
2. <|bot|> likes to tell <|user|> a lot about herself and her opinions.
3. <|bot|> usually gives <|user|> kind, helpful and informative advice.

<|user|>: Hello, how are you doing?

<|bot|>: Hi! Thanks, I'm fine. What about you?

<|user|>: I am fine. It's nice to see you. Look, here is a store selling tea and juice. We can go and take a look. Would you like to chat with me for a while?

<|bot|>: Sure. Let's go inside. What would you like to talk about? I'm listening.
'''
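# The <|user|> and <|bot|> placeholders in the scenario are replaced with the chosen names the first time a
# conversation is processed (see chat() below); turns are separated by blank lines, which is also what the
# model uses as its end-of-reply marker.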
def user(message, chatbot):
    chatbot = chatbot or []
    print(f"User: {message}")
    return "", chatbot + [[message, None]]
def alternative(chatbot, history):
    if not chatbot or not history:
        return chatbot, history

    chatbot[-1][1] = None
    history[0] = copy.deepcopy(history[1])
    return chatbot, history
def chat(
    prompt,
    user,
    bot,
    chatbot,
    history,
    temperature=1.0,
    top_p=0.8,
    presence_penalty=0.1,
    count_penalty=0.1,
):
    args = PIPELINE_ARGS(temperature=max(0.2, float(temperature)), top_p=float(top_p),
                         alpha_frequency=float(count_penalty),
                         alpha_presence=float(presence_penalty),
                         token_ban=[],   # ban the generation of some tokens
                         token_stop=[])  # stop generation whenever you see any token here
    if not chatbot:
        return chatbot, history

    message = chatbot[-1][0]
    message = message.strip().replace('\r\n', '\n').replace('\n\n', '\n')
    ctx = f"{user}: {message}\n\n{bot}:"

    # gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
    # print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
    if not history:
        prompt = prompt.replace("<|user|>", user.strip())
        prompt = prompt.replace("<|bot|>", bot.strip())
        prompt = prompt.strip()
        prompt = f"\n{prompt}\n\n"
        out, state = model.forward(pipeline.encode(prompt), None)
        history = [state, None, []]  # [state, state_pre, tokens]
        print("History reloaded.")
    [state, _, all_tokens] = history
    state_pre_0 = copy.deepcopy(state)

    out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:], state)
    state_pre_1 = copy.deepcopy(state)  # For recovery

    print("Bot: ", end='')

    begin = len(all_tokens)
    out_last = begin
    out_str: str = ''
    occurrence = {}
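    # nl_bias shapes the reply length by biasing the newline logit: a newline is forbidden on the very first
    # token, discouraged for roughly the first 30 tokens, neutral up to ~130, then increasingly encouraged so
    # the reply wraps up. Token id 187 is assumed to be '\n' in the 20B tokenizer.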
    for i in range(300):
        if i <= 0:
            nl_bias = -float('inf')
        elif i <= 30:
            nl_bias = (i - 30) * 0.1
        elif i <= 130:
            nl_bias = 0
        else:
            nl_bias = (i - 130) * 0.25
        out[187] += nl_bias

        for n in occurrence:
            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)

        next_tokens = [token]
        if token == 0:
            next_tokens = pipeline.encode('\n\n')
        all_tokens += next_tokens

        if token not in occurrence:
            occurrence[token] = 1
        else:
            occurrence[token] += 1

        out, state = model.forward(next_tokens, state)

        tmp = pipeline.decode(all_tokens[out_last:])
        if '\ufffd' not in tmp:
            print(tmp, end='', flush=True)
            out_last = begin + i + 1

            out_str += tmp
            chatbot[-1][1] = out_str.strip()
            history = [state, state_pre_0, all_tokens]  # keep the [state, state_pre, tokens] layout
            yield chatbot, history

        out_str = pipeline.decode(all_tokens[begin:])
        out_str = out_str.replace("\r\n", '\n').replace('\\n', '\n')

        if '\n\n' in out_str:
            break
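        # If the model starts writing the next "{user}:" or "{bot}:" turn itself, truncate the reply there,
        # roll the state back to the snapshot taken right after the real user message (state_pre_1), and
        # re-advance it over only the kept tokens.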
        # State recovery
        if f'{user}:' in out_str or f'{bot}:' in out_str:
            idx_user = out_str.find(f'{user}:')
            idx_user = len(out_str) if idx_user == -1 else idx_user
            idx_bot = out_str.find(f'{bot}:')
            idx_bot = len(out_str) if idx_bot == -1 else idx_bot
            idx = min(idx_user, idx_bot)

            if idx < len(out_str):
                out_str = f" {out_str[:idx].strip()}\n\n"
                tokens = pipeline.encode(out_str)

                all_tokens = all_tokens[:begin] + tokens
                out, state = model.forward(tokens, state_pre_1)
                break
    gc.collect()
    torch.cuda.empty_cache()

    chatbot[-1][1] = out_str.strip()
    history = [state, state_pre_0, all_tokens]
    yield chatbot, history
with gr.Blocks(title=title) as demo:
    gr.HTML(f"<div style=\"text-align: center;\">\n<h1>🐦Raven - {title}</h1>\n</div>")
    with gr.Tab("Instruct"):
        gr.Markdown(f"Raven is a 100% RNN [RWKV 7B](https://github.com/BlinkDL/ChatRWKV) model ([RWKV-LM](https://github.com/BlinkDL/RWKV-LM)) finetuned to follow instructions. *** Please try the examples first (bottom of page) *** and edit them to use your question. The demo is limited to ctxlen 1024. It is finetuned on [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca), codealpaca and more. For best results, *** keep your prompt short and clear ***.")
        with gr.Row():
            with gr.Column():
                instruction = gr.Textbox(lines=2, label="Instruction", value="Tell me about ravens.")
                input = gr.Textbox(lines=2, label="Input", placeholder="none")
                token_count = gr.Slider(10, 200, label="Max Tokens", step=10, value=150)
                temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
                top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
                presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0.2)
                count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0.2)
            with gr.Column():
                with gr.Row():
                    submit = gr.Button("Submit", variant="primary")
                    clear = gr.Button("Clear", variant="secondary")
                output = gr.Textbox(label="Output", lines=5)
        data = gr.Dataset(components=[instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty], samples=examples, label="Example Instructions", headers=["Instruction", "Input", "Max Tokens", "Temperature", "Top P", "Presence Penalty", "Count Penalty"])
        submit.click(evaluate, [instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty], [output])
        clear.click(lambda: None, [], [output])
        data.click(lambda x: x, [data], [instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty])
with gr.Tab("Chat"):
gr.Markdown(f'''*** <b>Default Chat Scenario: You (Bob) and Bot (Alice) meet at a café.</b> ***\nIf you want to change the scenario, make sure to use an empty new line to separate different people's words. Also, make sure there is no empty new lines within one person's lines. Changes only take effect after clearing.''', label="Description")
with gr.Row():
with gr.Column():
chatbot = gr.Chatbot()
state = gr.State()
message = gr.Textbox(label="Message")
with gr.Row():
send = gr.Button("Send", variant="primary")
alt = gr.Button("Alternative", variant="secondary")
clear = gr.Button("Clear", variant="secondary")
with gr.Column():
with gr.Row():
user_name = gr.Textbox(lines=1, max_lines=1, label="User Name", value="Bob")
bot_name = gr.Textbox(lines=1, max_lines=1, label="Bot Name", value="Alice")
prompt = gr.Textbox(lines=10, max_lines=50, label="Scenario", value=chat_intro)
temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0.2)
count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0.2)
        chat_inputs = [
            prompt,
            user_name,
            bot_name,
            chatbot,
            state,
            temperature,
            top_p,
            presence_penalty,
            count_penalty,
        ]
        chat_outputs = [chatbot, state]

        message.submit(user, [message, chatbot], [message, chatbot], queue=False).then(chat, chat_inputs, chat_outputs)
        send.click(user, [message, chatbot], [message, chatbot], queue=False).then(chat, chat_inputs, chat_outputs)
        alt.click(alternative, [chatbot, state], [chatbot, state], queue=False).then(chat, chat_inputs, chat_outputs)
        clear.click(lambda: ([], None, ""), [], [chatbot, state, message], queue=False)
demo.queue(max_size=10)
demo.launch(share=True)