import gradio as gr
import torch
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
from threading import Thread
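
# `spaces` provides the `@spaces.GPU` decorator applied to predict() below,
# which requests a GPU at call time on Hugging Face ZeroGPU Spaces.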
# Load the tokenizer and model from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-7B-Chat")
if torch.cuda.is_available():
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-7B-Chat", torch_dtype=torch.float16, device_map="auto")
else:
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-7B-Chat")  # CPU fallback so startup doesn't fail without a GPU
# Custom stopping criterion for the model's text generation.
class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [2]  # Token IDs at which generation should stop.
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:  # Check whether the last generated token is a stop token.
                return True
        return False
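# Note: the stop id of 2 is kept from the original demo; for Qwen1.5 it is
# likely safer to derive the stop ids from tokenizer.eos_token_id, since
# special-token ids differ between model families.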
# Generate model predictions, streamed token by token.
@spaces.GPU
def predict(message, history):
    stop = StopOnTokens()
    conversation = []
    # Rebuild the chat history in the role/content format expected by the chat template.
    for user, assistant in history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})
    prompt = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
    model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        top_k=50,
        temperature=0.7,
        repetition_penalty=1.0,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop]),
    )
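    # repetition_penalty=1.0 and num_beams=1 match the transformers generation
    # defaults; they are spelled out here so the sampling setup is easy to tweak.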
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()  # Run generation in a background thread so tokens can be streamed.
    partial_message = ""
    for new_token in streamer:
        partial_message += new_token
        if '</s>' in partial_message:  # Stop streaming if a stop token appears in the text.
            break
        yield partial_message
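    # With skip_special_tokens=True the streamer should not normally emit '</s>',
    # so the check above is a safety net rather than the primary stop signal.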
# Set up and launch the Gradio chat interface.
gr.ChatInterface(
    predict,
    title="Qwen1.5 7B Chat Demo",
    description="Warning: all answers are generated and may contain inaccurate information.",
    examples=['How do you cook fish?', 'Who is the president of the United States?'],
).launch()
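
# Assumed dependencies for this Space (not pinned in the original):
# gradio, spaces, torch, and transformers>=4.37.0 (the release that added
# support for the Qwen1.5 architecture).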