# NOTE(review): the lines below are Hugging Face file-viewer chrome that was
# captured along with the source during scraping — they are not Python and
# break parsing; commented out to keep the file importable.
# ysharma's picture
# ysharma HF staff
# Update app.py
# b54f917 verified
# raw
# history blame
# 5.87 kB
import gradio as gr
import os
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
# HTML page heading, including a styled "Duplicate this Space" link-button.
TITLE = '''
<h1 style="text-align: center;">Meta Llama3.1 8B <a href="https://huggingface.co/spaces/ysharma/Chat_with_Meta_llama3_1_8b?duplicate=true" id="duplicate-button"><button style="color:white">Duplicate this Space</button></a></h1>
'''

# Intro blurb rendered under the title (model card link, Hugging Chat pointers,
# release blog post). NOTE(review): some emoji appear mojibake-encoded in the
# source; left byte-for-byte since they are runtime strings.
DESCRIPTION = '''
<div>
<p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct"><b>Meta Llama3.1 8b Chat</b></a>. Feel free to play with this demo, or duplicate to run privately!</p>
<p>πŸ”¨ Interested in trying out more powerful Instruct versions of Llama3.1? Check out the <a href="https://huggingface.co/chat/"><b>Hugging Chat</b></a> integration for 🐘 Meta Llama 3.1 70b, and πŸ¦• Meta Llama 3.1 405b</p>
<p>πŸ”Ž For more details about the Llama3.1 release and how to use the model with <code>transformers</code>, take a look <a href="https://huggingface.co/blog/llama31">at our blog post</a>.</p>
</div>
'''

# License attribution footer required by the Llama license.
LICENSE = """
<p/>
---
Built with Llama
"""

# Empty-chat placeholder shown inside the Chatbot before the first message.
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
<img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8e75e61cc9bab22b7ce3dec85ab0e6db1da5d107/Meta_lockup_positive%20primary_RGB.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Meta llama3.1</h1>
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
</div>
"""

# Custom CSS passed to gr.Blocks: centers the h1 and styles #duplicate-button.
css = """
h1 {
text-align: center;
display: block;
display: flex;
align-items: center;
justify-content: center;
}
#duplicate-button {
margin-left: 10px;
color: white;
background: #1565c0;
border-radius: 100vh;
font-size: 1rem;
padding: 3px 5px;
}
"""
# Hugging Face Hub id of the checkpoint to serve (gated; requires license acceptance).
model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" lets accelerate place the weights on the available device(s).
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
# Stop generation on either the tokenizer's EOS token or Llama 3's
# end-of-turn marker <|eot_id|>.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]
# Conversations longer than this are trimmed from the left before generation.
MAX_INPUT_TOKEN_LENGTH = 4096
# Gradio inference function
@spaces.GPU(duration=120)
def chat_llama3_1_8b(message: str,
                     history: list,
                     temperature: float,
                     max_new_tokens: int
                     ) -> str:
    """
    Stream a chat response from the Llama 3.1 8B Instruct model.

    Args:
        message (str): The latest user message.
        history (list): Conversation history as (user, assistant) pairs,
            as supplied by gr.ChatInterface.
        temperature (float): Sampling temperature; 0 selects greedy decoding.
        max_new_tokens (int): The maximum number of new tokens to generate.

    Yields:
        str: The cumulative response text generated so far.
    """
    # Rebuild the role/content conversation from ChatInterface's tuple history.
    conversation = []
    for user, assistant in history:
        conversation.extend([{"role": "user", "content": user},
                             {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

    # add_generation_prompt=True appends the assistant-turn header so the model
    # answers the user instead of continuing the user's text (was missing).
    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt")
    # Keep only the most recent tokens when the conversation exceeds the budget.
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

    # skip_prompt=True streams only newly generated tokens, not the prompt.
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)

    # temperature == 0 enforces greedy decoding (do_sample=False), avoiding the
    # crash transformers raises on temperature=0 sampling.
    do_sample = temperature != 0
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=do_sample,
        eos_token_id=terminators,
        pad_token_id=tokenizer.eos_token_id,  # Llama has no pad token; silences the per-call warning
    )
    if do_sample:
        # Only pass temperature when sampling; temperature=0 with do_sample=False
        # triggers a transformers validation warning.
        generate_kwargs["temperature"] = temperature

    # Generate in a background thread so tokens can be streamed as they arrive.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
# Gradio block
# Chatbot component handed to ChatInterface; PLACEHOLDER renders until the
# first message is sent.
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

# Statement order inside the Blocks context determines top-to-bottom layout.
with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(TITLE)
    gr.Markdown(DESCRIPTION)
    #gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    gr.ChatInterface(
        fn=chat_llama3_1_8b,
        chatbot=chatbot,
        fill_height=True,
        examples_per_page=3,
        # render=False defers rendering of pre-built components to the
        # ChatInterface itself (required when passing them in like this).
        additional_inputs_accordion=gr.Accordion(label="βš™οΈ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0,
                      maximum=1,
                      step=0.1,
                      value=0.95,
                      label="Temperature",
                      render=False),
            gr.Slider(minimum=128,
                      maximum=4096,
                      step=1,
                      value=512,
                      label="Max new tokens",
                      render=False ),
        ],
        examples=[
            ["There's a llama in my garden 😱 What should I do?"],
            ["What is the best way to open a can of worms?"],
            ["The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1. "],
            ['How to setup a human base on Mars? Give short answer.'],
            ['Explain theory of relativity to me like I’m 8 years old.'],
            ['What is 9,000 * 9,000?'],
            ['Write a pun-filled happy birthday message to my friend Alex.'],
            ['Justify why a penguin might make a good king of the jungle.']
        ],
        # Examples call the GPU-decorated fn; caching at build time is avoided.
        cache_examples=False,
    )
    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.launch()