AnishKumbhar committed on
Commit 28df89a
1 Parent(s): 48ddfa2

Create app.py

Files changed (1)
  1. app.py +147 -0
app.py ADDED
@@ -0,0 +1,147 @@
+ import os
+ from threading import Thread
+ from typing import Iterator
+
+ # Import bitsandbytes' CUDA setup before gradio and spaces to avoid an
+ # import-order error in bitsandbytes.
+ from bitsandbytes.cuda_setup.main import CUDASetup
+
+ setup = CUDASetup.get_instance()
+
+ import gradio as gr
+ import spaces
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+
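+ # Generation limits; the input-context cap can be overridden via the
+ # MAX_INPUT_TOKEN_LENGTH environment variable.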
+ MAX_MAX_NEW_TOKENS = 2048
+ DEFAULT_MAX_NEW_TOKENS = 1024
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
+
+ DESCRIPTION = """\
+ # Llama-2 13B Chat
+ This Space demonstrates the model [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta, a Llama 2 model with 13B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate it to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
+ 🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
+ 🔨 Looking for an even more powerful model? Check out the larger [**70B** model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
+ 🐇 For a smaller model that you can run on many GPUs, check out our [7B model demo](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat).
+ """
+
+ LICENSE = """
+ <p/>
+ ---
+ As a derivative work of [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta,
+ this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/USE_POLICY.md).
+ """
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+
+ if torch.cuda.is_available():
+     model_id = "meta-llama/Llama-2-13b-chat-hf"
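+     # Load the 13B weights with 4-bit quantization (via bitsandbytes) so the
+     # model fits on a single GPU; newer transformers releases spell this as
+     # quantization_config=BitsAndBytesConfig(load_in_4bit=True).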
+     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     tokenizer.use_default_system_prompt = False
+
+
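+ # On ZeroGPU Spaces, spaces.GPU allocates a GPU for the duration of each call.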
+ @spaces.GPU
+ def generate(
+     message: str,
+     chat_history: list[tuple[str, str]],
+     system_prompt: str,
+     max_new_tokens: int = 1024,
+     temperature: float = 0.6,
+     top_p: float = 0.9,
+     top_k: int = 50,
+     repetition_penalty: float = 1.2,
+ ) -> Iterator[str]:
+     conversation = []
+     if system_prompt:
+         conversation.append({"role": "system", "content": system_prompt})
+     for user, assistant in chat_history:
+         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+     conversation.append({"role": "user", "content": message})
+
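+     # Apply the model's chat template, then keep only the most recent
+     # MAX_INPUT_TOKEN_LENGTH tokens so the prompt fits in the context window.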
+     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+     input_ids = input_ids.to(model.device)
+
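+     # Run model.generate on a background thread; TextIteratorStreamer yields
+     # decoded text pieces as they are produced, so partial output can be
+     # streamed back to the UI.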
+     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+     generate_kwargs = dict(
+         input_ids=input_ids,
+         streamer=streamer,
+         max_new_tokens=max_new_tokens,
+         do_sample=True,
+         top_p=top_p,
+         top_k=top_k,
+         temperature=temperature,
+         num_beams=1,
+         repetition_penalty=repetition_penalty,
+     )
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         yield "".join(outputs)
+
+
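+ # Wire the streaming generator into a chat UI; the sliders below surface the
+ # sampling parameters of generate() as additional inputs.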
+ chat_interface = gr.ChatInterface(
+     fn=generate,
+     additional_inputs=[
+         gr.Textbox(label="System prompt", lines=6),
+         gr.Slider(
+             label="Max new tokens",
+             minimum=1,
+             maximum=MAX_MAX_NEW_TOKENS,
+             step=1,
+             value=DEFAULT_MAX_NEW_TOKENS,
+         ),
+         gr.Slider(
+             label="Temperature",
+             minimum=0.1,
+             maximum=4.0,
+             step=0.1,
+             value=0.6,
+         ),
+         gr.Slider(
+             label="Top-p (nucleus sampling)",
+             minimum=0.05,
+             maximum=1.0,
+             step=0.05,
+             value=0.9,
+         ),
+         gr.Slider(
+             label="Top-k",
+             minimum=1,
+             maximum=1000,
+             step=1,
+             value=50,
+         ),
+         gr.Slider(
+             label="Repetition penalty",
+             minimum=1.0,
+             maximum=2.0,
+             step=0.05,
+             value=1.2,
+         ),
+     ],
+     stop_btn=None,
+     examples=[
+         ["Hello there! How are you doing?"],
+         ["Can you explain briefly to me what is the Python programming language?"],
+         ["Explain the plot of Cinderella in a sentence."],
+         ["How many hours does it take a man to eat a Helicopter?"],
+         ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
+     ],
+ )
+
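+ # Page layout: description, duplicate button, the chat UI, and license footer.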
+ with gr.Blocks(css="style.css") as demo:
+     gr.Markdown(DESCRIPTION)
+     gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
+     chat_interface.render()
+     gr.Markdown(LICENSE)
+
+ if __name__ == "__main__":
+     demo.queue(max_size=20).launch()