praeclarumjj3 committed on
Commit
7015bfd
1 Parent(s): b7aa5c8

Update chat.py

Browse files
Files changed (1) hide show
  1. chat.py +193 -383
chat.py CHANGED
@@ -1,395 +1,205 @@
 
 
 
1
  import argparse
2
- import datetime
3
  import json
4
- import os
5
- import time
6
-
7
- import gradio as gr
8
- import hashlib
9
-
10
- from vcoder_llava.vcoder_conversation import (default_conversation, conv_templates,
11
- SeparatorStyle)
12
- from vcoder_llava.constants import LOGDIR
13
- from vcoder_llava.utils import (build_logger, server_error_msg,
14
- violates_moderation, moderation_msg)
15
- from chat import Chat
16
-
17
-
18
- logger = build_logger("gradio_app", "gradio_web_server.log")
19
-
20
- headers = {"User-Agent": "VCoder Client"}
21
-
22
- no_change_btn = gr.Button.update()
23
- enable_btn = gr.Button.update(interactive=True)
24
- disable_btn = gr.Button.update(interactive=False)
25
-
26
- priority = {
27
- "vicuna-13b": "aaaaaaa",
28
- "koala-13b": "aaaaaab",
29
- }
30
-
31
-
32
- def get_conv_log_filename():
33
- t = datetime.datetime.now()
34
- name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json")
35
- return name
36
-
37
-
38
- get_window_url_params = """
39
- function() {
40
- const params = new URLSearchParams(window.location.search);
41
- url_params = Object.fromEntries(params);
42
- console.log(url_params);
43
- return url_params;
44
- }
45
- """
46
-
47
-
48
- def load_demo_refresh_model_list(request: gr.Request):
49
- logger.info(f"load_demo. ip: {request.client.host}")
50
- state = default_conversation.copy()
51
- dropdown_update = gr.Dropdown.update(
52
- choices=models,
53
- value=models[0] if len(models) > 0 else ""
54
- )
55
- return state, dropdown_update
56
-
57
-
58
- def vote_last_response(state, vote_type, model_selector, request: gr.Request):
59
- with open(get_conv_log_filename(), "a") as fout:
60
- data = {
61
- "tstamp": round(time.time(), 4),
62
- "type": vote_type,
63
- "model": model_selector,
64
- "state": state.dict(),
65
- }
66
- fout.write(json.dumps(data) + "\n")
67
-
68
-
69
- def upvote_last_response(state, model_selector, request: gr.Request):
70
- vote_last_response(state, "upvote", model_selector, request)
71
- return ("",) + (disable_btn,) * 3
72
-
73
-
74
- def downvote_last_response(state, model_selector, request: gr.Request):
75
- vote_last_response(state, "downvote", model_selector, request)
76
- return ("",) + (disable_btn,) * 3
77
-
78
-
79
- def flag_last_response(state, model_selector, request: gr.Request):
80
- vote_last_response(state, "flag", model_selector, request)
81
- return ("",) + (disable_btn,) * 3
82
-
83
- def regenerate(state, image_process_mode, seg_process_mode):
84
- state.messages[-1][-1] = None
85
- prev_human_msg = state.messages[-2]
86
- if type(prev_human_msg[1]) in (tuple, list):
87
- prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode, prev_human_msg[1][3], seg_process_mode, None, None)
88
- state.skip_next = False
89
- return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
90
-
91
-
92
- def clear_history(request: gr.Request):
93
- state = default_conversation.copy()
94
- return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
95
-
96
-
97
- def add_text(state, text, image, image_process_mode, seg, seg_process_mode, depth, depth_process_mode, request: gr.Request):
98
- logger.info(f"add_text. len: {len(text)}")
99
- if len(text) <= 0 and image is None:
100
- state.skip_next = True
101
- return (state, state.to_gradio_chatbot(), "", None, None) + (no_change_btn,) * 5
102
- if args.moderate:
103
- flagged = violates_moderation(text)
104
- if flagged:
105
- state.skip_next = True
106
- return (state, state.to_gradio_chatbot(), moderation_msg, None, None) + (
107
- no_change_btn,) * 5
108
-
109
- text = text[:1576] # Hard cut-off
110
- if image is not None:
111
- text = text[:1200] # Hard cut-off for images
112
- if '<image>' not in text:
113
- text = '<image>\n' + text
114
- if seg is not None:
115
- if '<seg>' not in text:
116
- text = '<seg>\n' + text
117
-
118
- text = (text, image, image_process_mode, seg, seg_process_mode, None, None)
119
- if len(state.get_images(return_pil=True)) > 0:
120
- state = default_conversation.copy()
121
- state.append_message(state.roles[0], text)
122
- state.append_message(state.roles[1], None)
123
- state.skip_next = False
124
- return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
125
-
126
-
127
- def http_bot(state, model_selector, temperature, top_p, max_new_tokens, request: gr.Request):
128
- start_tstamp = time.time()
129
- model_name = model_selector
130
-
131
- if state.skip_next:
132
- # This generate call is skipped due to invalid inputs
133
- yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
134
- return
135
-
136
- if len(state.messages) == state.offset + 2:
137
- # First round of conversation
138
- if "llava" in model_name.lower():
139
- template_name = "llava_v1"
140
- new_state = conv_templates[template_name].copy()
141
- new_state.append_message(new_state.roles[0], state.messages[-2][1])
142
- new_state.append_message(new_state.roles[1], None)
143
- state = new_state
144
-
145
- # Construct prompt
146
- prompt = state.get_prompt()
147
-
148
- all_images = state.get_images(return_pil=True)
149
- all_image_hash = [hashlib.md5(image.tobytes()).hexdigest() for image in all_images]
150
- for image, hash in zip(all_images, all_image_hash):
151
- t = datetime.datetime.now()
152
- filename = os.path.join(LOGDIR, "serve_images", f"{t.year}-{t.month:02d}-{t.day:02d}", f"{hash}.jpg")
153
- if not os.path.isfile(filename):
154
- os.makedirs(os.path.dirname(filename), exist_ok=True)
155
- image.save(filename)
156
-
157
- all_segs = state.get_segs(return_pil=True)
158
- all_seg_hash = [hashlib.md5(seg.tobytes()).hexdigest() for seg in all_segs]
159
- for seg, hash in zip(all_segs, all_seg_hash):
160
- t = datetime.datetime.now()
161
- filename = os.path.join(LOGDIR, "serve_segs", f"{t.year}-{t.month:02d}-{t.day:02d}", f"{hash}.jpg")
162
- if not os.path.isfile(filename):
163
- os.makedirs(os.path.dirname(filename), exist_ok=True)
164
- seg.save(filename)
165
-
166
- # Make requests
167
- pload = {
168
- "model": model_name,
169
- "prompt": prompt,
170
- "temperature": float(temperature),
171
- "top_p": float(top_p),
172
- "max_new_tokens": min(int(max_new_tokens), 1536),
173
- "stop": state.sep if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT] else state.sep2,
174
- "images": f'List of {len(state.get_images())} images: {all_image_hash}',
175
- "segs": f'List of {len(state.get_segs())} segs: {all_seg_hash}',
176
- }
177
- logger.info(f"==== request ====\n{pload}")
178
-
179
- pload['images'] = state.get_images()
180
- pload['segs'] = state.get_segs()
181
-
182
- state.messages[-1][-1] = "▌"
183
- yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
184
-
185
-
186
- try:
187
- # Stream output
188
- response = chat.generate_stream_gate(pload)
189
- for chunk in response:
190
- if chunk:
191
- data = json.loads(chunk.decode())
192
- if data["error_code"] == 0:
193
- output = data["text"][len(prompt):].strip()
194
- state.messages[-1][-1] = output + "▌"
195
- yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
196
  else:
197
- output = data["text"] + f" (error_code: {data['error_code']})"
198
- state.messages[-1][-1] = output
199
- yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
200
- return
201
- time.sleep(0.03)
202
- except Exception:
203
- gr.Warning(server_error_msg)
204
- state.messages[-1][-1] = server_error_msg
205
- yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
206
- return
207
-
208
- state.messages[-1][-1] = state.messages[-1][-1][:-1]
209
- yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
210
-
211
- finish_tstamp = time.time()
212
- logger.info(f"{output}")
213
-
214
- with open(get_conv_log_filename(), "a") as fout:
215
- data = {
216
- "tstamp": round(finish_tstamp, 4),
217
- "type": "chat",
218
- "model": model_name,
219
- "start": round(start_tstamp, 4),
220
- "finish": round(start_tstamp, 4),
221
- "state": state.dict(),
222
- "images": all_image_hash,
223
- "segs": all_seg_hash,
224
- "ip": request.client.host,
225
- }
226
- fout.write(json.dumps(data) + "\n")
227
-
228
-
229
- title = "<h1 style='margin-bottom: -10px; text-align: center'>VCoder: Versatile Vision Encoders for Multimodal Large Language Models</h1>"
230
- # style='
231
- description = "<p style='font-size: 16px; margin: 5px; font-weight: w300; text-align: center'> <a href='https://praeclarumjj3.github.io/' style='text-decoration:none' target='_blank'>Jitesh Jain, </a> <a href='https://jwyang.github.io/' style='text-decoration:none' target='_blank'>Jianwei Yang, <a href='https://www.humphreyshi.com/home' style='text-decoration:none' target='_blank'>Humphrey Shi</a></p>" \
232
- + "<p style='font-size: 16px; margin: 5px; font-weight: w600; text-align: center'> <a href='https://praeclarumjj3.github.io/vcoder/' target='_blank'>Project Page</a> | <a href='https://praeclarumjj3.github.io/vcoder/' target='_blank'>Video</a> | <a href='https://arxiv.org/abs/2211.06220' target='_blank'>ArXiv Paper</a> | <a href='https://github.com/SHI-Labs/VCoder' target='_blank'>Github Repo</a></p>" \
233
- + "<p style='text-align: center; font-size: 16px; margin: 5px; font-weight: w300;'> [Note: Please click on Regenerate button if you are unsatisfied with the generated response. You may find screenshots of our demo trials <a href='https://github.com/SHI-Labs/VCoder/blob/main/images/' style='text-decoration:none' target='_blank'>here</a>.]</p>" \
234
- + "<p style='text-align: center; font-size: 16px; margin: 5px; font-weight: w300;'> [Note: You can obtain segmentation maps for your image using the <a href='https://huggingface.co/spaces/shi-labs/OneFormer' style='text-decoration:none' target='_blank'>OneFormer Demo</a>. Please click on Regenerate button if you are unsatisfied with the generated response. You may find screenshots of our demo trials <a href='https://github.com/SHI-Labs/VCoder/blob/main/images/' style='text-decoration:none' target='_blank'>here</a>.]</p>"
235
-
236
- tos_markdown = ("""
237
- ### Terms of use
238
- By using this service, users are required to agree to the following terms:
239
- The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes.
240
- """)
241
-
242
-
243
- learn_more_markdown = ("""
244
- ### License
245
- The service is a research preview intended for non-commercial use only, subject to the [License](https://huggingface.co/lmsys/vicuna-7b-v1.5) of Vicuna-v1.5, [License](https://github.com/haotian-liu/LLaVA/blob/main/LICENSE) of LLaVA, [Terms of Use](https://cocodataset.org/#termsofuse) of the COCO dataset, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
246
- """)
247
-
248
- block_css = """
249
-
250
- #buttons button {
251
- min-width: min(120px,100%);
252
- }
253
-
254
- """
255
-
256
- def build_demo(embed_mode):
257
-
258
- textbox = gr.Textbox(show_label=False, placeholder="Enter text and press ENTER", container=False)
259
- with gr.Blocks(title="LLaVA", theme=gr.themes.Default(), css=block_css) as demo:
260
- state = gr.State()
261
-
262
- if not embed_mode:
263
- gr.Markdown(title)
264
- gr.Markdown(description)
265
-
266
- with gr.Row():
267
- with gr.Column(scale=3):
268
- with gr.Row(elem_id="model_selector_row"):
269
- model_selector = gr.Dropdown(
270
- choices=models,
271
- value=models[0] if len(models) > 0 else "",
272
- interactive=True,
273
- show_label=False,
274
- container=False)
275
-
276
- # with gr.Row():
277
- imagebox = gr.Image(type="pil", label="Image Input")
278
- image_process_mode = gr.Radio(
279
- ["Crop", "Resize", "Pad", "Default"],
280
- value="Default",
281
- label="Preprocess for non-square image", visible=False)
282
-
283
- segbox = gr.Image(type="pil", label="Seg Map")
284
- seg_process_mode = gr.Radio(
285
- ["Crop", "Resize", "Pad", "Default"],
286
- value="Default",
287
- label="Preprocess for non-square Seg Map", visible=False)
288
-
289
- with gr.Accordion("Parameters", open=False) as parameter_row:
290
- temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.8, step=0.1, interactive=True, label="Temperature",)
291
- top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.9, step=0.1, interactive=True, label="Top P",)
292
- max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)
293
-
294
- with gr.Column(scale=8):
295
- chatbot = gr.Chatbot(elem_id="chatbot", label="VCoder Chatbot", height=550)
296
- with gr.Row():
297
- with gr.Column(scale=8):
298
- textbox.render()
299
- with gr.Column(scale=1, min_width=50):
300
- submit_btn = gr.Button(value="Send", variant="primary")
301
- with gr.Row(elem_id="buttons") as button_row:
302
- upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
303
- downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
304
- flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
305
- #stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
306
- regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
307
- clear_btn = gr.Button(value="🗑️ Clear", interactive=False)
308
-
309
- cur_dir = os.path.dirname(os.path.abspath(__file__))
310
- gr.Examples(examples=[
311
- [f"{cur_dir}/examples/people.jpg", f"{cur_dir}/examples/people_pan.png", "What objects can be seen in the image?", "0.9", "1.0"],
312
- [f"{cur_dir}/examples/corgi.jpg", f"{cur_dir}/examples/corgi_pan.png", "What objects can be seen in the image?", "0.6", "0.7"],
313
- [f"{cur_dir}/examples/friends.jpg", f"{cur_dir}/examples/friends_pan.png", "Can you count the number of people in the image?", "0.8", "0.9"],
314
- [f"{cur_dir}/examples/friends.jpg", f"{cur_dir}/examples/friends_pan.png", "What is happening in the image?", "0.8", "0.9"],
315
- [f"{cur_dir}/examples/suits.jpg", f"{cur_dir}/examples/suits_pan.png", "What objects can be seen in the image?", "0.5", "0.5"],
316
- [f"{cur_dir}/examples/suits.jpg", f"{cur_dir}/examples/suits_ins.png", "What objects can be seen in the image?", "0.5", "0.5"],
317
- ], inputs=[imagebox, segbox, textbox, temperature, top_p])
318
-
319
- if not embed_mode:
320
- gr.Markdown(tos_markdown)
321
- gr.Markdown(learn_more_markdown)
322
-
323
- # Register listeners
324
- btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
325
- upvote_btn.click(upvote_last_response,
326
- [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
327
- downvote_btn.click(downvote_last_response,
328
- [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
329
- flag_btn.click(flag_last_response,
330
- [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
331
- regenerate_btn.click(regenerate, [state, image_process_mode, seg_process_mode],
332
- [state, chatbot, textbox, imagebox, segbox] + btn_list).then(
333
- http_bot, [state, model_selector, temperature, top_p, max_output_tokens],
334
- [state, chatbot] + btn_list)
335
- clear_btn.click(clear_history, None, [state, chatbot, textbox, imagebox, segbox] + btn_list)
336
-
337
- textbox.submit(add_text, [state, textbox, imagebox, image_process_mode, segbox, seg_process_mode], [state, chatbot, textbox, imagebox, segbox] + btn_list
338
- ).then(http_bot, [state, model_selector, temperature, top_p, max_output_tokens],
339
- [state, chatbot] + btn_list)
340
- submit_btn.click(add_text, [state, textbox, imagebox, image_process_mode, segbox, seg_process_mode], [state, chatbot, textbox, imagebox, segbox] + btn_list
341
- ).then(http_bot, [state, model_selector, temperature, top_p, max_output_tokens],
342
- [state, chatbot] + btn_list)
343
-
344
- demo.load(load_demo_refresh_model_list, None, [state, model_selector])
345
-
346
- return demo
347
 
348
 
349
  if __name__ == "__main__":
350
  parser = argparse.ArgumentParser()
351
- parser.add_argument("--model-path", type=str, default="shi-labs/vcoder_ds_llava-v1.5-13b")
 
 
 
 
 
 
352
  parser.add_argument("--model-base", type=str, default=None)
353
  parser.add_argument("--model-name", type=str)
 
 
 
 
 
354
  parser.add_argument("--load-8bit", action="store_true")
355
  parser.add_argument("--load-4bit", action="store_true")
356
- parser.add_argument("--device", type=str, default="cuda")
357
- parser.add_argument("--share", action="store_true")
358
- parser.add_argument("--moderate", action="store_true")
359
- parser.add_argument("--embed", action="store_true")
360
- parser.add_argument("--concurrency-count", type=int, default=10)
361
- parser.add_argument("--host", type=str, default="0.0.0.0")
362
- parser.add_argument("--port", type=int)
363
  args = parser.parse_args()
364
- logger.info(f"args: {args}")
365
-
366
- if args.model_name is None:
367
- model_paths = args.model_path.split("/")
368
- if model_paths[-1].startswith('checkpoint-'):
369
- model_name = model_paths[-2] + "_" + model_paths[-1]
370
- else:
371
- model_name = model_paths[-1]
372
- else:
373
- model_name = args.model_name
374
-
375
- models = [model_name]
376
- chat = Chat(
377
- args.model_path,
378
- args.model_base,
379
- args.model_name,
380
- args.load_8bit,
381
- args.load_4bit,
382
- args.device,
383
- logger
384
- )
385
-
386
- logger.info(args)
387
- demo = build_demo(args.embed)
388
- demo.queue(
389
- concurrency_count=args.concurrency_count,
390
- api_open=False
391
- ).launch(
392
- server_name=args.host,
393
- server_port=args.port,
394
- share=args.share
395
- )
 
1
+ """
2
+ A model worker executes the model.
3
+ """
4
  import argparse
 
5
  import json
6
+ import torch
7
+
8
+ from vcoder_llava.utils import server_error_msg
9
+ from vcoder_llava.model.builder import load_pretrained_model
10
+ from vcoder_llava.mm_utils import process_images, load_image_from_base64, tokenizer_seg_token, tokenizer_depth_seg_token, tokenizer_image_token, KeywordsStoppingCriteria
11
+ from vcoder_llava.constants import (
12
+ IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN,
13
+ SEG_TOKEN_INDEX, DEFAULT_SEG_TOKEN,
14
+ DEPTH_TOKEN_INDEX, DEFAULT_DEPTH_TOKEN
15
+ )
16
+ from transformers import TextIteratorStreamer
17
+
18
class Chat:
    """Model worker wrapping a VCoder-LLaVA checkpoint for streamed generation.

    Loads the tokenizer/model/processors once at construction and exposes a
    streaming generate API that yields JSON-encoded byte chunks.
    """

    def __init__(self, model_path, model_base, model_name,
                 load_8bit, load_4bit, device, logger):
        """Load the pretrained model and derive capability flags.

        Args:
            model_path: HF hub id or local path of the checkpoint.
            model_base: optional base model forwarded to the builder.
            model_name: explicit display name; derived from ``model_path`` when None.
            load_8bit, load_4bit: quantized-loading switches for the builder.
            device: torch device string (e.g. ``"cuda"``).
            logger: logger used for the loading message.
        """
        if model_path.endswith("/"):
            model_path = model_path[:-1]
        if model_name is None:
            # Derive a name from the path; keep the parent folder for
            # intermediate "checkpoint-XXXX" directories so names stay unique.
            model_paths = model_path.split("/")
            if model_paths[-1].startswith('checkpoint-'):
                self.model_name = model_paths[-2] + "_" + model_paths[-1]
            else:
                self.model_name = model_paths[-1]
        else:
            self.model_name = model_name

        self.device = device
        logger.info(f"Loading the model {self.model_name} ...")
        self.tokenizer, self.model, self.image_processor, self.seg_image_processor, self.depth_image_processor, self.context_len = load_pretrained_model(
            model_path, model_base, self.model_name, load_8bit, load_4bit, device=self.device)
        # Capability flags are inferred from the model name by convention.
        self.is_multimodal = 'llava' in self.model_name.lower()
        self.is_seg = "seg_llava" in self.model_name.lower()
        self.is_depth = False  # depth inputs are disabled in this worker

    @torch.inference_mode()
    def generate_stream(self, params):
        """Yield partial generations as JSON-encoded byte chunks.

        ``params`` carries "prompt" plus optional base64-encoded "images",
        "segs", "depths" lists and sampling settings ("temperature", "top_p",
        "max_new_tokens", "stop").

        Raises:
            ValueError: when the number of images/segs/depths does not match
                the number of corresponding placeholder tokens in the prompt.
        """
        tokenizer, model, image_processor, seg_image_processor, depth_image_processor = self.tokenizer, self.model, self.image_processor, self.seg_image_processor, self.depth_image_processor

        prompt = params["prompt"]
        ori_prompt = prompt
        images = params.get("images", None)
        segs = params.get("segs", None)
        depths = params.get("depths", None)
        num_image_tokens = 0
        num_seg_tokens = 0
        num_depth_tokens = 0
        if images is not None and len(images) > 0 and self.is_multimodal:
            # NOTE: a redundant inner `if len(images) > 0` (with an unreachable
            # else) was removed; the outer condition already guarantees it.
            if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
                raise ValueError("Number of images does not match number of <image> tokens in prompt")

            images = [load_image_from_base64(image) for image in images]
            images = process_images(images, image_processor, model.config)
            if type(images) is list:
                images = [image.to(self.model.device, dtype=torch.float16) for image in images]
            else:
                images = images.to(self.model.device, dtype=torch.float16)
            # Each <image> placeholder expands to num_patches vision tokens.
            # (The original did a no-op prompt.replace of the token with itself.)
            num_image_tokens = prompt.count(DEFAULT_IMAGE_TOKEN) * model.get_vision_tower().num_patches

            if segs is not None and len(segs) > 0 and self.is_seg:
                if len(segs) != prompt.count(DEFAULT_SEG_TOKEN):
                    raise ValueError("Number of segs does not match number of <seg> tokens in prompt")

                segs = [load_image_from_base64(seg) for seg in segs]
                segs = process_images(segs, seg_image_processor, model.config)
                if type(segs) is list:
                    segs = [seg.to(self.model.device, dtype=torch.float16) for seg in segs]
                else:
                    segs = segs.to(self.model.device, dtype=torch.float16)
                num_seg_tokens = prompt.count(DEFAULT_SEG_TOKEN) * model.get_vision_tower().num_patches

                # Depth inputs only apply when segs are present and is_depth
                # is set (always False in this worker — see __init__).
                if depths is not None and len(depths) > 0 and self.is_depth:
                    if len(depths) != prompt.count(DEFAULT_DEPTH_TOKEN):
                        raise ValueError("Number of depths does not match number of <depth> tokens in prompt")

                    depths = [load_image_from_base64(depth) for depth in depths]
                    depths = process_images(depths, depth_image_processor, model.config)
                    if type(depths) is list:
                        depths = [depth.to(self.model.device, dtype=torch.float16) for depth in depths]
                    else:
                        depths = depths.to(self.model.device, dtype=torch.float16)
                    num_depth_tokens = prompt.count(DEFAULT_DEPTH_TOKEN) * model.get_vision_tower().num_patches
                else:
                    depths = None
            else:
                segs = None
                depths = None
            image_args = {"images": images, "segs": segs, "depths": depths}
        else:
            images = None
            segs = None
            depths = None
            image_args = {}

        temperature = float(params.get("temperature", 1.0))
        top_p = float(params.get("top_p", 1.0))
        max_context_length = getattr(model.config, 'max_position_embeddings', 2048)
        max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
        stop_str = params.get("stop", None)
        do_sample = True if temperature > 0.001 else False

        if self.is_seg:
            if self.is_depth:
                input_ids = tokenizer_depth_seg_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, DEPTH_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
            else:
                input_ids = tokenizer_seg_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
        else:
            input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
        # Guard against a missing "stop" param: [None] would break keyword
        # matching, and endswith(None) below would raise TypeError. An empty
        # keyword list simply never triggers.
        keywords = [stop_str] if stop_str else []
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15)

        # Reserve context room for the multimodal patch tokens spliced in
        # place of the <image>/<seg>/<depth> placeholders.
        max_new_tokens = min(max_new_tokens, max_context_length - input_ids.shape[-1] - num_image_tokens - num_seg_tokens - num_depth_tokens)

        if max_new_tokens < 1:
            yield json.dumps({"text": ori_prompt + "Exceeds max token length. Please start a new conversation, thanks.", "error_code": 0}).encode() + b"\0"
            return

        # NOTE(review): `generate` runs synchronously here (the upstream LLaVA
        # worker runs it on a Thread before draining the streamer), so chunks
        # are only yielded after the full generation completes — "streaming"
        # is effectively buffered. The original also bound generate's return
        # value and immediately overwrote it; that dead assignment is removed.
        model.generate(
            inputs=input_ids,
            do_sample=do_sample,
            temperature=temperature,
            top_p=top_p,
            max_new_tokens=max_new_tokens,
            streamer=streamer,
            stopping_criteria=[stopping_criteria],
            use_cache=True,
            **image_args
        )

        generated_text = ori_prompt
        for new_text in streamer:
            generated_text += new_text
            if stop_str and generated_text.endswith(stop_str):
                generated_text = generated_text[:-len(stop_str)]
            yield json.dumps({"text": generated_text, "error_code": 0}).encode()

    def generate_stream_gate(self, params):
        """Wrap generate_stream, converting failures into an error payload.

        Every caught exception yields a single JSON chunk with
        ``error_code`` 1 and the shared server error message.
        """
        try:
            for x in self.generate_stream(params):
                yield x
        except ValueError as e:
            print("Caught ValueError:", e)
            ret = {
                "text": server_error_msg,
                "error_code": 1,
            }
            yield json.dumps(ret).encode()
        except torch.cuda.CudaError as e:
            print("Caught torch.cuda.CudaError:", e)
            ret = {
                "text": server_error_msg,
                "error_code": 1,
            }
            yield json.dumps(ret).encode()
        except Exception as e:
            print("Caught Unknown Error", e)
            ret = {
                "text": server_error_msg,
                "error_code": 1,
            }
            yield json.dumps(ret).encode()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
 
187
if __name__ == "__main__":
    # Build the worker's CLI. The flag surface (names, types, defaults,
    # help text) is identical to the original; the declarations are just
    # expressed as a data-driven table for readability.
    parser = argparse.ArgumentParser()
    _ARG_SPECS = (
        ("--host", {"type": str, "default": "localhost"}),
        ("--port", {"type": int, "default": 21002}),
        ("--worker-address", {"type": str, "default": "http://localhost:21002"}),
        ("--controller-address", {"type": str, "default": "http://localhost:21001"}),
        ("--model-path", {"type": str, "default": "facebook/opt-350m"}),
        ("--model-base", {"type": str, "default": None}),
        ("--model-name", {"type": str}),
        ("--device", {"type": str, "default": "cuda"}),
        ("--multi-modal", {"action": "store_true", "help": "Multimodal mode is automatically detected with model name, please make sure `llava` is included in the model path."}),
        ("--limit-model-concurrency", {"type": int, "default": 5}),
        ("--stream-interval", {"type": int, "default": 1}),
        ("--no-register", {"action": "store_true"}),
        ("--load-8bit", {"action": "store_true"}),
        ("--load-4bit", {"action": "store_true"}),
    )
    for _flag, _kwargs in _ARG_SPECS:
        parser.add_argument(_flag, **_kwargs)
    args = parser.parse_args()