Spaces: Running on Zero
MaziyarPanahi committed
Commit • cf67b8a
Parent(s): bec5a84
Update app.py (#7)
app.py
CHANGED
@@ -11,6 +11,12 @@ import torch
 import spaces
 import requests
 
+CSS ="""
+.contain { display: flex; flex-direction: column; }
+#component-0 { height: 100%; }
+#chatbot { flex-grow: 1; }
+"""
+
 model_id = "xtuner/llava-llama-3-8b-v1_1-transformers"
 
 processor = AutoProcessor.from_pretrained(model_id)
@@ -43,7 +49,7 @@ def bot_streaming(message, history):
     inputs = processor(prompt, image, return_tensors='pt').to(0, torch.float16)
 
     streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True})
-    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
     generated_text = ""
 
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
@@ -62,7 +68,7 @@ def bot_streaming(message, history):
         yield generated_text_without_prompt
 
 
-demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA Llama-3-8B", examples=[{"text": "What is on the flower?", "files":["./bee.jpg"]},
+demo = gr.ChatInterface(fn=bot_streaming, css=CSS, title="LLaVA Llama-3-8B", examples=[{"text": "What is on the flower?", "files":["./bee.jpg"]},
                         {"text": "How to make this pastry?", "files":["./baklava.png"]}],
                         description="Try [LLaVA Llama-3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers). Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
                         stop_btn="Stop Generation", multimodal=True)
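The first and last hunks are one change: a small CSS override is defined at the top of the file and passed to gr.ChatInterface via css=CSS, so the app lays out as a flex column and #chatbot grows to fill the remaining height. A minimal, self-contained sketch of the same wiring, assuming a recent Gradio release; the echo handler is a hypothetical stand-in for the Space's bot_streaming:

import gradio as gr

# Same stylesheet the commit adds: lay the app out as a flex column and let
# the chatbot element take up the leftover vertical space.
CSS = """
.contain { display: flex; flex-direction: column; }
#component-0 { height: 100%; }
#chatbot { flex-grow: 1; }
"""

def echo(message, history):
    # Hypothetical stand-in for bot_streaming: with multimodal=True the
    # incoming message is a dict with "text" and "files" keys.
    return message["text"]

demo = gr.ChatInterface(fn=echo, css=CSS, title="CSS layout demo", multimodal=True)

if __name__ == "__main__":
    demo.launch()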
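The middle hunk raises max_new_tokens to 1024 inside the streaming path. For context, here is a hedged sketch of that pattern as it can be pieced together from the visible lines and the standard transformers API: model.generate runs on a worker thread and feeds a TextIteratorStreamer, which the foreground generator drains and yields back to the UI. The stream_answer name, the device handling, and the prompt requirements are assumptions, not the Space's exact code.

from threading import Thread

import torch
from transformers import (AutoProcessor, LlavaForConditionalGeneration,
                          TextIteratorStreamer)

model_id = "xtuner/llava-llama-3-8b-v1_1-transformers"
processor = AutoProcessor.from_pretrained(model_id)
model = LlavaForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

def stream_answer(prompt, image):
    # prompt should follow the model's chat template and include its image
    # placeholder; image is a PIL.Image. The positional argument order mirrors
    # app.py and may differ across transformers versions.
    inputs = processor(prompt, image, return_tensors="pt").to(model.device, torch.float16)
    # The streamer decodes tokens into text chunks as generate() emits them.
    streamer = TextIteratorStreamer(processor, skip_special_tokens=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
    # generate() blocks, so it runs on a background thread while this
    # generator drains the streamer and yields progressively longer text.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    generated_text = ""
    for chunk in streamer:
        generated_text += chunk
        yield generated_text
    thread.join()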