Build error
Update app.py
app.py CHANGED
@@ -8,20 +8,13 @@ from PIL import Image
 from transformers import AutoProcessor, AutoTokenizer,LlavaForConditionalGeneration
 from transformers import TextIteratorStreamer
 import spaces
-
+import gradio as gr
 app = Flask(__name__)
 
-
-<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
-<img src="https://cdn-uploads.huggingface.co/production/uploads/64ccdc322e592905f922a06e/DDIW0kbWmdOQWwy4XMhwX.png" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
-<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">LLaVA-Llama-3-8B</h1>
-<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Llava-Llama-3-8b is a LLaVA model fine-tuned from Meta-Llama-3-8B-Instruct and CLIP-ViT-Large-patch14-336 with ShareGPT4V-PT and InternVL-SFT by XTuner</p>
-</div>
-"""
+
 
 model_id = "xtuner/llava-llama-3-8b-v1_1-transformers"
 processor = AutoProcessor.from_pretrained(model_id)
-tokenizer = AutoTokenizer.from_pretrained(model_id, legacy=False)
 
 model = LlavaForConditionalGeneration.from_pretrained(
     model_id,
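The removed lines were bare HTML followed by a stray closing `"""` with no opening triple quote, which is a SyntaxError at import time and the likely source of the Space's build error; this commit deletes them (along with the unused AutoTokenizer load) and adds `import gradio as gr`. If the description markup were worth keeping, it would have to live inside an actual string. A minimal sketch follows; the variable name DESCRIPTION_HTML and the use of gr.Blocks/gr.HTML are assumptions for illustration, not taken from this app.py.

# Not part of this commit: a sketch of keeping the description legally,
# by opening the triple-quoted string that the stray closing """ implied.
# DESCRIPTION_HTML and the gr.Blocks/gr.HTML usage are hypothetical.
import gradio as gr

DESCRIPTION_HTML = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
    <img src="https://cdn-uploads.huggingface.co/production/uploads/64ccdc322e592905f922a06e/DDIW0kbWmdOQWwy4XMhwX.png" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55;">
    <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">LLaVA-Llama-3-8B</h1>
    <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Llava-Llama-3-8b is a LLaVA model fine-tuned from Meta-Llama-3-8B-Instruct and CLIP-ViT-Large-patch14-336 with ShareGPT4V-PT and InternVL-SFT by XTuner</p>
</div>
"""

with gr.Blocks() as demo:
    # Render the static description at the top of the UI.
    gr.HTML(DESCRIPTION_HTML)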