add vllm
app.py CHANGED
@@ -38,11 +38,22 @@ with open(f'{model_path}/params.json', 'r') as f:
 with open(f'{model_path}/tekken.json', 'r') as f:
     tokenizer_config = json.load(f)
 
-
-
-
-
-
+@spaces.GPU()
+def initialize_llm():
+    try:
+        llm = LLM(
+            model=repo_id,
+            tokenizer_mode="mistral",
+            max_model_len=65536,
+            max_num_batched_tokens=max_img_per_msg * max_tokens_per_img,
+            limit_mm_per_prompt={"image": max_img_per_msg}
+        )
+        return llm
+    except Exception as e:
+        print("LLM initialization failed:", e)
+        return None
+
+llm = initialize_llm()
 
 def encode_image(image: Image.Image, image_format="PNG") -> str:
     im_file = BytesIO()
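
For context, a minimal sketch of how the initialized llm object might be called elsewhere in app.py, assuming vLLM's LLM.chat API and a base64 PNG string produced by encode_image(); the run_inference helper, its parameters, and the sampling settings are illustrative and not part of this commit.

# Hypothetical usage sketch (not part of the diff above): assumes vLLM's
# LLM.chat API and that encode_image() returns a base64-encoded PNG string.
from vllm import SamplingParams

def run_inference(llm, prompt_text: str, image_b64: str) -> str:
    # Guard against the failure path in initialize_llm(), which returns None.
    if llm is None:
        return "Model is unavailable."
    messages = [{
        "role": "user",
        "content": [
            {"type": "text", "text": prompt_text},
            {"type": "image_url",
             "image_url": {"url": f"data:image/png;base64,{image_b64}"}},
        ],
    }]
    # llm.chat handles chat templating and multimodal inputs for the model.
    outputs = llm.chat(messages, sampling_params=SamplingParams(max_tokens=1024))
    return outputs[0].outputs[0].text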