aiqtech commited on
Commit
a997f34
·
verified ·
1 Parent(s): ae0aef7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -3
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer
2
  from PIL import Image
3
  import requests
@@ -7,10 +8,25 @@ import gradio as gr
7
  from gradio import FileData
8
  import time
9
  import spaces
 
 
 
 
 
 
10
  ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
11
- model = MllamaForConditionalGeneration.from_pretrained(ckpt,
12
- torch_dtype=torch.bfloat16).to("cuda")
13
- processor = AutoProcessor.from_pretrained(ckpt)
 
 
 
 
 
 
 
 
 
14
 
15
 
16
  @spaces.GPU
 
1
+ import os
2
  from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer
3
  from PIL import Image
4
  import requests
 
8
  from gradio import FileData
9
  import time
10
  import spaces
11
+
12
+ # Get the Hugging Face token from the environment variable
13
+ hf_token = os.getenv("HF_TOKEN")
14
+ if not hf_token:
15
+ raise ValueError("HF_TOKEN environment variable not found")
16
+
17
  ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
18
+ # Load the model and processor using the token
19
+ model = MllamaForConditionalGeneration.from_pretrained(
20
+ ckpt,
21
+ torch_dtype=torch.bfloat16,
22
+ token=hf_token # add token
23
+ ).to("cuda")
24
+
25
+ processor = AutoProcessor.from_pretrained(
26
+ ckpt,
27
+ token=hf_token # add token
28
+ )
29
+
30
 
31
 
32
  @spaces.GPU