load models

- Dockerfile   +0 -6
- startup.sh   +13 -2

Dockerfile
@@ -27,10 +27,4 @@ RUN cd vlm-evaluation && pip install -e .
 
 RUN pip install -e .
 
-
-RUN echo "HF_HOME (before setting it)= $HF_HOME "
-
-ENV HF_HOME=/data/.huggingface
-RUN mkdir /logs
-
 CMD ["sh", "/code/startup.sh"]
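The removed lines pointed the Hugging Face cache at the persistent /data volume and pre-created /logs; with ENV HF_HOME=/data/.huggingface gone, huggingface_hub falls back to its default cache under ~/.cache/huggingface inside the container. If a persistent cache is still wanted, the same effect can be set at runtime instead of in the image; a minimal sketch (hypothetical, not part of this commit), e.g. near the top of startup.sh:

# Hypothetical runtime alternative to the removed Dockerfile lines:
# keep the Hugging Face download cache on the persistent /data volume.
export HF_HOME=/data/.huggingface   # huggingface_hub resolves its cache root from HF_HOME
mkdir -p /data/.huggingface /logs   # -p: create if missing, no error if already present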
startup.sh
@@ -1,5 +1,16 @@
 #!/bin/sh
+echo $HF_TOKEN > .hf_token
+python -m interactive_demo --port 40000 --model_id prism-dinosiglip+7b
+python -m interactive_demo --port 40001 --model_family llava-v15 --model_id llava-v1.5-7b --model_dir liuhaotian/llava-v1.5-7b
+
+
+python -m serve.gradio_web_server --controller http://localhost:10000 --model-list-mode reload --share
+
+#!/bin/bash
 python -m serve.controller --host 0.0.0.0 --port 10000 &
+P1=$!
+
 python -m serve.gradio_web_server --controller http://localhost:10000 --model-list-mode reload --share &
-
-
+P2=$!
+
+wait $P1 $P2
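In the reworked startup.sh, $! captures the PID of the most recently started background job, and wait $P1 $P2 blocks until both servers exit, which keeps the container's entry process alive while they run. A stripped-down sketch of that pattern, with placeholder commands standing in for the real servers:

#!/bin/sh
# Start two long-lived services in the background, remember their PIDs,
# and keep the script (and therefore the container) alive until both exit.
sleep 60 &    # placeholder for: python -m serve.controller --host 0.0.0.0 --port 10000
P1=$!         # $! expands to the PID of the most recent background command
sleep 60 &    # placeholder for: python -m serve.gradio_web_server ...
P2=$!
wait $P1 $P2  # returns once both background processes have terminated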