#!/bin/bash
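
# Keep the Hugging Face cache (downloaded model weights) under /data, which is assumed to be persistent storage.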
export HF_HOME="/data/.huggingface"
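
# Debug output, plus write the HF token to .hf_token (presumably read by the processes launched below).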
echo "PWD: $(pwd)"
echo "$HF_TOKEN" > .hf_token
echo "LS: $(ls -als)"
while true; do nvidia-smi; sleep 600; done &
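
# Sanity-check that PyTorch can see the GPU before starting any services.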
python3 -c "import torch; \
print(f\"is available = {torch.cuda.is_available()}\"); \
print(f\"device count = {torch.cuda.device_count()}\"); \
print(f\"current device = {torch.cuda.current_device()}\")"
COMPONENT="serve.controller"
echo "Starting $COMPONENT"
nohup python3 -m serve.controller --host 0.0.0.0 --port 10000 2>&1 | tee serve.controller.log &
until grep -q "Uvicorn running on" serve.controller.log 2>/dev/null; do
  sleep 1s
  echo "waiting for $COMPONENT to be running"
done
echo "$COMPONENT is running"
COMPONENT="interactive_demo prism-dinosiglip+13b"
echo "Starting $COMPONENT"
nohup python3 -m interactive_demo --port 40000 --model_id prism-dinosiglip+13b 2>&1 | tee prism-dinosiglip_13b.log &
until grep -q "loaded prismatic prism-dinosiglip+13b" prism-dinosiglip_13b.log 2>/dev/null; do
  sleep 10s
  echo "waiting for $COMPONENT to be running"
done
echo "$COMPONENT is running"
ls -als "$HF_HOME"
tree --du -h "$HF_HOME"
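
# Run the Gradio front end in the foreground; it connects to the controller on port 10000 and keeps the script alive.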
echo "Starting serve.gradio_web_server"
python3 -m serve.gradio_web_server --controller http://127.0.0.1:10000 --model-list-mode reload