Commit 03e01a8 by yuanze1024
Parent(s): f15a1cd

bugfix & remove redundent uni3d
Files changed:
- app.py (+4, -4)
- dockerfile (+3, -3)
- feature_extractors/uni3d_embedding_encoder.py (+12, -11)
app.py
CHANGED
@@ -6,8 +6,8 @@ import functools
 from datasets import load_dataset
 from feature_extractors.uni3d_embedding_encoder import Uni3dEmbeddingEncoder

-
-
+os.environ['HTTP_PROXY'] = 'http://192.168.48.17:18000'
+os.environ['HTTPS_PROXY'] = 'http://192.168.48.17:18000'

 MAX_BATCH_SIZE = 16
 MAX_QUEUE_SIZE = 10
@@ -119,8 +119,8 @@ def launch():

     demo.queue(max_size=10)

-
-
+    os.environ.pop('HTTP_PROXY')
+    os.environ.pop('HTTPS_PROXY')

     demo.launch(server_name='0.0.0.0')

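The app.py change follows a set-then-remove pattern: the proxy variables are exported before the dataset and model downloads run at import time, and popped again just before the Gradio server starts, so the proxy only affects the download phase. A minimal sketch of the same idea as a reusable helper follows; the temporary_proxy context manager and its usage are illustrative, not code from this Space.

import os
from contextlib import contextmanager

@contextmanager
def temporary_proxy(url):
    """Set HTTP(S)_PROXY for the duration of the block, then restore the old values."""
    saved = {key: os.environ.get(key) for key in ("HTTP_PROXY", "HTTPS_PROXY")}
    os.environ["HTTP_PROXY"] = url
    os.environ["HTTPS_PROXY"] = url
    try:
        yield
    finally:
        for key, old in saved.items():
            if old is None:
                os.environ.pop(key, None)  # variable was unset before: remove it again
            else:
                os.environ[key] = old      # variable existed before: restore its value

# Usage sketch: route only the download phase through the proxy from the diff.
# with temporary_proxy("http://192.168.48.17:18000"):
#     dataset = load_dataset(...)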
dockerfile
CHANGED
@@ -7,13 +7,13 @@ LABEL email="yuanze1024@gmail.com"
 # Install webp support
 RUN apt update && apt install libwebp-dev -y

-RUN pip install -r requirements.txt
+RUN pip install -r requirements.txt

 # note that you may need to modify the TORCH_CUDA_ARCH_LIST in the setup.py file
 ENV TORCH_CUDA_ARCH_LIST="8.6"

 # Install Pointnet2_PyTorch
 RUN git clone https://github.com/erikwijmans/Pointnet2_PyTorch.git \
-    &&
+    && cp -f change_setup.txt Pointnet2_PyTorch/pointnet2_ops_lib/setup.py \
     && cd Pointnet2_PyTorch/pointnet2_ops_lib \
-    &&
+    && pip install .
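In the dockerfile, `cp -f change_setup.txt Pointnet2_PyTorch/pointnet2_ops_lib/setup.py` swaps in a modified setup script before `pip install .`. The contents of change_setup.txt are not part of this diff; judging by the comment about TORCH_CUDA_ARCH_LIST and the `ENV TORCH_CUDA_ARCH_LIST="8.6"` line, it presumably makes the build honor that architecture list. A hypothetical excerpt of such a patched setup.py could look like this:

# Hypothetical excerpt of a patched pointnet2_ops_lib/setup.py; the real
# change_setup.txt shipped with this Space is not shown in the commit.
import os

# Use the architecture list provided by the environment (the dockerfile sets
# TORCH_CUDA_ARCH_LIST="8.6"), falling back to a default only if it is unset.
os.environ.setdefault("TORCH_CUDA_ARCH_LIST", "8.6")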
feature_extractors/uni3d_embedding_encoder.py
CHANGED
@@ -281,21 +281,21 @@ def create_uni3d(uni3d_path):
 class Uni3dEmbeddingEncoder(FeatureExtractor):
     def __init__(self, cache_dir, **kwargs) -> None:
         bpe_path = "utils/bpe_simple_vocab_16e6.txt.gz"
-        uni3d_path = os.path.join(cache_dir, "Uni3D", "modelzoo", "uni3d-g", "model.pt") # concat the subfolder as hf_hub_download will put it here
+        # uni3d_path = os.path.join(cache_dir, "Uni3D", "modelzoo", "uni3d-g", "model.pt") # concat the subfolder as hf_hub_download will put it here
         clip_path = os.path.join(cache_dir, "Uni3D", "open_clip_pytorch_model.bin")

-        if not os.path.exists(uni3d_path):
-            hf_hub_download("BAAI/Uni3D", "model.pt", subfolder="modelzoo/uni3d-g", cache_dir=cache_dir,
-                            local_dir=cache_dir + os.sep + "Uni3D")
+        # if not os.path.exists(uni3d_path):
+        #     hf_hub_download("BAAI/Uni3D", "model.pt", subfolder="modelzoo/uni3d-g", cache_dir=cache_dir,
+        #                     local_dir=cache_dir + os.sep + "Uni3D")
         if not os.path.exists(clip_path):
             hf_hub_download("timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k", "open_clip_pytorch_model.bin",
                             cache_dir=cache_dir, local_dir=cache_dir + os.sep + "Uni3D")

         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         self.tokenizer = SimpleTokenizer(bpe_path)
-        self.model = create_uni3d(uni3d_path)
-        self.model.eval()
-        self.model.to(self.device)
+        # self.model = create_uni3d(uni3d_path)
+        # self.model.eval()
+        # self.model.to(self.device)
         self.clip_model, _, self.preprocess = open_clip.create_model_and_transforms(model_name="EVA02-E-14-plus", pretrained=clip_path)
         self.clip_model.to(self.device)

@@ -309,10 +309,11 @@ class Uni3dEmbeddingEncoder(FeatureExtractor):

     @torch.no_grad()
     def encode_3D(self, data):
-        pc = data.to(device=self.device, non_blocking=True)
-        pc_features = self.model.encode_pc(pc)
-        pc_features = pc_features / pc_features.norm(dim=-1, keepdim=True)
-        return pc_features.float()
+        pass
+        # pc = data.to(device=self.device, non_blocking=True)
+        # pc_features = self.model.encode_pc(pc)
+        # pc_features = pc_features / pc_features.norm(dim=-1, keepdim=True)
+        # return pc_features.float()

     @torch.no_grad()
     def encode_text(self, input_text):
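After this commit, Uni3dEmbeddingEncoder no longer downloads or builds the Uni3D point-cloud model; only the EVA02 CLIP model is loaded, and encode_3D is reduced to a stub that returns None. A minimal defensive sketch for callers follows; the wrapper name and arguments are illustrative, not identifiers from this repository.

from typing import Optional
import torch

def encode_query_pc(encoder, query_pc: torch.Tensor) -> Optional[torch.Tensor]:
    """Illustrative wrapper: encode_3D() is now a stub and returns None."""
    features = encoder.encode_3D(query_pc)
    if features is None:
        # The Uni3D branch was removed in this commit; only text/image queries
        # via the CLIP model remain available.
        raise NotImplementedError("3D feature extraction is disabled in this build")
    return features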