move model to huggingface
Files changed:
- README.md: +5 -6
- app.py: +8 -5
- inference.py: +0 -1
README.md
CHANGED
@@ -19,7 +19,7 @@ Please refer there for more information about the project and implementation.
 
 ## Installation
 
-
+### Requirements
 
 For the docker build, you will just need docker in order to build and run the container, else you will need
 
@@ -30,18 +30,17 @@ For the docker build, you will just need docker in order to build and run the container, else you will need
 
 A full list of other packages can be found in the Dockerfile, or in `Open3D/util/install_deps_ubuntu.sh`.
 
-
-
-work will make this step no longer required.
+The model file can currently be found [here](https://huggingface.co/3dlg-hcvc/opdmulti-motion-state-rgb-model) and is
+downloaded as part of the demo code.
 
-### Docker (preferred)
+### Docker Build (preferred)
 
 To build the docker container, run
 ```
 docker build -f Dockerfile -t opdmulti-demo .
 ```
 
-### Local
+### Local Build
 
 To setup the environment, run the following (recommended in a virtual environment):
 ```
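Since the weights now live on the Hugging Face Hub, they can also be fetched ahead of time with `huggingface_hub`; a minimal sketch, using the repo id and filename that appear in the app.py diff below:

```python
from huggingface_hub import hf_hub_download

# Downloads the checkpoint into the Hugging Face cache
# (~/.cache/huggingface/hub by default) and returns its local path;
# re-running reuses the cached copy instead of downloading again.
weights_path = hf_hub_download(
    repo_id="3dlg-hcvc/opdmulti-motion-state-rgb-model",
    filename="pytorch_model.pth",
)
print(weights_path)
```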
app.py
CHANGED
@@ -3,11 +3,12 @@ import re
 import shutil
 import time
 from types import SimpleNamespace
-from typing import Any, Callable, Generator
+from typing import Any, Callable, Generator
 
 import gradio as gr
 import numpy as np
 from detectron2 import engine
+from huggingface_hub import hf_hub_download
 from natsort import natsorted
 from PIL import Image
 
@@ -20,7 +21,7 @@ SCORE_THRESHOLD = 0.8
 MAX_PARTS = 5  # TODO: we can replace this by having a slider and a single image visualization component rather than multiple components
 ARGS = SimpleNamespace(
     config_file="configs/coco/instance-segmentation/swin/opd_v1_real.yaml",
-    model="
+    model={"repo_id": "3dlg-hcvc/opdmulti-motion-state-rgb-model", "filename": "pytorch_model.pth"},
     input_format="RGB",
     output=".output",
     cpu=True,
@@ -87,6 +88,8 @@ def predict(rgb_image: str, depth_image: str, intrinsic: np.ndarray, num_samples
         return [None] * 5
 
     # run model
+    weights_path = hf_hub_download(repo_id=ARGS.model["repo_id"], filename=ARGS.model["filename"])
+    ARGS.model = weights_path
     cfg = setup_cfg(ARGS)
     engine.launch(
         main,
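`hf_hub_download` caches the checkpoint locally and returns the cached path on later calls, so the download itself is cheap to repeat. Overwriting `ARGS.model` with that path is less benign: on a second `predict` call, `ARGS.model["repo_id"]` would index into a string and raise a `TypeError`. A hypothetical guard, not part of this commit:

```python
# Hypothetical guard: resolve the checkpoint only while ARGS.model still
# holds the repo spec; after the first call it is already a local path.
if isinstance(ARGS.model, dict):
    ARGS.model = hf_hub_download(
        repo_id=ARGS.model["repo_id"], filename=ARGS.model["filename"]
    )
```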
@@ -117,7 +120,7 @@ def predict(rgb_image: str, depth_image: str, intrinsic: np.ndarray, num_samples
 
 
 def get_trigger(
-    idx: int, fps: int =
+    idx: int, fps: int = 15, oscillate: bool = True
 ) -> Callable[[str], Generator[Image.Image, None, None]]:
     """
     Return event listener trigger function for image component to animate image sequence.
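The body of `get_trigger` is outside this hunk, so the semantics of the new parameters are not shown; presumably `fps` paces the animation and `oscillate` plays the frame sequence forward and then backward. A minimal sketch of that behavior under those assumptions (the helper name and logic are illustrative, not the app's actual code):

```python
import time
from typing import Generator, List

from PIL import Image


def animate(
    frames: List[Image.Image], fps: int = 15, oscillate: bool = True
) -> Generator[Image.Image, None, None]:
    # Ping-pong order: a forward pass, then a reverse pass that skips the
    # repeated endpoints, e.g. [0, 1, 2, 1] for three frames.
    order = list(range(len(frames)))
    if oscillate:
        order += order[-2:0:-1]
    for i in order:
        yield frames[i]
        time.sleep(1 / fps)  # crude pacing at roughly `fps` frames per second
```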
@@ -260,8 +263,8 @@ with gr.Blocks() as demo:
         image_comp.select(get_trigger(idx), inputs=rgb_image, outputs=image_comp, api_name=False)
 
     # if user changes input, clear output images
-    rgb_image.change(clear_outputs, inputs=
-    depth_image.change(clear_outputs, inputs=
+    rgb_image.change(clear_outputs, inputs=[], outputs=images, api_name=False)
+    depth_image.change(clear_outputs, inputs=[], outputs=images, api_name=False)
 
     submit_btn.click(
         fn=predict, inputs=[rgb_image, depth_image, intrinsic, num_samples], outputs=images, api_name=False
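With `inputs=[]`, Gradio calls the listener with no arguments; it only needs to return one value per component in `outputs`, and returning `None` clears an image component. A self-contained sketch of the wiring, assuming `clear_outputs` looks roughly like this:

```python
import gradio as gr

MAX_PARTS = 5


def clear_outputs():
    # One return value per output component; None resets each Image to empty.
    return [None] * MAX_PARTS


with gr.Blocks() as demo:
    rgb_image = gr.Image(type="filepath")
    images = [gr.Image() for _ in range(MAX_PARTS)]
    # inputs=[] -> the callback takes no arguments; its return values
    # are routed to the components listed in outputs.
    rgb_image.change(clear_outputs, inputs=[], outputs=images, api_name=False)
```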
inference.py
CHANGED
@@ -44,7 +44,6 @@ from visualization import (
     generate_rotation_visualization,
     generate_translation_visualization,
     batch_trim,
-    create_gif,
 )
 
 # import based on torch version. Required for model loading. Code is taken from fvcore.common.checkpoint, in order to