ZiyuG committed
Commit fc930ba · 1 Parent(s): 86e6120

Update demo_utils.py

Files changed (1)
  1. demo_utils.py +7 -31
demo_utils.py CHANGED
@@ -11,21 +11,12 @@ import sam2point.dataset as dataset
 import sam2point.configs as configs
 from sam2point.voxelizer import Voxelizer
 from sam2point.utils import cal
-
 import matplotlib.pyplot as plt
 import plotly.graph_objects as go
 
 print("Torch CUDA:", torch.cuda.is_available())
-# use bfloat16 for the entire notebook
 torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()
 
-
-# if torch.cuda.get_device_properties(0).major >= 8:
-#     # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)
-#     torch.backends.cuda.matmul.allow_tf32 = True
-#     torch.backends.cudnn.allow_tf32 = True
-
-
 def run_demo(dataset_name, prompt_type, sample_idx, prompt_idx, voxel_size, theta, mode, ret_prompt):
     parser = argparse.ArgumentParser()
     parser.add_argument('--dataset', choices=['S3DIS', 'ScanNet', 'Objaverse', 'KITTI', 'Semantic3D'], default='Objaverse', help='dataset selected')
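The block removed above is the commented-out TF32 setup carried over from the SAM 2 notebook preamble. For reference, a minimal runnable sketch of that preamble, reconstructed from the removed comments; the is_available() guard is an addition for safety, and the TF32 flags only take effect on Ampere or newer GPUs:

# Sketch of the trimmed mixed-precision preamble, reconstructed from the removed comments.
import torch

# run the whole demo under bfloat16 autocast, entered globally as in the script
torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()

if torch.cuda.is_available() and torch.cuda.get_device_properties(0).major >= 8:
    # enable TensorFloat-32 matmul/cuDNN kernels on Ampere and newer GPUs
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True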
@@ -33,19 +24,18 @@ def run_demo(dataset_name, prompt_type, sample_idx, prompt_idx, voxel_size, theta, mode, ret_prompt):
     parser.add_argument('--sample_idx', type=int, default=2, help='the index of the scene or object')
     parser.add_argument('--prompt_idx', type=int, default=0, help='the index of the prompt')
     parser.add_argument('--voxel_size', type=float, default=0.02, help='voxel size')
-    parser.add_argument('--theta', type=float, default=0.5) # indoor NOTE
-    parser.add_argument('--mode', type=str, default='bilinear') # indoor NOTE
+    parser.add_argument('--theta', type=float, default=0.5)
+    parser.add_argument('--mode', type=str, default='bilinear')
     parser.add_argument("--ret_prompt", action="store_true")
     args = parser.parse_args()
     args.dataset, args.prompt_type, args.sample_idx, args.prompt_idx = dataset_name, prompt_type, sample_idx, prompt_idx
     args.voxel_size, args.theta, args.mode, args.ret_prompt = voxel_size, theta, mode, ret_prompt
     print(args)
 
-    #cache
     name_list = [args.dataset, "sample" + str(args.sample_idx), args.prompt_type + "-prompt" + str(args.prompt_idx)]
     name = '_'.join(name_list)
 
-    # hf
+    # use cache result for speeding up
     repo_id = "ZiyuG/Cache"
     result_name = "cache_results/" + name + '.npy'
     prompt_name = "cache_prompt/" + name + '.npy'
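The replaced comment marks the caching path: cached results and prompts are looked up in the Hub repo ZiyuG/Cache (cache_results/*.npy, cache_prompt/*.npy) before recomputing. The download itself happens outside this hunk; a hedged sketch of what such a lookup could look like with huggingface_hub, where try_fetch_cached is a hypothetical helper, not code from this repo:

# Hypothetical cache lookup; the real download call is not shown in this hunk.
from huggingface_hub import hf_hub_download

def try_fetch_cached(repo_id, result_name, prompt_name):
    try:
        result_path = hf_hub_download(repo_id=repo_id, filename=result_name)
        prompt_path = hf_hub_download(repo_id=repo_id, filename=prompt_name)
        return result_path, prompt_path
    except Exception:
        # cache miss or network error: fall through and recompute the segmentation
        return None, None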
@@ -64,7 +54,7 @@ def run_demo(dataset_name, prompt_type, sample_idx, prompt_idx, voxel_size, theta, mode, ret_prompt):
         PROMPT = np.load("./cache_prompt/" + name + '.npy')
         if not args.ret_prompt: return new_color, PROMPT
         else: return PROMPT
-    #########
+
     if args.dataset == 'S3DIS':
         info = configs.S3DIS_samples[args.sample_idx]
         # early return
@@ -99,14 +89,12 @@ def run_demo(dataset_name, prompt_type, sample_idx, prompt_idx, voxel_size, theta, mode, ret_prompt):
         point, color = dataset.load_Semantic3D_sample(info['path'], args.sample_idx)
         args.voxel_size = info[configs.VOXEL[args.prompt_type]][args.prompt_idx]
 
-
     point_color = np.concatenate([point, color], axis=1)
     voxelizer = Voxelizer(voxel_size=args.voxel_size, clip_bound=None)
 
     labels_in = point[:, :1].astype(int)
     locs, feats, labels, inds_reconstruct = voxelizer.voxelize(point, color, labels_in)
 
-
     if args.prompt_type == 'point':
         if args.ret_prompt: return list(np.array(info['point_prompts'])[args.prompt_idx])
         mask = seg_point(locs, feats, info['point_prompts'], args)
@@ -145,9 +133,8 @@ def run_demo(dataset_name, prompt_type, sample_idx, prompt_idx, voxel_size, theta, mode, ret_prompt):
 
     name_list = [args.dataset, "sample" + str(args.sample_idx), args.prompt_type + "-prompt" + str(args.prompt_idx)]
     name = '_'.join(name_list) + 'frames'
-    # os.system('rm -rf ' + name)
 
-    #cache
+    #cache for speeding up
     name_list = [args.dataset, "sample" + str(args.sample_idx), args.prompt_type + "-prompt" + str(args.prompt_idx)]
     name = '_'.join(name_list)
     os.makedirs("cache_results", exist_ok=True)
@@ -156,9 +143,6 @@ def run_demo(dataset_name, prompt_type, sample_idx, prompt_idx, voxel_size, theta, mode, ret_prompt):
     np.save("./cache_prompt/" + name + '.npy', PROMPT)
     return new_color, PROMPT
 
-
-
-
 def create_box(prompt):
     x_min, y_min, z_min, x_max, y_max, z_max = tuple(prompt)
     bbox_points = np.array([
@@ -171,15 +155,11 @@ def create_box(prompt):
         [x_max, y_max, z_max],
         [x_min, y_max, z_max]
     ])
-
-
     edges = [
         (0, 1), (1, 2), (2, 3), (3, 0),  # Bottom face
         (4, 5), (5, 6), (6, 7), (7, 4),  # Top face
         (0, 4), (1, 5), (2, 6), (3, 7)   # Vertical edges
     ]
-
-
     bbox_lines = []
     f = 1
     for start, end in edges:
@@ -188,13 +168,9 @@ def create_box(prompt):
             y=[bbox_points[start, 1], bbox_points[end, 1]],
             z=[bbox_points[start, 2], bbox_points[end, 2]],
             mode='lines',
-            # line=dict(color='red', width=2), # Customize color and width
-            # line=dict(color='rgb(255, 140, 0)', width=4), # Customize color and width
-            line=dict(color='rgb(220, 20, 60)', width=6), # Customize color and width
+            line=dict(color='rgb(220, 20, 60)', width=6),
             name="Box Prompt" if f == 1 else "",
             showlegend=True if f == 1 else False
         ))
         f = 0
-    return bbox_lines
-
-
+    return bbox_lines
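create_box returns a list of go.Scatter3d line traces, one per box edge, with the legend entry attached only to the first trace. A hedged usage sketch of how those traces could be combined with a point cloud in a single Plotly figure; the arrays and box coordinates below are placeholders, not values from this repo, and the import assumes demo_utils.py is importable from the working directory:

# Hypothetical rendering example: a point cloud plus the box-prompt wireframe.
import numpy as np
import plotly.graph_objects as go
from demo_utils import create_box  # assumes demo_utils.py is on the Python path

points = np.random.rand(1000, 3)              # stand-in for the demo point cloud
box_prompt = [0.2, 0.2, 0.2, 0.8, 0.8, 0.8]   # x_min, y_min, z_min, x_max, y_max, z_max

cloud = go.Scatter3d(
    x=points[:, 0], y=points[:, 1], z=points[:, 2],
    mode='markers', marker=dict(size=2), name="Point Cloud",
)
fig = go.Figure(data=[cloud] + create_box(box_prompt))
fig.show()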
 