import os
import cv2
import shutil
import numpy as np
import torch


def build_fold(path):
    # Return True if the folder already exists; otherwise create it and return False.
    if os.path.exists(path):
        return True
        # Alternative: wipe and recreate the existing folder instead.
        # shutil.rmtree(path)
        # return True
    os.makedirs(path)
    return False


def visualize_frame_with_mask(grid0, grid1, grid2, a0_mask, a1_mask, a2_mask, point_coords, resolution, name, args=None):
    os.makedirs(name + 'frames_seg', exist_ok=True)
    a0_dir, a1_dir, a2_dir = name + 'frames_seg/x', name + 'frames_seg/y', name + 'frames_seg/z'
    # build_fold(a1_dir)
    # build_fold(a2_dir)
    # Broadcast the single-channel masks to three channels, then zero the G and B
    # channels so each mask overlays in red.
    a0_mask, a1_mask, a2_mask = a0_mask.repeat(1, 3, 1, 1), a1_mask.repeat(1, 3, 1, 1), a2_mask.repeat(1, 3, 1, 1)
    a0_mask[:, 1:], a1_mask[:, 1:], a2_mask[:, 1:] = a0_mask[:, 1:] * 0, a1_mask[:, 1:] * 0, a2_mask[:, 1:] * 0
    # Alpha-blend the masks over the three axis-aligned slice stacks.
    grid0, grid1, grid2 = grid0 * 0.7 + a0_mask * 0.3, grid1 * 0.7 + a1_mask * 0.3, grid2 * 0.7 + a2_mask * 0.3
    # Mark the query point in green on each stack.
    grid0[point_coords[0], :, point_coords[1], point_coords[2]] = torch.Tensor([0., 1., 0.])
    grid1[point_coords[1], :, point_coords[0], point_coords[2]] = torch.Tensor([0., 1., 0.])
    grid2[point_coords[2], :, point_coords[0], point_coords[1]] = torch.Tensor([0., 1., 0.])
    # Only render a stack when its output folder did not already exist.
    if not build_fold(a0_dir):
        visualize_per_frame(grid0, a0_dir, resolution, args)
    if not build_fold(a1_dir):
        visualize_per_frame(grid1, a1_dir, resolution, args)
    if not build_fold(a2_dir):
        visualize_per_frame(grid2, a2_dir, resolution, args)
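
# Usage sketch (not part of the original code; tensor shapes are inferred from the
# indexing in visualize_frame_with_mask above): each grid is assumed to be a stack of
# RGB slices shaped (D, 3, H, W) and each mask a matching stack shaped (D, 1, H, W),
# with values in [0, 1]. A hypothetical call might look like:
#
#     D = 32
#     grid = torch.rand(D, 3, D, D)
#     mask = (torch.rand(D, 1, D, D) > 0.5).float()
#     opts = argparse.Namespace(mode='bilinear')   # only `.mode` is read; assumes `import argparse`
#     visualize_frame_with_mask(grid, grid.clone(), grid.clone(),
#                               mask, mask.clone(), mask.clone(),
#                               point_coords=(4, 5, 6), resolution=256,
#                               name='out/', args=opts)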


def visualize_per_frame(grid, foldpath, resolution, args=None):
    # Resize every slice to the requested output resolution.
    grid = torch.nn.functional.interpolate(grid, size=(resolution, resolution), mode=args.mode)
    # gridb = torch.nn.functional.interpolate(grid, size=(256, 256), mode='nearest')
    # grid = grida * 0.8 + gridb * 0.2
    imgs = grid.cpu().numpy()
    # print(imgs[0, :, 0:3, 0:3])
    n, _, _, _ = grid.shape
    for ii in range(n):
        # Convert each slice from [0, 1] floats to 8-bit BGR for OpenCV.
        r = np.uint8(imgs[ii, 0, :, :] * 255)
        g = np.uint8(imgs[ii, 1, :, :] * 255)
        b = np.uint8(imgs[ii, 2, :, :] * 255)
        img = cv2.merge([b, g, r])
        # Optional smoothing filters (disabled):
        # img = cv2.bilateralFilter(img, d=-1, sigmaColor=25, sigmaSpace=7)
        # img = cv2.bilateralFilter(img, d=9, sigmaColor=50, sigmaSpace=16)
        # img = cv2.GaussianBlur(img, (5, 5), 0)
        # img = cv2.medianBlur(img, 5)
        cv2.imwrite('{}/{}.png'.format(foldpath, ii), img)
    return


def cal(input, points):
    # Return the point in `points` (an (N, 3) array) closest to the 3-D query `input`.
    reference_point_3d = np.array(input)
    distances = np.linalg.norm(points - reference_point_3d, axis=1)
    closest_index = np.argmin(distances)
    closest_point = points[closest_index]
    return [closest_point[0], closest_point[1], closest_point[2]]
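

# Minimal usage sketch for `cal` (assumed inputs, not from the original file):
# `points` is an (N, 3) array of candidate 3-D coordinates and `input` is a single
# [x, y, z] query; `cal` snaps the query to its nearest neighbour in `points`.
if __name__ == '__main__':
    pts = np.random.rand(100, 3)            # hypothetical candidate point cloud
    snapped = cal([0.5, 0.5, 0.5], pts)     # nearest candidate to the cube centre
    print(snapped)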