caizhongang committed
Commit 02e54ed
1 Parent(s): 7b80d9b

add annotations for SMPL-X_subset

SMPL-X_subset/Synbody_v1_0_sampled-smpl.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f596de7243082720be6d0cceec616e566d2199382f7589b4a55c7f56b0deb77
+ size 891643079
SMPL-X_subset/Synbody_v1_0_sampled-smplx.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e02e372c35a5be9a4491177f570ffc9f4af40b82147ab01a6baeb1e9136e22d7
+ size 2622572405
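
Both archives are stored as Git LFS pointers, so a plain clone fetches only the three-line pointer files shown above; running "git lfs pull" retrieves the actual zips. Below is a minimal sketch for checking the downloaded archives against the oid sha256 digests in the pointers; the paths are assumed to be relative to the repository root.

import hashlib
from pathlib import Path

# expected sha256 digests, copied from the Git LFS pointer files above
EXPECTED = {
    'SMPL-X_subset/Synbody_v1_0_sampled-smpl.zip':
        '4f596de7243082720be6d0cceec616e566d2199382f7589b4a55c7f56b0deb77',
    'SMPL-X_subset/Synbody_v1_0_sampled-smplx.zip':
        'e02e372c35a5be9a4491177f570ffc9f4af40b82147ab01a6baeb1e9136e22d7',
}

for name, expected in EXPECTED.items():
    path = Path(name)
    if not path.exists():
        print(f"{name}: missing (run 'git lfs pull' first)")
        continue
    digest = hashlib.sha256()
    with path.open('rb') as f:
        # stream in 1 MiB chunks; the archives are ~0.9 GB and ~2.6 GB
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
    status = 'OK' if digest.hexdigest() == expected else 'MISMATCH'
    print(f'{name}: {status}')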
SMPL-X_subset/visualize_2d.py ADDED
@@ -0,0 +1,180 @@
+ """
+ For example:
+
+ >>> python release/visualize_2d.py \
+         --seq_dir synbody_v1_0/20230113/Downtown/LS_0114_004551_088/ \
+         --body_model_path {path_to_body_models} \
+         --save_path vis/LS_0114_004551_088.mp4
+ """
+
+ from pathlib import Path
+
+ import cv2
+ import numpy as np
+ import pyrender
+ import smplx
+ import torch
+ import tqdm
+ import trimesh
+ from pyrender.viewer import DirectionalLight, Node
+
+ # some constants
+ num_betas = 10
+ num_pca_comps = 45
+ flat_hand_mean = False
+
+ w = 1280
+ h = 720
+ fx = fy = max(w, h) / 2
+
+
+ def load_data(seq_dir):
+     seq_dir = Path(seq_dir)
+     # load images (cv2.imread needs a str, not a pathlib.Path)
+     frame_paths = sorted(seq_dir.glob('rgb/*.jpeg'))
+     images = [cv2.imread(str(p)) for p in frame_paths]
+
+     # load parameters
+     person_paths = sorted(seq_dir.glob('smplx/*.npz'))
+     persons = {}
+     for p in person_paths:
+         person_id = p.stem
+         person = dict(np.load(p, allow_pickle=True))
+         # unwrap 0-d object arrays into plain python objects
+         for annot in person.keys():
+             if isinstance(person[annot], np.ndarray) and person[annot].ndim == 0:
+                 person[annot] = person[annot].item()
+         persons[person_id] = person
+
+     return images, persons
+
+
+ def compute_camera_pose(camera_pose):
+     # Convert OpenCV cam pose to OpenGL cam pose
+
+     # x,-y,-z -> x,y,z
+     R_convention = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
+     camera_pose = R_convention @ camera_pose
+
+     return camera_pose
+
+
+ def create_raymond_lights():
+     # set a directional light at the origin, with its -z direction aligned with the +z direction of the camera/world frame
+     matrix = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
+     return [Node(light=DirectionalLight(color=np.ones(3), intensity=2.0), matrix=matrix)]
+
+
+ def draw_overlay(img, camera, camera_pose, meshes):
+     scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0], ambient_light=(0.3, 0.3, 0.3))
+
+     for i, mesh in enumerate(meshes):
+         scene.add(mesh, f'mesh_{i}')
+
+     # Definition of cam_pose: transformation from cam coord to world coord
+     scene.add(camera, pose=camera_pose)
+
+     light_nodes = create_raymond_lights()
+     for node in light_nodes:
+         scene.add_node(node)
+
+     r = pyrender.OffscreenRenderer(viewport_width=w, viewport_height=h, point_size=1)
+     color, _ = r.render(scene, flags=pyrender.RenderFlags.RGBA)
+     r.delete()  # release GL resources; a new renderer is created per frame
+     color = color.astype(np.float32) / 255.0
+
+     # composite the RGBA render over the image, using the alpha channel as the mask
+     valid_mask = (color[:, :, -1] > 0)[:, :, np.newaxis]
+     img = img / 255
+     output_img = color[:, :, :3] * valid_mask + (1 - valid_mask) * img
+     img = (output_img * 255).astype(np.uint8)
+
+     return img
+
+
+ def draw_bboxes(img, bboxes):
+     for person_id, bbox in bboxes.items():
+         x, y, w, h = bbox
+         x, y, w, h = int(x), int(y), int(w), int(h)
+         img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
+         img = cv2.putText(img, person_id, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+
+     return img
+
+
+ def visualize_2d(seq_dir, body_model_path, save_path):
+     # Set device
+     device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+
+     # Initialize body model
+     body_model = smplx.create(
+         body_model_path,
+         model_type='smplx',
+         flat_hand_mean=flat_hand_mean,
+         use_face_contour=True,
+         use_pca=True,
+         num_betas=num_betas,
+         num_pca_comps=num_pca_comps,
+     ).to(device)
+
+     # Initialize components for rendering
+     camera = pyrender.camera.IntrinsicsCamera(fx=fx, fy=fy, cx=w / 2, cy=h / 2)
+     camera_pose = compute_camera_pose(np.eye(4))  # visualize in camera coord
+     material = pyrender.MetallicRoughnessMaterial(
+         metallicFactor=0.0, alphaMode='OPAQUE', baseColorFactor=(1.0, 1.0, 0.9, 1.0)
+     )
+
+     # Load data
+     images, persons = load_data(seq_dir)
+
+     # Draw overlay
+     save_images = []
+     for frame_idx, image in enumerate(tqdm.tqdm(images)):
+         # Prepare meshes to visualize
+         meshes = []
+         for person in persons.values():
+             person = person['smplx']
+             model_output = body_model(
+                 global_orient=torch.tensor(person['global_orient'][[frame_idx]], device=device),
+                 body_pose=torch.tensor(person['body_pose'][[frame_idx]], device=device),
+                 transl=torch.tensor(person['transl'][[frame_idx]], device=device),
+                 betas=torch.tensor(person['betas'][[frame_idx]], device=device),
+                 left_hand_pose=torch.tensor(person['left_hand_pose'][[frame_idx]], device=device),
+                 right_hand_pose=torch.tensor(person['right_hand_pose'][[frame_idx]], device=device),
+                 return_verts=True,
+             )
+             vertices = model_output.vertices.detach().cpu().numpy().squeeze()
+             faces = body_model.faces
+
+             out_mesh = trimesh.Trimesh(vertices, faces, process=False)
+             mesh = pyrender.Mesh.from_trimesh(out_mesh, material=material)
+             meshes.append(mesh)
+
+         image = draw_overlay(image, camera, camera_pose, meshes)
+
+         # Visualize bounding boxes
+         # bboxes = {person_id: person['keypoints2d'][frame_idx] for person_id, person in persons.items()}
+         # image = draw_bboxes(image, bboxes)
+
+         save_images.append(image)
+
+     # Save visualization video
+     Path(save_path).parent.mkdir(parents=True, exist_ok=True)
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+     video = cv2.VideoWriter(save_path, fourcc, fps=15, frameSize=(w, h))
+     for image in save_images:
+         video.write(image)
+     video.release()
+
+     print(f'Visualization video saved at {save_path}')
+
+
+ if __name__ == '__main__':
+     import argparse
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--seq_dir', type=str, required=True, help='directory containing the sequence data.')
+     parser.add_argument(
+         '--body_model_path', type=str, required=True, help='directory in which SMPL-X body models are stored.'
+     )
+     parser.add_argument('--save_path', type=str, required=True, help='path to save the visualization video.')
+     args = parser.parse_args()
+
+     visualize_2d(args.seq_dir, args.body_model_path, args.save_path)
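
For reference, visualize_2d.py expects each smplx/*.npz to hold a dict with an 'smplx' entry containing per-frame parameter arrays (global_orient, body_pose, transl, betas, left_hand_pose, right_hand_pose). Below is a minimal sketch for inspecting one annotation file before rendering; the file name 0000.npz is a placeholder, not a name confirmed by the dataset.

import numpy as np

# placeholder path: any per-person file under <seq_dir>/smplx/
npz_path = 'synbody_v1_0/20230113/Downtown/LS_0114_004551_088/smplx/0000.npz'

person = dict(np.load(npz_path, allow_pickle=True))
# 0-d object arrays wrap python dicts; unwrap them the same way load_data() does
for key, value in person.items():
    if isinstance(value, np.ndarray) and value.ndim == 0:
        person[key] = value.item()

smplx_params = person['smplx']
for name in ('global_orient', 'body_pose', 'transl', 'betas', 'left_hand_pose', 'right_hand_pose'):
    arr = np.asarray(smplx_params[name])
    print(f'{name}: shape={arr.shape}, dtype={arr.dtype}')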