"""
Example:

>>> python release/visualize_2d.py \
        --seq_dir synbody_v1_0/20230113/Downtown/LS_0114_004551_088/ \
        --body_model_path {path_to_body_models} \
        --save_path vis/LS_0114_004551_088.mp4
"""

from pathlib import Path

import cv2
import numpy as np
import pyrender
import smplx
import torch
import tqdm
import trimesh
from pyrender.viewer import DirectionalLight, Node
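
# Note: pyrender's offscreen rendering may require a headless OpenGL backend
# on machines without a display, e.g. PYOPENGL_PLATFORM=egl or
# PYOPENGL_PLATFORM=osmesa in the environment.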

# SMPL-X body model constants
num_betas = 10
num_pca_comps = 45
flat_hand_mean = False

# Rendering resolution and camera intrinsics (w and h are assumed to match the
# dataset's RGB frames)
w = 1280
h = 720
fx = fy = max(w, h) / 2
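# With fx = fy = 640 and w = 1280, the horizontal field of view is
# 2 * atan(w / (2 * fx)) = 90 degrees.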


def load_data(seq_dir):
    """Load the RGB frames and per-person SMPL-X annotations of a sequence."""
    seq_dir = Path(seq_dir)
    # Load images (cv2.imread expects a string path)
    frame_paths = sorted(seq_dir.glob('rgb/*.jpeg'))
    images = [cv2.imread(str(p)) for p in frame_paths]

    # Load parameters: one .npz file per person, keyed by its filename stem
    person_paths = sorted(seq_dir.glob('smplx/*.npz'))
    persons = {}
    for p in person_paths:
        person_id = p.stem
        person = dict(np.load(p, allow_pickle=True))
        # Unwrap 0-dim object arrays (np.savez stores nested dicts this way)
        for annot in person.keys():
            if isinstance(person[annot], np.ndarray) and person[annot].ndim == 0:
                person[annot] = person[annot].item()
        persons[person_id] = person

    return images, persons
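
# Expected on-disk layout, inferred from the glob patterns in load_data:
#   seq_dir/
#       rgb/*.jpeg     (per-frame RGB renders)
#       smplx/*.npz    (one SMPL-X parameter file per person)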


def compute_camera_pose(camera_pose):
    """Convert an OpenCV camera pose (x right, y down, z forward) to the
    OpenGL convention used by pyrender (x right, y up, z backward)."""
    # Flip the y and z axes: (x, -y, -z) -> (x, y, z)
    R_convention = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
    camera_pose = R_convention @ camera_pose

    return camera_pose
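
# Illustrative check: for the identity pose used in visualize_2d below,
#   compute_camera_pose(np.eye(4))[:3, :3]
# equals np.diag([1.0, -1.0, -1.0]), i.e. the y and z axes are flipped.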


def create_raymond_lights():
    # A single directional light at the origin, with its local -z axis aligned
    # with the +z axis of the camera/world frame, so it shines along the
    # viewing direction
    matrix = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
    return [Node(light=DirectionalLight(color=np.ones(3), intensity=2.0), matrix=matrix)]


def draw_overlay(img, camera, camera_pose, meshes):
    scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0], ambient_light=(0.3, 0.3, 0.3))

    for i, mesh in enumerate(meshes):
        scene.add(mesh, f'mesh_{i}')

    # Definition of camera_pose: the transformation from camera coordinates to world coordinates
    scene.add(camera, pose=camera_pose)

    light_nodes = create_raymond_lights()
    for node in light_nodes:
        scene.add_node(node)

    r = pyrender.OffscreenRenderer(viewport_width=w, viewport_height=h, point_size=1)
    color, _ = r.render(scene, flags=pyrender.RenderFlags.RGBA)
    r.delete()  # free the offscreen GL context; a new renderer is created per frame
    color = color.astype(np.float32) / 255.0

    # Composite the RGBA rendering over the image, using the alpha channel as
    # the mask and converting the rendered RGB to OpenCV's BGR channel order
    valid_mask = (color[:, :, -1] > 0)[:, :, np.newaxis]
    img = img / 255
    output_img = color[:, :, 2::-1] * valid_mask + (1 - valid_mask) * img
    img = (output_img * 255).astype(np.uint8)

    return img


def draw_bboxes(img, bboxes):
    # bboxes maps person_id to an (x, y, w, h) box in pixels
    for person_id, bbox in bboxes.items():
        bx, by, bw, bh = (int(v) for v in bbox)
        img = cv2.rectangle(img, (bx, by), (bx + bw, by + bh), (0, 0, 255), 2)
        img = cv2.putText(img, person_id, (bx, by - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    return img
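
# Minimal usage sketch with hypothetical values:
#   draw_bboxes(img, {'person_0': (100, 80, 50, 120)})  # red box labeled 'person_0'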


def visualize_2d(seq_dir, body_model_path, save_path):
    # Set device
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    # Initialize body model
    body_model = smplx.create(
        body_model_path,
        model_type='smplx',
        flat_hand_mean=flat_hand_mean,
        use_face_contour=True,
        use_pca=True,
        num_betas=num_betas,
        num_pca_comps=num_pca_comps,
    ).to(device)

    # Initialize components for rendering
    camera = pyrender.camera.IntrinsicsCamera(fx=fx, fy=fy, cx=w / 2, cy=h / 2)
    camera_pose = compute_camera_pose(np.eye(4))  # visualize in camera coord
    material = pyrender.MetallicRoughnessMaterial(
        metallicFactor=0.0, alphaMode='OPAQUE', baseColorFactor=(1.0, 1.0, 0.9, 1.0)
    )

    # Load data
    images, persons = load_data(seq_dir)

    # Draw overlay
    save_images = []
    for frame_idx, image in enumerate(tqdm.tqdm(images)):
        # Prepare meshes to visualize
        meshes = []
        for person in persons.values():
            smplx_params = person['smplx']  # per-person SMPL-X parameter sequences
            model_output = body_model(
                global_orient=torch.tensor(smplx_params['global_orient'][[frame_idx]], device=device),
                body_pose=torch.tensor(smplx_params['body_pose'][[frame_idx]], device=device),
                transl=torch.tensor(smplx_params['transl'][[frame_idx]], device=device),
                betas=torch.tensor(smplx_params['betas'][[frame_idx]], device=device),
                left_hand_pose=torch.tensor(smplx_params['left_hand_pose'][[frame_idx]], device=device),
                right_hand_pose=torch.tensor(smplx_params['right_hand_pose'][[frame_idx]], device=device),
                return_verts=True,
            )
            vertices = model_output.vertices.detach().cpu().numpy().squeeze()
            faces = body_model.faces

            out_mesh = trimesh.Trimesh(vertices, faces, process=False)
            mesh = pyrender.Mesh.from_trimesh(out_mesh, material=material)
            meshes.append(mesh)

        image = draw_overlay(image, camera, camera_pose, meshes)

        # Optionally visualize bounding boxes (uncomment if per-frame boxes are available):
        # bboxes = {person_id: person['keypoints2d'][frame_idx] for person_id, person in persons.items()}
        # image = draw_bboxes(image, bboxes)

        save_images.append(image)

    # Save visualization video
    Path(save_path).parent.mkdir(parents=True, exist_ok=True)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter(save_path, fourcc, fps=15, frameSize=(w, h))
    for image in save_images:
        video.write(image)
    video.release()

    print(f'Visualization video saved at {save_path}')


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--seq_dir', type=str, required=True, help='directory containing the sequence data.')
    parser.add_argument(
        '--body_model_path', type=str, required=True, help='directory in which SMPL body models are stored.'
    )
    parser.add_argument('--save_path', type=str, required=True, help='path to save the visualization video.')
    args = parser.parse_args()

    visualize_2d(args.seq_dir, args.body_model_path, args.save_path)