diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..72e3f146a5c6bfbdee30b40903cebb1b4e79ad5e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,20 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +h36m_detailed/16/metric_full_original_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/16/metric_original_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/16/metric_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/16/metric_train.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/16/sample_original_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/32/metrics_original_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/32/samples_original_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/64/metric_full_original_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/64/metric_original_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/64/metric_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/64/metric_train.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/64/sample_original_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/8/metric_full_original_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/8/metric_original_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/8/metric_test.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/8/metric_train.xlsx filter=lfs diff=lfs merge=lfs -text +h36m_detailed/8/sample_original_test.xlsx filter=lfs diff=lfs merge=lfs -text diff --git a/amass_h36m_models/CISTGCN_M16_AMASS.tar b/amass_h36m_models/CISTGCN_M16_AMASS.tar new file mode 100644 index 0000000000000000000000000000000000000000..6ae6d00737dd2c0a8b45ce6482432093366b53e6 --- /dev/null +++ b/amass_h36m_models/CISTGCN_M16_AMASS.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f56dbfb8a5f34a0984c6de9f1ee795b94862c8c3c20aac38c0956204503c47a +size 4354193 diff --git a/amass_h36m_models/CISTGCN_M16_H36M.tar b/amass_h36m_models/CISTGCN_M16_H36M.tar new file mode 100644 index 0000000000000000000000000000000000000000..b2e8b60992dc957556217c72ab9f54cd00ee2d18 --- /dev/null +++ b/amass_h36m_models/CISTGCN_M16_H36M.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9d99e17e43f39c998b141adf81c261a8b1eb78084feed8bf9de722a51111aef +size 5935307 diff --git a/amass_h36m_models/CISTGCN_M32_AMASS.tar b/amass_h36m_models/CISTGCN_M32_AMASS.tar new file mode 100644 index 0000000000000000000000000000000000000000..2391595209e9f63c119e0410e832980b35ca9e81 --- /dev/null +++ b/amass_h36m_models/CISTGCN_M32_AMASS.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:228b649b126a634ed070c7a60bcbeb37504ab5f2b4948acd2556edcf342aac1f +size 6327249 diff --git a/amass_h36m_models/CISTGCN_M32_H36M.tar b/amass_h36m_models/CISTGCN_M32_H36M.tar new file mode 100644 index 0000000000000000000000000000000000000000..dd4725cfa8a34ff986bfc351c00ca3df70d9ad4a --- /dev/null +++ b/amass_h36m_models/CISTGCN_M32_H36M.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d1d356c1b73f1bc6d0d056e643f345a4727373779fe8e9aabbd23b58c3ca343 +size 8133899 diff --git a/amass_h36m_models/CISTGCN_M64_H36M.tar b/amass_h36m_models/CISTGCN_M64_H36M.tar new file mode 100644 index 
0000000000000000000000000000000000000000..cb22a66d82f2110983abc41a33a4bc0cc2d09f27 --- /dev/null +++ b/amass_h36m_models/CISTGCN_M64_H36M.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb41d06736803c4e7b0aa66e36820440d5125b072739610481d1c06c23cedb5a +size 16582347 diff --git a/amass_h36m_models/CISTGCN_M8_H36M.tar b/amass_h36m_models/CISTGCN_M8_H36M.tar new file mode 100644 index 0000000000000000000000000000000000000000..d90b13999f9ad96742653eb879b398243a627f44 --- /dev/null +++ b/amass_h36m_models/CISTGCN_M8_H36M.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47b28248ab629ce18f5908f0c39c1d4700d12c5539f64828ffe4b73ee9c3c5af +size 5339339 diff --git a/amass_h36m_models/CISTGCN_best.pth.tar b/amass_h36m_models/CISTGCN_best.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..dd4725cfa8a34ff986bfc351c00ca3df70d9ad4a --- /dev/null +++ b/amass_h36m_models/CISTGCN_best.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d1d356c1b73f1bc6d0d056e643f345a4727373779fe8e9aabbd23b58c3ca343 +size 8133899 diff --git a/amass_h36m_models/short-CISTGCN-400ms-16-best.pth.tar b/amass_h36m_models/short-CISTGCN-400ms-16-best.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..6376618bfcdeb7c25c8aa0a9bb7c50bc1599b557 --- /dev/null +++ b/amass_h36m_models/short-CISTGCN-400ms-16-best.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c161bc7186d800db0d372133d13ac4bdf01ca89ca7d165e22386890088e64e6 +size 3827665 diff --git a/amass_h36m_models/short-CISTGCN-400ms-32-best.pth.tar b/amass_h36m_models/short-CISTGCN-400ms-32-best.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..e66437e510bc5b29af69867ea9b63988363caa63 --- /dev/null +++ b/amass_h36m_models/short-CISTGCN-400ms-32-best.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:565aa3f07715a52021a481065af53bf6b6f2e438a1fb8ea1cc5ea3ed0ccbd715 +size 6026705 diff --git a/h36m_detailed/16/files/CISTGCN-benchmark-best.pth.tar b/h36m_detailed/16/files/CISTGCN-benchmark-best.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..b2e8b60992dc957556217c72ab9f54cd00ee2d18 --- /dev/null +++ b/h36m_detailed/16/files/CISTGCN-benchmark-best.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9d99e17e43f39c998b141adf81c261a8b1eb78084feed8bf9de722a51111aef +size 5935307 diff --git a/h36m_detailed/16/files/CISTGCN-benchmark-last.pth.tar b/h36m_detailed/16/files/CISTGCN-benchmark-last.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..d9f902f47a4a856c3b9a1bce20073d48db481a46 --- /dev/null +++ b/h36m_detailed/16/files/CISTGCN-benchmark-last.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66ed4eb9b213f8b042fff19f85c1b34f7530176d65c52da4e21c96d24692f13f +size 5929419 diff --git a/h36m_detailed/16/files/config-20221118_0919-id0862.yaml b/h36m_detailed/16/files/config-20221118_0919-id0862.yaml new file mode 100644 index 0000000000000000000000000000000000000000..927783d8faed9cb70096d2efd78d3bf4002f21e7 --- /dev/null +++ b/h36m_detailed/16/files/config-20221118_0919-id0862.yaml @@ -0,0 +1,105 @@ +architecture_config: + model: MlpMixer_ext + model_params: + input_n: 10 + joints: 22 + output_n: 25 + n_txcnn_layers: 4 + txc_kernel_size: 3 + reduction: 8 + hidden_dim: 64 + input_gcn: + model_complexity: + - 16 + - 16 + - 16 + - 16 + interpretable: + - true + - true + - 
true + - true + - true + output_gcn: + model_complexity: + - 3 + interpretable: + - true + clipping: 15 +learning_config: + WarmUp: 100 + normalize: false + dropout: 0.1 + weight_decay: 1e-4 + epochs: 50 + lr: 0.01 +# max_norm: 3 + scheduler: + type: StepLR + params: + step_size: 3000 + gamma: 0.8 + loss: + weights: "" + type: "mpjpe" + augmentations: + random_scale: + x: + - 0.95 + - 1.05 + y: + - 0.90 + - 1.10 + z: + - 0.95 + - 1.05 + random_noise: "" + random_flip: + x: true + y: "" + z: true + random_rotation: + x: + - -5 + - 5 + y: + - -180 + - 180 + z: + - -5 + - 5 + random_translation: + x: + - -0.10 + - 0.10 + y: + - -0.10 + - 0.10 + z: + - -0.10 + - 0.10 +environment_config: + actions: all + evaluate_from: 0 + is_norm: true + job: 16 + sample_rate: 2 + return_all_joints: true + save_grads: false + test_batch: 128 + train_batch: 128 +general_config: + data_dir: /ai-research/datasets/attention/ann_h3.6m/ + experiment_name: STSGCN-tests + load_model_path: '' + log_path: /ai-research/notebooks/testing_repos/logdir/ + model_name_rel_path: STSGCN-benchmark + save_all_intermediate_models: false + save_models: true + tensorboard: + num_mesh: 4 +meta_config: + comment: Testing a new architecture based on STSGCN paper. + project: Attention + task: 3d keypoint prediction + version: 0.1.1 diff --git a/h36m_detailed/16/files/model.py b/h36m_detailed/16/files/model.py new file mode 100644 index 0000000000000000000000000000000000000000..810b55431f52d972af0b3897abf5d1b9e0e4c602 --- /dev/null +++ b/h36m_detailed/16/files/model.py @@ -0,0 +1,597 @@ +import math + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from ..layers import deformable_conv, SE + +torch.manual_seed(0) + + +# This is the simple CNN layer,that performs a 2-D convolution while maintaining the dimensions of the input(except for the features dimension) +class CNN_layer(nn.Module): + def __init__(self, + in_ch, + out_ch, + kernel_size, + dropout, + bias=True): + super(CNN_layer, self).__init__() + self.kernel_size = kernel_size + padding = ( + (kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) # padding so that both dimensions are maintained + assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1 + + self.block1 = [nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=padding, dilation=(1, 1)), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ] + + self.block1 = nn.Sequential(*self.block1) + + def forward(self, x): + output = self.block1(x) + return output + + +class FPN(nn.Module): + def __init__(self, in_ch, + out_ch, + kernel, # (3,1) + dropout, + reduction, + ): + super(FPN, self).__init__() + kernel_size = kernel if isinstance(kernel, (tuple, list)) else (kernel, kernel) + padding = ((kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) + pad1 = (padding[0], padding[1]) + pad2 = (padding[0] + pad1[0], padding[1] + pad1[1]) + pad3 = (padding[0] + pad2[0], padding[1] + pad2[1]) + dil1 = (1, 1) + dil2 = (1 + pad1[0], 1 + pad1[1]) + dil3 = (1 + pad2[0], 1 + pad2[1]) + self.block1 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad1, dilation=dil1), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block2 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad2, dilation=dil2), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block3 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad3, dilation=dil3), + nn.BatchNorm2d(out_ch), 
+ nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.pooling = nn.AdaptiveAvgPool2d((1, 1)) # Action Context. + self.compress = nn.Conv2d(out_ch * 3 + in_ch, + out_ch, + kernel_size=(1, 1)) # PRELU is outside the loop, check at the end of the code. + + def forward(self, x): + b, dim, joints, seq = x.shape + global_action = F.interpolate(self.pooling(x), (joints, seq)) + out = torch.cat((self.block1(x), self.block2(x), self.block3(x), global_action), dim=1) + out = self.compress(out) + return out + + +def mish(x): + return (x * torch.tanh(F.softplus(x))) + + +class ConvTemporalGraphical(nn.Module): + # Source : https://github.com/yysijie/st-gcn/blob/master/net/st_gcn.py + r"""The basic module for applying a graph convolution. + Args: + Shape: + - Input: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Output: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. + """ + + def __init__(self, time_dim, joints_dim, domain, interpratable): + super(ConvTemporalGraphical, self).__init__() + + if domain == "time": + # learnable, graph-agnostic 3-d adjacency matrix(or edge importance matrix) + size = joints_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(time_dim, size, size)) + self.domain = 'nctv,tvw->nctw' + else: + self.domain = 'nctv,ntvw->nctw' + elif domain == "space": + size = time_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(joints_dim, size, size)) + self.domain = 'nctv,vtq->ncqv' + else: + self.domain = 'nctv,nvtq->ncqv' + if not interpratable: + stdv = 1. / math.sqrt(self.A.size(1)) + self.A.data.uniform_(-stdv, stdv) + + def forward(self, x): + x = torch.einsum(self.domain, (x, self.A)) + return x.contiguous() + + +class Map2Adj(nn.Module): + def __init__(self, + in_ch, + time_dim, + joints_dim, + domain, + dropout, + ): + super(Map2Adj, self).__init__() + self.domain = domain + inter_ch = in_ch // 2 + self.time_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(time_dim, 1), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, time_dim, kernel_size=1, bias=False), + ) + self.joint_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(1, joints_dim), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, joints_dim, kernel_size=1, bias=False), + ) + + if self.domain == "space": + ch = joints_dim + self.perm1 = (0, 1, 2, 3) + self.perm2 = (0, 3, 2, 1) + if self.domain == "time": + ch = time_dim + self.perm1 = (0, 2, 1, 3) + self.perm2 = (0, 1, 2, 3) + + inter_ch = ch # // 2 + self.expansor = nn.Sequential(nn.Conv2d(ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(inter_ch, ch, kernel_size=1, bias=False), + ) + self.time_compress.apply(self._init_weights) + self.joint_compress.apply(self._init_weights) + self.expansor.apply(self._init_weights) + + def _init_weights(self, m, gain=0.05): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + if isinstance(m, (nn.Conv2d, 
nn.Conv1d)): + torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, dims, seq, joints = x.shape + dim_seq = self.time_compress(x) + dim_space = self.joint_compress(x) + o = torch.matmul(dim_space.permute(self.perm1), dim_seq.permute(self.perm2)) + Adj = self.expansor(o) + return Adj + + +class Domain_GCNN_layer(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. + :in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + kernel_size, + stride, + time_dim, + joints_dim, + domain, + interpratable, + dropout, + bias=True): + + super(Domain_GCNN_layer, self).__init__() + self.kernel_size = kernel_size + assert self.kernel_size[0] % 2 == 1 + assert self.kernel_size[1] % 2 == 1 + padding = ((self.kernel_size[0] - 1) // 2, (self.kernel_size[1] - 1) // 2) + self.interpratable = interpratable + self.domain = domain + + self.gcn = ConvTemporalGraphical(time_dim, joints_dim, domain, interpratable) + self.tcn = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + (self.kernel_size[0], self.kernel_size[1]), + (stride, stride), + padding, + ), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ) + + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + if self.interpratable: + self.map_to_adj = Map2Adj(in_ch, + time_dim, + joints_dim, + domain, + dropout, + ) + else: + self.map_to_adj = nn.Identity() + self.prelu = nn.PReLU() + + def forward(self, x): + # assert A.shape[0] == self.kernel_size[1], print(A.shape[0],self.kernel_size) + res = self.residual(x) + self.Adj = self.map_to_adj(x) + if self.interpratable: + self.gcn.A = self.Adj + x1 = self.gcn(x) + x2 = self.tcn(x1) + x3 = x2 + res + x4 = self.prelu(x3) + return x4 + + +# Dynamic SpatioTemporal Decompose Graph Convolutions (DSTD-GC) +class DSTD_GC(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ : in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + interpratable, + kernel_size, + stride, + time_dim, + joints_dim, + reduction, + dropout): + super(DSTD_GC, self).__init__() + self.dsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "space", interpratable, dropout) + self.tsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "time", interpratable, dropout) + + self.compressor = nn.Sequential(nn.Conv2d(out_ch * 2, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch), + nn.PReLU(), + SE.SELayer2d(out_ch, reduction=reduction), + ) + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + + # Weighting features + out_ch_c = out_ch // 2 if out_ch // 2 > 1 else 1 + self.global_norm = nn.BatchNorm2d(in_ch) + self.conv_s = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.conv_t = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map_s = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.map_t = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.prelu1 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + self.prelu2 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + + def _get_stats_(self, x): + global_avg_pool = x.mean((3, 2)).mean(1, keepdims=True) + global_avg_pool_features = x.mean(3).mean(1) + global_std_pool = x.std((3, 2)).std(1, keepdims=True) + global_std_pool_features = x.std(3).std(1) + return torch.cat(( + global_avg_pool, + global_avg_pool_features, + global_std_pool, + global_std_pool_features, + ), + dim=1) + + def forward(self, x): + b, dim, seq, joints = x.shape # 64, 3, 10, 22 + xn = self.global_norm(x) + + stats = self._get_stats_(xn) + w1 = torch.cat((self.conv_s(xn).view(b, -1), stats), dim=1) + stats = self._get_stats_(xn) + w2 = torch.cat((self.conv_t(xn).view(b, -1), stats), dim=1) + self.w1 = self.map_s(w1) + self.w2 = self.map_t(w2) + w1 = self.w1[..., None, None] + w2 = self.w2[..., None, None] + + x1 = self.dsgn(xn) + x2 = self.tsgn(xn) + out = torch.cat((self.prelu1(w1 * x1), self.prelu2(w2 * x2)), dim=1) + out = self.compressor(out) + return torch.clip(out + self.residual(xn), -1e5, 1e5) + + +class ContextLayer(nn.Module): + def __init__(self, + in_ch, + hidden_ch, + output_seq, + input_seq, + joints, + dims=3, + reduction=8, + dropout=0.1, + ): + super(ContextLayer, self).__init__() + self.n_output = output_seq + self.n_joints = joints + self.n_input = input_seq + self.context_conv1 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + + 
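+        # context_conv1/2/3 form three parallel summaries of the predicted sequence:
+        # in forward(), conv1 is max-pooled over joints and then time, conv2 collapses
+        # time with its (input_seq, 1) kernel before a max over joints, and conv3 is
+        # mean-pooled over both; map1-map3 then project each summary to n_output
+        # features before the joint/displacement maps are derived.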
self.context_conv2 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, (input_seq, 1), bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.context_conv3 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.map1 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map2 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map3 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fmap_s = nn.Sequential(nn.Linear(self.n_output * 3, self.n_joints, bias=False), + nn.BatchNorm1d(self.n_joints), + nn.Dropout(dropout, inplace=True), ) + + self.fmap_t = nn.Sequential(nn.Linear(self.n_output * 3, self.n_output, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), ) + + # inter_ch = self.n_joints # // 2 + self.norm_map = nn.Sequential(nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + SE.SELayer1d(self.n_output, reduction=reduction), + nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fconv = nn.Sequential(nn.Conv2d(1, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + nn.Conv2d(dims, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + ) + self.SE = SE.SELayer2d(self.n_output, reduction=reduction) + + def forward(self, x): + b, _, seq, joint_dim = x.shape + y1 = self.context_conv1(x).max(-1)[0].max(-1)[0] + y2 = self.context_conv2(x).view(b, -1, joint_dim).max(-1)[0] + ym = self.context_conv3(x).mean((2, 3)) + y = torch.cat((self.map1(y1), self.map2(y2), self.map3(ym)), dim=1) + self.joints = self.fmap_s(y) + self.displacements = self.fmap_t(y) # .cumsum(1) + self.seq_joints = torch.bmm(self.displacements.unsqueeze(2), self.joints.unsqueeze(1)) + self.seq_joints_n = self.norm_map(self.seq_joints) + self.seq_joints_dims = self.fconv(self.seq_joints_n.view(b, 1, self.n_output, self.n_joints)) + o = self.SE(self.seq_joints_dims.permute(0, 2, 3, 1)) + return o + + +class MlpMixer_ext(nn.Module): + """ + Shape: + - Input[0]: Input sequence in :math:`(N, in_ch,T_in, V)` format + - Output[0]: Output sequence in :math:`(N,T_out,in_ch, V)` format + where + :math:`N` is a batch size, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ :in_ch=number of channels for the coordiantes(default=3) + + + """ + + def __init__(self, arch, learn): + super(MlpMixer_ext, self).__init__() + self.clipping = arch.model_params.clipping + + self.n_input = arch.model_params.input_n + self.n_output = arch.model_params.output_n + self.n_joints = arch.model_params.joints + self.n_txcnn_layers = arch.model_params.n_txcnn_layers + self.txc_kernel_size = [arch.model_params.txc_kernel_size] * 2 + self.input_gcn = arch.model_params.input_gcn + self.output_gcn = arch.model_params.output_gcn + self.reduction = arch.model_params.reduction + self.hidden_dim = arch.model_params.hidden_dim + + self.st_gcnns = nn.ModuleList() + self.txcnns = nn.ModuleList() + self.se = nn.ModuleList() + + self.in_conv = nn.ModuleList() + self.context_layer = nn.ModuleList() + self.trans = nn.ModuleList() + self.in_ch = 10 + self.model_tx = self.input_gcn.model_complexity.copy() + self.model_tx.insert(0, 1) # add 1 in the position 0. + + self.input_gcn.model_complexity.insert(0, self.in_ch) + self.input_gcn.model_complexity.append(self.in_ch) + # self.input_gcn.interpretable.insert(0, True) + # self.input_gcn.interpretable.append(False) + for i in range(len(self.input_gcn.model_complexity) - 1): + self.st_gcnns.append(DSTD_GC(self.input_gcn.model_complexity[i], + self.input_gcn.model_complexity[i + 1], + self.input_gcn.interpretable[i], + [1, 1], 1, self.n_input, self.n_joints, self.reduction, learn.dropout)) + + self.context_layer = ContextLayer(1, self.hidden_dim, + self.n_output, self.n_output, self.n_joints, + 3, self.reduction, learn.dropout + ) + + # at this point, we must permute the dimensions of the gcn network, from (N,C,T,V) into (N,T,C,V) + # with kernel_size[3,3] the dimensions of C,V will be maintained + self.txcnns.append(FPN(self.n_input, self.n_output, self.txc_kernel_size, 0., self.reduction)) + for i in range(1, self.n_txcnn_layers): + self.txcnns.append(FPN(self.n_output, self.n_output, self.txc_kernel_size, 0., self.reduction)) + + self.prelus = nn.ModuleList() + for j in range(self.n_txcnn_layers): + self.prelus.append(nn.PReLU()) + + self.dim_conversor = nn.Sequential(nn.Conv2d(self.in_ch, 3, 1, bias=False), + nn.BatchNorm2d(3), + nn.PReLU(), + nn.Conv2d(3, 3, 1, bias=False), + nn.PReLU(3), ) + + self.st_gcnns_o = nn.ModuleList() + self.output_gcn.model_complexity.insert(0, 3) + for i in range(len(self.output_gcn.model_complexity) - 1): + self.st_gcnns_o.append(DSTD_GC(self.output_gcn.model_complexity[i], + self.output_gcn.model_complexity[i + 1], + self.output_gcn.interpretable[i], + [1, 1], 1, self.n_joints, self.n_output, self.reduction, learn.dropout)) + + self.st_gcnns_o.apply(self._init_weights) + self.st_gcnns.apply(self._init_weights) + self.txcnns.apply(self._init_weights) + + def _init_weights(self, m, gain=0.1): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, seq, joints, dim = x.shape + vel = torch.zeros_like(x) + vel[:, :-1] = torch.diff(x, dim=1) + vel[:, -1] = x[:, -1] + acc = torch.zeros_like(x) + acc[:, :-1] = torch.diff(vel, dim=1) + acc[:, -1] = vel[:, -1] + x1 = torch.cat((x, acc, vel, torch.norm(vel, dim=-1, keepdim=True)), dim=-1) + x2 = x1.permute((0, 3, 1, 2)) # (torch.Size([64, 10, 22, 7]) + x3 = x2 + + for i in range(len(self.st_gcnns)): + x3 = self.st_gcnns[i](x3) + + x5 = 
x3.permute(0, 2, 1, 3) # prepare the input for the Time-Extrapolator-CNN (NCTV->NTCV) + + x6 = self.prelus[0](self.txcnns[0](x5)) + for i in range(1, self.n_txcnn_layers): + x6 = self.prelus[i](self.txcnns[i](x6)) + x6 # residual connection + + x6 = self.dim_conversor(x6.permute(0, 2, 1, 3)).permute(0, 2, 3, 1) + x7 = x6.cumsum(1) + + act = self.context_layer(x7.reshape(b, 1, self.n_output, joints * x7.shape[-1])) + x8 = x7.permute(0, 3, 2, 1) + for i in range(len(self.st_gcnns_o)): + x8 = self.st_gcnns_o[i](x8) + x9 = x8.permute(0, 3, 2, 1) + act + + return x[:, -1:] + x9, diff --git a/h36m_detailed/16/metric_full_original_test.xlsx b/h36m_detailed/16/metric_full_original_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..bd1c603376f62e0e077e945f2871c29fe34b18c3 --- /dev/null +++ b/h36m_detailed/16/metric_full_original_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5da750156c6ce72e0a130f4fe2b8610a18bea6966ab8a03dafe39e9349b638cc +size 2049706 diff --git a/h36m_detailed/16/metric_original_test.xlsx b/h36m_detailed/16/metric_original_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..c4b27e8841c364c35f1bfe3a66c52fb98bd8ed66 --- /dev/null +++ b/h36m_detailed/16/metric_original_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:125c475dd472bfa25df2d197c231fbd70efe418418eb5360f8dafaaad7368110 +size 2052431 diff --git a/h36m_detailed/16/metric_test.xlsx b/h36m_detailed/16/metric_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..1fb9dd8a31556d1e501760ac9e04d65c7881f3cb --- /dev/null +++ b/h36m_detailed/16/metric_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:527caffa58e94cec2ae96719ef93b2e32360b7c267751ed413c6f7054f2b8c3b +size 2052609 diff --git a/h36m_detailed/16/metric_train.xlsx b/h36m_detailed/16/metric_train.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..f5b7a4b6c684720d9a903a9ac8293e640a0ab716 --- /dev/null +++ b/h36m_detailed/16/metric_train.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a890ffa3e5da0b224111a39d9458ca20364090624b0527a9b7acbb8c585e7ecb +size 2033364 diff --git a/h36m_detailed/16/sample_original_test.xlsx b/h36m_detailed/16/sample_original_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..e528e6a9a5c3fdd571f771ca95301206a6aaaae1 --- /dev/null +++ b/h36m_detailed/16/sample_original_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d3213309871efe19a835db7b26279cdcc7088eb23d153bc150acd6f9f10be31 +size 29579719 diff --git a/h36m_detailed/32/files/CISTGCN-benchmark-best.pth.tar b/h36m_detailed/32/files/CISTGCN-benchmark-best.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..dd4725cfa8a34ff986bfc351c00ca3df70d9ad4a --- /dev/null +++ b/h36m_detailed/32/files/CISTGCN-benchmark-best.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d1d356c1b73f1bc6d0d056e643f345a4727373779fe8e9aabbd23b58c3ca343 +size 8133899 diff --git a/h36m_detailed/32/files/CISTGCN-benchmark-last.pth.tar b/h36m_detailed/32/files/CISTGCN-benchmark-last.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..444d60a76061204edaa41011a3310b1b2c44f357 --- /dev/null +++ b/h36m_detailed/32/files/CISTGCN-benchmark-last.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:9a806dce099cf22d3b2989ae971e7922bfb050f3f134f74e9765c2e37e81ebb7 +size 8127691 diff --git a/h36m_detailed/32/files/config-20221111_1223-id0734.yaml b/h36m_detailed/32/files/config-20221111_1223-id0734.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d352f1f0d9448d1397074663944182a63bd9310 --- /dev/null +++ b/h36m_detailed/32/files/config-20221111_1223-id0734.yaml @@ -0,0 +1,105 @@ +architecture_config: + model: MlpMixer_ext_1 + model_params: + input_n: 10 + joints: 22 + output_n: 25 + n_txcnn_layers: 4 + txc_kernel_size: 3 + reduction: 8 + hidden_dim: 64 + input_gcn: + model_complexity: + - 32 + - 32 + - 32 + - 32 + interpretable: + - true + - true + - true + - true + - true + output_gcn: + model_complexity: + - 3 + interpretable: + - true + clipping: 15 +learning_config: + WarmUp: 100 + normalize: false + dropout: 0.1 + weight_decay: 1e-4 + epochs: 50 + lr: 0.01 +# max_norm: 3 + scheduler: + type: StepLR + params: + step_size: 3000 + gamma: 0.8 + loss: + weights: "" + type: "mpjpe" + augmentations: + random_scale: + x: + - 0.95 + - 1.05 + y: + - 0.90 + - 1.10 + z: + - 0.95 + - 1.05 + random_noise: "" + random_flip: + x: true + y: "" + z: true + random_rotation: + x: + - -5 + - 5 + y: + - -180 + - 180 + z: + - -5 + - 5 + random_translation: + x: + - -0.10 + - 0.10 + y: + - -0.10 + - 0.10 + z: + - -0.10 + - 0.10 +environment_config: + actions: all + evaluate_from: 0 + is_norm: true + job: 16 + sample_rate: 2 + return_all_joints: true + save_grads: false + test_batch: 128 + train_batch: 128 +general_config: + data_dir: /ai-research/datasets/attention/ann_h3.6m/ + experiment_name: STSGCN-tests + load_model_path: '' + log_path: /ai-research/notebooks/testing_repos/logdir/ + model_name_rel_path: STSGCN-benchmark + save_all_intermediate_models: false + save_models: true + tensorboard: + num_mesh: 4 +meta_config: + comment: Testing a new architecture based on STSGCN paper. 
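+# note: input_gcn.model_complexity lists 4 entries but interpretable lists 5;
+# model.py prepends and appends in_ch to model_complexity, yielding 5 input
+# DSTD_GC layers, one per interpretable flag.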
+ project: Attention + task: 3d keypoint prediction + version: 0.1.1 diff --git a/h36m_detailed/32/files/model.py b/h36m_detailed/32/files/model.py new file mode 100644 index 0000000000000000000000000000000000000000..5aa5a57b3e451186f63bbc01da5d978250d35ffd --- /dev/null +++ b/h36m_detailed/32/files/model.py @@ -0,0 +1,597 @@ +import math + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from ..layers import deformable_conv, SE + +torch.manual_seed(0) + + +# This is the simple CNN layer,that performs a 2-D convolution while maintaining the dimensions of the input(except for the features dimension) +class CNN_layer(nn.Module): + def __init__(self, + in_ch, + out_ch, + kernel_size, + dropout, + bias=True): + super(CNN_layer, self).__init__() + self.kernel_size = kernel_size + padding = ( + (kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) # padding so that both dimensions are maintained + assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1 + + self.block1 = [nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=padding, dilation=(1, 1)), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ] + + self.block1 = nn.Sequential(*self.block1) + + def forward(self, x): + output = self.block1(x) + return output + + +class FPN(nn.Module): + def __init__(self, in_ch, + out_ch, + kernel, # (3,1) + dropout, + reduction, + ): + super(FPN, self).__init__() + kernel_size = kernel if isinstance(kernel, (tuple, list)) else (kernel, kernel) + padding = ((kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) + pad1 = (padding[0], padding[1]) + pad2 = (padding[0] + pad1[0], padding[1] + pad1[1]) + pad3 = (padding[0] + pad2[0], padding[1] + pad2[1]) + dil1 = (1, 1) + dil2 = (1 + pad1[0], 1 + pad1[1]) + dil3 = (1 + pad2[0], 1 + pad2[1]) + self.block1 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad1, dilation=dil1), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block2 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad2, dilation=dil2), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block3 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad3, dilation=dil3), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.pooling = nn.AdaptiveAvgPool2d((1, 1)) # Action Context. + self.compress = nn.Conv2d(out_ch * 3 + in_ch, + out_ch, + kernel_size=(1, 1)) # PRELU is outside the loop, check at the end of the code. + + def forward(self, x): + b, dim, joints, seq = x.shape + global_action = F.interpolate(self.pooling(x), (joints, seq)) + out = torch.cat((self.block1(x), self.block2(x), self.block3(x), global_action), dim=1) + out = self.compress(out) + return out + + +def mish(x): + return (x * torch.tanh(F.softplus(x))) + + +class ConvTemporalGraphical(nn.Module): + # Source : https://github.com/yysijie/st-gcn/blob/master/net/st_gcn.py + r"""The basic module for applying a graph convolution. + Args: + Shape: + - Input: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Output: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
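+    Example (illustrative shapes only; assumes the non-interpretable path, where
+    ``A`` is the learnable parameter -- on the interpretable path ``A`` is
+    assigned externally by ``Map2Adj`` before ``forward`` runs)::
+
+        >>> layer = ConvTemporalGraphical(time_dim=10, joints_dim=22,
+        ...                               domain="time", interpratable=False)
+        >>> x = torch.randn(8, 16, 10, 22)  # (N, C, T, V)
+        >>> layer(x).shape
+        torch.Size([8, 16, 10, 22])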
+ """ + + def __init__(self, time_dim, joints_dim, domain, interpratable): + super(ConvTemporalGraphical, self).__init__() + + if domain == "time": + # learnable, graph-agnostic 3-d adjacency matrix(or edge importance matrix) + size = joints_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(time_dim, size, size)) + self.domain = 'nctv,tvw->nctw' + else: + self.domain = 'nctv,ntvw->nctw' + elif domain == "space": + size = time_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(joints_dim, size, size)) + self.domain = 'nctv,vtq->ncqv' + else: + self.domain = 'nctv,nvtq->ncqv' + if not interpratable: + stdv = 1. / math.sqrt(self.A.size(1)) + self.A.data.uniform_(-stdv, stdv) + + def forward(self, x): + x = torch.einsum(self.domain, (x, self.A)) + return x.contiguous() + + +class Map2Adj(nn.Module): + def __init__(self, + in_ch, + time_dim, + joints_dim, + domain, + dropout, + ): + super(Map2Adj, self).__init__() + self.domain = domain + inter_ch = in_ch // 2 + self.time_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(time_dim, 1), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, time_dim, kernel_size=1, bias=False), + ) + self.joint_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(1, joints_dim), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, joints_dim, kernel_size=1, bias=False), + ) + + if self.domain == "space": + ch = joints_dim + self.perm1 = (0, 1, 2, 3) + self.perm2 = (0, 3, 2, 1) + if self.domain == "time": + ch = time_dim + self.perm1 = (0, 2, 1, 3) + self.perm2 = (0, 1, 2, 3) + + inter_ch = ch # // 2 + self.expansor = nn.Sequential(nn.Conv2d(ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(inter_ch, ch, kernel_size=1, bias=False), + ) + self.time_compress.apply(self._init_weights) + self.joint_compress.apply(self._init_weights) + self.expansor.apply(self._init_weights) + + def _init_weights(self, m, gain=0.05): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + if isinstance(m, (nn.Conv2d, nn.Conv1d)): + torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, dims, seq, joints = x.shape + dim_seq = self.time_compress(x) + dim_space = self.joint_compress(x) + o = torch.matmul(dim_space.permute(self.perm1), dim_seq.permute(self.perm2)) + Adj = self.expansor(o) + return Adj + + +class Domain_GCNN_layer(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ :in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + kernel_size, + stride, + time_dim, + joints_dim, + domain, + interpratable, + dropout, + bias=True): + + super(Domain_GCNN_layer, self).__init__() + self.kernel_size = kernel_size + assert self.kernel_size[0] % 2 == 1 + assert self.kernel_size[1] % 2 == 1 + padding = ((self.kernel_size[0] - 1) // 2, (self.kernel_size[1] - 1) // 2) + self.interpratable = interpratable + self.domain = domain + + self.gcn = ConvTemporalGraphical(time_dim, joints_dim, domain, interpratable) + self.tcn = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + (self.kernel_size[0], self.kernel_size[1]), + (stride, stride), + padding, + ), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ) + + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + if self.interpratable: + self.map_to_adj = Map2Adj(in_ch, + time_dim, + joints_dim, + domain, + dropout, + ) + else: + self.map_to_adj = nn.Identity() + self.prelu = nn.PReLU() + + def forward(self, x): + # assert A.shape[0] == self.kernel_size[1], print(A.shape[0],self.kernel_size) + res = self.residual(x) + self.Adj = self.map_to_adj(x) + if self.interpratable: + self.gcn.A = self.Adj + x1 = self.gcn(x) + x2 = self.tcn(x1) + x3 = x2 + res + x4 = self.prelu(x3) + return x4 + + +# Dynamic SpatioTemporal Decompose Graph Convolutions (DSTD-GC) +class DSTD_GC(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ : in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + interpratable, + kernel_size, + stride, + time_dim, + joints_dim, + reduction, + dropout): + super(DSTD_GC, self).__init__() + self.dsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "space", interpratable, dropout) + self.tsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "time", interpratable, dropout) + + self.compressor = nn.Sequential(nn.Conv2d(out_ch * 2, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch), + nn.PReLU(), + SE.SELayer2d(out_ch, reduction=reduction), + ) + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + + # Weighting features + out_ch_c = out_ch // 2 if out_ch // 2 > 1 else 1 + self.global_norm = nn.BatchNorm2d(in_ch) + self.conv_s = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.conv_t = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map_s = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.map_t = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.prelu1 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + self.prelu2 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + + def _get_stats_(self, x): + global_avg_pool = x.mean((3, 2)).mean(1, keepdims=True) + global_avg_pool_features = x.mean(3).mean(1) + global_std_pool = x.std((3, 2)).std(1, keepdims=True) + global_std_pool_features = x.std(3).std(1) + return torch.cat(( + global_avg_pool, + global_avg_pool_features, + global_std_pool, + global_std_pool_features, + ), + dim=1) + + def forward(self, x): + b, dim, seq, joints = x.shape # 64, 3, 10, 22 + xn = self.global_norm(x) + + stats = self._get_stats_(xn) + w1 = torch.cat((self.conv_s(xn).view(b, -1), stats), dim=1) + stats = self._get_stats_(xn) + w2 = torch.cat((self.conv_t(xn).view(b, -1), stats), dim=1) + self.w1 = self.map_s(w1) + self.w2 = self.map_t(w2) + w1 = self.w1[..., None, None] + w2 = self.w2[..., None, None] + + x1 = self.dsgn(xn) + x2 = self.tsgn(xn) + out = torch.cat((self.prelu1(w1 * x1), self.prelu2(w2 * x2)), dim=1) + out = self.compressor(out) + return out + self.residual(xn) + + +class ContextLayer(nn.Module): + def __init__(self, + in_ch, + hidden_ch, + output_seq, + input_seq, + joints, + dims=3, + reduction=8, + dropout=0.1, + ): + super(ContextLayer, self).__init__() + self.n_output = output_seq + self.n_joints = joints + self.n_input = input_seq + self.context_conv1 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + + self.context_conv2 = 
nn.Sequential(nn.Conv2d(in_ch, hidden_ch, (input_seq, 1), bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.context_conv3 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.map1 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map2 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map3 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fmap_s = nn.Sequential(nn.Linear(self.n_output * 3, self.n_joints, bias=False), + nn.BatchNorm1d(self.n_joints), + nn.Dropout(dropout, inplace=True), ) + + self.fmap_t = nn.Sequential(nn.Linear(self.n_output * 3, self.n_output, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), ) + + # inter_ch = self.n_joints # // 2 + self.norm_map = nn.Sequential(nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + SE.SELayer1d(self.n_output, reduction=reduction), + nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fconv = nn.Sequential(nn.Conv2d(1, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + nn.Conv2d(dims, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + ) + self.SE = SE.SELayer2d(self.n_output, reduction=reduction) + + def forward(self, x): + b, _, seq, joint_dim = x.shape + y1 = self.context_conv1(x).max(-1)[0].max(-1)[0] + y2 = self.context_conv2(x).view(b, -1, joint_dim).max(-1)[0] + ym = self.context_conv3(x).mean((2, 3)) + y = torch.cat((self.map1(y1), self.map2(y2), self.map3(ym)), dim=1) + self.joints = self.fmap_s(y) + self.displacements = self.fmap_t(y) # .cumsum(1) + self.seq_joints = torch.bmm(self.displacements.unsqueeze(2), self.joints.unsqueeze(1)) + self.seq_joints_n = self.norm_map(self.seq_joints) + self.seq_joints_dims = self.fconv(self.seq_joints_n.view(b, 1, self.n_output, self.n_joints)) + o = self.SE(self.seq_joints_dims.permute(0, 2, 3, 1)) + return o + + +class MlpMixer_ext(nn.Module): + """ + Shape: + - Input[0]: Input sequence in :math:`(N, in_ch,T_in, V)` format + - Output[0]: Output sequence in :math:`(N,T_out,in_ch, V)` format + where + :math:`N` is a batch size, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ :in_ch=number of channels for the coordiantes(default=3) + + + """ + + def __init__(self, arch, learn): + super(MlpMixer_ext, self).__init__() + self.clipping = arch.model_params.clipping + + self.n_input = arch.model_params.input_n + self.n_output = arch.model_params.output_n + self.n_joints = arch.model_params.joints + self.n_txcnn_layers = arch.model_params.n_txcnn_layers + self.txc_kernel_size = [arch.model_params.txc_kernel_size] * 2 + self.input_gcn = arch.model_params.input_gcn + self.output_gcn = arch.model_params.output_gcn + self.reduction = arch.model_params.reduction + self.hidden_dim = arch.model_params.hidden_dim + + self.st_gcnns = nn.ModuleList() + self.txcnns = nn.ModuleList() + self.se = nn.ModuleList() + + self.in_conv = nn.ModuleList() + self.context_layer = nn.ModuleList() + self.trans = nn.ModuleList() + self.in_ch = 10 + self.model_tx = self.input_gcn.model_complexity.copy() + self.model_tx.insert(0, 1) # add 1 in the position 0. + + self.input_gcn.model_complexity.insert(0, self.in_ch) + self.input_gcn.model_complexity.append(self.in_ch) + # self.input_gcn.interpretable.insert(0, True) + # self.input_gcn.interpretable.append(False) + for i in range(len(self.input_gcn.model_complexity) - 1): + self.st_gcnns.append(DSTD_GC(self.input_gcn.model_complexity[i], + self.input_gcn.model_complexity[i + 1], + self.input_gcn.interpretable[i], + [1, 1], 1, self.n_input, self.n_joints, self.reduction, learn.dropout)) + + self.context_layer = ContextLayer(1, self.hidden_dim, + self.n_output, self.n_output, self.n_joints, + 3, self.reduction, learn.dropout + ) + + # at this point, we must permute the dimensions of the gcn network, from (N,C,T,V) into (N,T,C,V) + # with kernel_size[3,3] the dimensions of C,V will be maintained + self.txcnns.append(FPN(self.n_input, self.n_output, self.txc_kernel_size, 0., self.reduction)) + for i in range(1, self.n_txcnn_layers): + self.txcnns.append(FPN(self.n_output, self.n_output, self.txc_kernel_size, 0., self.reduction)) + + self.prelus = nn.ModuleList() + for j in range(self.n_txcnn_layers): + self.prelus.append(nn.PReLU()) + + self.dim_conversor = nn.Sequential(nn.Conv2d(self.in_ch, 3, 1, bias=False), + nn.BatchNorm2d(3), + nn.PReLU(), + nn.Conv2d(3, 3, 1, bias=False), + nn.PReLU(3), ) + + self.st_gcnns_o = nn.ModuleList() + self.output_gcn.model_complexity.insert(0, 3) + for i in range(len(self.output_gcn.model_complexity) - 1): + self.st_gcnns_o.append(DSTD_GC(self.output_gcn.model_complexity[i], + self.output_gcn.model_complexity[i + 1], + self.output_gcn.interpretable[i], + [1, 1], 1, self.n_joints, self.n_output, self.reduction, learn.dropout)) + + self.st_gcnns_o.apply(self._init_weights) + self.st_gcnns.apply(self._init_weights) + self.txcnns.apply(self._init_weights) + + def _init_weights(self, m, gain=0.1): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, seq, joints, dim = x.shape + vel = torch.zeros_like(x) + vel[:, :-1] = torch.diff(x, dim=1) + vel[:, -1] = x[:, -1] + acc = torch.zeros_like(x) + acc[:, :-1] = torch.diff(vel, dim=1) + acc[:, -1] = vel[:, -1] + x1 = torch.cat((x, acc, vel, torch.norm(vel, dim=-1, keepdim=True)), dim=-1) + x2 = x1.permute((0, 3, 1, 2)) # (torch.Size([64, 10, 22, 7]) + x3 = x2 + + for i in range(len(self.st_gcnns)): + x3 = self.st_gcnns[i](x3) + + x5 = 
x3.permute(0, 2, 1, 3) # prepare the input for the Time-Extrapolator-CNN (NCTV->NTCV) + + x6 = self.prelus[0](self.txcnns[0](x5)) + for i in range(1, self.n_txcnn_layers): + x6 = self.prelus[i](self.txcnns[i](x6)) + x6 # residual connection + + x6 = self.dim_conversor(x6.permute(0, 2, 1, 3)).permute(0, 2, 3, 1) + x7 = x6.cumsum(1) + + act = self.context_layer(x7.reshape(b, 1, self.n_output, joints * x7.shape[-1])) + x8 = x7.permute(0, 3, 2, 1) + for i in range(len(self.st_gcnns_o)): + x8 = self.st_gcnns_o[i](x8) + x9 = x8.permute(0, 3, 2, 1) + act + + return x[:, -1:] + x9, diff --git a/h36m_detailed/32/metrics_original_test.xlsx b/h36m_detailed/32/metrics_original_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..f5f95acbe20634573fb9eba699c3000ee623e885 --- /dev/null +++ b/h36m_detailed/32/metrics_original_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec4d54347d739ccaab307244384406ffcc96b7f4b44e68ffc2704f11b38d1200 +size 2052735 diff --git a/h36m_detailed/32/samples_original_test.xlsx b/h36m_detailed/32/samples_original_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..a2b96a43cda95a8949b6721e9be9d70f5c9a3907 --- /dev/null +++ b/h36m_detailed/32/samples_original_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1db05cc9b6ffb40208811b40ab486490755702331ff3e22355f100d963f984dd +size 28078149 diff --git a/h36m_detailed/64/files/CISTGCN-benchmark-best.pth.tar b/h36m_detailed/64/files/CISTGCN-benchmark-best.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..cb22a66d82f2110983abc41a33a4bc0cc2d09f27 --- /dev/null +++ b/h36m_detailed/64/files/CISTGCN-benchmark-best.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb41d06736803c4e7b0aa66e36820440d5125b072739610481d1c06c23cedb5a +size 16582347 diff --git a/h36m_detailed/64/files/CISTGCN-benchmark-last.pth.tar b/h36m_detailed/64/files/CISTGCN-benchmark-last.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..981c8c0e2192debf521fcabe5ca8f0fe3707a75a --- /dev/null +++ b/h36m_detailed/64/files/CISTGCN-benchmark-last.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8b5ce6e7fc0cbfceacf731ab896290352f7b4d929b80b3bf8d516bdfc02e704 +size 16584139 diff --git a/h36m_detailed/64/files/config-20221114_2127-id9542.yaml b/h36m_detailed/64/files/config-20221114_2127-id9542.yaml new file mode 100644 index 0000000000000000000000000000000000000000..70a2fcc71412b9a24255cd5ebe1df3ddfcd75eba --- /dev/null +++ b/h36m_detailed/64/files/config-20221114_2127-id9542.yaml @@ -0,0 +1,105 @@ +architecture_config: + model: MlpMixer_ext_1 + model_params: + input_n: 10 + joints: 22 + output_n: 25 + n_txcnn_layers: 4 + txc_kernel_size: 3 + reduction: 8 + hidden_dim: 64 + input_gcn: + model_complexity: + - 64 + - 64 + - 64 + - 64 + interpretable: + - true + - true + - true + - true + - true + output_gcn: + model_complexity: + - 3 + interpretable: + - true + clipping: 15 +learning_config: + WarmUp: 100 + normalize: false + dropout: 0.1 + weight_decay: 1e-4 + epochs: 50 + lr: 0.01 +# max_norm: 3 + scheduler: + type: StepLR + params: + step_size: 3000 + gamma: 0.8 + loss: + weights: "" + type: "mpjpe" + augmentations: + random_scale: + x: + - 0.95 + - 1.05 + y: + - 0.90 + - 1.10 + z: + - 0.95 + - 1.05 + random_noise: "" + random_flip: + x: true + y: "" + z: true + random_rotation: + x: + - -5 + - 5 + y: + - -180 + - 180 + z: + - -5 + - 5 + 
random_translation: + x: + - -0.10 + - 0.10 + y: + - -0.10 + - 0.10 + z: + - -0.10 + - 0.10 +environment_config: + actions: all + evaluate_from: 0 + is_norm: true + job: 16 + sample_rate: 2 + return_all_joints: true + save_grads: false + test_batch: 128 + train_batch: 128 +general_config: + data_dir: /ai-research/datasets/attention/ann_h3.6m/ + experiment_name: STSGCN-tests + load_model_path: '' + log_path: /ai-research/notebooks/testing_repos/logdir/ + model_name_rel_path: STSGCN-benchmark + save_all_intermediate_models: false + save_models: true + tensorboard: + num_mesh: 4 +meta_config: + comment: Testing a new architecture based on STSGCN paper. + project: Attention + task: 3d keypoint prediction + version: 0.1.1 diff --git a/h36m_detailed/64/files/model.py b/h36m_detailed/64/files/model.py new file mode 100644 index 0000000000000000000000000000000000000000..5aa5a57b3e451186f63bbc01da5d978250d35ffd --- /dev/null +++ b/h36m_detailed/64/files/model.py @@ -0,0 +1,597 @@ +import math + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from ..layers import deformable_conv, SE + +torch.manual_seed(0) + + +# This is the simple CNN layer,that performs a 2-D convolution while maintaining the dimensions of the input(except for the features dimension) +class CNN_layer(nn.Module): + def __init__(self, + in_ch, + out_ch, + kernel_size, + dropout, + bias=True): + super(CNN_layer, self).__init__() + self.kernel_size = kernel_size + padding = ( + (kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) # padding so that both dimensions are maintained + assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1 + + self.block1 = [nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=padding, dilation=(1, 1)), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ] + + self.block1 = nn.Sequential(*self.block1) + + def forward(self, x): + output = self.block1(x) + return output + + +class FPN(nn.Module): + def __init__(self, in_ch, + out_ch, + kernel, # (3,1) + dropout, + reduction, + ): + super(FPN, self).__init__() + kernel_size = kernel if isinstance(kernel, (tuple, list)) else (kernel, kernel) + padding = ((kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) + pad1 = (padding[0], padding[1]) + pad2 = (padding[0] + pad1[0], padding[1] + pad1[1]) + pad3 = (padding[0] + pad2[0], padding[1] + pad2[1]) + dil1 = (1, 1) + dil2 = (1 + pad1[0], 1 + pad1[1]) + dil3 = (1 + pad2[0], 1 + pad2[1]) + self.block1 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad1, dilation=dil1), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block2 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad2, dilation=dil2), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block3 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad3, dilation=dil3), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.pooling = nn.AdaptiveAvgPool2d((1, 1)) # Action Context. + self.compress = nn.Conv2d(out_ch * 3 + in_ch, + out_ch, + kernel_size=(1, 1)) # PRELU is outside the loop, check at the end of the code. 
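+        # Receptive-field note: for the k=3 kernels configured here (txc_kernel_size: 3),
+        # the stride-1 output size is in + 2*pad - dil*(k-1), so block1 (pad 1, dil 1),
+        # block2 (pad 2, dil 2) and block3 (pad 3, dil 3) all preserve the (joints, seq)
+        # dimensions while widening the temporal context; global_action broadcasts a
+        # fully pooled summary back to that same size before the channel compression.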
+ + def forward(self, x): + b, dim, joints, seq = x.shape + global_action = F.interpolate(self.pooling(x), (joints, seq)) + out = torch.cat((self.block1(x), self.block2(x), self.block3(x), global_action), dim=1) + out = self.compress(out) + return out + + +def mish(x): + return (x * torch.tanh(F.softplus(x))) + + +class ConvTemporalGraphical(nn.Module): + # Source : https://github.com/yysijie/st-gcn/blob/master/net/st_gcn.py + r"""The basic module for applying a graph convolution. + Args: + Shape: + - Input: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Output: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. + """ + + def __init__(self, time_dim, joints_dim, domain, interpratable): + super(ConvTemporalGraphical, self).__init__() + + if domain == "time": + # learnable, graph-agnostic 3-d adjacency matrix(or edge importance matrix) + size = joints_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(time_dim, size, size)) + self.domain = 'nctv,tvw->nctw' + else: + self.domain = 'nctv,ntvw->nctw' + elif domain == "space": + size = time_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(joints_dim, size, size)) + self.domain = 'nctv,vtq->ncqv' + else: + self.domain = 'nctv,nvtq->ncqv' + if not interpratable: + stdv = 1. / math.sqrt(self.A.size(1)) + self.A.data.uniform_(-stdv, stdv) + + def forward(self, x): + x = torch.einsum(self.domain, (x, self.A)) + return x.contiguous() + + +class Map2Adj(nn.Module): + def __init__(self, + in_ch, + time_dim, + joints_dim, + domain, + dropout, + ): + super(Map2Adj, self).__init__() + self.domain = domain + inter_ch = in_ch // 2 + self.time_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(time_dim, 1), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, time_dim, kernel_size=1, bias=False), + ) + self.joint_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(1, joints_dim), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, joints_dim, kernel_size=1, bias=False), + ) + + if self.domain == "space": + ch = joints_dim + self.perm1 = (0, 1, 2, 3) + self.perm2 = (0, 3, 2, 1) + if self.domain == "time": + ch = time_dim + self.perm1 = (0, 2, 1, 3) + self.perm2 = (0, 1, 2, 3) + + inter_ch = ch # // 2 + self.expansor = nn.Sequential(nn.Conv2d(ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(inter_ch, ch, kernel_size=1, bias=False), + ) + self.time_compress.apply(self._init_weights) + self.joint_compress.apply(self._init_weights) + self.expansor.apply(self._init_weights) + + def _init_weights(self, m, gain=0.05): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + if isinstance(m, (nn.Conv2d, nn.Conv1d)): + torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, dims, seq, joints = x.shape + dim_seq = self.time_compress(x) + dim_space = 
self.joint_compress(x) + o = torch.matmul(dim_space.permute(self.perm1), dim_seq.permute(self.perm2)) + Adj = self.expansor(o) + return Adj + + +class Domain_GCNN_layer(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. + :in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + kernel_size, + stride, + time_dim, + joints_dim, + domain, + interpratable, + dropout, + bias=True): + + super(Domain_GCNN_layer, self).__init__() + self.kernel_size = kernel_size + assert self.kernel_size[0] % 2 == 1 + assert self.kernel_size[1] % 2 == 1 + padding = ((self.kernel_size[0] - 1) // 2, (self.kernel_size[1] - 1) // 2) + self.interpratable = interpratable + self.domain = domain + + self.gcn = ConvTemporalGraphical(time_dim, joints_dim, domain, interpratable) + self.tcn = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + (self.kernel_size[0], self.kernel_size[1]), + (stride, stride), + padding, + ), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ) + + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + if self.interpratable: + self.map_to_adj = Map2Adj(in_ch, + time_dim, + joints_dim, + domain, + dropout, + ) + else: + self.map_to_adj = nn.Identity() + self.prelu = nn.PReLU() + + def forward(self, x): + # assert A.shape[0] == self.kernel_size[1], print(A.shape[0],self.kernel_size) + res = self.residual(x) + self.Adj = self.map_to_adj(x) + if self.interpratable: + self.gcn.A = self.Adj + x1 = self.gcn(x) + x2 = self.tcn(x1) + x3 = x2 + res + x4 = self.prelu(x3) + return x4 + + +# Dynamic SpatioTemporal Decompose Graph Convolutions (DSTD-GC) +class DSTD_GC(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ : in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + interpratable, + kernel_size, + stride, + time_dim, + joints_dim, + reduction, + dropout): + super(DSTD_GC, self).__init__() + self.dsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "space", interpratable, dropout) + self.tsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "time", interpratable, dropout) + + self.compressor = nn.Sequential(nn.Conv2d(out_ch * 2, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch), + nn.PReLU(), + SE.SELayer2d(out_ch, reduction=reduction), + ) + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + + # Weighting features + out_ch_c = out_ch // 2 if out_ch // 2 > 1 else 1 + self.global_norm = nn.BatchNorm2d(in_ch) + self.conv_s = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.conv_t = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map_s = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.map_t = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.prelu1 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + self.prelu2 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + + def _get_stats_(self, x): + global_avg_pool = x.mean((3, 2)).mean(1, keepdims=True) + global_avg_pool_features = x.mean(3).mean(1) + global_std_pool = x.std((3, 2)).std(1, keepdims=True) + global_std_pool_features = x.std(3).std(1) + return torch.cat(( + global_avg_pool, + global_avg_pool_features, + global_std_pool, + global_std_pool_features, + ), + dim=1) + + def forward(self, x): + b, dim, seq, joints = x.shape # 64, 3, 10, 22 + xn = self.global_norm(x) + + stats = self._get_stats_(xn) + w1 = torch.cat((self.conv_s(xn).view(b, -1), stats), dim=1) + stats = self._get_stats_(xn) + w2 = torch.cat((self.conv_t(xn).view(b, -1), stats), dim=1) + self.w1 = self.map_s(w1) + self.w2 = self.map_t(w2) + w1 = self.w1[..., None, None] + w2 = self.w2[..., None, None] + + x1 = self.dsgn(xn) + x2 = self.tsgn(xn) + out = torch.cat((self.prelu1(w1 * x1), self.prelu2(w2 * x2)), dim=1) + out = self.compressor(out) + return out + self.residual(xn) + + +class ContextLayer(nn.Module): + def __init__(self, + in_ch, + hidden_ch, + output_seq, + input_seq, + joints, + dims=3, + reduction=8, + dropout=0.1, + ): + super(ContextLayer, self).__init__() + self.n_output = output_seq + self.n_joints = joints + self.n_input = input_seq + self.context_conv1 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + + self.context_conv2 = 
nn.Sequential(nn.Conv2d(in_ch, hidden_ch, (input_seq, 1), bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.context_conv3 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.map1 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map2 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map3 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fmap_s = nn.Sequential(nn.Linear(self.n_output * 3, self.n_joints, bias=False), + nn.BatchNorm1d(self.n_joints), + nn.Dropout(dropout, inplace=True), ) + + self.fmap_t = nn.Sequential(nn.Linear(self.n_output * 3, self.n_output, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), ) + + # inter_ch = self.n_joints # // 2 + self.norm_map = nn.Sequential(nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + SE.SELayer1d(self.n_output, reduction=reduction), + nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fconv = nn.Sequential(nn.Conv2d(1, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + nn.Conv2d(dims, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + ) + self.SE = SE.SELayer2d(self.n_output, reduction=reduction) + + def forward(self, x): + b, _, seq, joint_dim = x.shape + y1 = self.context_conv1(x).max(-1)[0].max(-1)[0] + y2 = self.context_conv2(x).view(b, -1, joint_dim).max(-1)[0] + ym = self.context_conv3(x).mean((2, 3)) + y = torch.cat((self.map1(y1), self.map2(y2), self.map3(ym)), dim=1) + self.joints = self.fmap_s(y) + self.displacements = self.fmap_t(y) # .cumsum(1) + self.seq_joints = torch.bmm(self.displacements.unsqueeze(2), self.joints.unsqueeze(1)) + self.seq_joints_n = self.norm_map(self.seq_joints) + self.seq_joints_dims = self.fconv(self.seq_joints_n.view(b, 1, self.n_output, self.n_joints)) + o = self.SE(self.seq_joints_dims.permute(0, 2, 3, 1)) + return o + + +class MlpMixer_ext(nn.Module): + """ + Shape: + - Input[0]: Input sequence in :math:`(N, in_ch,T_in, V)` format + - Output[0]: Output sequence in :math:`(N,T_out,in_ch, V)` format + where + :math:`N` is a batch size, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ :in_ch=number of channels for the coordiantes(default=3) + + + """ + + def __init__(self, arch, learn): + super(MlpMixer_ext, self).__init__() + self.clipping = arch.model_params.clipping + + self.n_input = arch.model_params.input_n + self.n_output = arch.model_params.output_n + self.n_joints = arch.model_params.joints + self.n_txcnn_layers = arch.model_params.n_txcnn_layers + self.txc_kernel_size = [arch.model_params.txc_kernel_size] * 2 + self.input_gcn = arch.model_params.input_gcn + self.output_gcn = arch.model_params.output_gcn + self.reduction = arch.model_params.reduction + self.hidden_dim = arch.model_params.hidden_dim + + self.st_gcnns = nn.ModuleList() + self.txcnns = nn.ModuleList() + self.se = nn.ModuleList() + + self.in_conv = nn.ModuleList() + self.context_layer = nn.ModuleList() + self.trans = nn.ModuleList() + self.in_ch = 10 + self.model_tx = self.input_gcn.model_complexity.copy() + self.model_tx.insert(0, 1) # add 1 in the position 0. + + self.input_gcn.model_complexity.insert(0, self.in_ch) + self.input_gcn.model_complexity.append(self.in_ch) + # self.input_gcn.interpretable.insert(0, True) + # self.input_gcn.interpretable.append(False) + for i in range(len(self.input_gcn.model_complexity) - 1): + self.st_gcnns.append(DSTD_GC(self.input_gcn.model_complexity[i], + self.input_gcn.model_complexity[i + 1], + self.input_gcn.interpretable[i], + [1, 1], 1, self.n_input, self.n_joints, self.reduction, learn.dropout)) + + self.context_layer = ContextLayer(1, self.hidden_dim, + self.n_output, self.n_output, self.n_joints, + 3, self.reduction, learn.dropout + ) + + # at this point, we must permute the dimensions of the gcn network, from (N,C,T,V) into (N,T,C,V) + # with kernel_size[3,3] the dimensions of C,V will be maintained + self.txcnns.append(FPN(self.n_input, self.n_output, self.txc_kernel_size, 0., self.reduction)) + for i in range(1, self.n_txcnn_layers): + self.txcnns.append(FPN(self.n_output, self.n_output, self.txc_kernel_size, 0., self.reduction)) + + self.prelus = nn.ModuleList() + for j in range(self.n_txcnn_layers): + self.prelus.append(nn.PReLU()) + + self.dim_conversor = nn.Sequential(nn.Conv2d(self.in_ch, 3, 1, bias=False), + nn.BatchNorm2d(3), + nn.PReLU(), + nn.Conv2d(3, 3, 1, bias=False), + nn.PReLU(3), ) + + self.st_gcnns_o = nn.ModuleList() + self.output_gcn.model_complexity.insert(0, 3) + for i in range(len(self.output_gcn.model_complexity) - 1): + self.st_gcnns_o.append(DSTD_GC(self.output_gcn.model_complexity[i], + self.output_gcn.model_complexity[i + 1], + self.output_gcn.interpretable[i], + [1, 1], 1, self.n_joints, self.n_output, self.reduction, learn.dropout)) + + self.st_gcnns_o.apply(self._init_weights) + self.st_gcnns.apply(self._init_weights) + self.txcnns.apply(self._init_weights) + + def _init_weights(self, m, gain=0.1): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, seq, joints, dim = x.shape + vel = torch.zeros_like(x) + vel[:, :-1] = torch.diff(x, dim=1) + vel[:, -1] = x[:, -1] + acc = torch.zeros_like(x) + acc[:, :-1] = torch.diff(vel, dim=1) + acc[:, -1] = vel[:, -1] + x1 = torch.cat((x, acc, vel, torch.norm(vel, dim=-1, keepdim=True)), dim=-1) + x2 = x1.permute((0, 3, 1, 2)) # (torch.Size([64, 10, 22, 7]) + x3 = x2 + + for i in range(len(self.st_gcnns)): + x3 = self.st_gcnns[i](x3) + + x5 = 
x3.permute(0, 2, 1, 3) # prepare the input for the Time-Extrapolator-CNN (NCTV->NTCV) + + x6 = self.prelus[0](self.txcnns[0](x5)) + for i in range(1, self.n_txcnn_layers): + x6 = self.prelus[i](self.txcnns[i](x6)) + x6 # residual connection + + x6 = self.dim_conversor(x6.permute(0, 2, 1, 3)).permute(0, 2, 3, 1) + x7 = x6.cumsum(1) + + act = self.context_layer(x7.reshape(b, 1, self.n_output, joints * x7.shape[-1])) + x8 = x7.permute(0, 3, 2, 1) + for i in range(len(self.st_gcnns_o)): + x8 = self.st_gcnns_o[i](x8) + x9 = x8.permute(0, 3, 2, 1) + act + + return x[:, -1:] + x9, diff --git a/h36m_detailed/64/metric_full_original_test.xlsx b/h36m_detailed/64/metric_full_original_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..d1c2c91a0c5e5069b4f9082539cb52246fe070a5 --- /dev/null +++ b/h36m_detailed/64/metric_full_original_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9aefe76333ce12af037e4f29835216bc0db80886e20b960fd57e45d470109553 +size 2048676 diff --git a/h36m_detailed/64/metric_original_test.xlsx b/h36m_detailed/64/metric_original_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..6e492ee11e88c74c2d646e806358b355425e977f --- /dev/null +++ b/h36m_detailed/64/metric_original_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da19096e8911209f49ac526dc4872e037a1b7d9f4eeee11b687767a6810692c5 +size 2050608 diff --git a/h36m_detailed/64/metric_test.xlsx b/h36m_detailed/64/metric_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..81e2d0d2cbd63b6625ccf3daee3c1db9dcb6fc3c --- /dev/null +++ b/h36m_detailed/64/metric_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:997d02572ea23ab0ef99b70bcb2b9345333a705d5cd3689b2dab68359c1aecb1 +size 2049626 diff --git a/h36m_detailed/64/metric_train.xlsx b/h36m_detailed/64/metric_train.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..fd82a828a04d98aa1132e0baf9cd737b69e0a4ac --- /dev/null +++ b/h36m_detailed/64/metric_train.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffcfdea4a175e5155f74f620170ade50139b455d338a51ffa64d84b0f923a1df +size 1844301 diff --git a/h36m_detailed/64/sample_original_test.xlsx b/h36m_detailed/64/sample_original_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..6455f79d4d28341c98b7f8d3c467ed7b89cba417 --- /dev/null +++ b/h36m_detailed/64/sample_original_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a343ba69d0bea916b9e4825e1f2bf27621c008665699bd3d2645d01fbacf8826 +size 29608760 diff --git a/h36m_detailed/8/files/CISTGCN-benchmark-best.pth.tar b/h36m_detailed/8/files/CISTGCN-benchmark-best.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..d90b13999f9ad96742653eb879b398243a627f44 --- /dev/null +++ b/h36m_detailed/8/files/CISTGCN-benchmark-best.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47b28248ab629ce18f5908f0c39c1d4700d12c5539f64828ffe4b73ee9c3c5af +size 5339339 diff --git a/h36m_detailed/8/files/CISTGCN-benchmark-last.pth.tar b/h36m_detailed/8/files/CISTGCN-benchmark-last.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..d5c5f6ae35352933328a51a4c79f4688717788a9 --- /dev/null +++ b/h36m_detailed/8/files/CISTGCN-benchmark-last.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e275f51a3e51882421ab65244fe61a41109e9b60ab88df2aad79b4bbb676d75f +size 5343499 diff --git a/h36m_detailed/8/files/config-20221116_2202-id6444.yaml b/h36m_detailed/8/files/config-20221116_2202-id6444.yaml new file mode 100644 index 0000000000000000000000000000000000000000..64cf5003a2b629632699692b834ea0da51dc3c96 --- /dev/null +++ b/h36m_detailed/8/files/config-20221116_2202-id6444.yaml @@ -0,0 +1,105 @@ +architecture_config: + model: MlpMixer_ext_1 + model_params: + input_n: 10 + joints: 22 + output_n: 25 + n_txcnn_layers: 4 + txc_kernel_size: 3 + reduction: 8 + hidden_dim: 64 + input_gcn: + model_complexity: + - 8 + - 8 + - 8 + - 8 + interpretable: + - true + - true + - true + - true + - true + output_gcn: + model_complexity: + - 3 + interpretable: + - true + clipping: 15 +learning_config: + WarmUp: 100 + normalize: false + dropout: 0.1 + weight_decay: 1e-4 + epochs: 50 + lr: 0.01 +# max_norm: 3 + scheduler: + type: StepLR + params: + step_size: 3000 + gamma: 0.8 + loss: + weights: "" + type: "mpjpe" + augmentations: + random_scale: + x: + - 0.95 + - 1.05 + y: + - 0.90 + - 1.10 + z: + - 0.95 + - 1.05 + random_noise: "" + random_flip: + x: true + y: "" + z: true + random_rotation: + x: + - -5 + - 5 + y: + - -180 + - 180 + z: + - -5 + - 5 + random_translation: + x: + - -0.10 + - 0.10 + y: + - -0.10 + - 0.10 + z: + - -0.10 + - 0.10 +environment_config: + actions: all + evaluate_from: 0 + is_norm: true + job: 16 + sample_rate: 2 + return_all_joints: true + save_grads: false + test_batch: 128 + train_batch: 128 +general_config: + data_dir: /ai-research/datasets/attention/ann_h3.6m/ + experiment_name: STSGCN-tests + load_model_path: '' + log_path: /ai-research/notebooks/testing_repos/logdir/ + model_name_rel_path: STSGCN-benchmark + save_all_intermediate_models: false + save_models: true + tensorboard: + num_mesh: 4 +meta_config: + comment: Testing a new architecture based on STSGCN paper. 
+ project: Attention + task: 3d keypoint prediction + version: 0.1.1 diff --git a/h36m_detailed/8/files/model.py b/h36m_detailed/8/files/model.py new file mode 100644 index 0000000000000000000000000000000000000000..810b55431f52d972af0b3897abf5d1b9e0e4c602 --- /dev/null +++ b/h36m_detailed/8/files/model.py @@ -0,0 +1,597 @@ +import math + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from ..layers import deformable_conv, SE + +torch.manual_seed(0) + + +# This is the simple CNN layer,that performs a 2-D convolution while maintaining the dimensions of the input(except for the features dimension) +class CNN_layer(nn.Module): + def __init__(self, + in_ch, + out_ch, + kernel_size, + dropout, + bias=True): + super(CNN_layer, self).__init__() + self.kernel_size = kernel_size + padding = ( + (kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) # padding so that both dimensions are maintained + assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1 + + self.block1 = [nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=padding, dilation=(1, 1)), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ] + + self.block1 = nn.Sequential(*self.block1) + + def forward(self, x): + output = self.block1(x) + return output + + +class FPN(nn.Module): + def __init__(self, in_ch, + out_ch, + kernel, # (3,1) + dropout, + reduction, + ): + super(FPN, self).__init__() + kernel_size = kernel if isinstance(kernel, (tuple, list)) else (kernel, kernel) + padding = ((kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) + pad1 = (padding[0], padding[1]) + pad2 = (padding[0] + pad1[0], padding[1] + pad1[1]) + pad3 = (padding[0] + pad2[0], padding[1] + pad2[1]) + dil1 = (1, 1) + dil2 = (1 + pad1[0], 1 + pad1[1]) + dil3 = (1 + pad2[0], 1 + pad2[1]) + self.block1 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad1, dilation=dil1), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block2 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad2, dilation=dil2), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block3 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad3, dilation=dil3), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.pooling = nn.AdaptiveAvgPool2d((1, 1)) # Action Context. + self.compress = nn.Conv2d(out_ch * 3 + in_ch, + out_ch, + kernel_size=(1, 1)) # PRELU is outside the loop, check at the end of the code. + + def forward(self, x): + b, dim, joints, seq = x.shape + global_action = F.interpolate(self.pooling(x), (joints, seq)) + out = torch.cat((self.block1(x), self.block2(x), self.block3(x), global_action), dim=1) + out = self.compress(out) + return out + + +def mish(x): + return (x * torch.tanh(F.softplus(x))) + + +class ConvTemporalGraphical(nn.Module): + # Source : https://github.com/yysijie/st-gcn/blob/master/net/st_gcn.py + r"""The basic module for applying a graph convolution. + Args: + Shape: + - Input: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Output: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ """ + + def __init__(self, time_dim, joints_dim, domain, interpratable): + super(ConvTemporalGraphical, self).__init__() + + if domain == "time": + # learnable, graph-agnostic 3-d adjacency matrix(or edge importance matrix) + size = joints_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(time_dim, size, size)) + self.domain = 'nctv,tvw->nctw' + else: + self.domain = 'nctv,ntvw->nctw' + elif domain == "space": + size = time_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(joints_dim, size, size)) + self.domain = 'nctv,vtq->ncqv' + else: + self.domain = 'nctv,nvtq->ncqv' + if not interpratable: + stdv = 1. / math.sqrt(self.A.size(1)) + self.A.data.uniform_(-stdv, stdv) + + def forward(self, x): + x = torch.einsum(self.domain, (x, self.A)) + return x.contiguous() + + +class Map2Adj(nn.Module): + def __init__(self, + in_ch, + time_dim, + joints_dim, + domain, + dropout, + ): + super(Map2Adj, self).__init__() + self.domain = domain + inter_ch = in_ch // 2 + self.time_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(time_dim, 1), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, time_dim, kernel_size=1, bias=False), + ) + self.joint_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(1, joints_dim), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, joints_dim, kernel_size=1, bias=False), + ) + + if self.domain == "space": + ch = joints_dim + self.perm1 = (0, 1, 2, 3) + self.perm2 = (0, 3, 2, 1) + if self.domain == "time": + ch = time_dim + self.perm1 = (0, 2, 1, 3) + self.perm2 = (0, 1, 2, 3) + + inter_ch = ch # // 2 + self.expansor = nn.Sequential(nn.Conv2d(ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(inter_ch, ch, kernel_size=1, bias=False), + ) + self.time_compress.apply(self._init_weights) + self.joint_compress.apply(self._init_weights) + self.expansor.apply(self._init_weights) + + def _init_weights(self, m, gain=0.05): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + if isinstance(m, (nn.Conv2d, nn.Conv1d)): + torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, dims, seq, joints = x.shape + dim_seq = self.time_compress(x) + dim_space = self.joint_compress(x) + o = torch.matmul(dim_space.permute(self.perm1), dim_seq.permute(self.perm2)) + Adj = self.expansor(o) + return Adj + + +class Domain_GCNN_layer(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ :in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + kernel_size, + stride, + time_dim, + joints_dim, + domain, + interpratable, + dropout, + bias=True): + + super(Domain_GCNN_layer, self).__init__() + self.kernel_size = kernel_size + assert self.kernel_size[0] % 2 == 1 + assert self.kernel_size[1] % 2 == 1 + padding = ((self.kernel_size[0] - 1) // 2, (self.kernel_size[1] - 1) // 2) + self.interpratable = interpratable + self.domain = domain + + self.gcn = ConvTemporalGraphical(time_dim, joints_dim, domain, interpratable) + self.tcn = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + (self.kernel_size[0], self.kernel_size[1]), + (stride, stride), + padding, + ), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ) + + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + if self.interpratable: + self.map_to_adj = Map2Adj(in_ch, + time_dim, + joints_dim, + domain, + dropout, + ) + else: + self.map_to_adj = nn.Identity() + self.prelu = nn.PReLU() + + def forward(self, x): + # assert A.shape[0] == self.kernel_size[1], print(A.shape[0],self.kernel_size) + res = self.residual(x) + self.Adj = self.map_to_adj(x) + if self.interpratable: + self.gcn.A = self.Adj + x1 = self.gcn(x) + x2 = self.tcn(x1) + x3 = x2 + res + x4 = self.prelu(x3) + return x4 + + +# Dynamic SpatioTemporal Decompose Graph Convolutions (DSTD-GC) +class DSTD_GC(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ : in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + interpratable, + kernel_size, + stride, + time_dim, + joints_dim, + reduction, + dropout): + super(DSTD_GC, self).__init__() + self.dsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "space", interpratable, dropout) + self.tsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "time", interpratable, dropout) + + self.compressor = nn.Sequential(nn.Conv2d(out_ch * 2, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch), + nn.PReLU(), + SE.SELayer2d(out_ch, reduction=reduction), + ) + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + + # Weighting features + out_ch_c = out_ch // 2 if out_ch // 2 > 1 else 1 + self.global_norm = nn.BatchNorm2d(in_ch) + self.conv_s = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.conv_t = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map_s = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.map_t = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.prelu1 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + self.prelu2 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + + def _get_stats_(self, x): + global_avg_pool = x.mean((3, 2)).mean(1, keepdims=True) + global_avg_pool_features = x.mean(3).mean(1) + global_std_pool = x.std((3, 2)).std(1, keepdims=True) + global_std_pool_features = x.std(3).std(1) + return torch.cat(( + global_avg_pool, + global_avg_pool_features, + global_std_pool, + global_std_pool_features, + ), + dim=1) + + def forward(self, x): + b, dim, seq, joints = x.shape # 64, 3, 10, 22 + xn = self.global_norm(x) + + stats = self._get_stats_(xn) + w1 = torch.cat((self.conv_s(xn).view(b, -1), stats), dim=1) + stats = self._get_stats_(xn) + w2 = torch.cat((self.conv_t(xn).view(b, -1), stats), dim=1) + self.w1 = self.map_s(w1) + self.w2 = self.map_t(w2) + w1 = self.w1[..., None, None] + w2 = self.w2[..., None, None] + + x1 = self.dsgn(xn) + x2 = self.tsgn(xn) + out = torch.cat((self.prelu1(w1 * x1), self.prelu2(w2 * x2)), dim=1) + out = self.compressor(out) + return torch.clip(out + self.residual(xn), -1e5, 1e5) + + +class ContextLayer(nn.Module): + def __init__(self, + in_ch, + hidden_ch, + output_seq, + input_seq, + joints, + dims=3, + reduction=8, + dropout=0.1, + ): + super(ContextLayer, self).__init__() + self.n_output = output_seq + self.n_joints = joints + self.n_input = input_seq + self.context_conv1 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + + 
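+        # The context_conv branches built here are pooled in forward() to
+        # per-sample descriptors (max for conv1/conv2, mean for conv3),
+        # mapped to `displacements` (one value per output frame) and
+        # `joints` (one value per joint), then combined into a rank-1
+        # (frame, joint) context map by a batched outer product. A minimal
+        # sketch with assumed sizes n_output=25, n_joints=22:
+        #   import torch
+        #   b, n_output, n_joints = 4, 25, 22
+        #   displacements = torch.randn(b, n_output)
+        #   joints = torch.randn(b, n_joints)
+        #   ctx = torch.bmm(displacements.unsqueeze(2), joints.unsqueeze(1))
+        #   assert ctx.shape == (b, n_output, n_joints)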
self.context_conv2 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, (input_seq, 1), bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.context_conv3 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.map1 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map2 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map3 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fmap_s = nn.Sequential(nn.Linear(self.n_output * 3, self.n_joints, bias=False), + nn.BatchNorm1d(self.n_joints), + nn.Dropout(dropout, inplace=True), ) + + self.fmap_t = nn.Sequential(nn.Linear(self.n_output * 3, self.n_output, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), ) + + # inter_ch = self.n_joints # // 2 + self.norm_map = nn.Sequential(nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + SE.SELayer1d(self.n_output, reduction=reduction), + nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fconv = nn.Sequential(nn.Conv2d(1, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + nn.Conv2d(dims, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + ) + self.SE = SE.SELayer2d(self.n_output, reduction=reduction) + + def forward(self, x): + b, _, seq, joint_dim = x.shape + y1 = self.context_conv1(x).max(-1)[0].max(-1)[0] + y2 = self.context_conv2(x).view(b, -1, joint_dim).max(-1)[0] + ym = self.context_conv3(x).mean((2, 3)) + y = torch.cat((self.map1(y1), self.map2(y2), self.map3(ym)), dim=1) + self.joints = self.fmap_s(y) + self.displacements = self.fmap_t(y) # .cumsum(1) + self.seq_joints = torch.bmm(self.displacements.unsqueeze(2), self.joints.unsqueeze(1)) + self.seq_joints_n = self.norm_map(self.seq_joints) + self.seq_joints_dims = self.fconv(self.seq_joints_n.view(b, 1, self.n_output, self.n_joints)) + o = self.SE(self.seq_joints_dims.permute(0, 2, 3, 1)) + return o + + +class MlpMixer_ext(nn.Module): + """ + Shape: + - Input[0]: Input sequence in :math:`(N, in_ch,T_in, V)` format + - Output[0]: Output sequence in :math:`(N,T_out,in_ch, V)` format + where + :math:`N` is a batch size, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ :in_ch=number of channels for the coordiantes(default=3) + + + """ + + def __init__(self, arch, learn): + super(MlpMixer_ext, self).__init__() + self.clipping = arch.model_params.clipping + + self.n_input = arch.model_params.input_n + self.n_output = arch.model_params.output_n + self.n_joints = arch.model_params.joints + self.n_txcnn_layers = arch.model_params.n_txcnn_layers + self.txc_kernel_size = [arch.model_params.txc_kernel_size] * 2 + self.input_gcn = arch.model_params.input_gcn + self.output_gcn = arch.model_params.output_gcn + self.reduction = arch.model_params.reduction + self.hidden_dim = arch.model_params.hidden_dim + + self.st_gcnns = nn.ModuleList() + self.txcnns = nn.ModuleList() + self.se = nn.ModuleList() + + self.in_conv = nn.ModuleList() + self.context_layer = nn.ModuleList() + self.trans = nn.ModuleList() + self.in_ch = 10 + self.model_tx = self.input_gcn.model_complexity.copy() + self.model_tx.insert(0, 1) # add 1 in the position 0. + + self.input_gcn.model_complexity.insert(0, self.in_ch) + self.input_gcn.model_complexity.append(self.in_ch) + # self.input_gcn.interpretable.insert(0, True) + # self.input_gcn.interpretable.append(False) + for i in range(len(self.input_gcn.model_complexity) - 1): + self.st_gcnns.append(DSTD_GC(self.input_gcn.model_complexity[i], + self.input_gcn.model_complexity[i + 1], + self.input_gcn.interpretable[i], + [1, 1], 1, self.n_input, self.n_joints, self.reduction, learn.dropout)) + + self.context_layer = ContextLayer(1, self.hidden_dim, + self.n_output, self.n_output, self.n_joints, + 3, self.reduction, learn.dropout + ) + + # at this point, we must permute the dimensions of the gcn network, from (N,C,T,V) into (N,T,C,V) + # with kernel_size[3,3] the dimensions of C,V will be maintained + self.txcnns.append(FPN(self.n_input, self.n_output, self.txc_kernel_size, 0., self.reduction)) + for i in range(1, self.n_txcnn_layers): + self.txcnns.append(FPN(self.n_output, self.n_output, self.txc_kernel_size, 0., self.reduction)) + + self.prelus = nn.ModuleList() + for j in range(self.n_txcnn_layers): + self.prelus.append(nn.PReLU()) + + self.dim_conversor = nn.Sequential(nn.Conv2d(self.in_ch, 3, 1, bias=False), + nn.BatchNorm2d(3), + nn.PReLU(), + nn.Conv2d(3, 3, 1, bias=False), + nn.PReLU(3), ) + + self.st_gcnns_o = nn.ModuleList() + self.output_gcn.model_complexity.insert(0, 3) + for i in range(len(self.output_gcn.model_complexity) - 1): + self.st_gcnns_o.append(DSTD_GC(self.output_gcn.model_complexity[i], + self.output_gcn.model_complexity[i + 1], + self.output_gcn.interpretable[i], + [1, 1], 1, self.n_joints, self.n_output, self.reduction, learn.dropout)) + + self.st_gcnns_o.apply(self._init_weights) + self.st_gcnns.apply(self._init_weights) + self.txcnns.apply(self._init_weights) + + def _init_weights(self, m, gain=0.1): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, seq, joints, dim = x.shape + vel = torch.zeros_like(x) + vel[:, :-1] = torch.diff(x, dim=1) + vel[:, -1] = x[:, -1] + acc = torch.zeros_like(x) + acc[:, :-1] = torch.diff(vel, dim=1) + acc[:, -1] = vel[:, -1] + x1 = torch.cat((x, acc, vel, torch.norm(vel, dim=-1, keepdim=True)), dim=-1) + x2 = x1.permute((0, 3, 1, 2)) # (torch.Size([64, 10, 22, 7]) + x3 = x2 + + for i in range(len(self.st_gcnns)): + x3 = self.st_gcnns[i](x3) + + x5 = 
x3.permute(0, 2, 1, 3) # prepare the input for the Time-Extrapolator-CNN (NCTV->NTCV) + + x6 = self.prelus[0](self.txcnns[0](x5)) + for i in range(1, self.n_txcnn_layers): + x6 = self.prelus[i](self.txcnns[i](x6)) + x6 # residual connection + + x6 = self.dim_conversor(x6.permute(0, 2, 1, 3)).permute(0, 2, 3, 1) + x7 = x6.cumsum(1) + + act = self.context_layer(x7.reshape(b, 1, self.n_output, joints * x7.shape[-1])) + x8 = x7.permute(0, 3, 2, 1) + for i in range(len(self.st_gcnns_o)): + x8 = self.st_gcnns_o[i](x8) + x9 = x8.permute(0, 3, 2, 1) + act + + return x[:, -1:] + x9, diff --git a/h36m_detailed/8/metric_full_original_test.xlsx b/h36m_detailed/8/metric_full_original_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..87530efa91e1c696f9907f46e95cd7165685eb38 --- /dev/null +++ b/h36m_detailed/8/metric_full_original_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16a38c585d516b280c90903d153360888df3095c405b65d0b9c08d9016d0cc64 +size 2048156 diff --git a/h36m_detailed/8/metric_original_test.xlsx b/h36m_detailed/8/metric_original_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..b3ad0de24faf984c4b45fb435ff82d7919477e69 --- /dev/null +++ b/h36m_detailed/8/metric_original_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5945740c0478dbc8abcd9475bb8a345783130c1c222a64ae2a448f5929a8c626 +size 2051725 diff --git a/h36m_detailed/8/metric_test.xlsx b/h36m_detailed/8/metric_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..f6049f557be1163fdaea4a99cb89565bc7af07b1 --- /dev/null +++ b/h36m_detailed/8/metric_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a707cc259f4fab0438ac0ab986cefde36430ecb99a2459800b9ed0eb74e4efc +size 2050259 diff --git a/h36m_detailed/8/metric_train.xlsx b/h36m_detailed/8/metric_train.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..0a1301d5c5c95f5b4ca1c160527e5cd6e18b8e36 --- /dev/null +++ b/h36m_detailed/8/metric_train.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:888b454adf3d8d974f97db5eb2bba1964fd2a01899625a7569198654c4db73df +size 1899301 diff --git a/h36m_detailed/8/sample_original_test.xlsx b/h36m_detailed/8/sample_original_test.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..3c02a26732175ec1a35c4c16f47114be6f85d582 --- /dev/null +++ b/h36m_detailed/8/sample_original_test.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca4d06df4567333f2d18034b39df732c3f0ea390663d9ef1fc6724b797fef964 +size 29585393 diff --git a/h36m_detailed/short-400ms/16/files/config-20230104_1806-id2293.yaml b/h36m_detailed/short-400ms/16/files/config-20230104_1806-id2293.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0df0c64e72ef31dc31adece0a73be38ef273270c --- /dev/null +++ b/h36m_detailed/short-400ms/16/files/config-20230104_1806-id2293.yaml @@ -0,0 +1,106 @@ +architecture_config: + model: CISTGCN_0 + model_params: + input_n: 10 + joints: 22 + output_n: 10 + n_txcnn_layers: 4 + txc_kernel_size: 3 + reduction: 8 + hidden_dim: 64 + input_gcn: + model_complexity: + - 16 + - 16 + - 16 + - 16 + interpretable: + - true + - true + - true + - true + - true + output_gcn: + model_complexity: + - 3 + interpretable: + - true + clipping: 15 +learning_config: + WarmUp: 100 + normalize: false + dropout: 0.1 + weight_decay: 1e-4 + epochs: 50 + lr: 0.01 +# max_norm: 3 + scheduler: + type: StepLR + 
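+    # Note (illustrative, not from the original config): StepLR decays the
+    # learning rate multiplicatively, lr_t = lr * gamma ** (t // step_size);
+    # with lr=0.01, step_size=3000 and gamma=0.8 the first drop is to 0.008
+    # after 3000 scheduler steps. Minimal PyTorch sketch around a
+    # hypothetical `optimizer`:
+    #   from torch.optim.lr_scheduler import StepLR
+    #   scheduler = StepLR(optimizer, step_size=3000, gamma=0.8)
+    #   # call scheduler.step() per iteration or epoch, per the training loop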
params: + step_size: 3000 + gamma: 0.8 + loss: + weights: "" + type: "mpjpe" + augmentations: + random_scale: + x: + - 0.95 + - 1.05 + y: + - 0.90 + - 1.10 + z: + - 0.95 + - 1.05 + random_noise: "" + random_flip: + x: true + y: "" + z: true + random_rotation: + x: + - -5 + - 5 + y: + - -180 + - 180 + z: + - -5 + - 5 + random_translation: + x: + - -0.10 + - 0.10 + y: + - -0.10 + - 0.10 + z: + - -0.10 + - 0.10 +environment_config: + actions: all + protocol: "pro1" # only on ExPI 'pro1: common action split; 0-6: single action split; pro3: unseen action split' + evaluate_from: 0 + is_norm: true + job: 16 + sample_rate: 2 + return_all_joints: true + save_grads: false + test_batch: 128 + train_batch: 128 +general_config: + data_dir: /ai-research/datasets/attention/ann_h3.6m/ + experiment_name: short-STSGCN + load_model_path: '' + log_path: /ai-research/notebooks/testing_repos/logdir/ + model_name_rel_path: short-STSGCN + save_all_intermediate_models: false + save_models: true + tensorboard: + num_mesh: 4 +meta_config: + comment: Adding Benchmarking for H3.6M, AMASS, CMU and 3DPW, ExPI on our new architecture + project: Attention + task: 3d motion prediction on 18, 22 and 25 joints testing on 18 and 32 joints + version: 0.1.3 \ No newline at end of file diff --git a/h36m_detailed/short-400ms/16/files/model.py b/h36m_detailed/short-400ms/16/files/model.py new file mode 100644 index 0000000000000000000000000000000000000000..b17fb78904d8c8b6644e1fe17bff1425c5cc19d7 --- /dev/null +++ b/h36m_detailed/short-400ms/16/files/model.py @@ -0,0 +1,597 @@ +import math + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from ..layers import deformable_conv, SE + +torch.manual_seed(0) + + +# This is the simple CNN layer,that performs a 2-D convolution while maintaining the dimensions of the input(except for the features dimension) +class CNN_layer(nn.Module): + def __init__(self, + in_ch, + out_ch, + kernel_size, + dropout, + bias=True): + super(CNN_layer, self).__init__() + self.kernel_size = kernel_size + padding = ( + (kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) # padding so that both dimensions are maintained + assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1 + + self.block1 = [nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=padding, dilation=(1, 1)), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ] + + self.block1 = nn.Sequential(*self.block1) + + def forward(self, x): + output = self.block1(x) + return output + + +class FPN(nn.Module): + def __init__(self, in_ch, + out_ch, + kernel, # (3,1) + dropout, + reduction, + ): + super(FPN, self).__init__() + kernel_size = kernel if isinstance(kernel, (tuple, list)) else (kernel, kernel) + padding = ((kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) + pad1 = (padding[0], padding[1]) + pad2 = (padding[0] + pad1[0], padding[1] + pad1[1]) + pad3 = (padding[0] + pad2[0], padding[1] + pad2[1]) + dil1 = (1, 1) + dil2 = (1 + pad1[0], 1 + pad1[1]) + dil3 = (1 + pad2[0], 1 + pad2[1]) + self.block1 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad1, dilation=dil1), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block2 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad2, dilation=dil2), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block3 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad3, dilation=dil3), + nn.BatchNorm2d(out_ch), + 
nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.pooling = nn.AdaptiveAvgPool2d((1, 1)) # Action Context. + self.compress = nn.Conv2d(out_ch * 3 + in_ch, + out_ch, + kernel_size=(1, 1)) # PRELU is outside the loop, check at the end of the code. + + def forward(self, x): + b, dim, joints, seq = x.shape + global_action = F.interpolate(self.pooling(x), (joints, seq)) + out = torch.cat((self.block1(x), self.block2(x), self.block3(x), global_action), dim=1) + out = self.compress(out) + return out + + +def mish(x): + return (x * torch.tanh(F.softplus(x))) + + +class ConvTemporalGraphical(nn.Module): + # Source : https://github.com/yysijie/st-gcn/blob/master/net/st_gcn.py + r"""The basic module for applying a graph convolution. + Args: + Shape: + - Input: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Output: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. + """ + + def __init__(self, time_dim, joints_dim, domain, interpratable): + super(ConvTemporalGraphical, self).__init__() + + if domain == "time": + # learnable, graph-agnostic 3-d adjacency matrix(or edge importance matrix) + size = joints_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(time_dim, size, size)) + self.domain = 'nctv,tvw->nctw' + else: + self.domain = 'nctv,ntvw->nctw' + elif domain == "space": + size = time_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(joints_dim, size, size)) + self.domain = 'nctv,vtq->ncqv' + else: + self.domain = 'nctv,nvtq->ncqv' + if not interpratable: + stdv = 1. / math.sqrt(self.A.size(1)) + self.A.data.uniform_(-stdv, stdv) + + def forward(self, x): + x = torch.einsum(self.domain, (x, self.A)) + return x.contiguous() + + +class Map2Adj(nn.Module): + def __init__(self, + in_ch, + time_dim, + joints_dim, + domain, + dropout, + ): + super(Map2Adj, self).__init__() + self.domain = domain + inter_ch = in_ch // 2 + self.time_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(time_dim, 1), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, time_dim, kernel_size=1, bias=False), + ) + self.joint_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(1, joints_dim), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, joints_dim, kernel_size=1, bias=False), + ) + + if self.domain == "space": + ch = joints_dim + self.perm1 = (0, 1, 2, 3) + self.perm2 = (0, 3, 2, 1) + if self.domain == "time": + ch = time_dim + self.perm1 = (0, 2, 1, 3) + self.perm2 = (0, 1, 2, 3) + + inter_ch = ch # // 2 + self.expansor = nn.Sequential(nn.Conv2d(ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(inter_ch, ch, kernel_size=1, bias=False), + ) + self.time_compress.apply(self._init_weights) + self.joint_compress.apply(self._init_weights) + self.expansor.apply(self._init_weights) + + def _init_weights(self, m, gain=0.05): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + if isinstance(m, (nn.Conv2d, 
nn.Conv1d)): + torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, dims, seq, joints = x.shape + dim_seq = self.time_compress(x) + dim_space = self.joint_compress(x) + o = torch.matmul(dim_space.permute(self.perm1), dim_seq.permute(self.perm2)) + Adj = self.expansor(o) + return Adj + + +class Domain_GCNN_layer(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. + :in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + kernel_size, + stride, + time_dim, + joints_dim, + domain, + interpratable, + dropout, + bias=True): + + super(Domain_GCNN_layer, self).__init__() + self.kernel_size = kernel_size + assert self.kernel_size[0] % 2 == 1 + assert self.kernel_size[1] % 2 == 1 + padding = ((self.kernel_size[0] - 1) // 2, (self.kernel_size[1] - 1) // 2) + self.interpratable = interpratable + self.domain = domain + + self.gcn = ConvTemporalGraphical(time_dim, joints_dim, domain, interpratable) + self.tcn = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + (self.kernel_size[0], self.kernel_size[1]), + (stride, stride), + padding, + ), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ) + + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + if self.interpratable: + self.map_to_adj = Map2Adj(in_ch, + time_dim, + joints_dim, + domain, + dropout, + ) + else: + self.map_to_adj = nn.Identity() + self.prelu = nn.PReLU() + + def forward(self, x): + # assert A.shape[0] == self.kernel_size[1], print(A.shape[0],self.kernel_size) + res = self.residual(x) + self.Adj = self.map_to_adj(x) + if self.interpratable: + self.gcn.A = self.Adj + x1 = self.gcn(x) + x2 = self.tcn(x1) + x3 = x2 + res + x4 = self.prelu(x3) + return x4 + + +# Dynamic SpatioTemporal Decompose Graph Convolutions (DSTD-GC) +class DSTD_GC(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ : in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + interpratable, + kernel_size, + stride, + time_dim, + joints_dim, + reduction, + dropout): + super(DSTD_GC, self).__init__() + self.dsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "space", interpratable, dropout) + self.tsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "time", interpratable, dropout) + + self.compressor = nn.Sequential(nn.Conv2d(out_ch * 2, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch), + nn.PReLU(), + SE.SELayer2d(out_ch, reduction=reduction), + ) + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + + # Weighting features + out_ch_c = out_ch // 2 if out_ch // 2 > 1 else 1 + self.global_norm = nn.BatchNorm2d(in_ch) + self.conv_s = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.conv_t = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map_s = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.map_t = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.prelu1 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + self.prelu2 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + + def _get_stats_(self, x): + global_avg_pool = x.mean((3, 2)).mean(1, keepdims=True) + global_avg_pool_features = x.mean(3).mean(1) + global_std_pool = x.std((3, 2)).std(1, keepdims=True) + global_std_pool_features = x.std(3).std(1) + return torch.cat(( + global_avg_pool, + global_avg_pool_features, + global_std_pool, + global_std_pool_features, + ), + dim=1) + + def forward(self, x): + b, dim, seq, joints = x.shape # 64, 3, 10, 22 + xn = self.global_norm(x) + + stats = self._get_stats_(xn) + w1 = torch.cat((self.conv_s(xn).view(b, -1), stats), dim=1) + stats = self._get_stats_(xn) + w2 = torch.cat((self.conv_t(xn).view(b, -1), stats), dim=1) + self.w1 = self.map_s(w1) + self.w2 = self.map_t(w2) + w1 = self.w1[..., None, None] + w2 = self.w2[..., None, None] + + x1 = self.dsgn(xn) + x2 = self.tsgn(xn) + out = torch.cat((self.prelu1(w1 * x1), self.prelu2(w2 * x2)), dim=1) + out = self.compressor(out) + return torch.clip(out + self.residual(xn), -1e5, 1e5) + + +class ContextLayer(nn.Module): + def __init__(self, + in_ch, + hidden_ch, + output_seq, + input_seq, + joints, + dims=3, + reduction=8, + dropout=0.1, + ): + super(ContextLayer, self).__init__() + self.n_output = output_seq + self.n_joints = joints + self.n_input = input_seq + self.context_conv1 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + + 
self.context_conv2 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, (input_seq, 1), bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.context_conv3 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.map1 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map2 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map3 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fmap_s = nn.Sequential(nn.Linear(self.n_output * 3, self.n_joints, bias=False), + nn.BatchNorm1d(self.n_joints), + nn.Dropout(dropout, inplace=True), ) + + self.fmap_t = nn.Sequential(nn.Linear(self.n_output * 3, self.n_output, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), ) + + # inter_ch = self.n_joints # // 2 + self.norm_map = nn.Sequential(nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + SE.SELayer1d(self.n_output, reduction=reduction), + nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fconv = nn.Sequential(nn.Conv2d(1, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + nn.Conv2d(dims, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + ) + self.SE = SE.SELayer2d(self.n_output, reduction=reduction) + + def forward(self, x): + b, _, seq, joint_dim = x.shape + y1 = self.context_conv1(x).max(-1)[0].max(-1)[0] + y2 = self.context_conv2(x).view(b, -1, joint_dim).max(-1)[0] + ym = self.context_conv3(x).mean((2, 3)) + y = torch.cat((self.map1(y1), self.map2(y2), self.map3(ym)), dim=1) + self.joints = self.fmap_s(y) + self.displacements = self.fmap_t(y) # .cumsum(1) + self.seq_joints = torch.bmm(self.displacements.unsqueeze(2), self.joints.unsqueeze(1)) + self.seq_joints_n = self.norm_map(self.seq_joints) + self.seq_joints_dims = self.fconv(self.seq_joints_n.view(b, 1, self.n_output, self.n_joints)) + o = self.SE(self.seq_joints_dims.permute(0, 2, 3, 1)) + return o + + +class CISTGCN(nn.Module): + """ + Shape: + - Input[0]: Input sequence in :math:`(N, in_ch,T_in, V)` format + - Output[0]: Output sequence in :math:`(N,T_out,in_ch, V)` format + where + :math:`N` is a batch size, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
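+
+    A rough usage sketch (assuming `arch` and `learn` expose the fields read
+    in __init__ below, e.g. loaded from one of the config YAMLs in this repo;
+    the batch size 64 is arbitrary). The model returns a 1-tuple, so note the
+    trailing comma when unpacking:
+        >>> model = CISTGCN(arch, learn)
+        >>> pred, = model(torch.randn(64, arch.model_params.input_n,
+        ...                           arch.model_params.joints, 3))
+        >>> pred.shape  # (64, output_n, joints, 3)
+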
+ :in_ch=number of channels for the coordiantes(default=3) + + + """ + + def __init__(self, arch, learn): + super(CISTGCN, self).__init__() + self.clipping = arch.model_params.clipping + + self.n_input = arch.model_params.input_n + self.n_output = arch.model_params.output_n + self.n_joints = arch.model_params.joints + self.n_txcnn_layers = arch.model_params.n_txcnn_layers + self.txc_kernel_size = [arch.model_params.txc_kernel_size] * 2 + self.input_gcn = arch.model_params.input_gcn + self.output_gcn = arch.model_params.output_gcn + self.reduction = arch.model_params.reduction + self.hidden_dim = arch.model_params.hidden_dim + + self.st_gcnns = nn.ModuleList() + self.txcnns = nn.ModuleList() + self.se = nn.ModuleList() + + self.in_conv = nn.ModuleList() + self.context_layer = nn.ModuleList() + self.trans = nn.ModuleList() + self.in_ch = 10 + self.model_tx = self.input_gcn.model_complexity.copy() + self.model_tx.insert(0, 1) # add 1 in the position 0. + + self.input_gcn.model_complexity.insert(0, self.in_ch) + self.input_gcn.model_complexity.append(self.in_ch) + # self.input_gcn.interpretable.insert(0, True) + # self.input_gcn.interpretable.append(False) + for i in range(len(self.input_gcn.model_complexity) - 1): + self.st_gcnns.append(DSTD_GC(self.input_gcn.model_complexity[i], + self.input_gcn.model_complexity[i + 1], + self.input_gcn.interpretable[i], + [1, 1], 1, self.n_input, self.n_joints, self.reduction, learn.dropout)) + + self.context_layer = ContextLayer(1, self.hidden_dim, + self.n_output, self.n_output, self.n_joints, + 3, self.reduction, learn.dropout + ) + + # at this point, we must permute the dimensions of the gcn network, from (N,C,T,V) into (N,T,C,V) + # with kernel_size[3,3] the dimensions of C,V will be maintained + self.txcnns.append(FPN(self.n_input, self.n_output, self.txc_kernel_size, 0., self.reduction)) + for i in range(1, self.n_txcnn_layers): + self.txcnns.append(FPN(self.n_output, self.n_output, self.txc_kernel_size, 0., self.reduction)) + + self.prelus = nn.ModuleList() + for j in range(self.n_txcnn_layers): + self.prelus.append(nn.PReLU()) + + self.dim_conversor = nn.Sequential(nn.Conv2d(self.in_ch, 3, 1, bias=False), + nn.BatchNorm2d(3), + nn.PReLU(), + nn.Conv2d(3, 3, 1, bias=False), + nn.PReLU(3), ) + + self.st_gcnns_o = nn.ModuleList() + self.output_gcn.model_complexity.insert(0, 3) + for i in range(len(self.output_gcn.model_complexity) - 1): + self.st_gcnns_o.append(DSTD_GC(self.output_gcn.model_complexity[i], + self.output_gcn.model_complexity[i + 1], + self.output_gcn.interpretable[i], + [1, 1], 1, self.n_joints, self.n_output, self.reduction, learn.dropout)) + + self.st_gcnns_o.apply(self._init_weights) + self.st_gcnns.apply(self._init_weights) + self.txcnns.apply(self._init_weights) + + def _init_weights(self, m, gain=0.1): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, seq, joints, dim = x.shape + vel = torch.zeros_like(x) + vel[:, :-1] = torch.diff(x, dim=1) + vel[:, -1] = x[:, -1] + acc = torch.zeros_like(x) + acc[:, :-1] = torch.diff(vel, dim=1) + acc[:, -1] = vel[:, -1] + x1 = torch.cat((x, acc, vel, torch.norm(vel, dim=-1, keepdim=True)), dim=-1) + x2 = x1.permute((0, 3, 1, 2)) # (torch.Size([64, 10, 22, 7]) + x3 = x2 + + for i in range(len(self.st_gcnns)): + x3 = self.st_gcnns[i](x3) + + x5 = x3.permute(0, 2, 
1, 3) # prepare the input for the Time-Extrapolator-CNN (NCTV->NTCV) + + x6 = self.prelus[0](self.txcnns[0](x5)) + for i in range(1, self.n_txcnn_layers): + x6 = self.prelus[i](self.txcnns[i](x6)) + x6 # residual connection + + x6 = self.dim_conversor(x6.permute(0, 2, 1, 3)).permute(0, 2, 3, 1) + x7 = x6.cumsum(1) + + act = self.context_layer(x7.reshape(b, 1, self.n_output, joints * x7.shape[-1])) + x8 = x7.permute(0, 3, 2, 1) + for i in range(len(self.st_gcnns_o)): + x8 = self.st_gcnns_o[i](x8) + x9 = x8.permute(0, 3, 2, 1) + act + + return x[:, -1:] + x9, diff --git a/h36m_detailed/short-400ms/16/files/short-STSGCN-20230104_1806-id2293_best.pth.tar b/h36m_detailed/short-400ms/16/files/short-STSGCN-20230104_1806-id2293_best.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..6376618bfcdeb7c25c8aa0a9bb7c50bc1599b557 --- /dev/null +++ b/h36m_detailed/short-400ms/16/files/short-STSGCN-20230104_1806-id2293_best.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c161bc7186d800db0d372133d13ac4bdf01ca89ca7d165e22386890088e64e6 +size 3827665 diff --git a/h36m_detailed/short-400ms/16/files/short-STSGCN-20230104_1806-id2293_last.pth.tar b/h36m_detailed/short-400ms/16/files/short-STSGCN-20230104_1806-id2293_last.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..6376618bfcdeb7c25c8aa0a9bb7c50bc1599b557 --- /dev/null +++ b/h36m_detailed/short-400ms/16/files/short-STSGCN-20230104_1806-id2293_last.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c161bc7186d800db0d372133d13ac4bdf01ca89ca7d165e22386890088e64e6 +size 3827665 diff --git a/h36m_detailed/short-400ms/32/files/config-20230105_1400-id6760.yaml b/h36m_detailed/short-400ms/32/files/config-20230105_1400-id6760.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e683dcb386c4631bbe412cfff13c9160491756b4 --- /dev/null +++ b/h36m_detailed/short-400ms/32/files/config-20230105_1400-id6760.yaml @@ -0,0 +1,105 @@ +architecture_config: + model: CISTGCN_0 + model_params: + input_n: 10 + joints: 22 + output_n: 10 + n_txcnn_layers: 4 + txc_kernel_size: 3 + reduction: 8 + hidden_dim: 64 + input_gcn: + model_complexity: + - 32 + - 32 + - 32 + - 32 + interpretable: + - true + - true + - true + - true + - true + output_gcn: + model_complexity: + - 3 + interpretable: + - true + clipping: 15 +learning_config: + WarmUp: 100 + normalize: false + dropout: 0.1 + weight_decay: 1e-4 + epochs: 50 + lr: 0.01 +# max_norm: 3 + scheduler: + type: StepLR + params: + step_size: 3000 + gamma: 0.8 + loss: + weights: "" + type: "mpjpe" + augmentations: + random_scale: + x: + - 0.95 + - 1.05 + y: + - 0.90 + - 1.10 + z: + - 0.95 + - 1.05 + random_noise: "" + random_flip: + x: true + y: "" + z: true + random_rotation: + x: + - -5 + - 5 + y: + - -180 + - 180 + z: + - -5 + - 5 + random_translation: + x: + - -0.10 + - 0.10 + y: + - -0.10 + - 0.10 + z: + - -0.10 + - 0.10 +environment_config: + actions: all + evaluate_from: 0 + is_norm: true + job: 16 + sample_rate: 2 + return_all_joints: true + save_grads: false + test_batch: 128 + train_batch: 128 +general_config: + data_dir: /ai-research/datasets/attention/ann_h3.6m/ + experiment_name: short-STSGCN + load_model_path: '' + log_path: /ai-research/notebooks/testing_repos/logdir/ + model_name_rel_path: short-STSGCN + save_all_intermediate_models: false + save_models: true + tensorboard: + num_mesh: 4 +meta_config: + comment: Adding Benchmarking for H3.6M, AMASS, CMU and 3DPW, ExPI on our new architecture + 
project: Attention + task: 3d motion prediction on 18, 22 and 25 joints testing on 18 and 32 joints + version: 0.1.3 \ No newline at end of file diff --git a/h36m_detailed/short-400ms/32/files/model.py b/h36m_detailed/short-400ms/32/files/model.py new file mode 100644 index 0000000000000000000000000000000000000000..b17fb78904d8c8b6644e1fe17bff1425c5cc19d7 --- /dev/null +++ b/h36m_detailed/short-400ms/32/files/model.py @@ -0,0 +1,597 @@ +import math + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from ..layers import deformable_conv, SE + +torch.manual_seed(0) + + +# This is the simple CNN layer,that performs a 2-D convolution while maintaining the dimensions of the input(except for the features dimension) +class CNN_layer(nn.Module): + def __init__(self, + in_ch, + out_ch, + kernel_size, + dropout, + bias=True): + super(CNN_layer, self).__init__() + self.kernel_size = kernel_size + padding = ( + (kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) # padding so that both dimensions are maintained + assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1 + + self.block1 = [nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=padding, dilation=(1, 1)), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ] + + self.block1 = nn.Sequential(*self.block1) + + def forward(self, x): + output = self.block1(x) + return output + + +class FPN(nn.Module): + def __init__(self, in_ch, + out_ch, + kernel, # (3,1) + dropout, + reduction, + ): + super(FPN, self).__init__() + kernel_size = kernel if isinstance(kernel, (tuple, list)) else (kernel, kernel) + padding = ((kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) + pad1 = (padding[0], padding[1]) + pad2 = (padding[0] + pad1[0], padding[1] + pad1[1]) + pad3 = (padding[0] + pad2[0], padding[1] + pad2[1]) + dil1 = (1, 1) + dil2 = (1 + pad1[0], 1 + pad1[1]) + dil3 = (1 + pad2[0], 1 + pad2[1]) + self.block1 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad1, dilation=dil1), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block2 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad2, dilation=dil2), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.block3 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=pad3, dilation=dil3), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.pooling = nn.AdaptiveAvgPool2d((1, 1)) # Action Context. + self.compress = nn.Conv2d(out_ch * 3 + in_ch, + out_ch, + kernel_size=(1, 1)) # PRELU is outside the loop, check at the end of the code. + + def forward(self, x): + b, dim, joints, seq = x.shape + global_action = F.interpolate(self.pooling(x), (joints, seq)) + out = torch.cat((self.block1(x), self.block2(x), self.block3(x), global_action), dim=1) + out = self.compress(out) + return out + + +def mish(x): + return (x * torch.tanh(F.softplus(x))) + + +class ConvTemporalGraphical(nn.Module): + # Source : https://github.com/yysijie/st-gcn/blob/master/net/st_gcn.py + r"""The basic module for applying a graph convolution. + Args: + Shape: + - Input: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Output: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
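+
+    The einsum strings chosen in __init__ carry the whole graph operation:
+    'nctv,tvw->nctw' contracts the joint axis against a per-frame (t) joint
+    adjacency, and 'nctv,vtq->ncqv' contracts the time axis against a
+    per-joint (v) temporal adjacency; the batched variants ('ntvw' / 'nvtq')
+    are used when the adjacency is regressed per sample by Map2Adj below.
+    A shape sketch (illustrative only):
+        >>> layer = ConvTemporalGraphical(time_dim=10, joints_dim=22,
+        ...                               domain="time", interpratable=False)
+        >>> layer(torch.randn(64, 3, 10, 22)).shape
+        torch.Size([64, 3, 10, 22])
+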
+ """ + + def __init__(self, time_dim, joints_dim, domain, interpratable): + super(ConvTemporalGraphical, self).__init__() + + if domain == "time": + # learnable, graph-agnostic 3-d adjacency matrix(or edge importance matrix) + size = joints_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(time_dim, size, size)) + self.domain = 'nctv,tvw->nctw' + else: + self.domain = 'nctv,ntvw->nctw' + elif domain == "space": + size = time_dim + if not interpratable: + self.A = nn.Parameter(torch.FloatTensor(joints_dim, size, size)) + self.domain = 'nctv,vtq->ncqv' + else: + self.domain = 'nctv,nvtq->ncqv' + if not interpratable: + stdv = 1. / math.sqrt(self.A.size(1)) + self.A.data.uniform_(-stdv, stdv) + + def forward(self, x): + x = torch.einsum(self.domain, (x, self.A)) + return x.contiguous() + + +class Map2Adj(nn.Module): + def __init__(self, + in_ch, + time_dim, + joints_dim, + domain, + dropout, + ): + super(Map2Adj, self).__init__() + self.domain = domain + inter_ch = in_ch // 2 + self.time_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(time_dim, 1), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, time_dim, kernel_size=1, bias=False), + ) + self.joint_compress = nn.Sequential(nn.Conv2d(in_ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.PReLU(), + nn.Conv2d(inter_ch, inter_ch, kernel_size=(1, joints_dim), bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.Conv2d(inter_ch, joints_dim, kernel_size=1, bias=False), + ) + + if self.domain == "space": + ch = joints_dim + self.perm1 = (0, 1, 2, 3) + self.perm2 = (0, 3, 2, 1) + if self.domain == "time": + ch = time_dim + self.perm1 = (0, 2, 1, 3) + self.perm2 = (0, 1, 2, 3) + + inter_ch = ch # // 2 + self.expansor = nn.Sequential(nn.Conv2d(ch, inter_ch, kernel_size=1, bias=False), + nn.BatchNorm2d(inter_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(inter_ch, ch, kernel_size=1, bias=False), + ) + self.time_compress.apply(self._init_weights) + self.joint_compress.apply(self._init_weights) + self.expansor.apply(self._init_weights) + + def _init_weights(self, m, gain=0.05): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + if isinstance(m, (nn.Conv2d, nn.Conv1d)): + torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, dims, seq, joints = x.shape + dim_seq = self.time_compress(x) + dim_space = self.joint_compress(x) + o = torch.matmul(dim_space.permute(self.perm1), dim_seq.permute(self.perm2)) + Adj = self.expansor(o) + return Adj + + +class Domain_GCNN_layer(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
+ :in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + kernel_size, + stride, + time_dim, + joints_dim, + domain, + interpratable, + dropout, + bias=True): + + super(Domain_GCNN_layer, self).__init__() + self.kernel_size = kernel_size + assert self.kernel_size[0] % 2 == 1 + assert self.kernel_size[1] % 2 == 1 + padding = ((self.kernel_size[0] - 1) // 2, (self.kernel_size[1] - 1) // 2) + self.interpratable = interpratable + self.domain = domain + + self.gcn = ConvTemporalGraphical(time_dim, joints_dim, domain, interpratable) + self.tcn = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + (self.kernel_size[0], self.kernel_size[1]), + (stride, stride), + padding, + ), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + ) + + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + if self.interpratable: + self.map_to_adj = Map2Adj(in_ch, + time_dim, + joints_dim, + domain, + dropout, + ) + else: + self.map_to_adj = nn.Identity() + self.prelu = nn.PReLU() + + def forward(self, x): + # assert A.shape[0] == self.kernel_size[1], print(A.shape[0],self.kernel_size) + res = self.residual(x) + self.Adj = self.map_to_adj(x) + if self.interpratable: + self.gcn.A = self.Adj + x1 = self.gcn(x) + x2 = self.tcn(x1) + x3 = x2 + res + x4 = self.prelu(x3) + return x4 + + +# Dynamic SpatioTemporal Decompose Graph Convolutions (DSTD-GC) +class DSTD_GC(nn.Module): + """ + Shape: + - Input[0]: Input graph sequence in :math:`(N, in_ch, T_{in}, V)` format + - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format + - Output[0]: Outpu graph sequence in :math:`(N, out_ch, T_{out}, V)` format + where + :math:`N` is a batch size, + :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
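+
+    The gating vectors w1/w2 built in forward concatenate the conv_s/conv_t
+    summaries (out_ch values each) with _get_stats_, i.e. a global mean and
+    std plus per-frame means and stds (2 + 2 * time_dim values), which is
+    exactly the in_features of map_s/map_t: out_ch + 2 + time_dim * 2.
+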
+ : in_ch= dimension of coordinates + : out_ch=dimension of coordinates + + + """ + + def __init__(self, + in_ch, + out_ch, + interpratable, + kernel_size, + stride, + time_dim, + joints_dim, + reduction, + dropout): + super(DSTD_GC, self).__init__() + self.dsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "space", interpratable, dropout) + self.tsgn = Domain_GCNN_layer(in_ch, out_ch, kernel_size, stride, + time_dim, joints_dim, "time", interpratable, dropout) + + self.compressor = nn.Sequential(nn.Conv2d(out_ch * 2, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch), + nn.PReLU(), + SE.SELayer2d(out_ch, reduction=reduction), + ) + if stride != 1 or in_ch != out_ch: + self.residual = nn.Sequential(nn.Conv2d(in_ch, + out_ch, + kernel_size=1, + stride=(1, 1)), + nn.BatchNorm2d(out_ch), + ) + else: + self.residual = nn.Identity() + + # Weighting features + out_ch_c = out_ch // 2 if out_ch // 2 > 1 else 1 + self.global_norm = nn.BatchNorm2d(in_ch) + self.conv_s = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.conv_t = nn.Sequential(nn.Conv2d(in_ch, out_ch_c, (time_dim, 1), bias=False), + nn.BatchNorm2d(out_ch_c), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Conv2d(out_ch_c, out_ch, (1, joints_dim), bias=False), + nn.BatchNorm2d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map_s = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.map_t = nn.Sequential(nn.Linear(out_ch + 2 + time_dim * 2, out_ch, bias=False), + nn.BatchNorm1d(out_ch), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + nn.Linear(out_ch, out_ch, bias=False), + ) + self.prelu1 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + self.prelu2 = nn.Sequential(nn.BatchNorm2d(out_ch), + nn.PReLU(), + ) + + def _get_stats_(self, x): + global_avg_pool = x.mean((3, 2)).mean(1, keepdims=True) + global_avg_pool_features = x.mean(3).mean(1) + global_std_pool = x.std((3, 2)).std(1, keepdims=True) + global_std_pool_features = x.std(3).std(1) + return torch.cat(( + global_avg_pool, + global_avg_pool_features, + global_std_pool, + global_std_pool_features, + ), + dim=1) + + def forward(self, x): + b, dim, seq, joints = x.shape # 64, 3, 10, 22 + xn = self.global_norm(x) + + stats = self._get_stats_(xn) + w1 = torch.cat((self.conv_s(xn).view(b, -1), stats), dim=1) + stats = self._get_stats_(xn) + w2 = torch.cat((self.conv_t(xn).view(b, -1), stats), dim=1) + self.w1 = self.map_s(w1) + self.w2 = self.map_t(w2) + w1 = self.w1[..., None, None] + w2 = self.w2[..., None, None] + + x1 = self.dsgn(xn) + x2 = self.tsgn(xn) + out = torch.cat((self.prelu1(w1 * x1), self.prelu2(w2 * x2)), dim=1) + out = self.compressor(out) + return torch.clip(out + self.residual(xn), -1e5, 1e5) + + +class ContextLayer(nn.Module): + def __init__(self, + in_ch, + hidden_ch, + output_seq, + input_seq, + joints, + dims=3, + reduction=8, + dropout=0.1, + ): + super(ContextLayer, self).__init__() + self.n_output = output_seq + self.n_joints = joints + self.n_input = input_seq + self.context_conv1 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + + 
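+        # Downstream (see forward), fmap_t's per-frame displacements and
+        # fmap_s's per-joint weights are combined by a batched outer product,
+        # torch.bmm of (b, n_output, 1) with (b, 1, n_joints), into a
+        # (b, n_output, n_joints) context map that norm_map, fconv and the SE
+        # block then lift to per-coordinate corrections.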
self.context_conv2 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, (input_seq, 1), bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.context_conv3 = nn.Sequential(nn.Conv2d(in_ch, hidden_ch, 1, bias=False), + nn.BatchNorm2d(hidden_ch), + nn.PReLU(), + ) + self.map1 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map2 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + self.map3 = nn.Sequential(nn.Linear(hidden_ch, self.n_output, bias=False), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fmap_s = nn.Sequential(nn.Linear(self.n_output * 3, self.n_joints, bias=False), + nn.BatchNorm1d(self.n_joints), + nn.Dropout(dropout, inplace=True), ) + + self.fmap_t = nn.Sequential(nn.Linear(self.n_output * 3, self.n_output, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), ) + + # inter_ch = self.n_joints # // 2 + self.norm_map = nn.Sequential(nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + SE.SELayer1d(self.n_output, reduction=reduction), + nn.Conv1d(self.n_output, self.n_output, 1, bias=False), + nn.BatchNorm1d(self.n_output), + nn.Dropout(dropout, inplace=True), + nn.PReLU(), + ) + + self.fconv = nn.Sequential(nn.Conv2d(1, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + nn.Conv2d(dims, dims, 1, bias=False), + nn.BatchNorm2d(dims), + nn.PReLU(), + ) + self.SE = SE.SELayer2d(self.n_output, reduction=reduction) + + def forward(self, x): + b, _, seq, joint_dim = x.shape + y1 = self.context_conv1(x).max(-1)[0].max(-1)[0] + y2 = self.context_conv2(x).view(b, -1, joint_dim).max(-1)[0] + ym = self.context_conv3(x).mean((2, 3)) + y = torch.cat((self.map1(y1), self.map2(y2), self.map3(ym)), dim=1) + self.joints = self.fmap_s(y) + self.displacements = self.fmap_t(y) # .cumsum(1) + self.seq_joints = torch.bmm(self.displacements.unsqueeze(2), self.joints.unsqueeze(1)) + self.seq_joints_n = self.norm_map(self.seq_joints) + self.seq_joints_dims = self.fconv(self.seq_joints_n.view(b, 1, self.n_output, self.n_joints)) + o = self.SE(self.seq_joints_dims.permute(0, 2, 3, 1)) + return o + + +class CISTGCN(nn.Module): + """ + Shape: + - Input[0]: Input sequence in :math:`(N, in_ch,T_in, V)` format + - Output[0]: Output sequence in :math:`(N,T_out,in_ch, V)` format + where + :math:`N` is a batch size, + :math:`T_{in}/T_{out}` is a length of input/output sequence, + :math:`V` is the number of graph nodes. 
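+
+    The forward pass augments each 3-D pose with finite-difference velocity
+    and acceleration plus the speed norm, giving 3 + 3 + 3 + 1 = 10 input
+    channels; this is why self.in_ch is hard-coded to 10 in __init__.
+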
+ :in_ch=number of channels for the coordiantes(default=3) + + + """ + + def __init__(self, arch, learn): + super(CISTGCN, self).__init__() + self.clipping = arch.model_params.clipping + + self.n_input = arch.model_params.input_n + self.n_output = arch.model_params.output_n + self.n_joints = arch.model_params.joints + self.n_txcnn_layers = arch.model_params.n_txcnn_layers + self.txc_kernel_size = [arch.model_params.txc_kernel_size] * 2 + self.input_gcn = arch.model_params.input_gcn + self.output_gcn = arch.model_params.output_gcn + self.reduction = arch.model_params.reduction + self.hidden_dim = arch.model_params.hidden_dim + + self.st_gcnns = nn.ModuleList() + self.txcnns = nn.ModuleList() + self.se = nn.ModuleList() + + self.in_conv = nn.ModuleList() + self.context_layer = nn.ModuleList() + self.trans = nn.ModuleList() + self.in_ch = 10 + self.model_tx = self.input_gcn.model_complexity.copy() + self.model_tx.insert(0, 1) # add 1 in the position 0. + + self.input_gcn.model_complexity.insert(0, self.in_ch) + self.input_gcn.model_complexity.append(self.in_ch) + # self.input_gcn.interpretable.insert(0, True) + # self.input_gcn.interpretable.append(False) + for i in range(len(self.input_gcn.model_complexity) - 1): + self.st_gcnns.append(DSTD_GC(self.input_gcn.model_complexity[i], + self.input_gcn.model_complexity[i + 1], + self.input_gcn.interpretable[i], + [1, 1], 1, self.n_input, self.n_joints, self.reduction, learn.dropout)) + + self.context_layer = ContextLayer(1, self.hidden_dim, + self.n_output, self.n_output, self.n_joints, + 3, self.reduction, learn.dropout + ) + + # at this point, we must permute the dimensions of the gcn network, from (N,C,T,V) into (N,T,C,V) + # with kernel_size[3,3] the dimensions of C,V will be maintained + self.txcnns.append(FPN(self.n_input, self.n_output, self.txc_kernel_size, 0., self.reduction)) + for i in range(1, self.n_txcnn_layers): + self.txcnns.append(FPN(self.n_output, self.n_output, self.txc_kernel_size, 0., self.reduction)) + + self.prelus = nn.ModuleList() + for j in range(self.n_txcnn_layers): + self.prelus.append(nn.PReLU()) + + self.dim_conversor = nn.Sequential(nn.Conv2d(self.in_ch, 3, 1, bias=False), + nn.BatchNorm2d(3), + nn.PReLU(), + nn.Conv2d(3, 3, 1, bias=False), + nn.PReLU(3), ) + + self.st_gcnns_o = nn.ModuleList() + self.output_gcn.model_complexity.insert(0, 3) + for i in range(len(self.output_gcn.model_complexity) - 1): + self.st_gcnns_o.append(DSTD_GC(self.output_gcn.model_complexity[i], + self.output_gcn.model_complexity[i + 1], + self.output_gcn.interpretable[i], + [1, 1], 1, self.n_joints, self.n_output, self.reduction, learn.dropout)) + + self.st_gcnns_o.apply(self._init_weights) + self.st_gcnns.apply(self._init_weights) + self.txcnns.apply(self._init_weights) + + def _init_weights(self, m, gain=0.1): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform_(m.weight, gain=gain) + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # torch.nn.init.xavier_normal_(m.weight, gain=gain) + if isinstance(m, nn.PReLU): + torch.nn.init.constant_(m.weight, 0.25) + + def forward(self, x): + b, seq, joints, dim = x.shape + vel = torch.zeros_like(x) + vel[:, :-1] = torch.diff(x, dim=1) + vel[:, -1] = x[:, -1] + acc = torch.zeros_like(x) + acc[:, :-1] = torch.diff(vel, dim=1) + acc[:, -1] = vel[:, -1] + x1 = torch.cat((x, acc, vel, torch.norm(vel, dim=-1, keepdim=True)), dim=-1) + x2 = x1.permute((0, 3, 1, 2)) # (torch.Size([64, 10, 22, 7]) + x3 = x2 + + for i in range(len(self.st_gcnns)): + x3 = self.st_gcnns[i](x3) + + x5 = x3.permute(0, 2, 
1, 3)  # prepare the input for the Time-Extrapolator-CNN (NCTV->NTCV)
+
+        x6 = self.prelus[0](self.txcnns[0](x5))
+        for i in range(1, self.n_txcnn_layers):
+            x6 = self.prelus[i](self.txcnns[i](x6)) + x6  # residual connection
+
+        x6 = self.dim_conversor(x6.permute(0, 2, 1, 3)).permute(0, 2, 3, 1)
+        x7 = x6.cumsum(1)  # accumulate per-frame displacements into positions
+
+        act = self.context_layer(x7.reshape(b, 1, self.n_output, joints * x7.shape[-1]))
+        x8 = x7.permute(0, 3, 2, 1)
+        for i in range(len(self.st_gcnns_o)):
+            x8 = self.st_gcnns_o[i](x8)
+        x9 = x8.permute(0, 3, 2, 1) + act
+
+        return x[:, -1:] + x9,  # a 1-tuple: prediction as offset from the last observed pose
diff --git a/h36m_detailed/short-400ms/32/files/short-STSGCN-20230105_1400-id6760_best.pth.tar b/h36m_detailed/short-400ms/32/files/short-STSGCN-20230105_1400-id6760_best.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..e66437e510bc5b29af69867ea9b63988363caa63
--- /dev/null
+++ b/h36m_detailed/short-400ms/32/files/short-STSGCN-20230105_1400-id6760_best.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:565aa3f07715a52021a481065af53bf6b6f2e438a1fb8ea1cc5ea3ed0ccbd715
+size 6026705
diff --git a/h36m_detailed/short-400ms/32/files/short-STSGCN-20230105_1400-id6760_last.pth.tar b/h36m_detailed/short-400ms/32/files/short-STSGCN-20230105_1400-id6760_last.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..e66437e510bc5b29af69867ea9b63988363caa63
--- /dev/null
+++ b/h36m_detailed/short-400ms/32/files/short-STSGCN-20230105_1400-id6760_last.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:565aa3f07715a52021a481065af53bf6b6f2e438a1fb8ea1cc5ea3ed0ccbd715
+size 6026705
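The *.pth.tar entries above are Git LFS pointers, so the actual weights are only present after running git lfs pull. A minimal restore sketch (assuming, as is common for .pth.tar checkpoints, that the archive is a torch-serialized dict; the 'state_dict' key and the map_location choice are assumptions, not confirmed by this diff):

import torch

ckpt = torch.load(
    "h36m_detailed/short-400ms/32/files/short-STSGCN-20230105_1400-id6760_best.pth.tar",
    map_location="cpu",
)
# Fall back to the raw object if the checkpoint is already a bare state dict.
state = ckpt.get("state_dict", ckpt) if isinstance(ckpt, dict) else ckpt
# model = CISTGCN(arch, learn)   # arch/learn built from the matching config YAML above
# model.load_state_dict(state)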