File size: 3,457 Bytes
43c53fb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
from functools import partial
import sys

sys.path.append("lib")
from lib.metrics import sce_loss
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.nn as dglnn


class GMae(nn.Module):
    """Graph masked autoencoder.

    Masks a random fraction of node features, encodes the corrupted graph,
    projects the encoder output into the decoder space, and reconstructs the
    original features of the masked nodes, scoring them with ``sce_loss``
    (weighted by ``alpha_l``).
    """

    def __init__(self, encoder, decoder,
                 in_dim, hidden_dim, out_dim, mask_rate=0.3, replace_rate=0.1, alpha_l=2,
                 embedding_layer_classes=5, embedding_layer_dim=4):
        super(GMae, self).__init__()
        # Embedding for integer atom-class indices (Z); exposed to callers
        # via encode_atom_index -- presumably atomic numbers, TODO confirm.
        self.Z_embedding = nn.Embedding(embedding_layer_classes, embedding_layer_dim)
        self.encoder = encoder
        self.decoder = decoder
        self.mask_rate = mask_rate        # fraction of nodes whose features are corrupted
        self.replace_rate = replace_rate  # fraction of masked nodes given another node's features
        self.alpha_l = alpha_l
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.embedding_layer_classes = embedding_layer_classes
        self.embedding_layer_dim = embedding_layer_dim
        # Learnable token added onto the (zeroed) features of masked nodes.
        self.enc_mask_token = nn.Parameter(torch.zeros(1, in_dim))
        self.criterion = partial(sce_loss, alpha=alpha_l)
        self.encoder_to_decoder = nn.Linear(hidden_dim, hidden_dim, bias=False)

    def encode_atom_index(self, Z_index):
        """Look up embeddings for a tensor of atom-class indices."""
        return self.Z_embedding(Z_index)

    def encoding_mask_noise(self, g, x, mask_rate=0.3):
        """Corrupt node features for masked-autoencoder training.

        Selects ``mask_rate`` of the nodes uniformly at random. Of those,
        a ``replace_rate`` fraction receives features copied from random
        nodes ("noise" nodes); the remainder are zeroed and offset by the
        learnable mask token ("token" nodes).

        Returns:
            (graph clone, corrupted features, (mask_nodes, keep_nodes)).
        """
        num_nodes = g.num_nodes()
        perm = torch.randperm(num_nodes, device=x.device)
        # random masking
        num_mask_nodes = int(mask_rate * num_nodes)
        mask_nodes = perm[:num_mask_nodes]
        keep_nodes = perm[num_mask_nodes:]

        out_x = x.clone()
        if self.replace_rate > 0:
            num_noise_nodes = int(self.replace_rate * num_mask_nodes)
            perm_mask = torch.randperm(num_mask_nodes, device=x.device)
            # BUG FIX: the original sliced perm_mask[-num_noise_nodes:] for the
            # noise nodes. When num_noise_nodes == 0 (small num_mask_nodes *
            # replace_rate), [-0:] is the FULL slice, so every mask node became
            # a "noise" node and assigning an empty gather raised a shape
            # mismatch. The two independent int() roundings could also leave
            # some mask nodes neither tokenized nor noised. Splitting the
            # permutation at one boundary fixes both.
            split = num_mask_nodes - num_noise_nodes
            token_nodes = mask_nodes[perm_mask[:split]]
            noise_nodes = mask_nodes[perm_mask[split:]]
            out_x[token_nodes] = 0.0
            if num_noise_nodes > 0:
                noise_to_be_chosen = torch.randperm(num_nodes, device=x.device)[:num_noise_nodes]
                out_x[noise_nodes] = x[noise_to_be_chosen]
        else:
            token_nodes = mask_nodes
            out_x[mask_nodes] = 0.0

        out_x[token_nodes] += self.enc_mask_token
        use_g = g.clone()

        return use_g, out_x, (mask_nodes, keep_nodes)

    def mask_attr_prediction(self, g, x):
        """Mask, encode, decode, and return the reconstruction loss on masked nodes."""
        use_g, use_x, (mask_nodes, keep_nodes) = self.encoding_mask_noise(g, x, self.mask_rate)
        enc_rep = self.encoder(use_g, use_x)
        # ---- attribute reconstruction ----
        rep = self.encoder_to_decoder(enc_rep)
        recon = self.decoder(use_g, rep)
        x_init = x[mask_nodes]
        x_rec = recon[mask_nodes]
        loss = self.criterion(x_rec, x_init)
        return loss

    def embed(self, g, x):
        """Encode the uncorrupted graph (inference-time node embeddings)."""
        rep = self.encoder(g, x)
        return rep


class SimpleGnn(nn.Module):
    """Two-layer GraphSAGE network with mean aggregation and a ReLU between layers."""

    def __init__(self, in_feats, hid_feats, out_feats):
        super().__init__()
        # First SAGE layer: input features -> hidden features.
        self.conv1 = dglnn.SAGEConv(
            in_feats=in_feats,
            out_feats=hid_feats,
            aggregator_type="mean",
        )
        # Second SAGE layer: hidden features -> output features.
        self.conv2 = dglnn.SAGEConv(
            in_feats=hid_feats,
            out_feats=out_feats,
            aggregator_type="mean",
        )

    def forward(self, graph, inputs):
        """Apply conv1, ReLU, then conv2 over the given graph."""
        hidden = F.relu(self.conv1(graph, inputs))
        return self.conv2(graph, hidden)