HUANGYIFEI
committed on
add model.py
Browse files- Graph/GraphMAE_MQ9/model.py +90 -0
Graph/GraphMAE_MQ9/model.py
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from functools import partial
|
2 |
+
import sys
|
3 |
+
|
4 |
+
sys.path.append("lib")
|
5 |
+
from lib.metrics import sce_loss
|
6 |
+
import torch
|
7 |
+
import torch.nn as nn
|
8 |
+
import torch.nn.functional as F
|
9 |
+
import dgl.nn as dglnn
|
10 |
+
|
11 |
+
|
12 |
+
class GMae(nn.Module):
    """Graph Masked Autoencoder (GraphMAE-style) for node-feature reconstruction.

    Randomly corrupts a fraction of node features, encodes the corrupted
    graph, and trains a decoder to reconstruct the original features of the
    masked nodes under a scaled-cosine-error (SCE) loss.

    Args:
        encoder: GNN callable mapping ``(graph, features) -> hidden``.
        decoder: GNN callable mapping ``(graph, hidden) -> reconstruction``.
        in_dim: dimensionality of input node features.
        hidden_dim: dimensionality of the encoder output.
        out_dim: dimensionality of the decoder output (stored for reference).
        mask_rate: fraction of nodes whose features are corrupted.
        replace_rate: fraction of the masked nodes that receive features
            copied from random other nodes instead of the mask token.
        alpha_l: sharpening exponent passed to ``sce_loss``.
        embedding_layer_classes: number of distinct atom types (Z indices)
            — presumably atomic numbers for the MQ9 dataset; confirm upstream.
        embedding_layer_dim: size of the learned atom-type embedding.
    """

    def __init__(self, encoder, decoder,
                 in_dim, hidden_dim, out_dim, mask_rate=0.3, replace_rate=0.1, alpha_l=2,
                 embedding_layer_classes=5, embedding_layer_dim=4):
        super(GMae, self).__init__()
        self.Z_embedding = nn.Embedding(embedding_layer_classes, embedding_layer_dim)
        self.encoder = encoder
        self.decoder = decoder
        self.mask_rate = mask_rate
        self.replace_rate = replace_rate
        self.alpha_l = alpha_l
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.embedding_layer_classes = embedding_layer_classes
        self.embedding_layer_dim = embedding_layer_dim
        # Learnable token added to the (zeroed) features of masked nodes.
        self.enc_mask_token = nn.Parameter(torch.zeros(1, in_dim))
        self.criterion = partial(sce_loss, alpha=alpha_l)
        # Projects encoder output into the decoder's input space.
        self.encoder_to_decoder = nn.Linear(hidden_dim, hidden_dim, bias=False)

    def encode_atom_index(self, Z_index):
        """Embed integer atom-type indices into dense vectors."""
        return self.Z_embedding(Z_index)

    def encoding_mask_noise(self, g, x, mask_rate=0.3):
        """Corrupt node features for masked-autoencoder training.

        Selects ``mask_rate`` of the nodes. Most selected nodes have their
        features zeroed and replaced by the mask token; when
        ``self.replace_rate > 0`` a small share instead receives features
        copied from random other nodes.

        Returns:
            ``(use_g, out_x, (mask_nodes, keep_nodes))``: the cloned graph,
            the corrupted feature matrix, and the masked / untouched node ids.
        """
        num_nodes = g.num_nodes()
        perm = torch.randperm(num_nodes, device=x.device)
        # random masking
        num_mask_nodes = int(mask_rate * num_nodes)
        mask_nodes = perm[: num_mask_nodes]
        keep_nodes = perm[num_mask_nodes:]

        num_noise_nodes = int(self.replace_rate * num_mask_nodes) if self.replace_rate > 0 else 0
        if num_noise_nodes > 0:
            # Split the masked nodes: the first part gets the mask token, the
            # remainder gets features copied from random nodes. Computing the
            # token count as num_mask_nodes - num_noise_nodes guarantees every
            # masked node falls in exactly one part. (The previous
            # perm_mask[-int(...):] slice selected ALL masked nodes when the
            # noise count rounded to zero — crashing on the empty-source
            # assignment — and independent int() rounding could leave some
            # masked nodes uncorrupted.)
            perm_mask = torch.randperm(num_mask_nodes, device=x.device)
            token_nodes = mask_nodes[perm_mask[: num_mask_nodes - num_noise_nodes]]
            noise_nodes = mask_nodes[perm_mask[num_mask_nodes - num_noise_nodes:]]
            noise_to_be_chosen = torch.randperm(num_nodes, device=x.device)[:num_noise_nodes]
            out_x = x.clone()
            out_x[token_nodes] = 0.0
            out_x[noise_nodes] = x[noise_to_be_chosen]
        else:
            out_x = x.clone()
            token_nodes = mask_nodes
            out_x[mask_nodes] = 0.0

        out_x[token_nodes] += self.enc_mask_token
        use_g = g.clone()

        return use_g, out_x, (mask_nodes, keep_nodes)

    def mask_attr_prediction(self, g, x):
        """Compute the reconstruction loss for one masked-autoencoding pass."""
        use_g, use_x, (mask_nodes, keep_nodes) = self.encoding_mask_noise(g, x, self.mask_rate)
        enc_rep = self.encoder(use_g, use_x)
        # ---- attribute reconstruction ----
        rep = self.encoder_to_decoder(enc_rep)
        recon = self.decoder(use_g, rep)
        # The loss is evaluated on the masked nodes only.
        x_init = x[mask_nodes]
        x_rec = recon[mask_nodes]
        loss = self.criterion(x_rec, x_init)
        return loss

    def embed(self, g, x):
        """Encode uncorrupted features (inference-time node embeddings)."""
        rep = self.encoder(g, x)
        return rep
|
76 |
+
|
77 |
+
|
78 |
+
class SimpleGnn(nn.Module):
    """Two-layer GraphSAGE network: SAGEConv -> ReLU -> SAGEConv.

    Both convolutions use mean neighbourhood aggregation; the hidden width
    is ``hid_feats`` and the output width is ``out_feats``.
    """

    def __init__(self, in_feats, hid_feats, out_feats):
        super().__init__()
        self.conv1 = dglnn.SAGEConv(in_feats=in_feats, out_feats=hid_feats, aggregator_type="mean")
        self.conv2 = dglnn.SAGEConv(in_feats=hid_feats, out_feats=out_feats, aggregator_type="mean")

    def forward(self, graph, inputs):
        """Return per-node output features for ``inputs`` on ``graph``."""
        hidden = F.relu(self.conv1(graph, inputs))
        return self.conv2(graph, hidden)
|