import os
import time
from rdkit import Chem
from rdkit import RDLogger
from torch.utils.data import Dataset
import torch.nn.functional as F
from tqdm import tqdm
RDLogger.DisableLog('rdApp.*')
import torch
import torch.nn as nn
import torch.optim as optim
import pickle
import numpy as np
import matplotlib.pyplot as plt
import math
import dgl
import networkx as nx


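# QM9 molecules contain only H, C, N, O and F; map atomic numbers to contiguous
# indices (and back) so atom types can be looked up in an nn.Embedding table.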
atom_number_index_dict = {
    1: 0,  # H
    6: 1,  # C
    7: 2,  # N
    8: 3,  # O
    9: 4,  # F
}
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
atom_index_number_dict = {v: k for k, v in atom_number_index_dict.items()}
max_atom_number = max(atom_number_index_dict.keys())
atom_number2index_tensor = torch.full((max_atom_number + 1,), -1, dtype=torch.long)
for k, v in atom_number_index_dict.items():
    atom_number2index_tensor[k] = v

atom_index2number_tensor = torch.tensor([atom_index_number_dict[i] for i in range(len(atom_index_number_dict))])
def atom_number2index(atom_numbers):
    return atom_number2index_tensor[atom_numbers]
def atom_index2number(atom_indexes):
    return atom_index2number_tensor[atom_indexes]


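# Load QM9 through DGL; each graph stores per-node coordinates "R" [num_nodes, 3]
# and atomic numbers "Z" [num_nodes], plus the requested graph-level labels.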
from dgl.data import QM9Dataset
from torch.utils.data import SubsetRandomSampler
from dgl.dataloading import GraphDataLoader
from multiprocessing import Pool

dataset = QM9Dataset(label_keys=['mu', 'gap'], cutoff=5.0)
dataset_length = len(dataset)
train_idx = torch.arange(dataset_length)
# def preprocess_graph(data):
#     g, label = data
#     g.ndata["Z_index"] = atom_number2index(g.ndata["Z"])
#     return g, label

# def preprocess_dataset(dataset):
#     with Pool(processes=4) as pool:  # number of worker processes
#         with tqdm(total=len(dataset)) as pbar:  # initialize the progress bar
#             results = []
#             for result in pool.imap(preprocess_graph, dataset):  # process items incrementally with imap
#                 results.append(result)
#                 pbar.update(1)  # advance the progress bar
#     return results

# # preprocess the dataset with multiprocessing
# dataset = preprocess_dataset(dataset)

# def collate_fn(batch):
#     # print(batch)
#     graphs, labels = map(list, zip(*batch))
#     for g in graphs:
#         pass
#         # g.ndata["Z_index"] = atom_number2index(g.ndata["Z"])
#         # g.ndata["R"]->the coordinates of each atom[num_nodes,3], g.ndata["Z"]->the atomic number(H:1,C:6) [num_nodes]
#         # g.ndata["Z_index"] = torch.tensor([atom_number2index(z.item()) for z in g.ndata["Z"]])
#     batched_graph = dgl.batch(graphs)
#     return batched_graph, torch.stack(labels)
myGLoader = GraphDataLoader(dataset, batch_size=32, pin_memory=True, num_workers=8)


# for batch in tqdm(myGLoader):
#     pass
#     # print(batch)
    


from functools import partial
import sys
sys.path.append("lib")
from lib.metrics import sce_loss

class GMae(nn.Module):
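    """Graph masked autoencoder: mask a fraction of node features, encode the
    corrupted graph, and reconstruct the masked features with an SCE loss."""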
    def __init__(self, encoder,decoder,
                 in_dim,hidden_dim,out_dim,mask_rate=0.3,replace_rate=0.1,alpha_l=2,
                 embedding_layer_classes=5,embedding_layer_dim=4):
        super(GMae, self).__init__()
        self.Z_embedding = nn.Embedding(embedding_layer_classes,embedding_layer_dim)
        self.encoder = encoder
        self.decoder = decoder
        self.mask_rate = mask_rate
        self.replace_rate = replace_rate
        self.alpha_l = alpha_l
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.embedding_layer_classes = embedding_layer_classes
        self.embedding_layer_dim = embedding_layer_dim
        self.enc_mask_token = nn.Parameter(torch.zeros(1,in_dim))
        self.criterion = partial(sce_loss, alpha=alpha_l)
        self.encoder_to_decoder = nn.Linear(hidden_dim, hidden_dim, bias=False)
    def encode_atom_index(self,Z_index):
        return self.Z_embedding(Z_index)
    def encoding_mask_noise(self, g, x, mask_rate=0.3):
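        # Randomly pick mask_rate of the nodes; their features get the learnable mask
        # token, and (when replace_rate > 0) a fraction of them are instead overwritten
        # with features copied from randomly chosen nodes.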
        num_nodes = g.num_nodes()
        perm = torch.randperm(num_nodes, device=x.device)
        # random masking
        num_mask_nodes = int(mask_rate * num_nodes)
        mask_nodes = perm[: num_mask_nodes]
        keep_nodes = perm[num_mask_nodes: ]

        if self.replace_rate > 0:
            num_noise_nodes = int(self.replace_rate * num_mask_nodes)
            perm_mask = torch.randperm(num_mask_nodes, device=x.device)
            token_nodes = mask_nodes[perm_mask[: int((1-self.replace_rate) * num_mask_nodes)]]
            noise_nodes = mask_nodes[perm_mask[-int(self.replace_rate * num_mask_nodes):]]
            noise_to_be_chosen = torch.randperm(num_nodes, device=x.device)[:num_noise_nodes]
            out_x = x.clone()
            out_x[token_nodes] = 0.0
            out_x[noise_nodes] = x[noise_to_be_chosen]
        else:
            out_x = x.clone()
            token_nodes = mask_nodes
            out_x[mask_nodes] = 0.0

        out_x[token_nodes] += self.enc_mask_token
        use_g = g.clone()

        return use_g, out_x, (mask_nodes, keep_nodes)    
    def mask_attr_prediction(self, g, x):
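        # mask node features, encode the corrupted graph, project the encoder output to
        # the decoder space, decode, and score reconstruction only on the masked nodes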
        use_g, use_x, (mask_nodes, keep_nodes) = self.encoding_mask_noise(g, x, self.mask_rate)
        enc_rep = self.encoder(use_g, use_x)
        # ---- attribute reconstruction ----
        rep = self.encoder_to_decoder(enc_rep)
        recon = self.decoder(use_g, rep)
        x_init = x[mask_nodes]
        x_rec = recon[mask_nodes]
        loss = self.criterion(x_rec, x_init)
        return loss

    def embed(self, g, x):
        rep = self.encoder(g, x)
        return rep
    


import dgl.nn as dglnn
import torch.nn as nn
import torch.nn.functional as F
class SimpleGNN(nn.Module):
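    """Two-layer GraphSAGE (mean aggregation), used as both encoder and decoder."""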
    def __init__(self, in_feats, hid_feats, out_feats):
        super().__init__()
        self.conv1 = dglnn.SAGEConv(
            in_feats=in_feats, out_feats=hid_feats,aggregator_type="mean")
        self.conv2 = dglnn.SAGEConv(
            in_feats=hid_feats, out_feats=out_feats,aggregator_type="mean")

    def forward(self, graph, inputs):
        # inputs are the node features
        h = self.conv1(graph, inputs)
        h = F.relu(h)
        h = self.conv2(graph, h)
        return h


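# the feature size of 7 is the 3-D coordinates R concatenated with the 4-dim atom-type embedding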
sage_enc = SimpleGNN(in_feats=7, hid_feats=4, out_feats=4)
sage_dec = SimpleGNN(in_feats=4, hid_feats=4, out_feats=7)
gmae = GMae(sage_enc, sage_dec, 7, 4, 7, replace_rate=0)
epochs = 20
optimizer = optim.Adam(gmae.parameters(), lr=1e-3)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


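# dry run: iterate once over the dataloader to sanity-check the atom-index mapping
# and the embedding lookup; the loss/backward steps below are left commented out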
print(f"epoch {0} started!")
gmae.train()
gmae.encoder.train()
gmae.decoder.train()
gmae.to(device)
loss_epoch = 0
# report CUDA errors synchronously (useful when debugging bad embedding indices)
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
for batch in tqdm(myGLoader):
    optimizer.zero_grad()
    batch_g, _ = batch
    R = batch_g.ndata["R"].to(device)
    # map atomic numbers (1, 6, 7, 8, 9) to embedding indices (0-4); raw atomic
    # numbers would overflow the 5-class embedding table
    Z_index = atom_number2index(batch_g.ndata["Z"].long()).to(device)
    Z_emb = gmae.encode_atom_index(Z_index)
    # feat = torch.cat([R,Z_emb],dim=1)
    # batch_g = batch_g.to(device)
    # loss = gmae.mask_attr_prediction(batch_g, feat)
    # loss.backward()
    # optimizer.step()
    # loss_epoch+=loss.item()



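# full training loop: checkpoint the model whenever the summed epoch loss improves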
from datetime import datetime

current_time = datetime.now().strftime("%m-%d@%H_%M")
best_loss = float("inf")
for epoch in range(epochs):
    print(f"epoch {epoch} started!")
    gmae.train()
    gmae.encoder.train()
    gmae.decoder.train()
    gmae.to(device)
    loss_epoch = 0
    for batch in myGLoader:
        optimizer.zero_grad()
        batch_g, _ = batch
        R = batch_g.ndata["R"].to(device)
        # map atomic numbers to contiguous embedding indices on the fly (the dataset
        # itself only provides raw atomic numbers in ndata["Z"])
        Z_index = atom_number2index(batch_g.ndata["Z"].long()).to(device)
        Z_emb = gmae.encode_atom_index(Z_index)
        feat = torch.cat([R,Z_emb],dim=1)
        batch_g = batch_g.to(device)
        loss = gmae.mask_attr_prediction(batch_g, feat)
        loss.backward()
        optimizer.step()
        loss_epoch+=loss.item()
    if loss_epoch < best_loss:
        formatted_loss_epoch = f"{loss_epoch:.3f}"
        save_path = f"./experiments/consumption/gmae/{current_time}/gmae_epoch-{epoch}-{formatted_loss_epoch}.pt"
        save_dir = os.path.dirname(save_path)
        os.makedirs(save_dir, exist_ok=True)
        torch.save(gmae.state_dict(), save_path)
        best_loss = loss_epoch
        print(f"best model saved-loss:{formatted_loss_epoch}-save_path:{save_path}")
    print(f"epoch {epoch}: loss {loss_epoch}")