HUANGYIFEI
committed on
Delete .virtual_documents
.virtual_documents/DataInspect.ipynb
DELETED
@@ -1,237 +0,0 @@
import os
import time
from rdkit import Chem
from rdkit import RDLogger
from torch.utils.data import Dataset
import torch.nn.functional as F
from tqdm import tqdm
RDLogger.DisableLog('rdApp.*')
import torch
import torch.nn as nn
import torch.optim as optim
import pickle
import numpy as np
import matplotlib.pyplot as plt
import math
import dgl
import networkx as nx


atom_number_index_dict = {
    1: 0,  # H
    6: 1,  # C
    7: 2,  # N
    8: 3,  # O
    9: 4   # F
}
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
atom_index_number_dict = {v: k for k, v in atom_number_index_dict.items()}
max_atom_number = max(atom_number_index_dict.keys())
atom_number2index_tensor = torch.full((max_atom_number + 1,), -1)
for k, v in atom_number_index_dict.items():
    atom_number2index_tensor[k] = v

atom_index2number_tensor = torch.tensor([atom_index_number_dict[i] for i in range(len(atom_index_number_dict))])

def atom_number2index(atom_numbers):
    return atom_number2index_tensor[atom_numbers]

def atom_index2number(atom_indexes):
    return atom_index2number_tensor[atom_indexes]

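Editor's note, not part of the original notebook: a quick sanity check of the lookup tables above; both directions are plain tensor indexing.

# Illustrative usage sketch: round-trip atomic numbers <-> embedding indices.
# atom_number2index maps atomic numbers (1, 6, 7, 8, 9) to dense indices (0-4);
# atom_index2number inverts the mapping.
print(atom_number2index(torch.tensor([1, 6, 8])))   # tensor([0, 1, 3])
print(atom_index2number(torch.tensor([0, 1, 3])))   # tensor([1, 6, 8])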
from dgl.data import QM9Dataset
from torch.utils.data import SubsetRandomSampler
from dgl.dataloading import GraphDataLoader
from multiprocessing import Pool

dataset = QM9Dataset(label_keys=['mu', 'gap'], cutoff=5.0)
dataset_length = len(dataset)
train_idx = torch.arange(dataset_length)
# def preprocess_graph(data):
#     g, label = data
#     g.ndata["Z_index"] = atom_number2index(g.ndata["Z"])
#     return g, label

# def preprocess_dataset(dataset):
#     with Pool(processes=4) as pool:  # set the number of worker processes
#         with tqdm(total=len(dataset)) as pbar:  # initialize the progress bar
#             results = []
#             for result in pool.imap(preprocess_graph, dataset):  # process items incrementally with imap
#                 results.append(result)
#                 pbar.update(1)  # advance the progress bar
#             return results

# # preprocess the dataset with multiprocessing
# dataset = preprocess_dataset(dataset)

# def collate_fn(batch):
#     # print(batch)
#     graphs, labels = map(list, zip(*batch))
#     for g in graphs:
#         pass
#         # g.ndata["Z_index"] = atom_number2index(g.ndata["Z"])
#         # g.ndata["R"] -> the coordinates of each atom [num_nodes, 3], g.ndata["Z"] -> the atomic number (H:1, C:6) [num_nodes]
#         # g.ndata["Z_index"] = torch.tensor([atom_number2index(z.item()) for z in g.ndata["Z"]])
#     batched_graph = dgl.batch(graphs)
#     return batched_graph, torch.stack(labels)
myGLoader = GraphDataLoader(dataset, batch_size=32, pin_memory=True, num_workers=8)


# for batch in tqdm(myGLoader):
#     pass
#     # print(batch)


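Editor's sketch, not part of the original notebook: if the precomputed "Z_index" feature from the commented-out code above is wanted, the collate function could be revived along these lines, assuming GraphDataLoader forwards collate_fn to the underlying torch DataLoader.

# Hypothetical collate_fn that attaches "Z_index" per batch, so downstream code
# can read batch_g.ndata["Z_index"] directly instead of remapping "Z" each step.
def collate_with_z_index(batch):
    graphs, labels = map(list, zip(*batch))
    for g in graphs:
        g.ndata["Z_index"] = atom_number2index(g.ndata["Z"])
    return dgl.batch(graphs), torch.stack(labels)

# myGLoader = GraphDataLoader(dataset, batch_size=32, pin_memory=True,
#                             num_workers=8, collate_fn=collate_with_z_index)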
from functools import partial
import sys
sys.path.append("lib")
from lib.metrics import sce_loss

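Editor's note: sce_loss is imported from a local lib/ package that is not part of this file. In GraphMAE-style masked autoencoders this is usually the scaled cosine error, so a minimal stand-in, under that assumption, might look like the sketch below.

# Hypothetical stand-in for lib.metrics.sce_loss (NOT the original implementation):
# scaled cosine error between reconstructed and original node features.
def sce_loss_sketch(x, y, alpha=2):
    x = F.normalize(x, p=2, dim=-1)
    y = F.normalize(y, p=2, dim=-1)
    return (1 - (x * y).sum(dim=-1)).pow(alpha).mean()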
class GMae(nn.Module):
    def __init__(self, encoder, decoder,
                 in_dim, hidden_dim, out_dim, mask_rate=0.3, replace_rate=0.1, alpha_l=2,
                 embedding_layer_classes=5, embedding_layer_dim=4):
        super(GMae, self).__init__()
        self.Z_embedding = nn.Embedding(embedding_layer_classes, embedding_layer_dim)
        self.encoder = encoder
        self.decoder = decoder
        self.mask_rate = mask_rate
        self.replace_rate = replace_rate
        self.alpha_l = alpha_l
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.embedding_layer_classes = embedding_layer_classes
        self.embedding_layer_dim = embedding_layer_dim
        self.enc_mask_token = nn.Parameter(torch.zeros(1, in_dim))
        self.criterion = partial(sce_loss, alpha=alpha_l)
        self.encoder_to_decoder = nn.Linear(hidden_dim, hidden_dim, bias=False)

    def encode_atom_index(self, Z_index):
        return self.Z_embedding(Z_index)

    def encoding_mask_noise(self, g, x, mask_rate=0.3):
        num_nodes = g.num_nodes()
        perm = torch.randperm(num_nodes, device=x.device)
        # random masking
        num_mask_nodes = int(mask_rate * num_nodes)
        mask_nodes = perm[:num_mask_nodes]
        keep_nodes = perm[num_mask_nodes:]

        if self.replace_rate > 0:
            num_noise_nodes = int(self.replace_rate * num_mask_nodes)
            perm_mask = torch.randperm(num_mask_nodes, device=x.device)
            token_nodes = mask_nodes[perm_mask[: int((1 - self.replace_rate) * num_mask_nodes)]]
            noise_nodes = mask_nodes[perm_mask[-int(self.replace_rate * num_mask_nodes):]]
            noise_to_be_chosen = torch.randperm(num_nodes, device=x.device)[:num_noise_nodes]
            out_x = x.clone()
            out_x[token_nodes] = 0.0
            out_x[noise_nodes] = x[noise_to_be_chosen]
        else:
            out_x = x.clone()
            token_nodes = mask_nodes
            out_x[mask_nodes] = 0.0

        out_x[token_nodes] += self.enc_mask_token
        use_g = g.clone()

        return use_g, out_x, (mask_nodes, keep_nodes)

    def mask_attr_prediction(self, g, x):
        use_g, use_x, (mask_nodes, keep_nodes) = self.encoding_mask_noise(g, x, self.mask_rate)
        enc_rep = self.encoder(use_g, use_x)
        # ---- attribute reconstruction ----
        rep = self.encoder_to_decoder(enc_rep)
        recon = self.decoder(use_g, rep)
        x_init = x[mask_nodes]
        x_rec = recon[mask_nodes]
        loss = self.criterion(x_rec, x_init)
        return loss

    def embed(self, g, x):
        rep = self.encoder(g, x)
        return rep


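Editor's note, not part of the original notebook: a worked example of the masking arithmetic in encoding_mask_noise above.

# With mask_rate=0.3 and replace_rate=0.1 on a 100-node graph:
#   num_mask_nodes = int(0.3 * 100) = 30 masked nodes
#   token_nodes    = int(0.9 * 30)  = 27 nodes overwritten with the learnable enc_mask_token
#   noise_nodes    = int(0.1 * 30)  = 3 nodes overwritten with features copied
#                                     from randomly chosen nodes of the graph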
import dgl.nn as dglnn
import torch.nn as nn
import torch.nn.functional as F

class SimpleGNN(nn.Module):
    def __init__(self, in_feats, hid_feats, out_feats):
        super().__init__()
        self.conv1 = dglnn.SAGEConv(
            in_feats=in_feats, out_feats=hid_feats, aggregator_type="mean")
        self.conv2 = dglnn.SAGEConv(
            in_feats=hid_feats, out_feats=out_feats, aggregator_type="mean")

    def forward(self, graph, inputs):
        # inputs are the node features
        h = self.conv1(graph, inputs)
        h = F.relu(h)
        h = self.conv2(graph, h)
        return h


sage_enc = SimpleGNN(in_feats=7, hid_feats=4, out_feats=4)
sage_dec = SimpleGNN(in_feats=4, hid_feats=4, out_feats=7)
gmae = GMae(sage_enc, sage_dec, 7, 4, 7, replace_rate=0)
epoches = 20
optimizer = optim.Adam(gmae.parameters(), lr=1e-3)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


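Editor's note, not part of the original notebook: how the feature sizes above fit together.

# in_feats=7 for the encoder because each node feature is the concatenation of
# the 3-D coordinates R and the 4-D atom-type embedding (embedding_layer_dim=4);
# the decoder's out_feats=7 reconstructs that same 7-D input, and the encoder's
# 4-D output matches GMae's hidden_dim=4 used by encoder_to_decoder.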
print(f"epoch {0} started!")
gmae.train()
gmae.encoder.train()
gmae.decoder.train()
gmae.to(device)
loss_epoch = 0
import os
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
for batch in tqdm(myGLoader):
    optimizer.zero_grad()
    batch_g, _ = batch
    R = batch_g.ndata["R"].to(device)
    # map raw atomic numbers (1, 6, 7, 8, 9) to embedding indices (0-4) before the
    # lookup; feeding "Z" directly would index past the 5-class embedding table
    Z_index = atom_number2index(batch_g.ndata["Z"]).to(device)
    Z_emb = gmae.encode_atom_index(Z_index)
    # feat = torch.cat([R, Z_emb], dim=1)
    # batch_g = batch_g.to(device)
    # loss = gmae.mask_attr_prediction(batch_g, feat)
    # loss.backward()
    # optimizer.step()
    # loss_epoch += loss.item()


from datetime import datetime

current_time = datetime.now().strftime("%m-%d@%H_%M")
best_loss = 10000
for epoch in range(epoches):
    print(f"epoch {epoch} started!")
    gmae.train()
    gmae.encoder.train()
    gmae.decoder.train()
    gmae.to(device)
    loss_epoch = 0
    for batch in myGLoader:
        optimizer.zero_grad()
        batch_g, _ = batch
        R = batch_g.ndata["R"].to(device)
        # Z_index = batch_g.ndata["Z_index"].to(device)  # only available if a collate_fn precomputes "Z_index"
        Z_index = atom_number2index(batch_g.ndata["Z"]).to(device)
        Z_emb = gmae.encode_atom_index(Z_index)
        feat = torch.cat([R, Z_emb], dim=1)
        batch_g = batch_g.to(device)
        loss = gmae.mask_attr_prediction(batch_g, feat)
        loss.backward()
        optimizer.step()
        loss_epoch += loss.item()
    if loss_epoch < best_loss:
        formatted_loss_epoch = f"{loss_epoch:.3f}"
        save_path = f"./experiments/consumption/gmae/{current_time}/gmae_epoch-{epoch}-{formatted_loss_epoch}.pt"
        save_dir = os.path.dirname(save_path)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir, exist_ok=True)
        torch.save(gmae.state_dict(), save_path)
        best_loss = loss_epoch
        print(f"best model saved-loss:{formatted_loss_epoch}-save_path:{save_path}")
    print(f"epoch {epoch}: loss {loss_epoch}")
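Editor's sketch, not part of the original notebook: after training, the saved state dict can be reloaded and the trained encoder reused through GMae.embed; best_ckpt_path is a hypothetical placeholder for whichever gmae_epoch-*.pt was written last.

# best_ckpt_path = "./experiments/consumption/gmae/<current_time>/gmae_epoch-<epoch>-<loss>.pt"  # hypothetical path
# gmae.load_state_dict(torch.load(best_ckpt_path, map_location=device))
gmae.eval()
with torch.no_grad():
    batch_g, _ = next(iter(myGLoader))
    R = batch_g.ndata["R"].to(device)
    Z_index = atom_number2index(batch_g.ndata["Z"]).to(device)
    feat = torch.cat([R, gmae.encode_atom_index(Z_index)], dim=1)
    node_embeddings = gmae.embed(batch_g.to(device), feat)  # [num_nodes, 4]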