import os
from tqdm import tqdm
import networkx as nx
import torch
from torch.utils.data import Dataset
atom_number_index_dict = {
    1: 0,  # H
    6: 1,  # C
    7: 2,  # N
    8: 3,  # O
    9: 4,  # F
}
atom_index_number_dict = {v: k for k, v in atom_number_index_dict.items()}
max_atom_number = max(atom_number_index_dict.keys())


def atom_number2index(atom_number):
    return atom_number_index_dict[atom_number]


def atom_index2number(atom_index):
    return atom_index_number_dict[atom_index]


class PreprocessedQM9Dataset(Dataset):
    """Wraps an iterable of (graph, label) pairs and precomputes per-atom type indices."""

    def __init__(self, dataset):
        self.dataset = dataset
        self.processed_data = []
        if dataset is not None:
            self._preprocess()

    def _preprocess(self):
        for i, (g, label) in enumerate(tqdm(self.dataset)):
            # Map raw atomic numbers (Z) to contiguous embedding indices.
            g.ndata["Z_index"] = torch.tensor(
                [atom_number2index(z.item()) for z in g.ndata["Z"]]
            )
            # Node features must be per-node tensors, so broadcast the sample
            # index to every node instead of assigning a bare int.
            g.ndata["sample_idx"] = torch.full((g.num_nodes(),), i, dtype=torch.long)
            self.processed_data.append((g, label))

    def __len__(self):
        return len(self.processed_data)

    def __getitem__(self, idx):
        return self.processed_data[idx]

    def save_dataset(self, save_dir):
        os.makedirs(save_dir, exist_ok=True)
        torch.save(self.processed_data, os.path.join(save_dir, "QM9_dataset_processed.pt"))

    def load_dataset(self, dataset_path):
        self.processed_data = torch.load(dataset_path)
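

# A minimal usage sketch, assuming the raw graphs come from dgl.data.QM9Dataset
# (which stores atomic numbers under g.ndata["Z"]); the label key "mu" and the
# output directory are illustrative choices, not requirements of this module.
if __name__ == "__main__":
    import dgl.data

    raw_dataset = dgl.data.QM9Dataset(label_keys=["mu"])
    processed = PreprocessedQM9Dataset(raw_dataset)
    print(f"Preprocessed {len(processed)} molecules")

    processed.save_dataset("./processed_qm9")

    # Reload later without re-running the preprocessing pass.
    reloaded = PreprocessedQM9Dataset(None)
    reloaded.load_dataset("./processed_qm9/QM9_dataset_processed.pt")
    g, label = reloaded[0]
    print(g.ndata["Z_index"], label)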