trrt8 commited on
Commit
87bf904
1 Parent(s): 225f016

initial commit

Browse files
.gitattributes CHANGED
@@ -57,3 +57,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ davis.csv filter=lfs diff=lfs merge=lfs -text
61
+ kiba.csv filter=lfs diff=lfs merge=lfs -text
62
+ pdbbind-2020-refined.csv filter=lfs diff=lfs merge=lfs -text
63
+ bindingdb-ic50.csv filter=lfs diff=lfs merge=lfs -text
64
+ bindingdb-kd.csv filter=lfs diff=lfs merge=lfs -text
65
+ davis-filter.csv filter=lfs diff=lfs merge=lfs -text
66
+ bindingdb-ki.csv filter=lfs diff=lfs merge=lfs -text
67
+ glaser.csv filter=lfs diff=lfs merge=lfs -text
68
+ pdbbind-2020-combined.csv filter=lfs diff=lfs merge=lfs -text
bindingdb-ic50.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25a6488d678b3a1a2b83d78ab900fe5c49b96ad6b116efde158b5c5bdd39b557
3
+ size 899074544
bindingdb-kd.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e18aaba78a1e2c0e9584fbc831954312d65e02deca70ce01b7514b5004a4c216
3
+ size 57281302
bindingdb-ki.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8b08f64f5954c6189561014f802c103a86bb78d6644460e3f6f31697376a1f6
3
+ size 259031100
data_sources.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ davis-filter.csv: https://www.kaggle.com/datasets/christang0002/davis-and-kiba
2
+ bindingdb-ic50.csv: https://tdcommons.ai/ (tdc python package)
3
+ bindingdb-kd.csv: https://tdcommons.ai/ (tdc python package)
4
+ bindingdb-ki.csv: https://tdcommons.ai/ (tdc python package)
5
+ davis.csv: https://tdcommons.ai/ (tdc python package)
6
+ kiba.csv: https://tdcommons.ai/ (tdc python package)
7
+ pdbbind-2020-combined.csv: https://www.pdbbind.org.cn/
8
+ pdbbind-2020-refined.csv: https://www.pdbbind.org.cn/
9
+ glaser.csv: https://huggingface.co/datasets/jglaser/binding_affinity
davis-filter.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:484eaefff41f82de0f39811b589db7ed390061c952bc4199353d9e1c3700cfff
3
+ size 8948685
davis.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34f698180a386c92ecc160da984d9d2c31bf1d153cb26a2d91d8eab4d7ec5fd7
3
+ size 22441866
glaser.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73550c83f7fc9315bbcfa363304231f00f88dbd503222d241a47c8551ef65cd8
3
+ size 1433278852
kiba.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56ac9893cbdc98ddc137a7ef4f5a5f00756da6ec5792b7ce9b2ccd270e4244d2
3
+ size 102460723
pdbbind-2020-combined.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7221192655553cdfd07df4a71386aad31873bb8e0f27ec7fe26536a56a9df868
3
+ size 11802666
pdbbind-2020-refined.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46ef8fb491e31ca6b9e6256742300b14d0cc545d0ebdf83cdd097155f98bbd99
3
+ size 3025349
standardize_data.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ from rdkit import Chem
3
+ from rdkit.Chem import MolToSmiles
4
+ from scipy.stats import zscore
5
+ from tqdm import tqdm
6
+ import numpy as np
7
+
8
def load_binding_affinity_dataset(csv_path,
                                  protein_col_idx,
                                  smiles_col_idx,
                                  affinity_col_idx,
                                  is_log10_affinity=True,
                                  canonicalize_smiles=True,
                                  affinity_unit="uM",
                                  delimiter=','):
    """
    Load a protein-ligand binding affinity dataset and preprocess it.

    Args:
        csv_path (str): Path to the CSV file.
        protein_col_idx (int): Column index containing protein sequences.
        smiles_col_idx (int): Column index containing molecule SMILES.
        affinity_col_idx (int): Column index containing binding affinities.
        is_log10_affinity (bool): Whether affinities are already -log10(M)
            values. Default is True.
        canonicalize_smiles (bool): Whether to canonicalize SMILES with RDKit;
            rows whose SMILES fail to parse are dropped. Default is True.
        affinity_unit (str): Unit of plain affinity values, "uM" or "nM".
            Only used when is_log10_affinity is False. Default is "uM".
        delimiter (str): Delimiter for the CSV file. Default is ','.

    Returns:
        pd.DataFrame: Processed DataFrame sorted by "affinity_norm"
            (descending) with columns "seq", "smiles_can", "affinity_uM",
            "neg_log10_affinity_M", "affinity_norm", plus the normalization
            parameters "affinity_mean" and "affinity_std" repeated per row.
    """
    # Load dataset
    df = pd.read_csv(csv_path, delimiter=delimiter)

    # Extract relevant columns; .copy() avoids SettingWithCopyWarning on the
    # column assignments below.
    df = df.iloc[:, [protein_col_idx, smiles_col_idx, affinity_col_idx]].copy()
    df.columns = ["seq", "smiles", "affinity"]

    # Canonicalize SMILES
    if canonicalize_smiles:
        def canonicalize(smiles):
            # Return RDKit canonical SMILES, or None when parsing fails.
            try:
                mol = Chem.MolFromSmiles(smiles)
                return MolToSmiles(mol, canonical=True) if mol else None
            except Exception:  # narrowed from bare except: keep SystemExit etc.
                return None

        # tqdm is imported at module level; progress_apply shows a progress bar.
        tqdm.pandas()
        df["smiles_can"] = df["smiles"].progress_apply(canonicalize)
        df = df[df["smiles_can"].notna()].copy()
    else:
        df["smiles_can"] = df["smiles"]

    # Process affinities
    if not is_log10_affinity:
        # Plain Kd/Ki/IC50 values: convert to uM, then to -log10(M).
        df["affinity_uM"] = df["affinity"] / (1e3 if affinity_unit == "nM" else 1)

        # Non-positive affinities cannot be log-transformed; mark NaN so the
        # dropna below removes them.
        df["neg_log10_affinity_M"] = -df["affinity_uM"].apply(
            lambda x: np.log10(x / 1e6) if x > 0 else np.nan)
    else:
        # Already -log10(M); also derive the plain uM value for clarity.
        df["neg_log10_affinity_M"] = df["affinity"]
        df["affinity_uM"] = df["neg_log10_affinity_M"].apply(lambda x: (10 ** (-x)) * 1e6)

    df.dropna(inplace=True)

    # Z-score normalization (scipy zscore uses population std, ddof=0).
    df["affinity_norm"] = zscore(df["neg_log10_affinity_M"])

    # Select and reorder columns
    df = df[["seq", "smiles_can", "affinity_uM", "neg_log10_affinity_M", "affinity_norm"]].copy()

    # Record the normalization parameters so affinity_norm can be inverted.
    # ddof=0 matches scipy.stats.zscore; pandas' default ddof=1 would not.
    df["affinity_mean"] = df["neg_log10_affinity_M"].mean()
    df["affinity_std"] = df["neg_log10_affinity_M"].std(ddof=0)

    return df.sort_values(by="affinity_norm", ascending=False)
79
+
80
if __name__ == "__main__":
    # Guarded entry point: the raw BindingDB dump is large, so this must not
    # run as a side effect when the module is imported.
    dataset = load_binding_affinity_dataset(
        csv_path="data/raw_data/bindingdb_ic50.csv",
        protein_col_idx=3,
        smiles_col_idx=1,
        affinity_col_idx=4,
        is_log10_affinity=False,  # raw IC50 values are plain numbers, not log10
        canonicalize_smiles=True,
        affinity_unit="nM",
        delimiter=",",
    )
    dataset.to_csv("data/bindingdb-ic50.csv", index=False)

    # View processed dataset
    print(dataset.head())
94
+
95
+
96
+ # Code for loading tdc data:
97
+ # import pandas as pd
98
+ # from rdkit import Chem
99
+ # from tdc.multi_pred import DTI
100
+
101
+ # def process_dataset(name):
102
+ # data = DTI(name=name)
103
+ # data.harmonize_affinities(mode='mean')
104
+ # data.convert_to_log()
105
+ # df = data.get_data()
106
+ # df['smiles_can'] = df['Drug'].apply(lambda s: Chem.MolToSmiles(Chem.MolFromSmiles(s), isomericSmiles=True, canonical=True) if Chem.MolFromSmiles(s) else None)
107
+ # return df[['smiles_can', 'Target', 'Y']].dropna(subset=['smiles_can']).rename(columns={'Target': 'seq', 'Y': 'neg_log_10_affinity'})
108
+
109
+ # datasets = ['BindingDB_Ki', 'BindingDB_Kd', 'BindingDB_IC50', 'DAVIS', 'KIBA']
110
+ # processed_data = [process_dataset(name) for name in datasets]
111
+
112
+ # binding_db = pd.concat(processed_data[:3]).drop_duplicates().reset_index(drop=True)
113
+ # binding_db.to_csv("data/bindingdb.csv", index=False)
114
+ # processed_data[3].to_csv("data/davis.csv", index=False)
115
+ # processed_data[4].to_csv("data/kiba.csv", index=False)
116
+
117
+ # code for loading pdbbind data:
118
+ # import os
119
+ # from pathlib import Path
120
+ # from Bio import PDB
121
+ # from Bio.PDB.Polypeptide import PPBuilder
122
+ # from rdkit import Chem
123
+ # from rdkit.Chem import AllChem
124
+ # import pandas as pd
125
+ # from tqdm import tqdm
126
+
127
+ # ppb = PPBuilder()
128
+
129
+ # def get_protein_sequence(structure):
130
+ # """Extract protein sequence from a PDB structure."""
131
+ # sequence = ""
132
+ # for pp in ppb.build_peptides(structure):
133
+ # sequence += str(pp.get_sequence())
134
+ # return sequence
135
+
136
+ # def get_canonical_smiles(mol):
137
+ # """Convert RDKit molecule to canonical SMILES."""
138
+ # return Chem.MolToSmiles(mol, isomericSmiles=True, canonical=True)
139
+
140
+ # def process_pdbbind_data(pdbbind_dir, index_file):
141
+ # pdbbind_dir = Path(pdbbind_dir).expanduser()
142
+ # parser = PDB.PDBParser(QUIET=True)
143
+ # data = []
144
+
145
+ # # Read the index file
146
+ # df_index = pd.read_csv(index_file, sep='\s+', header=None, comment= "#", usecols=[0,1,2,3,4,6,7],
147
+ # names=['PDB_ID', 'Resolution', 'Release_Year', '-logKd/Ki', 'Kd/Ki', 'Reference', 'Ligand_Name'])
148
+
149
+ # # Get the total number of entries for progress tracking
150
+ # total_entries = len(df_index)
151
+
152
+ # # Use tqdm for progress tracking
153
+ # with tqdm(total=total_entries, desc="Processing PDBbind data") as pbar:
154
+ # for _, row in df_index.iterrows():
155
+ # pdb_id = row['PDB_ID']
156
+ # subdir = pdbbind_dir / pdb_id
157
+
158
+ # if subdir.is_dir():
159
+ # # Process protein
160
+ # protein_file = subdir / f"{pdb_id}_protein.pdb"
161
+ # if protein_file.exists():
162
+ # structure = parser.get_structure(pdb_id, protein_file)
163
+ # sequence = get_protein_sequence(structure)
164
+
165
+ # # Process ligand
166
+ # ligand_file = subdir / f"{pdb_id}_ligand.mol2"
167
+ # if ligand_file.exists():
168
+ # mol = Chem.MolFromMol2File(str(ligand_file))
169
+ # if mol is not None:
170
+ # smiles = get_canonical_smiles(mol)
171
+
172
+ # # Get binding affinity
173
+ # neg_log_10_affinity = row['-logKd/Ki']
174
+
175
+ # # Add to data list
176
+ # data.append({
177
+ # 'smiles_can': smiles,
178
+ # 'seq': sequence,
179
+ # 'neg_log_10_affinity_M': neg_log_10_affinity
180
+ # })
181
+
182
+ # pbar.update(1)
183
+
184
+ # return pd.DataFrame(data)
185
+
186
+ # # Process data from PDBbind refined set
187
+ # pdbbind_refined_dir = "~/Data/PDBBind/PDBbind_v2020_refined"
188
+ # index_refined_file = "/home/tyler/Data/PDBBind/index/INDEX_refined_data.2020"
189
+ # df_refined = process_pdbbind_data(pdbbind_refined_dir, index_refined_file)
190
+
191
+ # # Process data from PDBbind general set
192
+ # pdbbind_general_dir = "~/Data/PDBBind/PDBbind_v2020_other_PL"
193
+ # index_general_file = "/home/tyler/Data/PDBBind/index/INDEX_general_PL_data.2020"
194
+ # df_general = process_pdbbind_data(pdbbind_general_dir, index_general_file)
195
+
196
+ # # Combine dataframes
197
+ # df_combined = pd.concat([df_refined, df_general], ignore_index=True)
198
+
199
+ # # Remove duplicates (if any) and reset index
200
+ # df_combined = df_combined.drop_duplicates().reset_index(drop=True)
201
+
202
+ # # Save to CSV
203
+ # output_file = "data/pdbbind_2020_combined.csv"
204
+ # df_combined.to_csv(output_file, index=False)
205
+ # print(f"Saved {len(df_combined)} entries to {output_file}")