trrt8 committed on
Commit
5ee44a9
1 Parent(s): ef2a92c

convert to parquet

Browse files
.gitattributes CHANGED
@@ -69,3 +69,13 @@ pdbbind-2020-combined.csv filter=lfs diff=lfs merge=lfs -text
69
  affinity-data-combined.parquet filter=lfs diff=lfs merge=lfs -text
70
  bindingdb-kd-filtered.csv filter=lfs diff=lfs merge=lfs -text
71
  davis-filtered.csv filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
69
  affinity-data-combined.parquet filter=lfs diff=lfs merge=lfs -text
70
  bindingdb-kd-filtered.csv filter=lfs diff=lfs merge=lfs -text
71
  davis-filtered.csv filter=lfs diff=lfs merge=lfs -text
72
+ bindingdb-kd.parquet filter=lfs diff=lfs merge=lfs -text
73
+ bindingdb-ki.parquet filter=lfs diff=lfs merge=lfs -text
74
+ davis-filtered.parquet filter=lfs diff=lfs merge=lfs -text
75
+ pdbbind-2020-combined.parquet filter=lfs diff=lfs merge=lfs -text
76
+ pdbbind-2020-refined.parquet filter=lfs diff=lfs merge=lfs -text
77
+ bindingdb-ic50.parquet filter=lfs diff=lfs merge=lfs -text
78
+ davis.parquet filter=lfs diff=lfs merge=lfs -text
79
+ glaser.parquet filter=lfs diff=lfs merge=lfs -text
80
+ kiba.parquet filter=lfs diff=lfs merge=lfs -text
81
+ bindingdb-kd-filtered.parquet filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -11,35 +11,35 @@ configs:
11
  - split: train
12
  path: "affinity-data-combined.parquet"
13
  - split: davis
14
- path: "davis.csv"
15
  - split: davis_filtered
16
- path: "davis-filtered.csv"
17
  - split: kiba
18
- path: "kiba.csv"
19
  - split: pdbbind_2020_combined
20
- path: "pdbbind-2020-combined.csv"
21
  - split: pdbbind_2020_refined
22
- path: "pdbbind-2020-refined.csv"
23
  - split: bindingdb_ic50
24
- path: "bindingdb-ic50.csv"
25
  - split: bindingdb_ki
26
- path: "bindingdb-ki.csv"
27
  - split: bindingdb_kd_filtered
28
- path: "bindingdb-kd-filtered.csv"
29
  - split: bindingdb_kd
30
- path: "bindingdb-kd.csv"
31
  - split: glaser
32
- path: "glaser.csv"
33
  ---
34
 
35
  ## Data Sources
36
- bindingdb_ic50.csv: https://tdcommons.ai/ (tdc python package)
37
- bindingdb_kd.csv: https://tdcommons.ai/ (tdc python package)
38
- bindingdb_kd_filtered.csv: filtered manually (see standardize_data.ipynb)
39
- bindingdb_ki.csv: https://tdcommons.ai/ (tdc python package)
40
- davis.csv: https://tdcommons.ai/ (tdc python package)
41
- davis_filtered.csv: https://www.kaggle.com/datasets/christang0002/davis-and-kiba
42
- kiba.csv: https://tdcommons.ai/ (tdc python package)
43
- pdbbind_2020_combined.csv: https://www.pdbbind.org.cn/
44
- pdbbind_2020_refined.csv: https://www.pdbbind.org.cn/
45
- glaser.csv: https://huggingface.co/datasets/jglaser/binding_affinity
 
11
  - split: train
12
  path: "affinity-data-combined.parquet"
13
  - split: davis
14
+ path: "davis.parquet"
15
  - split: davis_filtered
16
+ path: "davis-filtered.parquet"
17
  - split: kiba
18
+ path: "kiba.parquet"
19
  - split: pdbbind_2020_combined
20
+ path: "pdbbind-2020-combined.parquet"
21
  - split: pdbbind_2020_refined
22
+ path: "pdbbind-2020-refined.parquet"
23
  - split: bindingdb_ic50
24
+ path: "bindingdb-ic50.parquet"
25
  - split: bindingdb_ki
26
+ path: "bindingdb-ki.parquet"
27
  - split: bindingdb_kd_filtered
28
+ path: "bindingdb-kd-filtered.parquet"
29
  - split: bindingdb_kd
30
+ path: "bindingdb-kd.parquet"
31
  - split: glaser
32
+ path: "glaser.parquet"
33
  ---
34
 
35
  ## Data Sources
36
+ bindingdb_ic50.parquet: https://tdcommons.ai/ (tdc python package)
37
+ bindingdb_kd.parquet: https://tdcommons.ai/ (tdc python package)
38
+ bindingdb_kd_filtered.parquet: filtered manually (see standardize_data.ipynb)
39
+ bindingdb_ki.parquet: https://tdcommons.ai/ (tdc python package)
40
+ davis.parquet: https://tdcommons.ai/ (tdc python package)
41
+ davis_filtered.parquet: https://www.kaggle.com/datasets/christang0002/davis-and-kiba
42
+ kiba.parquet: https://tdcommons.ai/ (tdc python package)
43
+ pdbbind_2020_combined.parquet: https://www.pdbbind.org.cn/
44
+ pdbbind_2020_refined.parquet: https://www.pdbbind.org.cn/
45
+ glaser.parquet: https://huggingface.co/datasets/jglaser/binding_affinity
bindingdb-ic50.csv → bindingdb-ic50.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:25a6488d678b3a1a2b83d78ab900fe5c49b96ad6b116efde158b5c5bdd39b557
3
- size 899074544
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1d2e2220fecf27a4127de3e038d4d248cdf10dcb4e051f4f5b49bb7ff6fbdf2
3
+ size 567925247
davis-filtered.csv → bindingdb-kd-filtered.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:484eaefff41f82de0f39811b589db7ed390061c952bc4199353d9e1c3700cfff
3
- size 8948685
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a9b82dbe2d1349d8f0383d01c0eef8f1ac4896fcc4eb23ffbf2510821c0eb43
3
+ size 1359656
pdbbind-2020-refined.csv → bindingdb-kd.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:46ef8fb491e31ca6b9e6256742300b14d0cc545d0ebdf83cdd097155f98bbd99
3
- size 3025349
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:302cbd91af1ab1b414f79d683cda68786bb0fdb769343cf6761776878ceb5185
3
+ size 1438234
bindingdb-kd-filtered.csv → bindingdb-ki.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:541f7d1392c64a8b74f51bc08dbc5f8c619d832cb1a982f6f5d45837890adef7
3
- size 29878994
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c11ebb80d61ccd6d6dc631c5f5badd2c6dc584c9682d71cefed5638e8fc2b8f
3
+ size 77601662
combine_data.py CHANGED
@@ -32,5 +32,5 @@ def combine_csv_files(csv_file_list):
32
 
33
  return combined_df
34
 
35
- combined_df = combine_csv_files(['glaser.csv', 'davis-filtered.csv', 'pdbbind-2020-combined.csv', 'bindingdb-kd-filtered.csv', 'bindingdb-ki.csv', 'bindingdb-ic50.csv'])
36
  combined_df.to_parquet('affinity-data-combined.parquet', index=False)
 
32
 
33
  return combined_df
34
 
35
+ combined_df = combine_csv_files(['glaser.parquet', 'davis-filtered.parquet', 'pdbbind-2020-combined.parquet', 'bindingdb-kd-filtered.parquet', 'bindingdb-ki.parquet', 'bindingdb-ic50.parquet'])
36
  combined_df.to_parquet('affinity-data-combined.parquet', index=False)
bindingdb-kd.csv → davis-filtered.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e18aaba78a1e2c0e9584fbc831954312d65e02deca70ce01b7514b5004a4c216
3
- size 57281302
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e12377bc3c56a0cffd9eb48b58c221172e87e3d42f2ee28929b08417dcc96dc0
3
+ size 319846
davis.csv → davis.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:34f698180a386c92ecc160da984d9d2c31bf1d153cb26a2d91d8eab4d7ec5fd7
3
- size 22441866
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5136c1949cfd332496e87db8029664b3d255f69a1cb69b96094a140d5c9e009e
3
+ size 345287
glaser.csv DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:73550c83f7fc9315bbcfa363304231f00f88dbd503222d241a47c8551ef65cd8
3
- size 1433278852
 
 
 
 
bindingdb-ki.csv → glaser.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c8b08f64f5954c6189561014f802c103a86bb78d6644460e3f6f31697376a1f6
3
- size 259031100
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f91591b1896928fb860e03ced086ad225b3f66ac1d13363c6c6b59d0d8bc351
3
+ size 1034508980
kiba.csv DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:56ac9893cbdc98ddc137a7ef4f5a5f00756da6ec5792b7ce9b2ccd270e4244d2
3
- size 102460723
 
 
 
 
kiba.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39c9b84755218c501647b3e5a7a4885192d061f043d57532dd7d9aa081b7e35a
3
+ size 597576
pdbbind-2020-combined.csv DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:7221192655553cdfd07df4a71386aad31873bb8e0f27ec7fe26536a56a9df868
3
- size 11802666
 
 
 
 
pdbbind-2020-combined.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:82ea985d30e9e7102effdf8053275fec7a7473a32437857d3cf748642ee01814
3
+ size 7215274
pdbbind-2020-refined.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d4fc1c1eae3a0c9b9a5db6dc6469dfce35672173b45605b19fa47c4a1a44b79
3
+ size 1432703
standardize_data.ipynb CHANGED
@@ -102,7 +102,7 @@
102
  " affinity_unit=\"nM\",\n",
103
  " delimiter=\",\"\n",
104
  ")\n",
105
- "dataset.to_csv(\"data/bindingdb-ic50.csv\", index=False)\n"
106
  ]
107
  },
108
  {
@@ -323,6 +323,48 @@
323
  "bindingdb_df.to_csv('bindingdb-kd-filtered.csv', index=False)\n"
324
  ]
325
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
326
  {
327
  "cell_type": "code",
328
  "execution_count": null,
 
102
  " affinity_unit=\"nM\",\n",
103
  " delimiter=\",\"\n",
104
  ")\n",
105
+ "dataset.to_parquet(\"data/bindingdb-ic50.parquet\", index=False)\n"
106
  ]
107
  },
108
  {
 
323
  "bindingdb_df.to_csv('bindingdb-kd-filtered.csv', index=False)\n"
324
  ]
325
  },
326
+ {
327
+ "cell_type": "code",
328
+ "execution_count": 96,
329
+ "metadata": {},
330
+ "outputs": [
331
+ {
332
+ "name": "stdout",
333
+ "output_type": "stream",
334
+ "text": [
335
+ "Converted bindingdb-ki.csv to bindingdb-ki.parquet\n",
336
+ "Converted pdbbind-2020-refined.csv to pdbbind-2020-refined.parquet\n",
337
+ "Converted davis-filtered.csv to davis-filtered.parquet\n",
338
+ "Converted bindingdb-kd-filtered.csv to bindingdb-kd-filtered.parquet\n",
339
+ "Converted glaser.csv to glaser.parquet\n",
340
+ "Converted bindingdb-ic50.csv to bindingdb-ic50.parquet\n",
341
+ "Converted pdbbind-2020-combined.csv to pdbbind-2020-combined.parquet\n",
342
+ "Converted bindingdb-kd.csv to bindingdb-kd.parquet\n",
343
+ "Converted davis.csv to davis.parquet\n",
344
+ "Converted kiba.csv to kiba.parquet\n"
345
+ ]
346
+ }
347
+ ],
348
+ "source": [
349
+ "import pandas as pd\n",
350
+ "import glob\n",
351
+ "\n",
352
+ "# Get all CSV files in current directory\n",
353
+ "csv_files = glob.glob('*.csv')\n",
354
+ "\n",
355
+ "# Convert each CSV to parquet\n",
356
+ "for csv_file in csv_files:\n",
357
+ " # Read CSV\n",
358
+ " df = pd.read_csv(csv_file)\n",
359
+ " \n",
360
+ " # Create parquet filename by replacing .csv extension\n",
361
+ " parquet_file = csv_file.replace('.csv', '.parquet')\n",
362
+ " \n",
363
+ " # Save as parquet\n",
364
+ " df.to_parquet(parquet_file, index=False)\n",
365
+ " print(f\"Converted {csv_file} to {parquet_file}\")"
366
+ ]
367
+ },
368
  {
369
  "cell_type": "code",
370
  "execution_count": null,