maom committed
Commit 44c820a
1 Parent(s): 3eb0fcf

add initial curation code
.gitignore ADDED
@@ -0,0 +1 @@
+ *~
src/00_setup_curation.sh ADDED
@@ -0,0 +1,29 @@
+
+ # from a base directory
+
+ mkdir data
+ mkdir intermediate
+
+ git clone https://<user_name>:<security_token>@huggingface.co/RosettaCommons/MegaScale
+
+ # needed to get splits
+ git clone https://github.com/Kuhlman-Lab/ThermoMPNN.git
+
+ # Run each numbered script in MegaScale/src/ in order (starting with this one)
+ #
+ # Tips:
+ # 1) Make sure to set the working directory to the base directory (outside of the HF repo)
+ # 2) While most of the scripts should work, I recommend running them interactively
+ #    (see the sketch after this file for a possible batch pass)
+ # 3) Some stages require more memory than others; all can be done with < 400GB of memory,
+ #    but the memory requirements could perhaps be reduced
+ # 4)
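Per the tips above, the numbered stages are meant to be run in order from the base directory. Purely as an illustration (not part of this commit), a batch pass over the stages added here might look like the following R session; the base-directory path is hypothetical, and the tips still recommend interactive execution:

# hypothetical batch runner for the stages added in this commit;
# assumes the working directory is the base directory from tip 1
setwd("/path/to/base_directory")  # hypothetical path
source("MegaScale/src/01_gather_data.R")
source("MegaScale/src/02.1_assemble_K50_dG_dataset.R")
system("python MegaScale/src/02.8_gather_ThermoMPNN_splits.py")  # assumes python is on PATH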
src/01_gather_data.R ADDED
@@ -0,0 +1,19 @@
+
+ # Download data from: https://zenodo.org/records/7992926
+ # Mega-scale experimental analysis of protein folding stability in biology and design
+
+ # 1.2 GB
+ system("cd data && curl -o data.zip https://zenodo.org/api/records/7992926/files-archive")
+
+ # md5sum prints "<hash>  <file>" with two spaces between the fields
+ md5sum_expected <- "b76b523d10e12d34b481916b1f57f31c  data/data.zip"
+ md5sum <- system(
+   "md5sum data/data.zip",
+   intern = TRUE)
+ if (md5sum != md5sum_expected) {
+   cat("Expected and obtained md5sum values don't match\n")
+ }
+
+ system("cd data && unzip data.zip && rm data.zip")
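A note on the checksum step above: it shells out to the GNU md5sum binary, so the expected string has to reproduce md5sum's "&lt;hash&gt;  &lt;file&gt;" output exactly. Base R's tools::md5sum() computes the hash in-process and sidesteps both the binary dependency and the formatting question; a minimal sketch, not part of the commit:

# portable alternative: compare only the hash, computed with base R
md5_observed <- unname(tools::md5sum("data/data.zip"))
if (md5_observed != "b76b523d10e12d34b481916b1f57f31c") {
  cat("Expected and obtained md5sum values don't match\n")
}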
src/02.1_assemble_K50_dG_dataset.R ADDED
@@ -0,0 +1,142 @@
+ system("cd data; unzip Processed_K50_dG_datasets.zip")
+
+ ### Dataset1 ###
+ dataset1 <- readr::read_csv(
+   file = "data/Processed_K50_dG_datasets/Tsuboyama2023_Dataset1_20230416.csv",
+   col_types = readr::cols(
+     name = readr::col_character(),
+     dna_seq = readr::col_character(),
+     log10_K50_t = readr::col_double(),
+     log10_K50_t_95CI_high = readr::col_double(),
+     log10_K50_t_95CI_low = readr::col_double(),
+     log10_K50_t_95CI = readr::col_double(),
+     fitting_error_t = readr::col_double(),
+     log10_K50unfolded_t = readr::col_double(),
+     deltaG_t = readr::col_double(),
+     deltaG_t_95CI_high = readr::col_double(),
+     deltaG_t_95CI_low = readr::col_double(),
+     deltaG_t_95CI = readr::col_double(),
+     log10_K50_c = readr::col_double(),
+     log10_K50_c_95CI_high = readr::col_double(),
+     log10_K50_c_95CI_low = readr::col_double(),
+     log10_K50_c_95CI = readr::col_double(),
+     fitting_error_c = readr::col_double(),
+     log10_K50unfolded_c = readr::col_double(),
+     deltaG_c = readr::col_double(),
+     deltaG_c_95CI_high = readr::col_double(),
+     deltaG_c_95CI_low = readr::col_double(),
+     deltaG_c_95CI = readr::col_double(),
+     deltaG = readr::col_double(),
+     deltaG_95CI_high = readr::col_double(),
+     deltaG_95CI_low = readr::col_double(),
+     deltaG_95CI = readr::col_double(),
+     log10_K50_trypsin_ML = readr::col_double(),
+     log10_K50_chymotrypsin_ML = readr::col_double()))
+
+ # note that some of the log10_K50_trypsin_ML and log10_K50_chymotrypsin_ML values are "-" and ">2.5".
+ # These are parsed as NA values (see the sketch after this file for keeping them).
+
+ dataset1 |>
+   arrow::write_parquet(
+     "intermediate/dataset1.parquet")
+
+
46
+ dataset23 <- readr::read_csv(
47
+ file = "data/Processed_K50_dG_datasets/Tsuboyama2023_Dataset2_Dataset3_20230416.csv"
48
+
49
+
50
+ ####
51
+
52
+ system("cd data && unzip AlphaFold_model_PDBs.zip")
53
+
54
+
55
+ assemble_models <- function(
56
+ data_path,
57
+ dataset_tag,
58
+ pattern,
59
+ output_path) {
60
+
61
+ cat(
62
+ "data path: ", data_path, "\n",
63
+ "dataset_tag: ", dataset_tag, "\n",
64
+ "pattern: ", pattern, "\n",
65
+ "output path: ", output_path, "\n",
66
+ sep = "")
67
+
68
+ file_index <- 1
69
+ models <- list.files(
70
+ path = data_path,
71
+ full.names = TRUE,
72
+ pattern = pattern,
73
+ recursive = TRUE) |>
74
+ purrr::map_dfr(.f = function(path) {
75
+ file_handle <- path |>
76
+ file(open = "rb") |>
77
+ gzcon()
78
+
79
+ if( file_index %% 10 == 0) {
80
+ cat("Reading '", path, "' ", file_index, "\n", sep = "")
81
+ }
82
+ file_index <<- file_index + 1
83
+
84
+ lines <- file_handle |> readLines()
85
+ file_handle |> close()
86
+
87
+ data.frame(
88
+ dataset_tag = dataset_tag,
89
+ id = path |> basename() |> stringr::str_replace(".pdb", ""),
90
+ pdb = lines |> paste0(collapse = "\n"))
91
+ })
92
+ models |> arrow::write_parquet(output_path)
93
+ }
94
+
95
+
96
+ assemble_models(
97
+ data_path = "data/AlphaFold_model_PDBs",
98
+ dataset_tag = "all",
99
+ pattern = "*.pdb",
100
+ output_path = "intermediate/all_pdbs.parquet")
101
+
102
+ #
103
+ # assemble_models(
104
+ # data_path = "data/AlphaFold_model_PDBs",
105
+ # dataset_tag = "EA",
106
+ # pattern = "EA[:]run.*pdb",
107
+ # output_path = "intermediate/EA_pdbs.parquet")
108
+ #
109
+ #
110
+ # assemble_models(
111
+ # data_path = "data/AlphaFold_model_PDBs",
112
+ # dataset_tag = "EEHEE",
113
+ # pattern = "EEHEE.*pdb",
114
+ # output_path = "intermediate/EEHEE_pdbs.parquet")
115
+ #
116
+ #
117
+ # assemble_models(
118
+ # data_path = "data/AlphaFold_model_PDBs",
119
+ # dataset_tag = "EHEE",
120
+ # pattern = "EHEE.*pdb",
121
+ # output_path = "intermediate/EHEE_pdbs.parquet")
122
+ #
123
+ #
124
+ # assemble_models(
125
+ # data_path = "data/AlphaFold_model_PDBs",
126
+ # dataset_tag = "GG",
127
+ # pattern = "GG[:]run.*pdb",
128
+ # output_path = "intermediate/GG_pdbs.parquet")
129
+ #
130
+ #
131
+ # assemble_models(
132
+ # data_path = "data/AlphaFold_model_PDBs",
133
+ # dataset_tag = "HEEH_KT",
134
+ # pattern = "HEEH_KT_rd.*pdb",
135
+ # output_path = "intermediate/HEEH_KT_pdbs.parquet")
136
+ #
137
+ # assemble_models(
138
+ # data_path = "data/AlphaFold_model_PDBs",
139
+ # dataset_tag = "HEEH",
140
+ # pattern = "HEEH_rd.*pdb",
141
+ # output_path = "intermediate/HEEH_pdbs.parquet")
142
+
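Two follow-ups on 02.1. First, dataset23 is read but never persisted; if that was intended to mirror dataset1, the obvious (unconfirmed) counterpart would be dataset23 |> arrow::write_parquet("intermediate/dataset23.parquet"). Second, per the comment above, "-" and ">2.5" in the two _ML columns are coerced to NA; a minimal sketch for keeping those censored readings, assuming these are the only two marker strings, is to read the columns as character and flag them:

# sketch only: preserve the raw "-" / ">2.5" markers and derive explicit flags;
# file and column names are the ones used in 02.1 above
dataset1_raw <- readr::read_csv(
  file = "data/Processed_K50_dG_datasets/Tsuboyama2023_Dataset1_20230416.csv",
  col_types = readr::cols(
    .default = readr::col_guess(),
    log10_K50_trypsin_ML = readr::col_character(),
    log10_K50_chymotrypsin_ML = readr::col_character()))

dataset1_flagged <- dataset1_raw |>
  dplyr::mutate(
    trypsin_ML_censored = log10_K50_trypsin_ML %in% c("-", ">2.5"),
    chymotrypsin_ML_censored = log10_K50_chymotrypsin_ML %in% c("-", ">2.5"))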
src/02.8_gather_ThermoMPNN_splits.py ADDED
@@ -0,0 +1,20 @@
+ import pickle
+
+ import pandas
+ import pyarrow
+ import pyarrow.parquet
+
+ with open("ThermoMPNN/dataset_splits/mega_splits.pkl", "rb") as f:
+     mega_splits = pickle.load(f)
+
+ # one (split_name, id) row per entry in each ThermoMPNN split
+ splits = []
+ for split_name, split_ids in mega_splits.items():
+     splits.append(
+         pandas.DataFrame({
+             'split_name': split_name,
+             'id': split_ids}))
+
+ splits = pandas.concat(splits)
+ pyarrow.parquet.write_table(
+     pyarrow.Table.from_pandas(splits),
+     where="intermediate/ThermoMPNN_splits.parquet")
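As a quick sanity check that the splits table landed, it can be read back on the R side with arrow (a sketch; the split names are whatever ThermoMPNN's mega_splits.pkl defines):

# count how many ids ended up in each ThermoMPNN split
splits <- arrow::read_parquet("intermediate/ThermoMPNN_splits.parquet")
splits |> dplyr::count(split_name)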