trrt8 committed
Commit e360c4f
Parent: a3665d3

updated combined dataset

Files changed (2):
  1. affinity-data-combined.parquet +2 -2
  2. combine_data.py +13 -2
affinity-data-combined.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:23fc7754161e313ca4f702a5730fd1265dae9f877f2a2047a02039a3d385b036
-size 1786935004
+oid sha256:9fe95d2ceff77b1785c3280f6f8a8dd3ca0c442035049061d1a69ae0c36e0330
+size 909234669
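(The regenerated parquet is roughly half its previous size, 909 MB versus 1.79 GB, consistent with the deduplication, aggregation, and downsampling added to combine_data.py below.)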
combine_data.py CHANGED
@@ -1,12 +1,23 @@
 import pandas as pd
+import numpy as np
 
 def combine_csv_files(csv_file_list):
     # Read and combine all CSV files into a single dataframe
     combined_df = pd.concat([pd.read_csv(file) for file in csv_file_list], ignore_index=True)
 
     # Remove duplicate rows
-    combined_df = combined_df.drop_duplicates()
-
+    combined_df = combined_df.drop_duplicates(subset=['seq', 'smiles_can', 'neg_log10_affinity_M'])
+
+    # Collapse remaining duplicate (seq, smiles_can) pairs to their mean affinity
+    combined_df = combined_df.groupby(['seq', 'smiles_can']).agg(
+        neg_log10_affinity_M=('neg_log10_affinity_M', 'mean')
+    ).reset_index()
+
+    np.random.seed(42)  # fixed seed so the 30% downsampling below is reproducible
+    mask_value_5 = combined_df['neg_log10_affinity_M'] == 5
+    rows_to_keep = ~mask_value_5 | (mask_value_5 & (np.random.rand(len(combined_df)) < 0.3))
+    combined_df = combined_df[rows_to_keep].reset_index(drop=True)
+
     # Sort by neg_log10_affinity_M
     combined_df = combined_df.sort_values('neg_log10_affinity_M', ascending=False)
 
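For context, here is a minimal runnable sketch of what the new aggregation and downsampling steps do. The sequences, SMILES strings, and affinity values below are made up for illustration and are not taken from the dataset:

import numpy as np
import pandas as pd

# Hypothetical mini-dataset with the same columns the script expects
df = pd.DataFrame({
    'seq':        ['MKT', 'MKT', 'GAV', 'PLL'],
    'smiles_can': ['CCO', 'CCO', 'c1ccccc1', 'CNC'],
    'neg_log10_affinity_M': [6.2, 6.4, 5.0, 7.1],
})

# Duplicate (seq, smiles_can) pairs collapse to their mean: MKT/CCO becomes 6.3
df = df.groupby(['seq', 'smiles_can']).agg(
    neg_log10_affinity_M=('neg_log10_affinity_M', 'mean')
).reset_index()

# Rows at exactly 5 survive with probability 0.3; all other rows are kept
np.random.seed(42)
mask5 = df['neg_log10_affinity_M'] == 5
df = df[~mask5 | (mask5 & (np.random.rand(len(df)) < 0.3))].reset_index(drop=True)
print(df)

A plausible motivation, not stated in the commit itself: neg_log10_affinity_M == 5 corresponds to an affinity of exactly 10 µM, a common assay reporting cutoff, so rows pinned at that value tend to be heavily overrepresented; keeping only about 30% of them thins out that spike in the distribution.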