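"""Combine protein-ligand binding affinity datasets (Parquet files) into a
single deduplicated, normalized Parquet file."""
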
import pandas as pd
import numpy as np

def combine_parquet_files(parquet_file_list):
    # Read and combine all Parquet files into a single dataframe
    combined_df = pd.concat([pd.read_parquet(file) for file in parquet_file_list], ignore_index=True)
    
    # Drop exact duplicate measurements (same sequence, ligand, and affinity)
    combined_df = combined_df.drop_duplicates(subset=['seq', 'smiles_can', 'neg_log10_affinity_M'])

    # Average the affinities of any remaining (seq, smiles_can) pairs that disagree
    combined_df = combined_df.groupby(['seq', 'smiles_can']).agg(
        neg_log10_affinity_M=('neg_log10_affinity_M', 'mean')
    ).reset_index()

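    # Down-sample rows whose affinity is exactly 5 (pKd/pKi = 5, i.e. 10 uM,
    # likely an over-represented reporting cutoff in the source data): keep
    # each such row with 30% probability and keep all other rows. The fixed
    # seed makes the sampling reproducible.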
    np.random.seed(42)
    mask_value_5 = combined_df['neg_log10_affinity_M'] == 5
    rows_to_keep = ~mask_value_5 | (mask_value_5 & (np.random.rand(len(combined_df)) < 0.3))
    combined_df = combined_df[rows_to_keep].reset_index(drop=True)

    # Sort by affinity, strongest binders (highest neg_log10_affinity_M) first
    combined_df = combined_df.sort_values('neg_log10_affinity_M', ascending=False)

    # Calculate mean and standard deviation of affinities
    affinity_mean = combined_df['neg_log10_affinity_M'].mean()
    affinity_std = combined_df['neg_log10_affinity_M'].std()
    
    # Add columns for the mean, std, and z-score-normalized affinity
    combined_df['affinity_mean'] = affinity_mean
    combined_df['affinity_std'] = affinity_std
    combined_df['affinity_norm'] = (combined_df['neg_log10_affinity_M'] - affinity_mean) / affinity_std
    
    return combined_df

combined_df = combine_parquet_files([
    'glaser.parquet', 'davis-filtered.parquet', 'pdbbind-2020-combined.parquet',
    'bindingdb-kd-filtered.parquet', 'bindingdb-ki.parquet', 'bindingdb-ic50.parquet',
])
combined_df.to_parquet('affinity-data-combined.parquet', index=False)