## Script to sanitize and split Boldini2024 dataset
#1. Import modules
# Install the dependencies first (shell commands, not Python):
# pip install rdkit
# pip install molvs
import pandas as pd
import numpy as np
import urllib.request
import tqdm
import rdkit
from rdkit import Chem
import molvs
standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()
#2. Download the original datasets
# Download the original datasets from the paper
#. Machine Learning Assisted Hit Prioritization for High Throughput Screening in Drug Discovery
#. Davide Boldini, Lukas Friedrich, Daniel Kuhn, and Stephan A. Sieber*
#. https://github.com/dahvida/AIC_Finder/tree/main/Datasets
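# The CSV files can also be fetched programmatically with urllib.request (imported above).
# A minimal sketch; the raw-file URL below assumes the standard raw.githubusercontent.com
# layout for the repository's "main" branch (an assumption, not part of the original script).
url = "https://raw.githubusercontent.com/dahvida/AIC_Finder/main/Datasets/GPCR.csv"
urllib.request.urlretrieve(url, "GPCR.csv")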
#3. Import one of the 17 datasets
#. Here we chose GPCR.csv for example
df = pd.read_csv("GPCR.csv")
#4. Sanitize with MolVS and print problems
df['X'] = [
    rdkit.Chem.MolToSmiles(
        fragment_remover.remove(
            standardizer.standardize(
                rdkit.Chem.MolFromSmiles(smiles))))
    for smiles in df['smiles']]
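# Note: rdkit.Chem.MolFromSmiles returns None for SMILES it cannot parse, which would
# make the pipeline above raise a TypeError. A minimal defensive variant (the helper
# name below is an illustration, not part of the original script):
def sanitize_smiles(smiles):
    """Return the standardized, fragment-removed SMILES, or None if parsing fails."""
    mol = rdkit.Chem.MolFromSmiles(smiles)
    if mol is None:
        return None
    mol = standardizer.standardize(mol)
    mol = fragment_remover.remove(mol)
    return rdkit.Chem.MolToSmiles(mol)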
problems = []
for index, row in tqdm.tqdm(df.iterrows()):
result = molvs.validate_smiles(row['X'])
if len(result) == 0:
continue
problems.append( (row['ID'], result) )
# Most problems occur because the entry is a salt form and/or is not neutralized
for id, alert in problems:
print(f"ID: {id}, problem: {alert[0]}")
# Result interpretation
# - "Can't kekulize mol": kekulization could not proceed without breaking the molecule apart.
#   It does not mean the molecule is bad, only that this normalization step failed.
#5. Rename the sanitized column, select the relevant columns, and save the dataset
df.rename(columns={'X': 'SMILES'}, inplace=True)
df[['SMILES', 'Primary', 'Score', 'Confirmatory']].to_csv('GPCR_sanitized.csv', index=False)
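#6. Split the sanitized dataset
# The split itself is not shown in the original script. A minimal sketch of a random
# 80/20 train/test split with pandas (the fraction, random_state, and output file
# names are assumptions):
sanitized = pd.read_csv('GPCR_sanitized.csv')
train = sanitized.sample(frac=0.8, random_state=42)
test = sanitized.drop(train.index)
train.to_csv('GPCR_train.csv', index=False)
test.to_csv('GPCR_test.csv', index=False)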