{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from datasets import load_dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_files = {\n",
" \"train\": \"./train.txt\",\n",
" \"val\": \"./val.txt\",\n",
" \"test\": \"./test.txt\",\n",
"}\n",
"ds = load_dataset(\"text\", data_files=data_files)"
]
},
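{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sanity check (optional): the `text` loader yields one row per line with a\n",
"# single 'text' column; row counts depend entirely on the local files.\n",
"print(ds)"
]
},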
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds['train'] = ds['train'].rename_column('text', 'SMILE')\n",
"ds['val'] = ds['val'].rename_column('text', 'SMILE')\n",
"ds['test'] = ds['test'].rename_column('text', 'SMILE')"
]
},
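{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional check: after the rename, each split should expose 'SMILE' as its\n",
"# only column.\n",
"print(ds['train'].column_names)  # expected: ['SMILE']"
]
},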
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import selfies as sf\n",
"\n",
"def try_convert(row):\n",
" selfie = None\n",
" try:\n",
" selfie = sf.encoder(row['SMILE'])\n",
" except:\n",
" pass\n",
"\n",
" return {'SELFIE': selfie}\n",
"\n",
"# Alongside the SMILES, we also need to convert them to SELFIES\n",
"# ds['train'] = ds['train'].add_column('SELFIE', ds['train'].map(try_convert, num_proc=8))"
]
},
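{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: a well-formed SMILES round-trips through the SELFIES\n",
"# encoder/decoder, while an invalid one makes try_convert return None.\n",
"print(sf.encoder('CCO'))                      # -> [C][C][O]\n",
"print(sf.decoder('[C][C][O]'))                # -> CCO\n",
"print(try_convert({'SMILE': 'not a smiles'})) # -> {'SELFIE': None}"
]
},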
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds['train'] = ds['train'].map(try_convert, num_proc=8)\n",
"ds['val'] = ds['val'].map(try_convert, num_proc=8)\n",
"ds['test'] = ds['test'].map(try_convert, num_proc=8)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Drop the rows where the conversion failed\n",
"ds['train'] = ds['train'].filter(lambda row: row['SELFIE'] is not None)\n",
"ds['val'] = ds['val'].filter(lambda row: row['SELFIE'] is not None)\n",
"ds['test'] = ds['test'].filter(lambda row: row['SELFIE'] is not None)"
]
},
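{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional check: see how many rows survived the SELFIES conversion. No\n",
"# counts are assumed here; they depend on the input files.\n",
"for split in ('train', 'val', 'test'):\n",
"    print(split, len(ds[split]))"
]
},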
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tokenizers import Tokenizer\n",
"\n",
"tokenizer = Tokenizer.from_pretrained(\"haydn-jones/GuacamolSELFIETokenizer\")"
]
},
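{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: encode one SELFIES string and inspect the result. The\n",
"# assumption is that this pretrained tokenizer maps each bracketed SELFIES\n",
"# symbol to a single token, which is why it is used below to spot <UNK>s.\n",
"enc = tokenizer.encode('[C][C][O]')\n",
"print(enc.tokens)\n",
"print(enc.ids)"
]
},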
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"unk_id = tokenizer.token_to_id('<UNK>')\n",
"\n",
"# Drop any rows where the tokenization has an <UNK> token\n",
"ds['train'] = ds['train'].filter(lambda row: unk_id not in tokenizer.encode(row['SELFIE']).ids, num_proc=8)\n",
"ds['val'] = ds['val'].filter(lambda row: unk_id not in tokenizer.encode(row['SELFIE']).ids, num_proc=8)\n",
"ds['test'] = ds['test'].filter(lambda row: unk_id not in tokenizer.encode(row['SELFIE']).ids, num_proc=8)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds.save_to_disk('./guacamol')"
]
},
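{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional round-trip check: reload the copy just written to disk and\n",
"# confirm the splits and columns came back intact.\n",
"from datasets import load_from_disk\n",
"\n",
"reloaded = load_from_disk('./guacamol')\n",
"print(reloaded)"
]
},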
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"repo_id = \"haydn-jones/Guacamol\"\n",
"\n",
"# Push the dataset to the repo\n",
"ds.push_to_hub(repo_id)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "ddpm",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}