etrop committed on
Commit
bde1607
1 Parent(s): ec300e6

remove urls.txt and update dataloading script

Browse files
Files changed (2) hide show
  1. multi_species_genomes.py +10 -9
  2. urls.txt +0 -0
multi_species_genomes.py CHANGED
@@ -19,6 +19,7 @@ from typing import List
19
  import datasets
20
  import pandas as pd
21
  from Bio import SeqIO
 
22
 
23
 
24
  # Find for instance the citation on arxiv or on the dataset repo/website
@@ -128,17 +129,17 @@ class MultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
128
 
129
  def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
130
 
131
- urls_filepath = dl_manager.download_and_extract('urls.txt')
132
- with open(urls_filepath) as urls_file:
133
- urls = [line.rstrip() for line in urls_file]
134
 
135
- test_urls = urls[-50:] # 50 genomes for test set
136
- validation_urls = urls[-100:-50] # 50 genomes for validation set
137
- train_urls = urls[:-100] # 800 genomes for training
138
 
139
- train_downloaded_files = dl_manager.download_and_extract(train_urls)
140
- test_downloaded_files = dl_manager.download_and_extract(test_urls)
141
- validation_downloaded_files = dl_manager.download_and_extract(validation_urls)
142
 
143
  return [
144
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_downloaded_files, "chunk_length": self.config.chunk_length}),
 
19
  import datasets
20
  import pandas as pd
21
  from Bio import SeqIO
22
+ import os
23
 
24
 
25
  # Find for instance the citation on arxiv or on the dataset repo/website
 
129
 
130
  def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
131
 
132
+ genome_file_paths = dl_manager.download_and_extract('genomes.txt')
133
+ with open(genome_file_paths) as genome_files:
134
+ genomes = [os.path.join('genomes',line.rstrip()) for line in genome_files]
135
 
136
+ test_genomes = genomes[-50:] # 50 genomes for test set
137
+ validation_genomes = genomes[-100:-50] # 50 genomes for validation set
138
+ train_genomes = genomes[:-100] # 800 genomes for training
139
 
140
+ train_downloaded_files = dl_manager.download_and_extract(train_genomes)
141
+ test_downloaded_files = dl_manager.download_and_extract(test_genomes)
142
+ validation_downloaded_files = dl_manager.download_and_extract(validation_genomes)
143
 
144
  return [
145
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_downloaded_files, "chunk_length": self.config.chunk_length}),
urls.txt DELETED
The diff for this file is too large to render. See raw diff