suke-sho committed on
Commit
ccd574e
1 Parent(s): d575960

Rename model-plant-reference-genome-corpus.py to arabidopsis_genome_corpus.py

Browse files
model-plant-reference-genome-corpus.py → arabidopsis_genome_corpus.py RENAMED
@@ -5,17 +5,13 @@ import datasets
5
  from Bio import SeqIO
6
  import os
7
 
8
-
9
- # Find for instance the citation on arxiv or on the dataset repo/website
10
  _CITATION = ""
11
 
12
- # You can copy an official description
13
  _DESCRIPTION = """\
14
  Dataset made of model plants genomes available on NCBI.
15
  Default configuration "6kbp" yields chunks of 6.2kbp (100bp overlap on each side). The chunks of DNA are cleaned and processed so that
16
  they can only contain the letters A, T, C, G and N.
17
  """
18
-
19
  _HOMEPAGE = "https://www.ncbi.nlm.nih.gov/"
20
 
21
  _LICENSE = "https://www.ncbi.nlm.nih.gov/home/about/policies/"
@@ -43,6 +39,7 @@ def clean_sequence(seq: str) -> str:
43
  seq = ''.join(list(seq))
44
  return seq
45
 
 
46
  class PlantSingleSpeciesGenomesConfig(datasets.BuilderConfig):
47
  """BuilderConfig for the Plant Single Species Pre-training Dataset."""
48
 
@@ -83,26 +80,17 @@ class PlantSingleSpeciesGenomes(datasets.GeneratorBasedBuilder):
83
  }
84
  )
85
  return datasets.DatasetInfo(
86
- # This is the description that will appear on the datasets page.
87
  description=_DESCRIPTION,
88
- # This defines the different columns of the dataset and their types
89
  features=features,
90
- # Homepage of the dataset for documentation
91
  homepage=_HOMEPAGE,
92
- # License for the dataset if available
93
  license=_LICENSE,
94
- # Citation for the dataset
95
  citation=_CITATION,
96
  )
97
 
98
  def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
99
-
100
- filepath = dl_manager.download_and_extract('plant_genome_file_name.txt')
101
- with open(filepath) as f:
102
- filepath = os.path.join("plant_genomes", f.read().strip())
103
-
104
  downloaded_file = dl_manager.download_and_extract(filepath)
105
-
106
  return [
107
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file": downloaded_file, "chunk_length": self.config.chunk_length})
108
  ]
@@ -114,8 +102,6 @@ class PlantSingleSpeciesGenomes(datasets.GeneratorBasedBuilder):
114
  fasta_sequences = SeqIO.parse(f, 'fasta')
115
 
116
  for record in fasta_sequences:
117
-
118
- # parse descriptions in the fasta file
119
  sequence, description = str(record.seq), record.description
120
 
121
  # clean chromosome sequence
 
5
  from Bio import SeqIO
6
  import os
7
 
 
 
8
  _CITATION = ""
9
 
 
10
  _DESCRIPTION = """\
11
  Dataset made of model plants genomes available on NCBI.
12
  Default configuration "6kbp" yields chunks of 6.2kbp (100bp overlap on each side). The chunks of DNA are cleaned and processed so that
13
  they can only contain the letters A, T, C, G and N.
14
  """
 
15
  _HOMEPAGE = "https://www.ncbi.nlm.nih.gov/"
16
 
17
  _LICENSE = "https://www.ncbi.nlm.nih.gov/home/about/policies/"
 
39
  seq = ''.join(list(seq))
40
  return seq
41
 
42
+
43
  class PlantSingleSpeciesGenomesConfig(datasets.BuilderConfig):
44
  """BuilderConfig for the Plant Single Species Pre-training Dataset."""
45
 
 
80
  }
81
  )
82
  return datasets.DatasetInfo(
 
83
  description=_DESCRIPTION,
 
84
  features=features,
 
85
  homepage=_HOMEPAGE,
 
86
  license=_LICENSE,
 
87
  citation=_CITATION,
88
  )
89
 
90
  def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
91
+ # A.thaliana reference genome
92
+ filepath = "GCF_000001735.4_TAIR10.1_genomic.fna.gz"
 
 
 
93
  downloaded_file = dl_manager.download_and_extract(filepath)
 
94
  return [
95
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file": downloaded_file, "chunk_length": self.config.chunk_length})
96
  ]
 
102
  fasta_sequences = SeqIO.parse(f, 'fasta')
103
 
104
  for record in fasta_sequences:
 
 
105
  sequence, description = str(record.seq), record.description
106
 
107
  # clean chromosome sequence