from datasets import DatasetBuilder, SplitGenerator, Split, Features, Value, ClassLabel, BuilderConfig, Version, DatasetInfo, DownloadManager, ArrowBasedBuilder
import glob
import json
import multiprocessing as mp
import os
import pyarrow as pa
import pyarrow.parquet as pq
import pandas as pd
import pyarrow.json  # makes pa.json.read_json available
# Collect the compressed JSONL archives in the current directory
pattern = "*.bz2"
paths = glob.glob(pattern)
# Exclude plain-text archives
paths = [file for file in paths if ".txt." not in file]
n_files = len(paths)
# Labels are the file names without the .jsonl.bz2 extension
labels = [file.replace(".jsonl.bz2", "") for file in paths]
## Handle Parquet conversion
# Directory for the converted Parquet files and a download manager for extraction
dl_manager = DownloadManager()
parquet_dir = "parquet"
def convert_jsonl_to_parquet(file_list, parquet_dir, chunk_size=100000):
    """Converts JSONL files to Parquet with memory efficiency.

    Args:
        file_list (list): List of JSONL file paths.
        parquet_dir (str): Path to store output Parquet files.
        chunk_size (int): Number of records to write to each Parquet file.
    """
    os.makedirs(parquet_dir, exist_ok=True)  # Create output directory
    parquet_file_index = 0
    current_records = []
    file_index = 0

    for file in file_list:
        table = pa.json.read_json(file)  # PyArrow JSON reader
        for batch in table.to_batches():
            pandas_df = batch.to_pandas()
            print(pandas_df.shape)
            current_records.extend(pandas_df.to_dict("records"))
            if len(current_records) >= chunk_size:
                chunk_table = pa.Table.from_pandas(pd.DataFrame(current_records))
                parquet_filename = f"output_{parquet_file_index}.parquet"
                parquet_path = os.path.join(parquet_dir, parquet_filename)
                pq.write_table(chunk_table, parquet_path)
                current_records = []
                parquet_file_index += 1
        file_index += 1
        print(f"Finished processing file {file_index} of {len(file_list)}")

    # Write any remaining records in the last chunk
    print(f"Writing last chunk to parquet file {parquet_file_index}")
    if current_records:
        chunk_table = pa.Table.from_pandas(pd.DataFrame(current_records))
        parquet_filename = f"output_{parquet_file_index}.parquet"
        parquet_path = os.path.join(parquet_dir, parquet_filename)
        pq.write_table(chunk_table, parquet_path)
    print(f"Conversion complete, wrote {parquet_file_index + 1} Parquet files.")
class UsenetConfig(BuilderConfig):
    def __init__(self, version, **kwargs):
        super().__init__(version=version, **kwargs)
class UsenetArchiveIt(ArrowBasedBuilder):
    VERSION = "1.0.0"  # Example version

    BUILDER_CONFIG_CLASS = UsenetConfig

    BUILDER_CONFIGS = [
        UsenetConfig(
            name="usenet_archive_it",
            version=Version("1.0.0"),
            description="Usenet Archive-It dataset",
        ),
    ]

    def _info(self):
        # Specify the dataset features here
        return DatasetInfo(
            features=Features({
                "title": Value("string"),
                "author": Value("string"),
                "id": Value("int32"),
                "timestamp": Value("string"),
                "progressive_number": Value("int32"),
                "original_url": Value("string"),
                # Newsgroup could be a ClassLabel, but enumerating every possible label is impractical
                "newsgroup": Value("string"),
                "text": Value("string"),
            }),
        )
    def _split_generators(self, dl_manager):
        # Number of archives to extract per batch (at least one)
        n = max(1, mp.cpu_count() // 10)
        print(f"Extracting {n} files at a time")

        if not os.path.isdir(parquet_dir):
            extracted_files = []
            for i in range(0, len(paths), n):
                files = paths[i:i + n]
                extracted_files.extend(dl_manager.extract(files, num_proc=len(files)))
                print(f"Extracted {files}")
        else:
            extracted_files = glob.glob(parquet_dir + "/*.parquet")

        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"filepath": extracted_files},
            ),
        ]
    def _generate_tables(self, filepath):
        # If the Parquet files are not present yet, convert the extracted JSONL files
        if not os.path.exists(parquet_dir):
            print("Generating parquet files from jsonl files...")
            convert_jsonl_to_parquet(filepath, parquet_dir)

        # Read the Parquet files and yield one Arrow table per file
        parquet_files = glob.glob(parquet_dir + "/*.parquet")
        for index, file in enumerate(parquet_files):
            table = pq.read_table(file)
            yield index, table
# Finally, expose the builder class (its name is the CamelCase form of the script name)
datasets = UsenetArchiveIt
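# Loading sketch (assumption: this script is saved as usenet_archive_it.py alongside the
# .jsonl.bz2 archives); the builder can then be loaded with the standard datasets API:
#
#   from datasets import load_dataset
#   ds = load_dataset("./usenet_archive_it.py", split="train")
#   print(ds[0]["title"], ds[0]["newsgroup"])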