import argparse
from glob import glob

from datatrove.executor import LocalPipelineExecutor
from datatrove.pipeline.readers import JsonlReader, ParquetReader
from datatrove.pipeline.tokens import TokensCounter
from datatrove.pipeline.writers.jsonl import JsonlWriter
from datatrove.utils.hashing import HashConfig

from custom_minhash import (
    CustomMinhashConfig,
    CustomMinhashDedupSignature,
    CustomMinhashDedupBuckets,
    CustomMinhashDedupCluster,
    CustomMinhashDedupFilter,
)
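
# Only the hash function (sha1) and 32-bit precision are overridden here; the
# bucket/row counts come from CustomMinhashConfig's own defaults (defined in the
# local custom_minhash module, not shown in this file).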
custom_minhash_config = CustomMinhashConfig(
    hash_config=HashConfig(precision=32, hash_fc='sha1'),
)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="MinHash deduplication over one fineweb-edu-score-2 subfolder")
    parser.add_argument("--sub-folder", type=str, required=True, help="subfolder under the dataset's data/ directory")
    parser.add_argument("--offset", type=int, default=0, required=False, help="local rank offset for the bucket stage (stage 2)")
    # parser.add_argument("--local-task", type=int, default=2, required=False, help="local task number")
    args = parser.parse_args()
    print(args)
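    # Example invocation (script name is illustrative):
    #   python dedup_minhash.py --sub-folder CC-MAIN-2013-20 --offset 0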
    MINHASH_BASE_PATH = "minhash"
    LOGS_FOLDER = "minhash_logs"

    n_file = len(glob(f"/gpfs/public/research/liyizhi/huggingface/datasets/fineweb-edu-score-2/data/{args.sub_folder}/*.parquet"))
    TOTAL_TASKS = n_file
    print(f"Total files in {args.sub_folder}: {n_file}")
    INPUT_READER = ParquetReader(
        "/gpfs/public/research/liyizhi/huggingface/datasets/fineweb-edu-score-2",
        glob_pattern=f"data/{args.sub_folder}/*.parquet",
        batch_size=100_000,
    )
    # INPUT_READER = ParquetReader("data/CC-MAIN-2013-20/", limit=3)
    # INPUT_READER = JsonlReader('jsonl')
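    # The same reader instance is reused by stage 1 and stage 4 so both stages see
    # identical shards; batch_size only controls how many rows are read per batch.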

    # stage 1 computes minhash signatures for each task (each task gets a set of files)
    stage1 = LocalPipelineExecutor(
        # job_name="mh1",
        pipeline=[
            INPUT_READER,
            CustomMinhashDedupSignature(
                output_folder=f"{MINHASH_BASE_PATH}/signatures",
                config=custom_minhash_config,
                naming_prefix=args.sub_folder,
            ),
        ],
        tasks=TOTAL_TASKS,
        # local_tasks=LOCAL_TASKS,
        # local_rank_offset=TASK_OFFSET,
        logging_dir=f"{LOGS_FOLDER}/signatures",
        # slurm_logs_folder=f"{LOCAL_LOGS_FOLDER}/signatures/slurm_logs",
    )
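    # naming_prefix presumably tags signature files with the subfolder name so that
    # runs over different subfolders can share the same signatures directory.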

    # stage 2 finds matches between signatures in each bucket
    stage2 = LocalPipelineExecutor(
        # job_name="mh2",
        pipeline=[
            CustomMinhashDedupBuckets(
                input_folder=f"{MINHASH_BASE_PATH}/signatures",
                output_folder=f"{MINHASH_BASE_PATH}/buckets",
                config=custom_minhash_config,
            ),
        ],
        tasks=custom_minhash_config.num_buckets,
        local_tasks=1,
        local_rank_offset=args.offset,
        # time="90:00:00",
        # partition="hopper-prod",
        logging_dir=f"{LOGS_FOLDER}/buckets",
        # depends=stage1,
    )
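    # With local_tasks=1 and local_rank_offset=args.offset, each invocation of this
    # script processes a single bucket; launch it once per --offset value in
    # [0, num_buckets) to cover all buckets.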

    # stage 3 creates clusters of duplicates using the results from all buckets
    stage3 = LocalPipelineExecutor(
        # job_name="mh3",
        pipeline=[
            CustomMinhashDedupCluster(
                input_folder=f"{MINHASH_BASE_PATH}/buckets",
                output_folder=f"{MINHASH_BASE_PATH}/remove_ids",
                config=custom_minhash_config,
            ),
        ],
        # tasks=1,
        # time="90:00:00",
        # partition="hopper-prod",
        logging_dir=f"{LOGS_FOLDER}/clusters",
        # mem_per_cpu_gb=70,
        # cpus_per_task=2,
        # depends=stage2,
        # slurm_logs_folder=f"{LOCAL_LOGS_FOLDER}/clusters/slurm_logs",
    )
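    # Clustering runs as a single task (LocalPipelineExecutor defaults to tasks=1)
    # and writes the ids of documents to drop into remove_ids/.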

    # stage 4 reads the original input data and removes all but 1 sample per duplicate cluster
    # the data must match exactly stage 1, so number of tasks and the input source must be the same
    stage4 = LocalPipelineExecutor(
        # job_name="mh4",
        pipeline=[
            INPUT_READER,
            TokensCounter(),  # nice way to see how many tokens we had before and after deduplication
            CustomMinhashDedupFilter(
                remove_id_input_folder=f"{MINHASH_BASE_PATH}/remove_ids",
                sig_input_folder=f"{MINHASH_BASE_PATH}/signatures",
                exclusion_writer=JsonlWriter(f"{MINHASH_BASE_PATH}/removed"),
                config=custom_minhash_config,
                naming_prefix=args.sub_folder,
            ),
        ],
        tasks=TOTAL_TASKS,
        # time="50:00:00",
        # partition="hopper-cpu",
        logging_dir=f"{LOGS_FOLDER}/filter",
        depends=stage3,
        # slurm_logs_folder=f"{LOCAL_LOGS_FOLDER}/filter/slurm_logs",
    )
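    # Only stage 3 is launched below. Because stage4 declares depends=stage3,
    # switching the final call to stage4.run() would run clustering first (if it has
    # not completed) and then the filtering pass, which keeps one document per
    # duplicate cluster and writes the removed documents as jsonl under minhash/removed.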
    stage3.run()