---
# Dataset-mix configuration for the "french_30b" corpus build.
# `():` keys are instantiation targets resolved by the config loader
# (dataset_construction / dataset_collection modules).
data_mix:
  (): dataset_construction.DataMix
  name: "french_30b"
  shuffle: false
  compute_dataset_stats: true
  local_save_dir: data/
  load_from_local_save_dir: true
  keep_separated_datasets_in_dataset_dict: true
  deduplicate_test_set: false
  ngram_path_for_extra_deduplication: null
  max_shard_size: "2GB"
  datasets:
    # Translation datasets
    # - (): dataset_construction.DatasetConfig
    #   dataset_path: manu/opus100-en-fr
    #   test_split: "test"
    #   build_test_set_from_train: false
    #   num_train_examples: 1000
    #   num_test_examples: 1000
    # - (): dataset_construction.DatasetConfig
    #   dataset_path: manu/europarl-en-fr
    #   build_test_set_from_train: true
    #   num_test_examples: 1000
    - (): dataset_construction.DatasetConfig
      dataset_path: manu/wmt-en-fr
      test_split: "test"
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/English_French_Webpages_Scraped_Translated  # slow because of bad format
      train_split: "train[:5%]"
      build_test_set_from_train: true
      filtering_function:
        (): dataset_collection.french.web_scraped_translated.WebPageFilter
      preprocessing_function:
        (): dataset_collection.french.web_scraped_translated.WebPageMapper
    # global voices can be added but maybe in wmt test set
    # Speech datasets
    - (): dataset_construction.DatasetConfig
      dataset_path: manu/french_librispeech_text_only
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: manu/french_podcasts  # continue scraping
      build_test_set_from_train: true
      num_test_examples: 100
      filtering_function:
        (): dataset_collection.french.french_transcribed_podcast.PodcastFilter
      preprocessing_function:
        (): dataset_collection.french.french_transcribed_podcast.PodcastMapper
    - (): dataset_construction.DatasetConfig
      dataset_path: manu/french_open_subtitles
      num_test_examples: 100
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/Original_Songs_Lyrics_with_French_Translation
      build_test_set_from_train: true
      filtering_function:
        (): dataset_collection.french.french_translated_lyrics.LyricsFilter
      preprocessing_function:
        (): dataset_collection.french.french_translated_lyrics.LyricsMapper
    # Books
    # - (): dataset_construction.DatasetConfig
    #   dataset_path: manu/project_gutenberg
    #   train_split: fr
    #   build_test_set_from_train: true
    #   num_train_examples: 1000
    #   num_test_examples: 100
    #   here we should preprocess
    - (): dataset_construction.DatasetConfig
      dataset_path: manu/ProjectGutenberg_fr  # needs to be filtered - beginning and end of books
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig  # needs to be filtered and expanded - perplexity filtering
      dataset_path: manu/bnf_gallica
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: manu/theses_fr_2013_2023
      build_test_set_from_train: true
      filtering_function:
        (): dataset_collection.french.theses.ThesisFilter
      preprocessing_function:
        (): dataset_collection.french.theses.ThesisMapper
    # Dila data
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/LEGI_opendata  # manu/dila_legifrance
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/BALO_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/JADE_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/DOLE_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/SARDE_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/QR_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/JORF_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/INCA_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/ACCO_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/KALI_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/DEBATS_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/CNIL_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/CAPP_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/CASS_opendata
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: Nicolas-BZRD/CONSTIT_opendata
      build_test_set_from_train: true
    # needs a preprocessor to have an id_column
    # PDF dataset
    - (): dataset_construction.DatasetConfig
      dataset_path: manu/illuin_layout_dataset_text_only
      build_test_set_from_train: true
    # Wiki
    - (): dataset_construction.DatasetConfig
      dataset_path: manu/wikisource_fr
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: wikipedia
      dataset_name: "20220301.fr"
      build_test_set_from_train: true
    # Internet dumps
    - (): dataset_construction.DatasetConfig
      dataset_path: "oscar-corpus/OSCAR-2301"
      dataset_name: fr
      train_split: "train[:10%]"
      build_test_set_from_train: true
      num_test_examples: 10000
      filtering_function:
        (): dataset_collection.french.oscar.OscarFilter

# NOTE(review): tokenizer reconstructed as a top-level key (sibling of
# data_mix) — confirm nesting against the loader's expected schema.
# tokenizer: "mistralai/Mistral-7B-v0.1"
tokenizer: "manu/tok-fr-en-code"