---
language:
  - ab
  - ace
  - ady
  - af
  - alt
  - am
  - ami
  - an
  - ang
  - anp
  - ar
  - arc
  - ary
  - arz
  - as
  - ast
  - atj
  - av
  - avk
  - awa
  - ay
  - az
  - azb
  - ba
  - ban
  - bar
  - bbc
  - bcl
  - be
  - bg
  - bh
  - bi
  - bjn
  - blk
  - bm
  - bn
  - bo
  - bpy
  - br
  - bs
  - bug
  - bxr
  - ca
  - cbk
  - cdo
  - ce
  - ceb
  - ch
  - chr
  - chy
  - ckb
  - co
  - cr
  - crh
  - cs
  - csb
  - cu
  - cv
  - cy
  - da
  - dag
  - de
  - dga
  - din
  - diq
  - dsb
  - dty
  - dv
  - dz
  - ee
  - el
  - eml
  - en
  - eo
  - es
  - et
  - eu
  - ext
  - fa
  - fat
  - ff
  - fi
  - fj
  - fo
  - fon
  - fr
  - frp
  - frr
  - fur
  - fy
  - ga
  - gag
  - gan
  - gcr
  - gd
  - gl
  - glk
  - gn
  - gom
  - gor
  - got
  - gpe
  - gsw
  - gu
  - guc
  - gur
  - guw
  - gv
  - ha
  - hak
  - haw
  - he
  - hi
  - hif
  - hr
  - hsb
  - ht
  - hu
  - hy
  - hyw
  - ia
  - id
  - ie
  - ig
  - ik
  - ilo
  - inh
  - io
  - is
  - it
  - iu
  - ja
  - jam
  - jbo
  - jv
  - ka
  - kaa
  - kab
  - kbd
  - kbp
  - kcg
  - kg
  - ki
  - kk
  - kl
  - km
  - kn
  - ko
  - koi
  - krc
  - ks
  - ksh
  - ku
  - kv
  - kw
  - ky
  - la
  - lad
  - lb
  - lbe
  - lez
  - lfn
  - lg
  - li
  - lij
  - lld
  - lmo
  - ln
  - lo
  - lt
  - ltg
  - lv
  - mad
  - mai
  - map
  - mdf
  - mg
  - mhr
  - mi
  - min
  - mk
  - ml
  - mn
  - mni
  - mnw
  - mr
  - mrj
  - ms
  - mt
  - mwl
  - my
  - myv
  - mzn
  - nah
  - nap
  - nds
  - ne
  - new
  - nia
  - nl
  - nn
  - 'no'
  - nov
  - nqo
  - nrf
  - nso
  - nv
  - ny
  - oc
  - olo
  - om
  - or
  - os
  - pa
  - pag
  - pam
  - pap
  - pcd
  - pcm
  - pdc
  - pfl
  - pi
  - pih
  - pl
  - pms
  - pnb
  - pnt
  - ps
  - pt
  - pwn
  - qu
  - rm
  - rmy
  - rn
  - ro
  - ru
  - rue
  - rup
  - rw
  - sa
  - sah
  - sat
  - sc
  - scn
  - sco
  - sd
  - se
  - sg
  - sgs
  - sh
  - shi
  - shn
  - si
  - sk
  - skr
  - sl
  - sm
  - smn
  - sn
  - so
  - sq
  - sr
  - srn
  - ss
  - st
  - stq
  - su
  - sv
  - sw
  - szl
  - szy
  - ta
  - tay
  - tcy
  - te
  - tet
  - tg
  - th
  - ti
  - tk
  - tl
  - tly
  - tn
  - to
  - tpi
  - tr
  - trv
  - ts
  - tt
  - tum
  - tw
  - ty
  - tyv
  - udm
  - ug
  - uk
  - ur
  - uz
  - ve
  - vec
  - vep
  - vi
  - vls
  - vo
  - vro
  - wa
  - war
  - wo
  - wuu
  - xal
  - xh
  - xmf
  - yi
  - yo
  - za
  - zea
  - zgh
  - zh
  - zu
license:
  - cc-by-sa-3.0
  - gfdl
size_categories:
  - n<1K
  - 1K<n<10K
  - 10K<n<100K
  - 100K<n<1M
  - 1M<n<10M
task_categories:
  - text-generation
  - fill-mask
task_ids:
  - language-modeling
  - masked-language-modeling
configs:
  - config_name: 20230701.ca
    data_files:
      - split: train
        path: 20230701.ca/train-*
  - config_name: 20230701.fa
    data_files:
      - split: train
        path: 20230701.fa/train-*
  - config_name: 20231101.ar
    data_files:
      - split: train
        path: 20231101.ar/train-*
  - config_name: 20231101.ca
    data_files:
      - split: train
        path: 20231101.ca/train-*
  - config_name: 20231101.es
    data_files:
      - split: train
        path: 20231101.es/train-*
  - config_name: 20231101.fa
    data_files:
      - split: train
        path: 20231101.fa/train-*
  - config_name: 20231101.it
    data_files:
      - split: train
        path: 20231101.it/train-*
  - config_name: 20231101.ja
    data_files:
      - split: train
        path: 20231101.ja/train-*
  - config_name: 20231101.pl
    data_files:
      - split: train
        path: 20231101.pl/train-*
  - config_name: 20231101.pt
    data_files:
      - split: train
        path: 20231101.pt/train-*
  - config_name: 20231101.ru
    data_files:
      - split: train
        path: 20231101.ru/train-*
  - config_name: 20231101.uk
    data_files:
      - split: train
        path: 20231101.uk/train-*
  - config_name: 20231101.vi
    data_files:
      - split: train
        path: 20231101.vi/train-*
  - config_name: 20231101.war
    data_files:
      - split: train
        path: 20231101.war/train-*
  - config_name: 20231101.zh
    data_files:
      - split: train
        path: 20231101.zh/train-*
dataset_info:
  - config_name: 20230701.ca
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 1927007948
        num_examples: 730361
    download_size: 1097512880
    dataset_size: 1927007948
  - config_name: 20230701.fa
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 1862091485
        num_examples: 967199
    download_size: 742864584
    dataset_size: 1862091485
  - config_name: 20231101.ar
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 3124486159
        num_examples: 1219201
    download_size: 1323304271
    dataset_size: 3124486159
  - config_name: 20231101.ca
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 1958810542
        num_examples: 737409
    download_size: 1116799343
    dataset_size: 1958810542
  - config_name: 20231101.es
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 6033536133
        num_examples: 1841155
    download_size: 3493595869
    dataset_size: 6033536133
  - config_name: 20231101.fa
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 1899154938
        num_examples: 979869
    download_size: 759368283
    dataset_size: 1899154938
  - config_name: 20231101.it
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 4924856310
        num_examples: 1833639
    download_size: 2931265519
    dataset_size: 4924856310
  - config_name: 20231101.ja
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 7039610767
        num_examples: 1389467
    download_size: 3941998526
    dataset_size: 7039610767
  - config_name: 20231101.pl
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 2950148809
        num_examples: 1587721
    download_size: 1765059986
    dataset_size: 2950148809
  - config_name: 20231101.pt
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 2758783436
        num_examples: 1112246
    download_size: 1579641059
    dataset_size: 2758783436
  - config_name: 20231101.ru
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 10277958919
        num_examples: 1945063
    download_size: 4876849588
    dataset_size: 10277958919
  - config_name: 20231101.uk
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 4969483901
        num_examples: 1294720
    download_size: 2276769383
    dataset_size: 4969483901
  - config_name: 20231101.vi
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 1617830227
        num_examples: 1288680
    download_size: 729557588
    dataset_size: 1617830227
  - config_name: 20231101.war
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 467647882
        num_examples: 1266394
    download_size: 104588442
    dataset_size: 467647882
  - config_name: 20231101.zh
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 2790577882
        num_examples: 1384748
    download_size: 1721150260
    dataset_size: 2790577882
language_bcp47:
  - be-tarask
  - en-simple
  - zh-classical
  - zh-min-nan
  - zh-yue
---

# Dataset Card for Wikimedia Wikipedia

## Dataset Description

### Dataset Summary

Wikipedia dataset containing cleaned articles of all languages.

The dataset is built from the Wikipedia dumps (https://dumps.wikimedia.org/), with one subset per language, each containing a single train split.

Each example contains the content of one full Wikipedia article, cleaned to strip markdown and unwanted sections (references, etc.).

All language subsets have already been processed for the most recent dump, and you can load them by date and language as follows:

```python
from datasets import load_dataset

# Load the English subset of the November 2023 dump
load_dataset("wikipedia", "20231101.en")
```

### Supported Tasks and Leaderboards

The dataset is generally used for Language Modeling.

### Languages

You can find the list of languages here: https://meta.wikimedia.org/wiki/List_of_Wikipedias

## Dataset Structure

### Data Instances

An example looks as follows:

```python
{'id': '1',
 'url': 'https://simple.wikipedia.org/wiki/April',
 'title': 'April',
 'text': 'April is the fourth month...'
}
```

### Data Fields

The data fields are the same among all configurations:

- `id` (`str`): ID of the article.
- `url` (`str`): URL of the article.
- `title` (`str`): Title of the article.
- `text` (`str`): Text content of the article.
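
For a quick sanity check, these fields can be read straight off a loaded example. A minimal sketch (subset name assumed, as above):

```python
from datasets import load_dataset

ds = load_dataset("wikipedia", "20231101.en", split="train")
example = ds[0]

# The four string fields described above
print(example["id"], example["url"])
print(example["title"])
print(example["text"][:200])  # first 200 characters of the article body
```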

### Data Splits

All configurations contain a single `train` split.

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

The dataset is built from the Wikipedia dumps: https://dumps.wikimedia.org

You can find the full list of languages and dates here: https://dumps.wikimedia.org/backup-index.html

The articles have been parsed using the mwparserfromhell tool.
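
mwparserfromhell parses raw wikitext into a node tree whose markup can then be stripped to plain text. A minimal sketch of that step (illustrative only, not the exact cleaning pipeline used to build this dataset):

```python
import mwparserfromhell

raw_wikitext = "'''April''' is the fourth [[month]] of the year.<ref>a source</ref>"

# Parse the wikitext, then strip templates, link syntax, and invisible tags
wikicode = mwparserfromhell.parse(raw_wikitext)
plain_text = wikicode.strip_code()

print(plain_text)  # roughly: "April is the fourth month of the year."
```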

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

Copyright licensing information: https://dumps.wikimedia.org/legal.html

All original textual content is licensed under the GNU Free Documentation License (GFDL) and the Creative Commons Attribution-Share-Alike 3.0 License. Some text may be available only under the Creative Commons license; see the Wikimedia Terms of Use for details. Text written by some authors may be released under additional licenses or into the public domain.

### Citation Information

```bibtex
@ONLINE{wikidump,
    author = "Wikimedia Foundation",
    title  = "Wikimedia Downloads",
    url    = "https://dumps.wikimedia.org"
}
```