Datasets:
from datasets import load_dataset, set_caching_enabled

# Disable the local Arrow cache so every run re-reads and re-processes the data.
set_caching_enabled(False)
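# Note: set_caching_enabled() is deprecated in more recent releases of the
# datasets library; the equivalent call there is disable_caching():
# from datasets import disable_caching
# disable_caching()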
# Load MASSIVE from the Hugging Face Hub; a local loading script ("MASSIVE.py")
# can be pointed to instead.
# source = "MASSIVE.py"
source = "qanastek/MASSIVE"

# "all" pulls every locale at once. Single locales such as "ja-JP", "ko-KR",
# "zh-CN", "fr-FR", or "en-US" can be requested instead, optionally with
# download_mode="force_redownload" to refresh a stale download or split="train"
# to load only one split. Newer datasets releases may also require
# trust_remote_code=True for script-based datasets such as this one.
dataset = load_dataset(source, "all")
# dataset = load_dataset(source, "fr-FR", download_mode="force_redownload")

print(dataset)

# Inspect the first validation example.
f = dataset["validation"][0]
print(f)
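# The per-split column schema (field names and types) can be inspected through
# the split's .features attribute (standard datasets API):
print(dataset["validation"].features)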
# tags = []
# for e in dataset["train"]:
#     tags.extend(e["ner_tags"])
# print("#" * 50)
# print(list(set(tags)))
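A runnable sketch of the commented-out sweep above. It assumes each example carries a ner_tags field, which the snippet already relies on; the ClassLabel branch is a hedge for the case where the loader stores integer tag ids rather than raw strings.

# Collect every distinct NER tag value that occurs in the training split.
tags = set()
for example in dataset["train"]:
    tags.update(example["ner_tags"])

# If ner_tags is a ClassLabel-backed sequence, map the integer ids back to
# their string names; otherwise print the raw values.
ner_feature = dataset["train"].features["ner_tags"]
inner = getattr(ner_feature, "feature", None)
if inner is not None and hasattr(inner, "int2str"):
    print(sorted(inner.int2str(t) for t in tags))
else:
    print(sorted(tags))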