---
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: validation
        path: data/validation-*
      - split: test
        path: data/test-*
dataset_info:
  features:
    - name: tokens
      sequence: string
    - name: ner_tags
      sequence:
        class_label:
          names:
            '0': O
            '1': B-ORG
            '2': I-ORG
  splits:
    - name: train
      num_bytes: 40381520.59961503
      num_examples: 109424
    - name: validation
      num_bytes: 5782294.96333573
      num_examples: 15908
    - name: test
      num_bytes: 10727120.198367199
      num_examples: 28124
  download_size: 14938552
  dataset_size: 56890935.76131796
---

# Dataset Card for "ner-orgs"

This dataset is a concatenation of subsets of Few-NERD, CoNLL 2003 and OntoNotes v5, keeping only the "B-ORG" and "I-ORG" labels; all other entity labels are mapped to "O".

Exactly half of the samples in each split contain at least one organisation, while the other half contain none.

It was generated using the following script:

```python
import random
from datasets import load_dataset, concatenate_datasets, Features, Sequence, ClassLabel, Value, DatasetDict


FEATURES = Features(
    {
        "tokens": Sequence(feature=Value(dtype="string")),
        "ner_tags": Sequence(feature=ClassLabel(names=["O", "B-ORG", "I-ORG"])),
    }
)


def load_fewnerd():
    def mapper(sample):
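        # Few-NERD's coarse-grained tag 5 is "organization": first collapse every tag
        # to 1 (ORG) or 0 (O), then relabel consecutive 1s after the first one of a run
        # as I-ORG (2), so the result follows the B-ORG / I-ORG scheme.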
        sample["ner_tags"] = [int(tag == 5) for tag in sample["ner_tags"]]
        sample["ner_tags"] = [
            2 if tag == 1 and idx > 0 and sample["ner_tags"][idx - 1] == 1 else tag
            for idx, tag in enumerate(sample["ner_tags"])
        ]
        return sample

    dataset = load_dataset("DFKI-SLT/few-nerd", "supervised")
    dataset = dataset.map(mapper, remove_columns=["id", "fine_ner_tags"])
    dataset = dataset.cast(FEATURES)
    return dataset


def load_conll():
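    # In conll2003, ner_tags 3 and 4 are B-ORG and I-ORG; every other tag becomes O (0)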
    label_mapping = {3: 1, 4: 2}

    def mapper(sample):
        sample["ner_tags"] = [label_mapping.get(tag, 0) for tag in sample["ner_tags"]]
        return sample

    dataset = load_dataset("conll2003")
    dataset = dataset.map(mapper, remove_columns=["id", "pos_tags", "chunk_tags"])
    dataset = dataset.cast(FEATURES)
    return dataset


def load_ontonotes():
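    # In tner/ontonotes5, tags 11 and 12 correspond to B-ORG and I-ORG; every other tag becomes O (0)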
    label_mapping = {11: 1, 12: 2}

    def mapper(sample):
        sample["ner_tags"] = [label_mapping.get(tag, 0) for tag in sample["ner_tags"]]
        return sample

    dataset = load_dataset("tner/ontonotes5")
    dataset = dataset.rename_column("tags", "ner_tags")
    dataset = dataset.map(mapper)
    dataset = dataset.cast(FEATURES)
    return dataset


def has_org(sample):
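    # Any non-zero tag (B-ORG or I-ORG) means the sentence mentions an organisation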
    return bool(sum(sample["ner_tags"]))


def has_no_org(sample):
    return not has_org(sample)


def preprocess_raw_dataset(raw_dataset):
    # Set the number of sentences without an org equal to the number of sentences with an org
    dataset_org = raw_dataset.filter(has_org)
    dataset_no_org = raw_dataset.filter(has_no_org)
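    # Randomly downsample the org-free sentences so both groups are equally large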
    dataset_no_org = dataset_no_org.select(random.sample(range(len(dataset_no_org)), k=len(dataset_org)))
    dataset = concatenate_datasets([dataset_org, dataset_no_org])
    return dataset


def main() -> None:
    fewnerd_dataset = load_fewnerd()
    conll_dataset = load_conll()
    ontonotes_dataset = load_ontonotes()

    raw_train_dataset = concatenate_datasets([fewnerd_dataset["train"], conll_dataset["train"], ontonotes_dataset["train"]])
    raw_eval_dataset = concatenate_datasets([fewnerd_dataset["validation"], conll_dataset["validation"], ontonotes_dataset["validation"]])
    raw_test_dataset = concatenate_datasets([fewnerd_dataset["test"], conll_dataset["test"], ontonotes_dataset["test"]])

    train_dataset = preprocess_raw_dataset(raw_train_dataset)
    eval_dataset = preprocess_raw_dataset(raw_eval_dataset)
    test_dataset = preprocess_raw_dataset(raw_test_dataset)

    dataset_dict = DatasetDict(
        {
            "train": train_dataset,
            "validation": eval_dataset,
            "test": test_dataset,
        }
    )
    dataset_dict.push_to_hub("ner-orgs", private=True)


if __name__ == "__main__":
    main()
```
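
The result can be loaded back with the `datasets` library. A minimal sketch, assuming the dataset is hosted on the Hub as `tomaarsen/ner-orgs`:

```python
from datasets import load_dataset

# Load the train/validation/test splits from the Hub
dataset = load_dataset("tomaarsen/ner-orgs")

# The label names are stored on the ClassLabel feature
labels = dataset["train"].features["ner_tags"].feature.names
print(labels)  # ['O', 'B-ORG', 'I-ORG']

# Check that half of the training sentences contain at least one organisation
with_org = sum(any(tag != 0 for tag in tags) for tags in dataset["train"]["ner_tags"])
print(f"{with_org} / {len(dataset['train'])} train sentences contain an organisation")
```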