"""Create a GPT-Neo model configuration from environment variables and save it to MODEL_PATH."""
import os

from transformers import GPTNeoConfig

# Expected environment variables, e.g.:
# export DATASET="${HOME}/data/nedd_wiki_news/nedd_wiki_news.py" # Name of the dataset on the Hugging Face Hub
# export DATASET_CONFIG="nedd_nl" # Config of the dataset on the Hugging Face Hub
# export DATASET_SPLIT="train" # Split to use for training tokenizer and model
# export VOCAB_SIZE="50257"
# export MODEL_PATH="${HOME}/data/${HF_PROJECT}" # Path to the model, e.g. here inside the mount
# export CONFIG_TYPE="gpt2-medium" # Config that our model will use
# Read the settings from the environment.
config_type = os.environ.get("CONFIG_TYPE")
# The dataset settings are read here but not used below; only CONFIG_TYPE, VOCAB_SIZE and MODEL_PATH matter.
dataset_name = os.environ.get("DATASET")
dataset_config = os.environ.get("DATASET_CONFIG")
dataset_split = os.environ.get("DATASET_SPLIT")
vocab_size = int(os.environ.get("VOCAB_SIZE"))  # raises if VOCAB_SIZE is not set
model_path = os.environ.get("MODEL_PATH")
# Start from the base config named by CONFIG_TYPE, disable dropout and set the vocabulary size.
config = GPTNeoConfig.from_pretrained(
    config_type,
    embed_dropout=0.0,
    attention_dropout=0.0,
    vocab_size=vocab_size,
)
# Write config.json to MODEL_PATH.
config.save_pretrained(model_path)
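
# Optional sanity check (a sketch, not part of the original file): reload the config that
# was just written to MODEL_PATH and print the key model dimensions. GPTNeoConfig always
# defines vocab_size, hidden_size and num_layers, so this works for any CONFIG_TYPE.
reloaded = GPTNeoConfig.from_pretrained(model_path)
print(
    f"Saved GPT-Neo config: vocab_size={reloaded.vocab_size}, "
    f"hidden_size={reloaded.hidden_size}, num_layers={reloaded.num_layers}"
)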