python_code | repo_name | file_path
---|---|---
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from http import HTTPStatus
from typing import Any
from libcommon.constants import PROCESSING_STEP_DATASET_INFO_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import (
CacheEntryDoesNotExistError,
get_previous_step_or_raise,
get_response,
)
from worker.dtos import DatasetInfoResponse, JobResult, PreviousJob
from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
def compute_dataset_info_response(dataset: str) -> tuple[DatasetInfoResponse, float]:
"""
Get the response of dataset-info for one specific dataset on huggingface.co.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
Returns:
(`DatasetInfoResponse`, `float`): Tuple of an object with the dataset_info response and
a progress float between 0. and 1. corresponding to the fraction of dataset configs
successfully processed and included in the current response (some configs might not exist
in the cache yet or may have raised errors).
Raises the following errors:
- [`libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`libcommon.exceptions.PreviousStepFormatError`]
If the content of the previous step doesn't have the expected format.
"""
logging.info(f"get dataset_info for {dataset=}")
config_names_best_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
content = config_names_best_response.response["content"]
if "config_names" not in content:
raise PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.")
try:
config_infos: dict[str, Any] = {}
total = 0
pending, failed = [], []
partial = False
for config_item in content["config_names"]:
config = config_item["config"]
total += 1
try:
config_response = get_response(kind="config-info", dataset=dataset, config=config)
except CacheEntryDoesNotExistError:
logging.debug(f"No response found in previous step for {dataset=} {config=}: 'config-info'.")
pending.append(
PreviousJob(
kind="config-info",
dataset=dataset,
config=config,
split=None,
)
)
continue
if config_response["http_status"] != HTTPStatus.OK:
logging.debug(f"Previous step gave an error: {config_response['http_status']}")
failed.append(
PreviousJob(
kind="config-info",
dataset=dataset,
config=config,
split=None,
)
)
continue
config_infos[config] = config_response["content"]["dataset_info"]
partial = partial or config_response["content"]["partial"]
except Exception as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
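# Note: configs whose cache entry exists but errored still count as processed;
# progress only discounts configs that are missing from the cache ("pending").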
progress = (total - len(pending)) / total if total else 1.0
return DatasetInfoResponse(dataset_info=config_infos, pending=pending, failed=failed, partial=partial), progress
class DatasetInfoJobRunner(DatasetJobRunner):
@staticmethod
def get_job_type() -> str:
return "dataset-info"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_DATASET_INFO_VERSION
def compute(self) -> JobResult:
response_content, progress = compute_dataset_info_response(dataset=self.dataset)
return JobResult(response_content, progress=progress)
| datasets-server-main | services/worker/src/worker/job_runners/dataset/info.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Optional
from datasets import get_dataset_config_names
from datasets.data_files import EmptyDatasetError as _EmptyDatasetError
from libcommon.constants import PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION
from libcommon.exceptions import (
ConfigNamesError,
DatasetModuleNotInstalledError,
DatasetWithTooManyConfigsError,
EmptyDatasetError,
)
from worker.dtos import CompleteJobResult, ConfigNameItem, DatasetConfigNamesResponse
from worker.job_runners.dataset.dataset_job_runner import (
DatasetJobRunnerWithDatasetsCache,
)
def compute_config_names_response(
dataset: str,
max_number: int,
hf_token: Optional[str] = None,
) -> DatasetConfigNamesResponse:
"""
Get the response of dataset-config-names for one specific dataset on huggingface.co.
Dataset can be private or gated if you pass an acceptable token.
It is assumed that the dataset exists and can be accessed using the token.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
max_number (`int`):
The maximum number of configs allowed; exceeding it raises an error.
hf_token (`str`, *optional*):
An authentication token (See https://huggingface.co/settings/token)
Returns:
`DatasetConfigNamesResponse`: An object with the list of config names.
Raises the following errors:
- [`libcommon.exceptions.EmptyDatasetError`]
The dataset is empty.
- [`libcommon.exceptions.DatasetModuleNotInstalledError`]
The dataset tries to import a module that is not installed.
- [`libcommon.exceptions.ConfigNamesError`]
If the list of configs could not be obtained using the datasets library.
- [`libcommon.exceptions.DatasetWithTooManyConfigsError`]
If the dataset has more than the allowed number of configs.
"""
logging.info(f"get config names for dataset={dataset}")
# get the list of config names
try:
config_name_items: list[ConfigNameItem] = [
{"dataset": dataset, "config": str(config)}
for config in sorted(get_dataset_config_names(path=dataset, token=hf_token))
]
except _EmptyDatasetError as err:
raise EmptyDatasetError("The dataset is empty.", cause=err) from err
except ImportError as err:
raise DatasetModuleNotInstalledError(
"The dataset tries to import a module that is not installed.", cause=err
) from err
except Exception as err:
raise ConfigNamesError("Cannot get the config names for the dataset.", cause=err) from err
number_of_configs = len(config_name_items)
if number_of_configs > max_number:
raise DatasetWithTooManyConfigsError(
f"The maximum number of configs allowed is {max_number}, dataset has {number_of_configs} configs."
)
return DatasetConfigNamesResponse(config_names=config_name_items)
class DatasetConfigNamesJobRunner(DatasetJobRunnerWithDatasetsCache):
@staticmethod
def get_job_type() -> str:
return "dataset-config-names"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION
def compute(self) -> CompleteJobResult:
return CompleteJobResult(
compute_config_names_response(
dataset=self.dataset,
hf_token=self.app_config.common.hf_token,
max_number=self.app_config.config_names.max_number,
)
)
| datasets-server-main | services/worker/src/worker/job_runners/dataset/config_names.py |
import argparse
import glob
import multiprocessing as mp
import os
import pickle
import random
import struct
import numpy as np
from datasets import load_from_disk
from tqdm import tqdm
from transformers import GPT2Tokenizer, T5Tokenizer
parser = argparse.ArgumentParser(description="Load a dataset.")
parser.add_argument("--name", type=str, required=True)
parser.add_argument("--save_dir", type=str, required=True)
parser.add_argument("--source_dir", type=str, required=True)
parser.add_argument("--split", type=str, default="train")
parser.add_argument("--tokenize", action="store_true")
parser.add_argument("--tokenizer", type=str, default="gpt2")
parser.add_argument("--pre_sep", type=bytes, default=b"\xff\xff")
parser.add_argument("--post_sep", type=bytes, default=b"")
parser.add_argument("--sampling_rate", type=float, default=1.0)
args = parser.parse_args()
if args.tokenize:
if args.tokenizer == "gpt2":
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
elif args.tokenizer == "t5":
tokenizer = T5Tokenizer.from_pretrained("t5-small")
else:
raise ValueError(f"Unsupported tokenizer: {args.tokenizer}")
split = args.split
dataset_name = args.name
source_dir = args.source_dir + "/" + dataset_name
save_dir = args.save_dir + "/" + dataset_name + "_" + str(args.sampling_rate).replace(".", "")
print("Loading", dataset_name)
ds = load_from_disk(source_dir)
ds = ds[split]
print("Done loading")
print("Sampling rate", args.sampling_rate)
pre_sep = args.pre_sep
post_sep = args.post_sep
UID = 0
def sep():
global UID
UID += 1
return pre_sep + struct.pack("<I", UID) + post_sep
def tok(x):
if args.tokenize:
out = tokenizer.encode(x.decode("utf8"))
out = np.array(out, dtype=np.uint16).view(np.uint8).tobytes()
else:
out = x
return out
if not os.path.exists(save_dir):
os.mkdir(save_dir)
fout = open(os.path.join(save_dir, dataset_name + "." + split), "wb")
pos_to_id = {}
#with mp.get_context("fork").Pool(mp.cpu_count()) as p:
sizes = [0]
# IF ROOTS:
# for directory in glob.glob(source_dir + "*"):
# print(directory)
# ds = load_from_disk(directory)
# ds = ds[split]
for i, b in tqdm(enumerate(ds)):
if random.random() > args.sampling_rate:
continue
if b["text_length"] > 188944:
continue
next_line = sep() + b["text"].encode("utf8")
fout.write(next_line)
pos_to_id[sizes[-1]] = i
sizes.append(sizes[-1] + len(next_line))
open(os.path.join(save_dir, dataset_name + "." + split + ".size"), "wb").write(
np.array(sizes, dtype=np.uint64).tobytes()
)
with open(os.path.join(save_dir, dataset_name + "." + split + ".pos2id.pkl"), "wb") as f:
pickle.dump(pos_to_id, f)
| datablations-main | filtering/deduplication/hf_dataset_to_file.py |
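A minimal reader sketch for the files written by the script above (an addition for illustration, not part of the original repo), assuming the default separators and no tokenization: the `.size` file holds cumulative uint64 byte offsets into the concatenated output, and each record starts with `pre_sep` plus a little-endian uint32 UID.

import struct

import numpy as np


def read_record(data_path, size_path, record_idx, pre_sep=b"\xff\xff"):
    # cumulative byte offsets; record i spans [sizes[i], sizes[i + 1])
    sizes = np.frombuffer(open(size_path, "rb").read(), dtype=np.uint64)
    start, end = int(sizes[record_idx]), int(sizes[record_idx + 1])
    with open(data_path, "rb") as f:
        f.seek(start)
        raw = f.read(end - start)
    # strip the separator: pre_sep followed by the 4-byte little-endian UID
    header_len = len(pre_sep) + 4
    (uid,) = struct.unpack("<I", raw[len(pre_sep):header_len])
    return uid, raw[header_len:].decode("utf8")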
import argparse
import glob
import os
from functools import partial
from multiprocessing import Pool, cpu_count
from datasets import DatasetDict, load_from_disk
def save_dataset(dataset_name, base_dir, sample_size=100000, token=None):
print("Processing", dataset_name)
ds = load_from_disk(base_dir + dataset_name)
ds = ds.shuffle()  # Dataset.shuffle returns a new dataset; without assignment it is a no-op
while sample_size > len(ds["train"]):
sample_size //= 10
small_ds = DatasetDict({"train": ds["train"].select(range(sample_size))})
small_ds.push_to_hub("ola13/small-" + dataset_name, private=True, token=token)
print("Pushed", dataset_name, "to hub.")
if __name__ == "__main__":
"""
Run the following in the terminal where you execute the code (replace the XXX with your actual token):
```
export HUGGINGFACE_TOKEN=hf_XXXXXXXXXXXXXXX
```
"""
HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
if HUGGINGFACE_TOKEN is None:
raise RuntimeError("Hugging Face token not specified.")
parser = argparse.ArgumentParser()
parser.add_argument(
"--base_dir",
type=str,
required=True,
)
parser.add_argument(
"--sample_size",
type=int,
default=100000,
)
args = parser.parse_args()
files = glob.glob(args.base_dir + "/*")
datasets = [f.split("/")[-1] for f in files]
print(datasets)
workers = cpu_count()
print("Number of workers:", workers)
pool = Pool(workers)
pool.map(
partial(save_dataset, base_dir=args.base_dir, sample_size=args.sample_size, token=HUGGINGFACE_TOKEN),
datasets,
)
pool.close()
pool.join()
| datablations-main | filtering/deduplication/save_dataset_sample.py |
import os
from multiprocessing import cpu_count
from datasets import load_dataset
HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
print(HUGGINGFACE_TOKEN)
oscar = load_dataset(
"oscar-corpus/OSCAR-2201", "en", use_auth_token=HUGGINGFACE_TOKEN, num_proc=cpu_count(), ignore_verifications=True
)
oscar.save_to_disk("/home/piktus_huggingface_co/lumi/oscar/")
| datablations-main | filtering/deduplication/save_dataset.py |
import os
from collections import Counter
from datasets import load_dataset
HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
oscar = load_dataset(
"oscar-corpus/OSCAR-2201",
"en",
use_auth_token=HUGGINGFACE_TOKEN,
num_proc=128,
ignore_verifications=True,
)
# oscar.save_to_disk("/home/piktus_huggingface_co/lumi/oscar/")
oscar_ids = oscar["train"]["id"]
print("Number of Oscar IDs", len(oscar_ids))
unique_ids = Counter(oscar_ids)
print(unique_ids.most_common(10))
| datablations-main | filtering/deduplication/download_oscar.py |
import jsonlines
from collections import defaultdict
from datasets import load_from_disk, load_dataset, concatenate_datasets
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
oscar = load_from_disk("/home/piktus_huggingface_co/lumi/preprocessed_data/oscar-dedup-exapanded/")
# oscar = load_dataset("ola13/small-oscar")["train"]
oscar_shards = {}
for i in tqdm(range(160)):
oscar_shards[i] = oscar.shard(num_shards=160, index=i)
def filter_shards(shard_id):
print("Processing shard {}".format(shard_id))
shard_lines = []
for line in tqdm(oscar_shards[shard_id]):
# if len(line["text"]) < 500:
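# keep only documents without detected duplication, whether or not
# they were part of the dedup index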
if (line["included_in_dedup"] and line["dup_ratio"] == 0.0) or ((not line["included_in_dedup"]) and (not line["has_dup_25"])):
shard_lines.append({"text": line["text"]})
return shard_lines
pool = Pool(160)
results = pool.map(filter_shards, [i for i in range(160)])
pool.close()
pool.join()
with jsonlines.open('/home/piktus_huggingface_co/lumi/preprocessed_data/oscar-dedup-25-exapanded.jsonl', mode='w') as writer:
for shard_lines in results:
writer.write_all(shard_lines)
| datablations-main | filtering/deduplication/filter_oscar_jsonl.py |
import argparse
import os
import sys
from datasets import load_dataset
sys.path.append("/home/piktus_huggingface_co/lumi/text-dedup")
print(sys.path)
from text_dedup.suffix_array import suffix_array
def get_args():
"""
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, required=True, help="Path to the dataset you're using on the HF hub. Pass e.g. `csv` or `json` and `data_files=path_on_disk` to load something locally")
parser.add_argument('--subset', type=str, default=None, help="Subset of the dataset you're using, if needed")
parser.add_argument('--data_files', type=str, default=None, help="Path to the dataset on disk if using local files")
parser.add_argument('--path_on_disk', type=str, required=True, help="Path to the Rust dedup implem on your disk, see https://github.com/google-research/deduplicate-text-datasets")
parser.add_argument('--cache_dir', type=str, required=True, help="Where all the suffix tree files will get built")
return parser.parse_args()
"""
def generator_from_dataset(dataset):
for item in dataset:
yield item["text"]
if __name__ == "__main__":
# args = get_args()
# dataset = load_dataset(args.name, args.subset, data_files=args.data_files, use_auth_token=True, split="train")
# corpus = generator_from_dataset(dataset)
ds = load_dataset("ola13/small-oscar", use_auth_token=os.environ.get("HUGGINGFACE_TOKEN"))
deduplicator = suffix_array(
ds["train"],
dedup_name="test",
k=10,
merge_strategy="overlapping",
google_repo_path="/home/piktus_huggingface_co/lumi/deduplicate-text-datasets/",
output_dir="/mnt/disks/looking_glass_storage/dedup",
column="text",
)
# suffix_array(k=10, merge_strategy='overlapping', google_repo_path=args.path_on_disk, cache_dir=args.cache_dir)
# slices = deduplicator.fit_predict(corpus)
# for sentence, intervals in zip(corpus, slices):
# print(sentence)
# print([sentence.encode('utf-8')[s].decode('utf-8', errors='ignore') for s in intervals])
| datablations-main | filtering/deduplication/suffix_dedup.py |
from datasets import load_from_disk
import string
def find_whitespace(text):
for i, c in enumerate(text):
if c in string.whitespace:
yield i
def get_segmentation(text, passage_tokens, overlap_tokens):
whitespace_idx = [-1] + list(find_whitespace(text))
unique_tokens = passage_tokens - overlap_tokens
passages = []
for i in range(0, len(whitespace_idx), unique_tokens):
if i + passage_tokens >= len(whitespace_idx):
passages.append((whitespace_idx[i] + 1, len(text)))
break
passages.append((whitespace_idx[i] + 1, whitespace_idx[i + passage_tokens] + 1))
return passages
if __name__ == "__main__":
oscar = load_from_disk("/home/piktus_huggingface_co/lumi/preprocessed_data/oscar_025")["train"]
with open("/home/piktus_huggingface_co/lumi/preprocessed_data/oscar_025/queries.txt", "w") as queries:
for line in oscar:
text = line["text"]
whitespace_idx = [-1] + list(find_whitespace(text))
for i in whitespace_idx:
if i + 101 < len(text):
queries.write(text[i+1:i+101] + "\n")
| datablations-main | filtering/deduplication/dedup_oscar.py |
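A quick illustration of the `get_segmentation` helper above (added for clarity; the toy input is invented): it yields character spans covering `passage_tokens` whitespace-delimited words each, overlapping the previous span by `overlap_tokens` words.

# assumes get_segmentation from the file above is in scope
text = "a b c d e f g h"
for start, end in get_segmentation(text, passage_tokens=4, overlap_tokens=2):
    print(repr(text[start:end]))
# prints 'a b c d ', 'c d e f ', 'e f g h'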
import argparse
import ast
import glob
import os
from datasets import DatasetDict, concatenate_datasets, load_from_disk
def get_perplexity(meta):
meta = ast.literal_eval(meta) if isinstance(meta, str) else meta
perplexity_score = meta["perplexity_score"]
return float(perplexity_score)
if __name__ == "__main__":
"""
Export your huggingface token which gives access to the `bigscience-catalogue-lm-data` organization.
Run the following in the terminal where you execute the code (replace the XXX with your actual token):
```
export HUGGINGFACE_TOKEN=hf_XXXXXXXXXXXXXXX
```
"""
HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
if HUGGINGFACE_TOKEN is None:
raise RuntimeError("Hugging Face token not specified.")
parser = argparse.ArgumentParser()
parser.add_argument(
"--base_dir",
type=str,
default="/mnt/disks/looking_glass_storage/data/perplexity_filtered/lumi/",
)
parser.add_argument(
"--sample_size",
type=int,
default=100000,
)
args = parser.parse_args()
files = glob.glob(args.base_dir + "roots_en_*")
dataset_names = [f.split("/")[-1] for f in files]
print(dataset_names)
datasets = []
for dataset_name in dataset_names:
print("Processing", dataset_name)
ds = load_from_disk(args.base_dir + dataset_name)
ds = ds["train"]
updated_dataset = ds.map(
lambda example: {
"text": example["text"],
"meta": {"perplexity_score": get_perplexity(example["meta"])},
},
num_proc=48,
)
datasets.append(updated_dataset)
roots_en = concatenate_datasets(datasets)
roots_en.shuffle()
small_roots_en = DatasetDict({"train": roots_en.select(range(args.sample_size))})
small_roots_en.push_to_hub("ola13/small-roots_en", private=True, token=HUGGINGFACE_TOKEN)
print("Pushed roots_en to hub.")
| datablations-main | filtering/deduplication/save_roots_sample.py |
import argparse
import os
import pickle
from bisect import bisect_right
from collections import defaultdict
from multiprocessing import cpu_count
from datasets import load_from_disk
from tqdm import tqdm
def get_pairs(byterange):
"""
Returns pairs generated by
https://github.com/google-research/deduplicate-text-datasets#collecting-the-duplicates-together
"""
print("Getting pairs")
pairs = []
with open(byterange, "r") as f:
save = False
for line in tqdm(f):
if line.strip() == "out":
save = True
continue
if save:
left, right = line.strip().split()
pairs.append((int(left), int(right)))
print("num pairs", len(pairs))
return pairs
def get_bytes(pairs, data):
"""
Returns the bytes constituting each duplicated substring. There seems to be something off here, see:
https://github.com/google-research/deduplicate-text-datasets/issues/24
"""
print("Getting bytes")
byte_array = []
for left, right in tqdm(pairs):
byte_array.append(data[left:right])
print("byte_array size", len(byte_array))
return byte_array
def get_doc_id(pos, pos2id, pos2id_list):
"""
Gets id of the datapoint at position.
"""
pos = bisect_right(pos2id_list, pos)
doc_id = pos2id[pos2id_list[pos - 1]]
return doc_id
def get_url(row, dataset_name):
if dataset_name == "oscar":
return row["meta"]["warc_headers"]["warc-target-uri"]
if dataset_name == "the_pile" or dataset_name == "roots_en":
return None
return None
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset_name",
"-d",
type=str,
required=True,
)
parser.add_argument(
"--base_dir",
"-b",
type=str,
required=True,
)
args = parser.parse_args()
dataset_name = args.dataset_name
base_dir = args.base_dir
HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
print("Loading", dataset_name)
dataset = load_from_disk("/home/piktus_huggingface_co/lumi/preprocessed_data/{}".format(dataset_name))["train"]
dataset_raw = load_from_disk("/home/piktus_huggingface_co/lumi/{}".format(dataset_name))["train"]
print("Done loading", dataset_name)
data_path = base_dir + "/{ds}.train".format(ds=dataset_name)
byterange = base_dir + "/{ds}.train.byterange".format(ds=dataset_name)
pairs = get_pairs(byterange)
data = open(data_path, "rb").read()
pos2id = pickle.load(open(base_dir + "/{ds}.train.pos2id.pkl".format(ds=dataset_name), "rb"))
pos2id_list = sorted(pos2id.keys())
doc_pairs = defaultdict(list)
dup_len = defaultdict(int)
doc_bytes = defaultdict(list)
repetitions = defaultdict(set)
clusters = defaultdict(set)
included_docs = set()
print("Getting bytes")
byte_array = []
for left, right in tqdm(pairs):
byte_array.append(data[left:right])
print("byte_array size", len(byte_array))
print("Calculating repetitions")
for (l, r), b in tqdm(zip(pairs, byte_array)):
i = get_doc_id(l, pos2id, pos2id_list)
doc_pairs[i].append((l, r))
dup_len[i] += (r - l)
doc_bytes[i].append(b)
repetitions[b].add(i)
print(
"Singleton pairs:",
sum(1 for i in doc_pairs.keys() if len(doc_pairs[i]) == 1)
)
for idx in pos2id.values():
included_docs.add(idx)
print("Num included", len(included_docs))
print("Calculating clusters")
for i, _ in tqdm(doc_pairs.items()):
c = set()
for rep in doc_bytes[i]:
c |= repetitions[rep]
clusters[i] = c
# cleanup
del byterange
del data
del pos2id
del pos2id_list
del pairs
def add_duplication_info(example, idx):
example["text"] = dataset_raw[idx]["text"]
text_length = len(example["text"])
example["text_length"] = text_length
example["url"] = get_url(example, dataset_name)
example["domain"] = example["url"].split("/")[2] if example["url"] is not None else None
example["dup_ratio"] = dup_len[idx] / text_length
example["pairs"] = doc_pairs[idx]
example["repetitions"] = doc_bytes[idx]
example["included_in_dedup"] = idx in included_docs
example["cluster"] = clusters[idx]
return example
print("Mapping")
dataset = dataset.map(add_duplication_info, num_proc=max(1, cpu_count() - 32), with_indices=True, new_fingerprint="13")
dataset.save_to_disk("/home/piktus_huggingface_co/lumi/preprocessed_data/{}-dedup".format(dataset_name))
dataset.push_to_hub(
"datablations/{}-dedup".format(dataset_name),
private=True,
token=os.environ.get("HUGGINGFACE_TOKEN"),
)
| datablations-main | filtering/deduplication/add_dedup_info.py |
# import csv
import pandas as pd
import string
from datasets import load_dataset
from tqdm import tqdm
def find_whitespace(text):
for i, c in enumerate(text):
if c in string.whitespace:
yield i
oscar_small = pd.DataFrame(load_dataset("ola13/small-oscar")["train"][:10])
query_length = 100
with open("queries.bin", "wb") as f:
for idx, line in tqdm(oscar_small.iterrows()):
text = line["text"]
whitespace_idx = [-1] + list(find_whitespace(text))
for i in whitespace_idx[::2]:
if i + query_length + 1 < len(text):
query = text[(i + 1) : (i + query_length + 1)]
query_in_bytes = query.encode("utf-8")
size = len(query_in_bytes)
bytes_representation = size.to_bytes(4, "little")
f.write(bytes_representation)
f.write(query_in_bytes)
| datablations-main | filtering/deduplication/save_rust_format.py |
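A counterpart reader for the binary query file written above (an illustrative sketch, not part of the original script): each record is a 4-byte little-endian length followed by that many UTF-8 bytes.

import struct


def read_queries(path):
    """Yield queries back out of the length-prefixed binary file."""
    with open(path, "rb") as f:
        while True:
            header = f.read(4)
            if len(header) < 4:
                break
            (size,) = struct.unpack("<I", header)
            yield f.read(size).decode("utf-8")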
"""
muP Preparation from https://github.com/microsoft/mutransformers#basic-usage-of-models
!git clone https://github.com/microsoft/mutransformers.git
%cd mutransformers
!pip install -r requirements.txt
!pip install -e .
!pip install -q datasets
With our CC-like architectures we found that
7m params & 100M tokens -> 8.1 loss
1b1 params & 100M tokens -> 6.6 loss
2b8 params & 100M tokens -> 7.5 loss
So looking to run the last two, which in our CC setup have the hyperparams:
(d_model ffw_size kv_size n_heads n_layers)
PARAM_1143M=(1792 7168 128 14 26)
PARAM_2980M=(2560 10240 128 20 34)
target_config -> base_config: Divide width by 10 to 20 / Generally have 128 as width ; Adapt num_attention_heads, too (128 hidden & 8 heads)
base_config -> delta_config: Multiply hidden size by 2
Do small HP optim on LR at small scale:
Run tiny grid search at 64 hidden size (200M params) on init std 0.1 / default; make same warmup as prior experiments; Use batch size from prior experiments; Use cosine deacying to 10%
Then use those HPs found for 1B & 2b8 models
"""
### Cosine Annealing with Warmup from
### https://github.com/Lightning-Universe/lightning-bolts/blob/master/pl_bolts/optimizers/lr_scheduler.py
import warnings
import math
from typing import List
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class LinearWarmupCosineAnnealingLR(_LRScheduler):
"""Sets the learning rate of each parameter group to follow a linear warmup schedule between warmup_start_lr
and base_lr followed by a cosine annealing schedule between base_lr and eta_min.
.. warning::
It is recommended to call :func:`.step()` for :class:`LinearWarmupCosineAnnealingLR`
after each iteration as calling it after each epoch will keep the starting lr at
warmup_start_lr for the first epoch which is 0 in most cases.
.. warning::
passing epoch to :func:`.step()` is being deprecated and comes with an EPOCH_DEPRECATION_WARNING.
It calls the :func:`_get_closed_form_lr()` method for this scheduler instead of
:func:`get_lr()`. Though this does not change the behavior of the scheduler, when passing
epoch param to :func:`.step()`, the user should call the :func:`.step()` function before calling
train and validation methods.
Example:
>>> layer = nn.Linear(10, 1)
>>> optimizer = Adam(layer.parameters(), lr=0.02)
>>> scheduler = LinearWarmupCosineAnnealingLR(optimizer, warmup_epochs=10, max_epochs=40)
>>> #
>>> # the default case
>>> for epoch in range(40):
... # train(...)
... # validate(...)
... scheduler.step()
>>> #
>>> # passing epoch param case
>>> for epoch in range(40):
... scheduler.step(epoch)
... # train(...)
... # validate(...)
"""
def __init__(
self,
optimizer: Optimizer,
warmup_epochs: int,
max_epochs: int,
warmup_start_lr: float = 0.0,
eta_min: float = 0.0,
last_epoch: int = -1,
) -> None:
"""
Args:
optimizer (Optimizer): Wrapped optimizer.
warmup_epochs (int): Maximum number of iterations for linear warmup
max_epochs (int): Maximum number of iterations
warmup_start_lr (float): Learning rate to start the linear warmup. Default: 0.
eta_min (float): Minimum learning rate. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
"""
self.warmup_epochs = warmup_epochs
self.max_epochs = max_epochs
self.warmup_start_lr = warmup_start_lr
self.eta_min = eta_min
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
"""Compute learning rate using chainable form of the scheduler."""
if not self._get_lr_called_within_step:
warnings.warn(
"To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.",
UserWarning,
)
if self.last_epoch == 0:
return [self.warmup_start_lr] * len(self.base_lrs)
if self.last_epoch < self.warmup_epochs:
return [
group["lr"] + (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)
for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
]
if self.last_epoch == self.warmup_epochs:
return self.base_lrs
if (self.last_epoch - 1 - self.max_epochs) % (2 * (self.max_epochs - self.warmup_epochs)) == 0:
return [
group["lr"]
+ (base_lr - self.eta_min) * (1 - math.cos(math.pi / (self.max_epochs - self.warmup_epochs))) / 2
for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
]
return [
(1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs)))
/ (
1
+ math.cos(
math.pi * (self.last_epoch - self.warmup_epochs - 1) / (self.max_epochs - self.warmup_epochs)
)
)
* (group["lr"] - self.eta_min)
+ self.eta_min
for group in self.optimizer.param_groups
]
def _get_closed_form_lr(self) -> List[float]:
"""Called when epoch is passed as a param to the `step` function of the scheduler."""
if self.last_epoch < self.warmup_epochs:
return [
self.warmup_start_lr + self.last_epoch * (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)
for base_lr in self.base_lrs
]
return [
self.eta_min
+ 0.5
* (base_lr - self.eta_min)
* (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs)))
for base_lr in self.base_lrs
]
TARGET_CONFIG = {
"test": {
"hidden_size": 1024,
"intermediate_size": 1024*4,
"num_attention_heads": 32,
"num_layers": 12,
"batch_size": 256,
"per_device_train_batch_size": 4,
},
"200M": { # 203668480
"hidden_size": 1024,
"intermediate_size": 1024*4,
"num_attention_heads": 32,
"num_layers": 12,
"batch_size": 256,
"per_device_train_batch_size": 4,
},
"800M": { # 709326848
"hidden_size": 1024*2,
"intermediate_size": 1024*4*2,
"num_attention_heads": 32*2,
"num_layers": 12,
"batch_size": 256,
"per_device_train_batch_size": 2,
},
"1B": { # 1516975104
"hidden_size": 1024*3,
"intermediate_size": 1024*4*3,
"num_attention_heads": 32*2,
"num_layers": 12,
"batch_size": 256,
"per_device_train_batch_size": 2,
},
"2B": { # 1766073088
"hidden_size": int(1024*3.25),
"intermediate_size": int(1024*3.25)*4,
"num_attention_heads": 32*4,
"num_layers": 12,
"batch_size": 256,
"per_device_train_batch_size": 1,
},
"2B5": { # 2626613248
"hidden_size": int(1024*4),
"intermediate_size": int(1024*4)*4,
"num_attention_heads": 32*4,
"num_layers": 12,
"batch_size": 512,
"per_device_train_batch_size": 1,
},
"3B": { # 2951208704
"hidden_size": int(1024*4.25),
"intermediate_size": int(1024*4.25)*4,
"num_attention_heads": 32*4,
"num_layers": 12,
"batch_size": 512,
"per_device_train_batch_size": 1,
},
"3B5": { # 3294678528
"hidden_size": int(1024*4.5),
"intermediate_size": int(1024*4.5)*4,
"num_attention_heads": 32*4,
"num_layers": 12,
"batch_size": 512,
"per_device_train_batch_size": 1,
},
"1B1": {
"hidden_size": 1792,
"intermediate_size": 1792*4,
"num_attention_heads": 14,
"num_layers": 26,
"batch_size": 256,
"per_device_train_batch_size": 1,
},
"2B8": {
"hidden_size": 2560,
"intermediate_size": 2560*4,
"num_attention_heads": 20,
"num_layers": 34,
"batch_size": 512,
"per_device_train_batch_size": 1,
},
}
CONFIG_TO_RUN = "2B" # MODIFY BASED ON DESIRED CONFIG
USE_MUP = True
RUN_OFFLINE = True
# method-params-tokens
model_name = "sp" if not USE_MUP else "mup"
model_name += f"-{CONFIG_TO_RUN}".lower()
model_name += "-100m"
BASE_HIDDEN = 128
BASE_INTERMEDIATE = 256
BASE_NUM_ATTENTION_HEADS = 8
LR = 1e-3 if USE_MUP else 2e-4 # MUP default LR & SP default LR
INIT_RANGE = 0.01 # MUP default init range
if RUN_OFFLINE:
import os
os.environ["HF_DATASETS_OFFLINE"] = "1"
BATCH_SIZE = TARGET_CONFIG[CONFIG_TO_RUN]["batch_size"]
if USE_MUP:
from mutransformers import GPT2Config, GPT2LMHeadModel
from mup import make_base_shapes, set_base_shapes, MuAdamW
# define a base model
base_config = GPT2Config(
hidden_size=BASE_HIDDEN,
intermediate_size=BASE_INTERMEDIATE,
num_attention_heads=BASE_NUM_ATTENTION_HEADS,
initializer_range=INIT_RANGE,
)
base_model = GPT2LMHeadModel(config=base_config)
# define a delta models where we vary all "widths" we want to vary
delta_config = GPT2Config(
hidden_size=BASE_HIDDEN*2,
intermediate_size=BASE_INTERMEDIATE*2,
num_attention_heads=BASE_NUM_ATTENTION_HEADS*2,
initializer_range=INIT_RANGE,
)
delta_model = GPT2LMHeadModel(config=delta_config)
# define a base shape object based on comparing delta_model against base_model
base_shapes = make_base_shapes(base_model, delta_model, savefile='gpt256.bsh')
# define target model
target_config = GPT2Config(
hidden_size=TARGET_CONFIG[CONFIG_TO_RUN]["hidden_size"],
intermediate_size=TARGET_CONFIG[CONFIG_TO_RUN]["intermediate_size"],
num_attention_heads=TARGET_CONFIG[CONFIG_TO_RUN]["num_attention_heads"],
num_layers=TARGET_CONFIG[CONFIG_TO_RUN]["num_layers"],
initializer_range=INIT_RANGE,
use_cache=False,
)
else:
from transformers import GPT2Config, GPT2LMHeadModel
# define target model
target_config = GPT2Config(
hidden_size=TARGET_CONFIG[CONFIG_TO_RUN]["hidden_size"],
intermediate_size=TARGET_CONFIG[CONFIG_TO_RUN]["intermediate_size"],
num_attention_heads=TARGET_CONFIG[CONFIG_TO_RUN]["num_attention_heads"],
num_layers=TARGET_CONFIG[CONFIG_TO_RUN]["num_layers"],
use_cache=False,
)
target_model = GPT2LMHeadModel(config=target_config)
if USE_MUP:
# set base shapes
set_base_shapes(target_model, base_shapes)
# you can alternatively load base shape from file
# set_base_shapes(target_model, 'bert256.bsh')
# re-initialize
target_model.apply(target_model._init_weights)
# make sure to use mup optimizers for training
optimizer = MuAdamW(target_model.parameters(), lr=LR)
else:
from transformers import AdamW
optimizer = AdamW(target_model.parameters(), lr=LR)
import numpy as np
model_parameters = filter(lambda p: p.requires_grad, target_model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Number of trainable parameters: ", params)
"""
Training code
Train billion parameter models on 100M tokens of C4
Adapted from:
https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb
"""
from datasets import load_dataset
# git clone https://huggingface.co/datasets/datablations/c4-100m
datasets = load_dataset('./c4-100m')
# wget https://huggingface.co/datasets/allenai/c4/resolve/main/en/c4-validation.00000-of-00008.json.gz
# val_dataset = load_dataset('json', data_files='c4-validation.00000-of-00008.json.gz')['train']
val_dataset = load_dataset('json', data_files='c4-validation.*-of-00008.json.gz')['train']
# val_dataset = load_dataset('c4', 'en', split='validation[:10%]')
datasets["validation"] = val_dataset
datasets = datasets.select_columns("text")
from transformers import AutoTokenizer, Trainer, TrainingArguments
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenized_datasets = datasets.map(lambda x: tokenizer(x["text"]), batched=True, num_proc=4, remove_columns=["text"])
block_size = tokenizer.model_max_length
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
batch_size=1000,
num_proc=4,
)
num_steps = len(lm_datasets["train"]) // BATCH_SIZE
scheduler = LinearWarmupCosineAnnealingLR(
optimizer,
warmup_epochs=num_steps // 100, # 1% of training steps
max_epochs=num_steps,
eta_min=LR / 10, # Decay to 10% of LR
)
per_device_train_batch_size = TARGET_CONFIG[CONFIG_TO_RUN]["per_device_train_batch_size"]
gradient_accumulation_steps = BATCH_SIZE // per_device_train_batch_size
training_args = TrainingArguments(
model_name,
evaluation_strategy="steps",
weight_decay=0.01,
push_to_hub=not(RUN_OFFLINE),
per_device_train_batch_size=per_device_train_batch_size,
per_device_eval_batch_size=32,
num_train_epochs=1,
gradient_accumulation_steps=gradient_accumulation_steps,
save_steps=100,
bf16=True,
#gradient_checkpointing=True, # Use if OOM
)
# If loading pre-trained model for eval
#from mutransformers import GPT2Config, GPT2LMHeadModel
#from mup import make_base_shapes, set_base_shapes, MuAdamW
#model_name = "mup-2b-100m-e3"
#target_model = GPT2LMHeadModel.from_pretrained(model_name)
#set_base_shapes(target_model, base_shapes)
#set_base_shapes(target_model, 'gpt256.bsh')
trainer = Trainer(
model=target_model,
args=training_args,
train_dataset=lm_datasets["train"], # .select(range(256)), # Testing
eval_dataset=lm_datasets["validation"],
optimizers=(optimizer, scheduler), # Use mup optimizer & cosine scheduler
)
if USE_MUP:
del base_model
del delta_model
trainer.train()
# Continue training
# trainer.train("checkpoint-100")
if RUN_OFFLINE:
trainer.save_model(model_name)
else:
trainer.push_to_hub()
import math
eval_results = trainer.evaluate()
print(f"Loss: {eval_results['eval_loss']:.4f}")
print(f"Perplexity: {math.exp(eval_results['eval_loss']):.4f}")
import json
with open(f"{model_name}-full.json", "w") as f:
json.dump(eval_results, f)
| datablations-main | training/mup.py |
import os
import shutil
# shutil.rmtree()
checkpoint_dirs = [dir_name for dir_name in os.listdir() if dir_name.startswith('checkpoint')]
for dir_name in checkpoint_dirs:
latest_file_path = os.path.join(dir_name, 'latest')
with open(latest_file_path, 'r') as f:
latest_checkpoint = f.read().strip()
if not os.path.exists(os.path.join(dir_name, latest_checkpoint)):
print(f"Deleting directory {dir_name} because checkpoint {latest_checkpoint} does not exist in it.")
shutil.rmtree(dir_name)
#break
#os.rmdir(dir_name)
| datablations-main | utils/cleandirs.py |
#!/usr/bin/env python
# this script converts results.json:
#
# "results": {
# "arc_challenge": {
# "acc": 0.24232081911262798,
# "acc_stderr": 0.01252159329580012,
# "acc_norm": 0.2764505119453925,
# "acc_norm_stderr": 0.013069662474252425
# },
#
# into a format expected by a spreadsheet, which is:
#
# dataset fewshots prompt metric value
# arc_challenge 0 <prompt_name> acc xxx
# arc_challenge 0 <prompt_name> acc_norm xxx
# arc_challenge 0 median accuracy xxx
#
# usage:
# report-to-csv.py results.json
import sys
import statistics
import json
import io
import csv
results_file = sys.argv[1]
csv_file = results_file.replace("json", "csv")
print(f"Converting {results_file} to {csv_file}")
with io.open(results_file, 'r', encoding='utf-8') as f:
raw_results = json.load(f)
results = {}
for ds_name, v in sorted(raw_results.items()):
results[ds_name.split("/")[-1]] = v
with io.open(csv_file, 'w', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow(["dataset", "fewshots", "prompt", "metric", "value"])
for ds_name, v in sorted(results.items()):
medians = []
for fewshots, v_sub in sorted(v.items()):
acc_scores, bleu_scores, rouge_scores = [], [], []
for prompt_name, res in sorted(v_sub.items()):
# T0 Eval
if "evaluation" in res:
for metric, value in sorted(res["evaluation"].items()):
writer.writerow([ds_name, fewshots, prompt_name, metric, value])
if metric == "accuracy":
acc_scores.append(value)
# LM Eval Harness Generation
elif "rouge2_fmeasure" in res:
writer.writerow([ds_name, fewshots, prompt_name, "rouge2_fmeasure", res["rouge2_fmeasure"]])
rouge_scores.append(res["rouge2_fmeasure"])
# LM Eval Harness Accuracy
elif "acc" in res:
writer.writerow([ds_name, fewshots, prompt_name, "acc", res["acc"]])
acc_scores.append(res["acc"])
#elif "bleu" in res:
# # Make sure BLEU is 0-1 not 0-100
# writer.writerow([ds_name, prompt_name, "bleu", res["bleu"] / 100])
# bleu_scores.append(res["bleu"] / 100)
if acc_scores:
median = statistics.median(acc_scores)
medians.append(median)
writer.writerow([ds_name, fewshots, "median", "accuracy", median])
elif bleu_scores:
median = statistics.median(bleu_scores)
medians.append(median)
writer.writerow([ds_name, fewshots, "median", "bleu", median])
elif rouge_scores:
median = statistics.median(rouge_scores)
medians.append(median)
writer.writerow([ds_name, fewshots, "median", "rouge2_fmeasure", median])
if medians:
writer.writerow([ds_name, fewshots, "average", "multiple", statistics.mean(medians)])
| datablations-main | utils/csv_generative.py |
def full_flops(dataset_size, hidden_size, num_heads, num_layers, seq_len=2048, vocab_size=32000, ffw_size=None):
if ffw_size is None:
ffw_size = 4 * hidden_size
embeddings_flops = 2 * seq_len * vocab_size * hidden_size
attention_kqv_proj = 2 * 3 * seq_len * hidden_size * hidden_size
attention_kq_logits = 2 * seq_len * seq_len * hidden_size
attention_softmax = 3 * num_heads * seq_len * seq_len
attention_softmax_q_red = 2 * seq_len * seq_len * hidden_size
attention_final_layer = 2 * seq_len * hidden_size * hidden_size
dense_flops = 2 * seq_len * (hidden_size * ffw_size + ffw_size * hidden_size)
final_logits = 2 * seq_len * hidden_size * vocab_size
total_flops = embeddings_flops + num_layers * (attention_kqv_proj + attention_kq_logits +
attention_softmax + attention_softmax_q_red + attention_final_layer +
dense_flops) + final_logits
return total_flops * 3 * dataset_size / seq_len
def params(hidden_size, num_heads, num_layers, seq_len=2048, vocab_size=32000, ffw_size=None, relative_attention=False):
if ffw_size is None:
ffw_size = 4 * hidden_size
per_layer = 4*hidden_size*hidden_size # attention
per_layer += 4*hidden_size # attention bias
per_layer += 2 * ffw_size * hidden_size # dense
per_layer += ffw_size + hidden_size # dense bias
per_layer += 2 * hidden_size # layer norm
if relative_attention:
per_layer += hidden_size*hidden_size # relative position embeddings according to Dai et al.
embeddings = 1 * hidden_size*vocab_size + vocab_size
if not relative_attention:
embeddings += seq_len*hidden_size
N = num_layers * (per_layer) + embeddings
return N
def simple_flops(dataset_size, hidden_size, num_heads, num_layers, seq_len=2048, vocab_size=32000, ffw_size=None, relative_attention=False):
if ffw_size is None:
ffw_size = 4 * hidden_size
return 6 * params(hidden_size=hidden_size, num_heads=num_heads, num_layers=num_layers, seq_len=seq_len, vocab_size=vocab_size, ffw_size=ffw_size, relative_attention=relative_attention) * dataset_size
def get_dataset_size(flops, hidden_size, num_heads, num_layers, seq_len=2048, vocab_size=32000, ffw_size=None, relative_attention=True):
return flops / (6 * params(hidden_size=hidden_size, num_heads=num_heads, num_layers=num_layers, seq_len=seq_len, vocab_size=vocab_size, ffw_size=ffw_size, relative_attention=relative_attention))
| datablations-main | utils/flops-params_py.py |
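Illustrative usage of the helpers above (not part of the original file), for the 1B1 configuration quoted in training/mup.py — hidden 1792, ffw 7168, 14 heads, 26 layers — trained on 100M tokens, with this file's default seq_len and vocab_size:

n = params(hidden_size=1792, num_heads=14, num_layers=26, ffw_size=7168)
print(f"parameters: {n:,}")  # ~1.06e9
print(f"6ND FLOPs:  {simple_flops(100e6, 1792, 14, 26, ffw_size=7168):.3e}")
print(f"full FLOPs: {full_flops(100e6, 1792, 14, 26, ffw_size=7168):.3e}")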
"""
Saves a merged.json file in the provided directory
python merge_all_json.py DIRECTORY
"""
import json
import os
from pathlib import Path
import sys
from typing import Dict
def find_all_json(root_dir: Path):
if root_dir.is_file():
if root_dir.name.endswith(".json"):
return [root_dir]
else:
return []
all_jsons = []
for path in root_dir.iterdir():
all_jsons += find_all_json(path)
return all_jsons
def sort_dict(dictionary: Dict) -> Dict:
results = {}
for key, value in sorted(dictionary.items(), key=lambda item: item[0]):
new_value = value
if isinstance(value, dict):
new_value = sort_dict(new_value)
elif isinstance(value, list):
new_value = sorted(value)
results[key] = new_value
return results
def main():
# find all json file in directory
root_dir = Path(sys.argv[1])
out_path = os.path.join(root_dir, "merged.json")
if os.path.exists(out_path):
os.remove(out_path)
all_jsons = find_all_json(root_dir)
# merge
results = {}
for json_file in all_jsons:
with open(json_file, "r") as fi:
data = json.load(fi)
if str(json_file.name).startswith("slim"):
print(f"Parsing {json_file} as bigscience/lm-eval-harness file.")
fewshots = data["config"]["num_fewshot"]
for dic in data["results"]:
key = dic["task_name"]
# Same dataset but not really comparable
if "en-fr" in dic["prompt_name"]:
key += "_en-fr"
elif "fr-en" in dic["prompt_name"]:
key += "_fr-en"
elif "hi-en" in dic["prompt_name"]:
key += "_hi-en"
elif "en-hi" in dic["prompt_name"]:
key += "_en-hi"
sub_key = dic["prompt_name"]
results.setdefault(key, {})
results[key].setdefault(fewshots, {})
results[key][fewshots].setdefault(sub_key, {})
results[key][fewshots][sub_key] = {
**results[key][fewshots][sub_key],
**{subk: subv for subk, subv in dic.items() if type(subv) in [int, float]}
}
elif str(json_file.name).startswith("agg"):
print(f"Skipping {json_file} from bigscience/lm-eval-harness.")
continue
else:
print(f"Parsing {json_file} as bigscience/t-zero file.")
key = f"{data['dataset_name']}_{data['dataset_config_name']}"
fewshots = 0
if key in results:
results[key].setdefault(fewshots, {})
assert data["template_name"] not in results[key][fewshots]
results[key][fewshots][data["template_name"]] = data
else:
results[key] = {
fewshots: {
data["template_name"]: data
}
}
# sort
sorted_results = sort_dict(results)
# write
with open(out_path, "w") as fo:
json.dump(sorted_results, fo)
if __name__ == "__main__":
main()
| datablations-main | utils/merge_generative.py |
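For reference, a sketch of the structure merged.json ends up with (task, then number of fewshots, then prompt name, then the numeric fields kept from each result); the names and values here are invented:

merged = {
    "wmt14_fr-en": {
        0: {
            "a_prompt_name": {"bleu": 0.021, "bleu_stderr": 0.003},
        },
    },
}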
#!/usr/bin/env python
# creates a local auth token file which can then be safely used by other programs without leaking
# the password in public git
import getpass
import json
from pathlib import Path
from huggingface_hub import HfApi
HUB_DATA_PATH_SHARED = "/pfs/lustrep4/scratch/project_462000119/muennighoff/nov-2022-bettercom/.hub_info.json"
#HUB_DATA_PATH = Path(__file__).resolve().parent / ".hub_info.json"
username = input("Hub username: ")
password = getpass.getpass("Hub password: ")
email = input("Hub email: ")
auth_token = HfApi().login(username=username, password=password)
data = dict(username=username, email=email, auth_token=auth_token)
#print(data)
with open(HUB_DATA_PATH_SHARED, 'w') as f:
json.dump(data, f)
| datablations-main | utils/hub_auth.py |
"""
Script for searching through logs to look for failing nodes.
"""
import sys
import re
NODE_RANK_RE = re.compile(r'Launching on (\S+) \((\d+)/(\d+)\)')
ERROR_STRINGS = [
'Segmentation fault',
'Failed to initialize RSMI device mutex',
'ERROR:torch.distributed.elastic.multiprocessing.api:failed',
]
if len(sys.argv) != 3:
print(f'usage: {sys.argv[0]} STDERR-LOG STDOUT-LOG')
sys.exit(1)
rank_to_node, node_count = {}, None
with open(sys.argv[2]) as f:
for line in f:
m = NODE_RANK_RE.search(line)
if not m:
continue
node, rank, count = m.groups()
rank, count = int(rank), int(count)
if node_count is None:
node_count = count
else:
assert node_count == count
assert rank not in rank_to_node
rank_to_node[rank] = node
with open(sys.argv[1]) as f:
for line in f:
if any(e in line for e in ERROR_STRINGS):
line = line.rstrip('\n')
try:
rank = int(line.split(':')[0])
except:
print(f'failed to parse rank: {line}', file=sys.stderr)
continue
print(f'{rank_to_node[rank]}\t{line}')
| datablations-main | utils/errornodes.py |
#!/usr/bin/env python
#
# This tool automatically pushes newly added and modified files into the hub repo, if they match the
# provided one or more patterns.
#
# If the program fails to run the first time make sure to run `hub-auth.py` to authenticate and save
# the token, and user name/email locally which will then be used by this program to alter the config
# of the target repo to automatically commit as the user you authenticated with. This is needed when
# pushing as someone else, which is the case here, as we want the software to always work and not
# depend on the developer's git setup.
#
# Example:
#
# hub-sync.py --repo-path /hf/Megatron-DeepSpeed-master/output_dir/tensorboard/ --patterns '*tfevents*'
#
# multiple patterns can be passed
import argparse
import io
import json
import os
import re
import subprocess
import sys
from collections import defaultdict
from fnmatch import fnmatch
from huggingface_hub import HfApi, HfFolder, Repository
from pathlib import Path
from typing import List, Optional, Union
# normally using a globally shared hub data, but can override it with the local token if need be
HUB_DATA_PATH_SHARED = "/gpfsdswork/projects/rech/six/commun/auth/.hub_info.json"
# for now disabling local, since it leads to outdated auth tokens
HUB_DATA_PATH_LOCAL = Path(__file__).resolve().parent / ".hub_info.json"
HUB_AUTH_TOKEN_PATH = "/gpfsdswork/projects/rech/six/commun/auth/.hub_auth"
# map https://git-scm.com/docs/git-status#_short_format
#
# ' ' = unmodified
# M = modified
# A = added
# D = deleted
# R = renamed
# C = copied
# U = updated but unmerged
# X Y Meaning
# -------------------------------------------------
# [AMD] not updated
# M [ MD] updated in index
# A [ MD] added to index
# D deleted from index
# R [ MD] renamed in index
# C [ MD] copied in index
# [MARC] index and work tree matches
# [ MARC] M work tree changed since index
# [ MARC] D deleted in work tree
# [ D] R renamed in work tree
# [ D] C copied in work tree
# -------------------------------------------------
# D D unmerged, both deleted
# A U unmerged, added by us
# U D unmerged, deleted by them
# U A unmerged, added by them
# D U unmerged, deleted by us
# A A unmerged, both added
# U U unmerged, both modified
# -------------------------------------------------
# ? ? untracked
# ! ! ignored
git_status_lookup = {
"?": "untracked",
"M": "modified",
"A": "added",
"D": "deleted",
"R": "renamed",
"C": "copied",
"U": "updated_unmerged",
}
def get_git_files_by_status(local_dir):
try:
git_status = subprocess.run(
["git", "status", "-s"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=local_dir,
).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
if len(git_status) == 0:
return {}
file_statuses = [status.strip() for status in git_status.split("\n")]
# create a dict of lists for each long key in git_status_lookup
files = defaultdict(list)
for l in file_statuses:
k, v = l.split(' ', 1)
k = k.strip()[0] # get first column
# remap to sensible name
k = git_status_lookup.get(k, "unknown")
files[k].append(v)
#print(files)
return files
# XXX: this should be PR'ed into https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/repository.py
# after adjusting the API self, self.local_dir
def get_untracked_files(local_dir) -> List[str]:
"""
Returns a list of untracked files in the working directory
"""
key = "untracked"
files_by_status = get_git_files_by_status(local_dir)
return files_by_status[key] if key in files_by_status else []
def get_modified_files(local_dir) -> List[str]:
"""
Returns a list of modified files in the working directory
"""
key = "modified"
files_by_status = get_git_files_by_status(local_dir)
return files_by_status[key] if key in files_by_status else []
def get_new_and_modified_files(local_dir) -> List[str]:
"""
Returns a list of untracked and modified files in the working directory recursively.
It will include relative path for files under sub-dirs that are untracked.
"""
try:
cmd = "git ls-files --modified --others --exclude-standard".split()
output = subprocess.run(
cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=local_dir,
).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
if len(output) == 0:
return []
return [f.strip() for f in output.split("\n")]
def run_cmd(cmd, local_dir):
try:
git_status = subprocess.run(
cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=local_dir,
).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
return git_status
def hub_config_repo(hub_data, local_dir):
# if we have the bot user email set, that means we have done this process already
# but some users don't have any `user.email` set, so recover gracefully if that's the case
try:
cmd = f"git config user.email"
email = run_cmd(cmd.split(), local_dir)
if len(email) > 0 and email == hub_data['email']:
return
except:
pass
print(f"* Detected a new clone. Setting it up for {hub_data['username']}")
# to work as another user we need
# 1. their user.email ( but also user.name is required but can be anything)
cmd = f"git config user.email {hub_data['email']}"
run_cmd(cmd.split(), local_dir)
cmd = f"git config user.name {hub_data['username']}"
run_cmd(cmd.split(), local_dir)
# 2. pre-auth the repo
# a. get url
cmd = "git remote get-url origin"
url = run_cmd(cmd.split(), local_dir)
# b. extract just the huggingface.co/app-test-user/test-tensorboard part
repo_part_url = re.sub(r'https.*(?=huggingface)', '', url, 0, re.M)
cmd = f"git remote set-url origin --push https://{hub_data['username']}:{hub_data['auth_token']}@{repo_part_url}"
run_cmd(cmd.split(), local_dir)
def get_hub_data():
"""
To simplify the setup of different projects we use a common hug info data file at HUB_DATA_PATH_SHARED.
But if desired it can be overridden with a local data file at HUB_DATA_PATH_LOCAL
"""
# if os.path.isfile(HUB_DATA_PATH_LOCAL):
# hub_data_path = HUB_DATA_PATH_LOCAL
if os.path.isfile(HUB_DATA_PATH_SHARED):
hub_data_path = HUB_DATA_PATH_SHARED
else:
raise FileNotFoundError(f"Couldn't locate {HUB_DATA_PATH_SHARED}. "
"Please run hub-auth.py first")
with io.open(hub_data_path, 'r', encoding='utf-8') as f:
return json.load(f)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--patterns", nargs='+', default=None, required=True, type=str, help="one or more patterns of files to match to add to the hub - make sure to quote those!")
parser.add_argument("--repo-path", type=str, required=True, help="path to the already cloned repo")
parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
return parser.parse_args()
def main():
args = get_args()
if not (os.path.isdir(args.repo_path) and os.path.isdir(f"{args.repo_path}/.git")):
raise FileNotFoundError(f"Directory '{args.repo_path}' either doesn't exist or it's not a git clone directory. "
"Clone the desired repo first to '{args.repo_path}'.")
if len(args.patterns) == 0:
raise ValueError("At least one --pattern is required.")
print(f"* Processing {args.repo_path}")
if args.debug:
print(f"Tracking {len(args.patterns)} patterns:")
print(''.join(f"- {x}\n" for x in args.patterns))
hub_data = get_hub_data()
repo = Repository(args.repo_path)
hub_config_repo(hub_data, local_dir=args.repo_path)
files_dict = get_git_files_by_status(args.repo_path)
# we want untracked and modified files
uncommitted_files = get_new_and_modified_files(args.repo_path)
total_to_commit = 0
if len(uncommitted_files) > 0:
print(f"* Found {len(uncommitted_files)} uncommitted files:")
if args.debug:
print(''.join(f"- {f}\n" for f in uncommitted_files))
for pattern in args.patterns:
# *** new and modified files ***
# check that these are the files that match the pattern passed to git_add
uncommitted_files_matched = [f for f in uncommitted_files if fnmatch(f, pattern)]
print(f"* Found {len(uncommitted_files_matched)} uncommitted files matching pattern: {pattern}:")
if args.debug:
print(''.join(f"- {f}\n" for f in uncommitted_files_matched))
if len(uncommitted_files_matched) > 0:
total_to_commit += len(uncommitted_files_matched)
# # auto_lfs_track requires huggingface-hub-0.0.15, but transformers forces 0.0.12
repo.git_add(pattern=pattern) # , auto_lfs_track=True)
repo.git_commit(commit_message="new data")
if total_to_commit:
print(f"* Pushing {total_to_commit} files")
repo.git_push()
print("* Pushed")
else:
print("* Detected no new or modified files. Nothing to push.")
if __name__ == "__main__":
main()
| datablations-main | utils/hub_sync.py |
import argparse
import os
from typing import List, Dict
import subprocess
import shlex
import numpy as np
import pyarrow as pa
from datasets import load_dataset, Dataset, concatenate_datasets
from tqdm import tqdm
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, required=True,
help="Path to the dataset you're using on the HF hub. Pass e.g. `csv` or `json` and `data_files=path_on_disk` to load something locally")
parser.add_argument('--subset', type=str, default=None, help="Subset of the dataset you're using, if needed")
parser.add_argument('--data_files', type=str, default=None, help="Path to the dataset on disk if using local files")
parser.add_argument('--ratios', nargs='+', type=float, help="Subsampling ratios", required=True)
parser.add_argument('--names', nargs='+', type=str, help="Names for the produced subsets", required=False)
parser.add_argument('--pre_shuffle', action="store_true", help="Whether to shuffle the dataset in advance")
parser.add_argument('--shuffle_seed', type=int, default=0, help="Shuffling seed")
return parser.parse_args()
def get_size_per_example(texts: List[str]) -> Dict:
size_values = [len(text.encode()) for text in texts]
examples = {"bytes_len": size_values}
return examples
def get_total_byte_size(dataset):
return pa.compute.sum(dataset.data["bytes_len"]).as_py()
def output_path(args, ratio, name):
if name is None:
name = f"{ratio}_subsample"
if args.data_files is not None:
# assumes there's an extension
path = args.data_files.split(".")[:-1]
path += f"_{name}"
path += ".jsonl"
else:
path = f"{args.name}_{args.subset}_{name}.jsonl"
return os.path.abspath(path)
if __name__ == "__main__":
args = get_args()
if args.names is None:
args.names = [None] * len(args.ratios)
else:
assert len(args.names) == len(args.ratios)
dataset = load_dataset(args.name, args.subset, data_files=args.data_files, num_proc=os.cpu_count(), split="train")
dataset = dataset.map(
get_size_per_example,
batched=True,
num_proc=os.cpu_count(),
batch_size=1024,
input_columns=["text"],
)
if args.pre_shuffle:
# this is going to be incredibly slow on large datasets
dataset = dataset.shuffle(args.shuffle_seed)
dataset = dataset.flatten_indices(num_proc=os.cpu_count())
cumsum_sizes = pa.compute.cumulative_sum(dataset.data["bytes_len"])
cumsum_ds = Dataset(pa.Table.from_arrays([cumsum_sizes], names=["cumsum_sizes"]))
dataset = concatenate_datasets([dataset, cumsum_ds], axis=1)
total_size = dataset[-1]["cumsum_sizes"]
dataset = dataset.with_format("numpy")
ratios_and_names = sorted(list(zip(args.ratios, args.names)), key=lambda x: x[0], reverse=True)
base_file = args.data_files
assert dataset._indices is None
for ratio, name in tqdm(ratios_and_names):
cutoff_point = np.searchsorted(dataset["cumsum_sizes"], total_size * ratio)
if base_file is None:
subset = dataset.select(range(cutoff_point)).remove_columns(["bytes_len", "cumsum_sizes"])
assert subset._indices is None
subset.to_json(output_path(args, ratio, name), num_proc=64, batch_size=100_000)
base_file = output_path(args, ratio, name)
else:
subprocess.run(shlex.split(f"head -{cutoff_point} {base_file}"),
stdout=open(output_path(args, ratio, name), "w"), check=True)
| datablations-main | utils/hf_dataset_subsampling.py |
###############################################################################
# Language Modeling on Penn Tree Bank
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import argparse
import torch
from torch.autograd import Variable
import data
parser = argparse.ArgumentParser(description='PyTorch PTB Language Model')
# Model parameters.
parser.add_argument('--data', type=str, default='./data/penn',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (LSTM, QRNN)')
parser.add_argument('--checkpoint', type=str, default='./model.pt',
help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
help='output file for generated text')
parser.add_argument('--words', type=int, default=1000,
help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
help='reporting interval')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, 'rb') as f:
model = torch.load(f)
model.eval()
if args.model == 'QRNN':
model.reset()
if args.cuda:
model.cuda()
else:
model.cpu()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(1)
input = Variable(torch.rand(1, 1).mul(ntokens).long(), volatile=True)
if args.cuda:
input.data = input.data.cuda()
with open(args.outf, 'w') as outf:
for i in range(args.words):
output, hidden = model(input, hidden)
word_weights = output.squeeze().data.div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.data.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
outf.write(word + ('\n' if i % 20 == 19 else ' '))
if i % args.log_interval == 0:
print('| Generated {}/{} words'.format(i, args.words))
| awd-lstm-lm-master | generate.py |
import torch
import torch.nn as nn
from torch.autograd import Variable
from embed_regularize import embedded_dropout
from locked_dropout import LockedDropout
from weight_drop import WeightDrop
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, dropouth=0.5, dropouti=0.5, dropoute=0.1, wdrop=0, tie_weights=False):
super(RNNModel, self).__init__()
self.lockdrop = LockedDropout()
self.idrop = nn.Dropout(dropouti)
self.hdrop = nn.Dropout(dropouth)
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
assert rnn_type in ['LSTM', 'QRNN', 'GRU'], 'RNN type is not supported'
if rnn_type == 'LSTM':
self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else (ninp if tie_weights else nhid), 1, dropout=0) for l in range(nlayers)]
if wdrop:
self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop) for rnn in self.rnns]
if rnn_type == 'GRU':
self.rnns = [torch.nn.GRU(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else ninp, 1, dropout=0) for l in range(nlayers)]
if wdrop:
self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop) for rnn in self.rnns]
elif rnn_type == 'QRNN':
from torchqrnn import QRNNLayer
self.rnns = [QRNNLayer(input_size=ninp if l == 0 else nhid, hidden_size=nhid if l != nlayers - 1 else (ninp if tie_weights else nhid), save_prev_x=True, zoneout=0, window=2 if l == 0 else 1, output_gate=True) for l in range(nlayers)]
for rnn in self.rnns:
rnn.linear = WeightDrop(rnn.linear, ['weight'], dropout=wdrop)
print(self.rnns)
self.rnns = torch.nn.ModuleList(self.rnns)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
#if nhid != ninp:
# raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
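            # Sharing one (ntoken x ninp) matrix between encoder and decoder is
            # also why the last RNN layer above emits ninp-sized outputs when
            # tie_weights is set.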
self.init_weights()
self.rnn_type = rnn_type
self.ninp = ninp
self.nhid = nhid
self.nlayers = nlayers
self.dropout = dropout
self.dropouti = dropouti
self.dropouth = dropouth
self.dropoute = dropoute
self.tie_weights = tie_weights
def reset(self):
if self.rnn_type == 'QRNN': [r.reset() for r in self.rnns]
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden, return_h=False):
emb = embedded_dropout(self.encoder, input, dropout=self.dropoute if self.training else 0)
#emb = self.idrop(emb)
emb = self.lockdrop(emb, self.dropouti)
raw_output = emb
new_hidden = []
#raw_output, hidden = self.rnn(emb, hidden)
raw_outputs = []
outputs = []
for l, rnn in enumerate(self.rnns):
current_input = raw_output
raw_output, new_h = rnn(raw_output, hidden[l])
new_hidden.append(new_h)
raw_outputs.append(raw_output)
if l != self.nlayers - 1:
#self.hdrop(raw_output)
raw_output = self.lockdrop(raw_output, self.dropouth)
outputs.append(raw_output)
hidden = new_hidden
output = self.lockdrop(raw_output, self.dropout)
outputs.append(output)
result = output.view(output.size(0)*output.size(1), output.size(2))
if return_h:
return result, hidden, raw_outputs, outputs
return result, hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return [(Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else (self.ninp if self.tie_weights else self.nhid)).zero_()),
Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else (self.ninp if self.tie_weights else self.nhid)).zero_()))
for l in range(self.nlayers)]
elif self.rnn_type == 'QRNN' or self.rnn_type == 'GRU':
return [Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else (self.ninp if self.tie_weights else self.nhid)).zero_())
for l in range(self.nlayers)]
| awd-lstm-lm-master | model.py |
from torch.autograd import Variable
def repackage_hidden(h):
"""Wraps hidden states in new Variables, to detach them from their history."""
if type(h) == Variable:
return Variable(h.data)
else:
return tuple(repackage_hidden(v) for v in h)
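# Illustrative note (assuming a modern PyTorch where Variable is merged into
# Tensor): this is equivalent to detaching each hidden tensor, e.g.
#   hidden = tuple(h.detach() for h in hidden)
# which truncates backpropagation-through-time at the batch boundary.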
def batchify(data, bsz, args):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
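    # Illustrative shapes (hypothetical numbers): 10,000 tokens at bsz=20 give
    # nbatch=500 and a (500, 20) tensor; each column holds one contiguous token
    # stream, and training consumes consecutive rows as timesteps.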
if args.cuda:
data = data.cuda()
return data
def get_batch(source, i, args, seq_len=None, evaluation=False):
seq_len = min(seq_len if seq_len else args.bptt, len(source) - 1 - i)
data = Variable(source[i:i+seq_len], volatile=evaluation)
target = Variable(source[i+1:i+1+seq_len].view(-1))
return data, target
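# Illustrative call (hypothetical values): with seq_len=70,
#   data   = source[i   : i+70]            # shape (70, bsz)
#   target = source[i+1 : i+71].view(-1)   # shape (70*bsz,)
# i.e. the targets are the inputs shifted one token ahead (next-token prediction).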
| awd-lstm-lm-master | utils.py |
import argparse
import time
import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import data
import model
from utils import batchify, get_batch, repackage_hidden
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='data/penn',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (LSTM, QRNN)')
parser.add_argument('--save', type=str, default='best.pt',
help='model to use the pointer over')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--bptt', type=int, default=5000,
help='sequence length')
parser.add_argument('--window', type=int, default=3785,
help='pointer window length')
parser.add_argument('--theta', type=float, default=0.6625523432485668,
help='mix between uniform distribution and pointer softmax distribution over previous words')
parser.add_argument('--lambdasm', type=float, default=0.12785920428335693,
help='linear mix between only pointer (1) and only vocab (0) distribution')
args = parser.parse_args()
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus(args.data)
eval_batch_size = 1
test_batch_size = 1
#train_data = batchify(corpus.train, args.batch_size)
val_data = batchify(corpus.valid, test_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
criterion = nn.CrossEntropyLoss()
def one_hot(idx, size, cuda=True):
a = np.zeros((1, size), np.float32)
a[0][idx] = 1
v = Variable(torch.from_numpy(a))
if cuda: v = v.cuda()
return v
def evaluate(data_source, batch_size=10, window=args.window):
# Turn on evaluation mode which disables dropout.
if args.model == 'QRNN': model.reset()
model.eval()
total_loss = 0
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(batch_size)
next_word_history = None
pointer_history = None
for i in range(0, data_source.size(0) - 1, args.bptt):
if i > 0: print(i, len(data_source), math.exp(total_loss / i))
data, targets = get_batch(data_source, i, evaluation=True, args=args)
output, hidden, rnn_outs, _ = model(data, hidden, return_h=True)
rnn_out = rnn_outs[-1].squeeze()
output_flat = output.view(-1, ntokens)
###
# Fill pointer history
start_idx = len(next_word_history) if next_word_history is not None else 0
next_word_history = torch.cat([one_hot(t.data[0], ntokens) for t in targets]) if next_word_history is None else torch.cat([next_word_history, torch.cat([one_hot(t.data[0], ntokens) for t in targets])])
#print(next_word_history)
pointer_history = Variable(rnn_out.data) if pointer_history is None else torch.cat([pointer_history, Variable(rnn_out.data)], dim=0)
#print(pointer_history)
###
# Built-in cross entropy
# total_loss += len(data) * criterion(output_flat, targets).data[0]
###
# Manual cross entropy
# softmax_output_flat = torch.nn.functional.softmax(output_flat)
# soft = torch.gather(softmax_output_flat, dim=1, index=targets.view(-1, 1))
# entropy = -torch.log(soft)
# total_loss += len(data) * entropy.mean().data[0]
###
# Pointer manual cross entropy
loss = 0
softmax_output_flat = torch.nn.functional.softmax(output_flat)
for idx, vocab_loss in enumerate(softmax_output_flat):
p = vocab_loss
if start_idx + idx > window:
valid_next_word = next_word_history[start_idx + idx - window:start_idx + idx]
valid_pointer_history = pointer_history[start_idx + idx - window:start_idx + idx]
logits = torch.mv(valid_pointer_history, rnn_out[idx])
theta = args.theta
ptr_attn = torch.nn.functional.softmax(theta * logits).view(-1, 1)
ptr_dist = (ptr_attn.expand_as(valid_next_word) * valid_next_word).sum(0).squeeze()
lambdah = args.lambdasm
p = lambdah * ptr_dist + (1 - lambdah) * vocab_loss
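                # p linearly interpolates between the pointer/cache distribution
                # over the last `window` target words and the ordinary vocabulary
                # softmax; theta sharpens the attention logits before the softmax
                # over the history.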
###
target_loss = p[targets[idx].data]
loss += (-torch.log(target_loss)).data[0]
total_loss += loss / batch_size
###
hidden = repackage_hidden(hidden)
next_word_history = next_word_history[-window:]
pointer_history = pointer_history[-window:]
return total_loss / len(data_source)
# Load the best saved model.
with open(args.save, 'rb') as f:
if not args.cuda:
model = torch.load(f, map_location=lambda storage, loc: storage)
else:
model = torch.load(f)
print(model)
# Run on val data.
val_loss = evaluate(val_data, test_batch_size)
print('=' * 89)
print('| End of pointer | val loss {:5.2f} | val ppl {:8.2f}'.format(
val_loss, math.exp(val_loss)))
print('=' * 89)
# Run on test data.
test_loss = evaluate(test_data, test_batch_size)
print('=' * 89)
print('| End of pointer | test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
| awd-lstm-lm-master | pointer.py |
import numpy as np
import torch
from torch.autograd import Variable
def embedded_dropout(embed, words, dropout=0.1, scale=None):
if dropout:
mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout)
mask = Variable(mask)
masked_embed_weight = mask * embed.weight
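        # Whole embedding rows (entire word types) are zeroed together, and the
        # 1/(1 - dropout) factor above keeps E[masked_embed_weight] equal to
        # embed.weight (the usual inverted-dropout scaling).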
else:
masked_embed_weight = embed.weight
if scale:
masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
X = embed._backend.Embedding.apply(words, masked_embed_weight,
padding_idx, embed.max_norm, embed.norm_type,
embed.scale_grad_by_freq, embed.sparse
)
return X
if __name__ == '__main__':
V = 50
h = 4
bptt = 10
batch_size = 2
embed = torch.nn.Embedding(V, h)
words = np.random.random_integers(low=0, high=V-1, size=(batch_size, bptt))
words = torch.LongTensor(words)
words = Variable(words)
origX = embed(words)
X = embedded_dropout(embed, words)
print(origX)
print(X)
| awd-lstm-lm-master | embed_regularize.py |
import torch
from torch.nn import Parameter
from functools import wraps
class WeightDrop(torch.nn.Module):
def __init__(self, module, weights, dropout=0, variational=False):
super(WeightDrop, self).__init__()
self.module = module
self.weights = weights
self.dropout = dropout
self.variational = variational
self._setup()
def widget_demagnetizer_y2k_edition(*args, **kwargs):
# We need to replace flatten_parameters with a nothing function
# It must be a function rather than a lambda as otherwise pickling explodes
# We can't write boring code though, so ... WIDGET DEMAGNETIZER Y2K EDITION!
# (╯°□°)╯︵ ┻━┻
return
def _setup(self):
# Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN
if issubclass(type(self.module), torch.nn.RNNBase):
self.module.flatten_parameters = self.widget_demagnetizer_y2k_edition
for name_w in self.weights:
print('Applying weight drop of {} to {}'.format(self.dropout, name_w))
w = getattr(self.module, name_w)
del self.module._parameters[name_w]
self.module.register_parameter(name_w + '_raw', Parameter(w.data))
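            # The learnable Parameter now lives under `<name>_raw`; _setweights()
            # below re-derives the (dropped) weight from it on every forward
            # pass, so gradients still flow into the raw, undropped weights.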
def _setweights(self):
for name_w in self.weights:
raw_w = getattr(self.module, name_w + '_raw')
w = None
if self.variational:
mask = torch.autograd.Variable(torch.ones(raw_w.size(0), 1))
if raw_w.is_cuda: mask = mask.cuda()
mask = torch.nn.functional.dropout(mask, p=self.dropout, training=True)
w = mask.expand_as(raw_w) * raw_w
else:
w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training)
setattr(self.module, name_w, w)
def forward(self, *args):
self._setweights()
return self.module.forward(*args)
if __name__ == '__main__':
import torch
from weight_drop import WeightDrop
# Input is (seq, batch, input)
x = torch.autograd.Variable(torch.randn(2, 1, 10)).cuda()
h0 = None
###
print('Testing WeightDrop')
print('=-=-=-=-=-=-=-=-=-=')
###
print('Testing WeightDrop with Linear')
lin = WeightDrop(torch.nn.Linear(10, 10), ['weight'], dropout=0.9)
lin.cuda()
run1 = [x.sum() for x in lin(x).data]
run2 = [x.sum() for x in lin(x).data]
print('All items should be different')
print('Run 1:', run1)
print('Run 2:', run2)
assert run1[0] != run2[0]
assert run1[1] != run2[1]
print('---')
###
print('Testing WeightDrop with LSTM')
wdrnn = WeightDrop(torch.nn.LSTM(10, 10), ['weight_hh_l0'], dropout=0.9)
wdrnn.cuda()
run1 = [x.sum() for x in wdrnn(x, h0)[0].data]
run2 = [x.sum() for x in wdrnn(x, h0)[0].data]
print('First timesteps should be equal, all others should differ')
print('Run 1:', run1)
print('Run 2:', run2)
# First time step, not influenced by hidden to hidden weights, should be equal
assert run1[0] == run2[0]
# Second step should not
assert run1[1] != run2[1]
print('---')
| awd-lstm-lm-master | weight_drop.py |
import argparse
import time
import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import data
import model
from utils import batchify, get_batch, repackage_hidden
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='data/penn/',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (LSTM, QRNN, GRU)')
parser.add_argument('--emsize', type=int, default=400,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=1150,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=3,
help='number of layers')
parser.add_argument('--lr', type=float, default=30,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=80, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=70,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.4,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.3,
help='dropout for rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.65,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.1,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.5,
help='amount of weight dropout to apply to the RNN hidden to hidden matrix')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
                    help='non-monotone interval (epochs of validation history) used for the ASGD switching criterion')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
randomhash = ''.join(str(time.time()).split('.'))
parser.add_argument('--save', type=str, default=randomhash+'.pt',
help='path to save the final model')
parser.add_argument('--alpha', type=float, default=2,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1,
                    help='beta slowness regularization applied on RNN activation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=1.2e-6,
help='weight decay applied to all weights')
parser.add_argument('--resume', type=str, default='',
help='path of model to resume')
parser.add_argument('--optimizer', type=str, default='sgd',
help='optimizer to use (sgd, adam)')
parser.add_argument('--when', nargs="+", type=int, default=[-1],
help='When (which epochs) to divide the learning rate by 10 - accepts multiple')
args = parser.parse_args()
args.tied = True
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
def model_save(fn):
with open(fn, 'wb') as f:
torch.save([model, criterion, optimizer], f)
def model_load(fn):
global model, criterion, optimizer
with open(fn, 'rb') as f:
model, criterion, optimizer = torch.load(f)
import os
import hashlib
fn = 'corpus.{}.data'.format(hashlib.md5(args.data.encode()).hexdigest())
if os.path.exists(fn):
print('Loading cached dataset...')
corpus = torch.load(fn)
else:
print('Producing dataset...')
corpus = data.Corpus(args.data)
torch.save(corpus, fn)
eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
###############################################################################
# Build the model
###############################################################################
from splitcross import SplitCrossEntropyLoss
criterion = None
ntokens = len(corpus.dictionary)
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.dropouth, args.dropouti, args.dropoute, args.wdrop, args.tied)
###
if args.resume:
print('Resuming model ...')
model_load(args.resume)
optimizer.param_groups[0]['lr'] = args.lr
    model.dropouti, model.dropouth, model.dropout, model.dropoute = args.dropouti, args.dropouth, args.dropout, args.dropoute
if args.wdrop:
from weight_drop import WeightDrop
for rnn in model.rnns:
if type(rnn) == WeightDrop: rnn.dropout = args.wdrop
elif rnn.zoneout > 0: rnn.zoneout = args.wdrop
###
if not criterion:
splits = []
if ntokens > 500000:
# One Billion
# This produces fairly even matrix mults for the buckets:
# 0: 11723136, 1: 10854630, 2: 11270961, 3: 11219422
splits = [4200, 35000, 180000]
elif ntokens > 75000:
# WikiText-103
splits = [2800, 20000, 76000]
print('Using', splits)
criterion = SplitCrossEntropyLoss(args.emsize, splits=splits, verbose=False)
###
params = list(model.parameters()) + list(criterion.parameters())
if args.cuda:
model = model.cuda()
criterion = criterion.cuda()
params = list(model.parameters()) + list(criterion.parameters())
###
total_params = sum(x.size()[0] * x.size()[1] if len(x.size()) > 1 else x.size()[0] for x in params if x.size())
print('Args:', args)
print('Model total parameters:', total_params)
###############################################################################
# Training code
###############################################################################
def evaluate(data_source, batch_size=10):
# Turn on evaluation mode which disables dropout.
model.eval()
if args.model == 'QRNN': model.reset()
total_loss = 0
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(batch_size)
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i, args, evaluation=True)
output, hidden = model(data, hidden)
total_loss += len(data) * criterion(model.decoder.weight, model.decoder.bias, output, targets).data
hidden = repackage_hidden(hidden)
return total_loss[0] / len(data_source)
def train():
# Turn on training mode which enables dropout.
if args.model == 'QRNN': model.reset()
total_loss = 0
start_time = time.time()
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(args.batch_size)
batch, i = 0, 0
while i < train_data.size(0) - 1 - 1:
bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
# Prevent excessively small or negative sequence lengths
seq_len = max(5, int(np.random.normal(bptt, 5)))
# There's a very small chance that it could select a very long sequence length resulting in OOM
# seq_len = min(seq_len, args.bptt + 10)
lr2 = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
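        # Rescale the step size in proportion to the sampled sequence length:
        # the loss is a per-token mean, so without this, randomly drawn short
        # sequences would receive the same-sized update as full-length ones.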
model.train()
data, targets = get_batch(train_data, i, args, seq_len=seq_len)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
hidden = repackage_hidden(hidden)
optimizer.zero_grad()
output, hidden, rnn_hs, dropped_rnn_hs = model(data, hidden, return_h=True)
raw_loss = criterion(model.decoder.weight, model.decoder.bias, output, targets)
loss = raw_loss
        # Activation Regularization
if args.alpha: loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
# Temporal Activation Regularization (slowness)
if args.beta: loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
if args.clip: torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
optimizer.step()
total_loss += raw_loss.data
optimizer.param_groups[0]['lr'] = lr2
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss[0] / args.log_interval
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:05.5f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f} | bpc {:8.3f}'.format(
epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss), cur_loss / math.log(2)))
total_loss = 0
start_time = time.time()
###
batch += 1
i += seq_len
# Loop over epochs.
lr = args.lr
best_val_loss = []
stored_loss = 100000000
# At any point you can hit Ctrl + C to break out of training early.
try:
optimizer = None
if args.optimizer == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
if 't0' in optimizer.param_groups[0]:
tmp = {}
for prm in model.parameters():
tmp[prm] = prm.data.clone()
prm.data = optimizer.state[prm]['ax'].clone()
val_loss2 = evaluate(val_data)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f} | valid bpc {:8.3f}'.format(
                epoch, (time.time() - epoch_start_time), val_loss2, math.exp(val_loss2), val_loss2 / math.log(2)))
print('-' * 89)
if val_loss2 < stored_loss:
model_save(args.save)
print('Saving Averaged!')
stored_loss = val_loss2
for prm in model.parameters():
prm.data = tmp[prm].clone()
else:
val_loss = evaluate(val_data, eval_batch_size)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f} | valid bpc {:8.3f}'.format(
epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss), val_loss / math.log(2)))
print('-' * 89)
if val_loss < stored_loss:
model_save(args.save)
print('Saving model (new best validation)')
stored_loss = val_loss
if args.optimizer == 'sgd' and 't0' not in optimizer.param_groups[0] and (len(best_val_loss)>args.nonmono and val_loss > min(best_val_loss[:-args.nonmono])):
print('Switching to ASGD')
optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
if epoch in args.when:
print('Saving model before learning rate decreased')
model_save('{}.e{}'.format(args.save, epoch))
print('Dividing learning rate by 10')
optimizer.param_groups[0]['lr'] /= 10.
best_val_loss.append(val_loss)
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
model_load(args.save)
# Run on test data.
test_loss = evaluate(test_data, test_batch_size)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(
test_loss, math.exp(test_loss), test_loss / math.log(2)))
print('=' * 89)
| awd-lstm-lm-master | main.py |
from collections import defaultdict
import torch
import torch.nn as nn
import numpy as np
class SplitCrossEntropyLoss(nn.Module):
r'''SplitCrossEntropyLoss calculates an approximate softmax'''
def __init__(self, hidden_size, splits, verbose=False):
# We assume splits is [0, split1, split2, N] where N >= |V|
# For example, a vocab of 1000 words may have splits [0] + [100, 500] + [inf]
super(SplitCrossEntropyLoss, self).__init__()
self.hidden_size = hidden_size
self.splits = [0] + splits + [100 * 1000000]
self.nsplits = len(self.splits) - 1
self.stats = defaultdict(list)
self.verbose = verbose
        # Each split outside the head requires a pretend token, which we'll call a tombstone
# The probability given to this tombstone is the probability of selecting an item from the represented split
if self.nsplits > 1:
self.tail_vectors = nn.Parameter(torch.zeros(self.nsplits - 1, hidden_size))
self.tail_bias = nn.Parameter(torch.zeros(self.nsplits - 1))
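        # Worked example (hypothetical numbers): with self.splits == [0, 100, 500, inf]
        # the head softmax covers words 0..99 plus two tombstones; a target id of
        # 250 lands in split 1 and its log-probability is
        #   log p(tombstone_1) + log p(word 250 | split 1),
        # a softmax over the 400 words of that split instead of the full vocabulary.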
def logprob(self, weight, bias, hiddens, splits=None, softmaxed_head_res=None, verbose=False):
# First we perform the first softmax on the head vocabulary and the tombstones
if softmaxed_head_res is None:
start, end = self.splits[0], self.splits[1]
head_weight = None if end - start == 0 else weight[start:end]
head_bias = None if end - start == 0 else bias[start:end]
# We only add the tombstones if we have more than one split
if self.nsplits > 1:
head_weight = self.tail_vectors if head_weight is None else torch.cat([head_weight, self.tail_vectors])
head_bias = self.tail_bias if head_bias is None else torch.cat([head_bias, self.tail_bias])
# Perform the softmax calculation for the word vectors in the head for all splits
# We need to guard against empty splits as torch.cat does not like random lists
head_res = torch.nn.functional.linear(hiddens, head_weight, bias=head_bias)
softmaxed_head_res = torch.nn.functional.log_softmax(head_res)
if splits is None:
splits = list(range(self.nsplits))
results = []
running_offset = 0
for idx in splits:
# For those targets in the head (idx == 0) we only need to return their loss
if idx == 0:
results.append(softmaxed_head_res[:, :-(self.nsplits - 1)])
# If the target is in one of the splits, the probability is the p(tombstone) * p(word within tombstone)
else:
start, end = self.splits[idx], self.splits[idx + 1]
tail_weight = weight[start:end]
tail_bias = bias[start:end]
# Calculate the softmax for the words in the tombstone
tail_res = torch.nn.functional.linear(hiddens, tail_weight, bias=tail_bias)
# Then we calculate p(tombstone) * p(word in tombstone)
# Adding is equivalent to multiplication in log space
head_entropy = (softmaxed_head_res[:, -idx]).contiguous()
tail_entropy = torch.nn.functional.log_softmax(tail_res)
results.append(head_entropy.view(-1, 1) + tail_entropy)
if len(results) > 1:
return torch.cat(results, dim=1)
return results[0]
def split_on_targets(self, hiddens, targets):
# Split the targets into those in the head and in the tail
split_targets = []
split_hiddens = []
# Determine to which split each element belongs (for each start split value, add 1 if equal or greater)
# This method appears slower at least for WT-103 values for approx softmax
#masks = [(targets >= self.splits[idx]).view(1, -1) for idx in range(1, self.nsplits)]
#mask = torch.sum(torch.cat(masks, dim=0), dim=0)
###
# This is equally fast for smaller splits as method below but scales linearly
mask = None
for idx in range(1, self.nsplits):
partial_mask = targets >= self.splits[idx]
mask = mask + partial_mask if mask is not None else partial_mask
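        # After this loop, mask[i] is the split index of targets[i]: each
        # comparison contributes 1 whenever the target id is past another
        # split start.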
###
#masks = torch.stack([targets] * (self.nsplits - 1))
#mask = torch.sum(masks >= self.split_starts, dim=0)
for idx in range(self.nsplits):
# If there are no splits, avoid costly masked select
if self.nsplits == 1:
split_targets, split_hiddens = [targets], [hiddens]
continue
# If all the words are covered by earlier targets, we have empties so later stages don't freak out
if sum(len(t) for t in split_targets) == len(targets):
split_targets.append([])
split_hiddens.append([])
continue
# Are you in our split?
tmp_mask = mask == idx
split_targets.append(torch.masked_select(targets, tmp_mask))
split_hiddens.append(hiddens.masked_select(tmp_mask.unsqueeze(1).expand_as(hiddens)).view(-1, hiddens.size(1)))
return split_targets, split_hiddens
def forward(self, weight, bias, hiddens, targets, verbose=False):
if self.verbose or verbose:
for idx in sorted(self.stats):
print('{}: {}'.format(idx, int(np.mean(self.stats[idx]))), end=', ')
print()
total_loss = None
if len(hiddens.size()) > 2: hiddens = hiddens.view(-1, hiddens.size(2))
split_targets, split_hiddens = self.split_on_targets(hiddens, targets)
# First we perform the first softmax on the head vocabulary and the tombstones
start, end = self.splits[0], self.splits[1]
head_weight = None if end - start == 0 else weight[start:end]
head_bias = None if end - start == 0 else bias[start:end]
# We only add the tombstones if we have more than one split
if self.nsplits > 1:
head_weight = self.tail_vectors if head_weight is None else torch.cat([head_weight, self.tail_vectors])
head_bias = self.tail_bias if head_bias is None else torch.cat([head_bias, self.tail_bias])
# Perform the softmax calculation for the word vectors in the head for all splits
# We need to guard against empty splits as torch.cat does not like random lists
combo = torch.cat([split_hiddens[i] for i in range(self.nsplits) if len(split_hiddens[i])])
###
all_head_res = torch.nn.functional.linear(combo, head_weight, bias=head_bias)
softmaxed_all_head_res = torch.nn.functional.log_softmax(all_head_res)
if self.verbose or verbose:
self.stats[0].append(combo.size()[0] * head_weight.size()[0])
running_offset = 0
for idx in range(self.nsplits):
# If there are no targets for this split, continue
if len(split_targets[idx]) == 0: continue
# For those targets in the head (idx == 0) we only need to return their loss
if idx == 0:
softmaxed_head_res = softmaxed_all_head_res[running_offset:running_offset + len(split_hiddens[idx])]
entropy = -torch.gather(softmaxed_head_res, dim=1, index=split_targets[idx].view(-1, 1))
# If the target is in one of the splits, the probability is the p(tombstone) * p(word within tombstone)
else:
softmaxed_head_res = softmaxed_all_head_res[running_offset:running_offset + len(split_hiddens[idx])]
if self.verbose or verbose:
start, end = self.splits[idx], self.splits[idx + 1]
tail_weight = weight[start:end]
self.stats[idx].append(split_hiddens[idx].size()[0] * tail_weight.size()[0])
# Calculate the softmax for the words in the tombstone
tail_res = self.logprob(weight, bias, split_hiddens[idx], splits=[idx], softmaxed_head_res=softmaxed_head_res)
# Then we calculate p(tombstone) * p(word in tombstone)
# Adding is equivalent to multiplication in log space
head_entropy = softmaxed_head_res[:, -idx]
# All indices are shifted - if the first split handles [0,...,499] then the 500th in the second split will be 0 indexed
indices = (split_targets[idx] - self.splits[idx]).view(-1, 1)
# Warning: if you don't squeeze, you get an N x 1 return, which acts oddly with broadcasting
tail_entropy = torch.gather(torch.nn.functional.log_softmax(tail_res), dim=1, index=indices).squeeze()
entropy = -(head_entropy + tail_entropy)
###
running_offset += len(split_hiddens[idx])
total_loss = entropy.float().sum() if total_loss is None else total_loss + entropy.float().sum()
return (total_loss / len(targets)).type_as(weight)
if __name__ == '__main__':
np.random.seed(42)
torch.manual_seed(42)
if torch.cuda.is_available():
torch.cuda.manual_seed(42)
V = 8
H = 10
N = 100
E = 10
embed = torch.nn.Embedding(V, H)
crit = SplitCrossEntropyLoss(hidden_size=H, splits=[V // 2])
bias = torch.nn.Parameter(torch.ones(V))
optimizer = torch.optim.SGD(list(embed.parameters()) + list(crit.parameters()), lr=1)
for _ in range(E):
prev = torch.autograd.Variable((torch.rand(N, 1) * 0.999 * V).int().long())
x = torch.autograd.Variable((torch.rand(N, 1) * 0.999 * V).int().long())
y = embed(prev).squeeze()
c = crit(embed.weight, bias, y, x.view(N))
print('Crit', c.exp().data[0])
logprobs = crit.logprob(embed.weight, bias, y[:2]).exp()
print(logprobs)
print(logprobs.sum(dim=1))
optimizer.zero_grad()
c.backward()
optimizer.step()
| awd-lstm-lm-master | splitcross.py |
import argparse
import time
import math
import numpy as np
np.random.seed(331)
import torch
import torch.nn as nn
from torch.autograd import Variable
import data
import model
from utils import batchify, get_batch, repackage_hidden
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='data/penn/',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
                    help='type of recurrent net (LSTM, QRNN, GRU)')
parser.add_argument('--emsize', type=int, default=400,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=1150,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=3,
help='number of layers')
parser.add_argument('--lr', type=float, default=30,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=80, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=70,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.4,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.3,
help='dropout for rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.65,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.1,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.5,
help='amount of weight dropout to apply to the RNN hidden to hidden matrix')
parser.add_argument('--tied', action='store_false',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
                    help='non-monotone interval (epochs of validation history) used for the early-stopping check')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
randomhash = ''.join(str(time.time()).split('.'))
parser.add_argument('--save', type=str, default=randomhash+'.pt',
help='path to save the final model')
parser.add_argument('--alpha', type=float, default=2,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1,
                    help='beta slowness regularization applied on RNN activation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=1.2e-6,
help='weight decay applied to all weights')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus(args.data)
eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.dropouth, args.dropouti, args.dropoute, args.wdrop, args.tied)
if args.cuda:
model.cuda()
total_params = sum(x.size()[0] * x.size()[1] if len(x.size()) > 1 else x.size()[0] for x in model.parameters())
print('Args:', args)
print('Model total parameters:', total_params)
criterion = nn.CrossEntropyLoss()
###############################################################################
# Training code
###############################################################################
def evaluate(data_source, batch_size=10):
# Turn on evaluation mode which disables dropout.
if args.model == 'QRNN': model.reset()
model.eval()
total_loss = 0
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(batch_size)
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i, args, evaluation=True)
output, hidden = model(data, hidden)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).data
hidden = repackage_hidden(hidden)
return total_loss[0] / len(data_source)
def train():
# Turn on training mode which enables dropout.
if args.model == 'QRNN': model.reset()
total_loss = 0
start_time = time.time()
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(args.batch_size)
batch, i = 0, 0
while i < train_data.size(0) - 1 - 1:
bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
# Prevent excessively small or negative sequence lengths
seq_len = max(5, int(np.random.normal(bptt, 5)))
# There's a very small chance that it could select a very long sequence length resulting in OOM
seq_len = min(seq_len, args.bptt + 10)
lr2 = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
model.train()
data, targets = get_batch(train_data, i, args, seq_len=seq_len)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
hidden = repackage_hidden(hidden)
optimizer.zero_grad()
output, hidden, rnn_hs, dropped_rnn_hs = model(data, hidden, return_h=True)
raw_loss = criterion(output.view(-1, ntokens), targets)
loss = raw_loss
        # Activation Regularization
loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
# Temporal Activation Regularization (slowness)
loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
optimizer.step()
total_loss += raw_loss.data
optimizer.param_groups[0]['lr'] = lr2
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss[0] / args.log_interval
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
###
batch += 1
i += seq_len
# Load the best saved model.
with open(args.save, 'rb') as f:
model = torch.load(f)
# Loop over epochs.
lr = args.lr
stored_loss = evaluate(val_data)
best_val_loss = []
# At any point you can hit Ctrl + C to break out of training early.
try:
#optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
if 't0' in optimizer.param_groups[0]:
tmp = {}
for prm in model.parameters():
tmp[prm] = prm.data.clone()
prm.data = optimizer.state[prm]['ax'].clone()
val_loss2 = evaluate(val_data)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss2, math.exp(val_loss2)))
print('-' * 89)
if val_loss2 < stored_loss:
with open(args.save, 'wb') as f:
torch.save(model, f)
print('Saving Averaged!')
stored_loss = val_loss2
for prm in model.parameters():
prm.data = tmp[prm].clone()
if (len(best_val_loss)>args.nonmono and val_loss2 > min(best_val_loss[:-args.nonmono])):
print('Done!')
import sys
sys.exit(1)
optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
#optimizer.param_groups[0]['lr'] /= 2.
best_val_loss.append(val_loss2)
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb') as f:
model = torch.load(f)
# Run on test data.
test_loss = evaluate(test_data, test_batch_size)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
| awd-lstm-lm-master | finetune.py |
import torch
import torch.nn as nn
from torch.autograd import Variable
class LockedDropout(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, dropout=0.5):
if not self.training or not dropout:
return x
m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)
mask = Variable(m, requires_grad=False) / (1 - dropout)
mask = mask.expand_as(x)
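        # The mask is sampled once per (batch, feature) slice and broadcast over
        # the time dimension, so a dropped unit stays dropped for the whole
        # sequence ("locked"/variational dropout), unlike standard nn.Dropout.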
return mask * x
| awd-lstm-lm-master | locked_dropout.py |
import os
import torch
from collections import Counter
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
self.counter = Counter()
self.total = 0
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
token_id = self.word2idx[word]
self.counter[token_id] += 1
self.total += 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.train = self.tokenize(os.path.join(path, 'train.txt'))
self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
self.test = self.tokenize(os.path.join(path, 'test.txt'))
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids
| awd-lstm-lm-master | data.py |
#!/usr/bin/env python
# coding=utf-8
import os
import sys
import zipfile
if os.path.exists('train.txt'):
print('Tokenized enwik8 already exists - skipping processing')
sys.exit()
data = zipfile.ZipFile('enwik8.zip').read('enwik8')
print('Length of enwik8: {}'.format(len(data)))
num_test_chars = 5000000
train_data = data[: -2 * num_test_chars]
valid_data = data[-2 * num_test_chars: -num_test_chars]
test_data = data[-num_test_chars:]
for fn, part in [('train.txt', train_data), ('valid.txt', valid_data), ('test.txt', test_data)]:
print('{} will have {} bytes'.format(fn, len(part)))
print('- Tokenizing...')
part_str = ' '.join([str(c) if c != ord('\n') else '\n' for c in part])
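    # Assuming Python 3 (iterating bytes yields ints), each byte becomes a
    # whitespace-separated decimal token with newlines kept literal, so the
    # character-level vocabulary has at most 256 symbols.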
print('- Writing...')
    with open(fn, 'w') as f:
        f.write(part_str)
    with open(fn + '.raw', 'wb') as f:
        f.write(part)
| awd-lstm-lm-master | data/enwik8/prep_enwik8.py |
import os
import deepspeed
import torch.distributed as dist
from distill_bloom import build_train_val_test_dataset
from distill_bloom import parse_args
args = parse_args()
local_rank = int(os.getenv("LOCAL_RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "1"))
deepspeed.init_distributed("nccl")
rank = dist.get_rank()
if rank == 0:
train_ds, val, test = build_train_val_test_dataset(args)
print(f"The total dataset includes: {len(train_ds)} subsets")
for i, train_data in enumerate(train_ds):
print(f"Train dataset: {i} has {len(train_data)} samples")
for data in train_data:
print("Text: ", data['text'])
break | distill-bloom-deepspeed-main | test_dataset.py |
# usage:
# deepspeed --num_gpus 8 teacher-inference-script.py --name bigscience/bloom
#
# to run benchmarks:
# deepspeed --num_gpus 8 teacher-inference-script.py --name bigscience/bloom --benchmark
#
# This is going to improve, but at the moment the process is a bit cumbersome:
# 1. use Deepspeed-ZeRO to instantiate the model on GPUs, w/o loading the checkpoints,
# 2. free the allocated storage
# 3. start Deepspeed-Inference and only now load the checkpoint
# 4. run generate
# Done.
#
import gc
import glob
import io
import json
import math
import os
import time
from pathlib import Path
import deepspeed
import torch
import torch.distributed as dist
from huggingface_hub import snapshot_download
from transformers import AutoConfig, AutoModelForCausalLM
from transformers.models.bloom.modeling_bloom import BloomBlock as BloomBlock
from transformers.utils import is_offline_mode
from distill_bloom import build_train_val_test_dataset, DistributedDataset, DistributedDataLoader
from distill_bloom import parse_args, DeepSpeedInitWrapper, print_rank0
# Arguments
args = parse_args()
local_rank = int(os.getenv("LOCAL_RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "1")) # World size is the number of GPUs
deepspeed.init_distributed("nccl")
rank = dist.get_rank()
## Check the args
assert (world_size % args.global_batch_size) == 0, "global_batch_size must evenly divide world_size (the number of GPUs)"
ds_init = DeepSpeedInitWrapper(args)
ds_init.init_deepspeed_inference()
model_name = ds_init.repo_root
# Wait until all processes have correctly initialized DeepSpeed
dist.barrier()
print_rank0(f"*** Loading the model {model_name}")
config = AutoConfig.from_pretrained(model_name)
# Construct model with fake meta tensors, later will be replaced during ds-inference ckpt load
with deepspeed.OnDevice(dtype=ds_init.dtype, device="meta"):
model = AutoModelForCausalLM.from_config(config, torch_dtype=torch.bfloat16)
model = model.eval()
# checkpoints_json=None
model = deepspeed.init_inference(
model,
mp_size=world_size,
base_dir=ds_init.repo_root,
dtype=getattr(torch, ds_init.infer_dtype),
checkpoint=ds_init.checkpoints_json,
**ds_init.kwargs,
)
model = model.module
# Dataset building - each rank will have a different shard of the dataset
train_ds, _, _ = build_train_val_test_dataset(args)
data_loader = DistributedDataLoader(
train_ds,
rank=rank,
world_size=world_size,
batch_size=1
)
dist.barrier()
def generate_logits(inputs):
"""returns a list of zipped inputs, outputs and number of new tokens"""
inputs = inputs.to(torch.cuda.current_device())
outputs = model(inputs).logits
return outputs
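# Illustrative note: for inputs of shape (batch, seq_len) the returned teacher
# logits have shape (batch, seq_len, vocab_size), i.e. the tensor a distillation
# loss (e.g. KL divergence against the student) would consume downstream.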
def generate_logits_batch(data_loader):
for batch in data_loader:
inputs = batch['text']
# as a sanity check, I used to check that inputs are different for each rank
inputs = inputs.to(torch.cuda.current_device())
outputs = model(inputs).logits
# Here we leave the return statement for debugging purposes
# But in practice at this point we would probably call
# dist.barrier() and send the logits together with the input
# to the student model
return outputs
# warmup is a must if measuring speed as it's when all the optimizations are performed
# e.g. on 8x80 a100 the first pass of 100 tokens takes 23sec, and the next one is 4secs
print_rank0(f"*** Running generate warmup")
# _ = generate_logits(inputs)
_ = generate_logits_batch(data_loader)
print_rank0(f"*** Running generate")
t_generate_start = time.time()
# generated = generate_logits(inputs)
generated = generate_logits_batch(data_loader)
print(rank, generated.shape)
t_generate_span = time.time() - t_generate_start | distill-bloom-deepspeed-main | teacher-inference-script.py |
# Dataset imports
from .arguments.arguments import parse_args
from .dataset.get_dataset import build_train_val_test_dataset
from .dataset.dataloader import DistributedDataset, DistributedDataLoader
# Arguments import
from .init_wrapper import DeepSpeedInitWrapper, print_rank0 | distill-bloom-deepspeed-main | distill_bloom/__init__.py |
# usage:
# deepspeed --num_gpus 8 teacher-inference-script.py --name bigscience/bloom
#
# to run benchmarks:
# deepspeed --num_gpus 8 teacher-inference-script.py --name bigscience/bloom --benchmark
#
# This is going to improve, but at the moment, the process is a bit cumbersome - we first use
# 1. use Deepspeed-ZeRO to instantiate the model on GPUs, w/o loading the checkpoints,
# 2. free the allocated storage
# 3. start Deepspeed-Inference and only now load the checkpoint
# 4. run generate
# Done.
#
import gc
import glob
import io
import json
import math
import os
import time
from argparse import ArgumentParser
from pathlib import Path
import deepspeed
import torch
import torch.distributed as dist
from huggingface_hub import snapshot_download
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from transformers.models.bloom.modeling_bloom import BloomBlock as BloomBlock
from transformers.utils import is_offline_mode
# the Deepspeed team made these so they're super fast to load (~1 minute), rather than waiting through the 10-20 min loading time.
tp_presharded_models = [
"microsoft/bloom-deepspeed-inference-int8",
"microsoft/bloom-deepspeed-inference-fp16",
]
t_start = time.time()
num_tokens = 100
parser = ArgumentParser()
parser.add_argument("--name", required=True, type=str, help="model_name")
parser.add_argument(
"--dtype",
type=str,
help="float16 or int8",
choices=["int8", "float16"],
default="float16",
)
parser.add_argument(
"--local_rank", required=False, type=int, help="used by dist launchers"
)
parser.add_argument("--batch_size", default=1, type=int, help="batch size")
parser.add_argument(
"--benchmark", action="store_true", help="additionally run benchmark"
)
args = parser.parse_args()
local_rank = int(os.getenv("LOCAL_RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "1"))
deepspeed.init_distributed("nccl")
rank = dist.get_rank()
def print_rank0(*msg):
if rank != 0:
return
print(*msg)
### Model loading and instantiating on GPUs
def get_repo_root(model_name_or_path, revision=None):
# checks if online or not
if is_offline_mode():
print_rank0("Offline mode: forcing local_files_only=True")
local_files_only = True
else:
local_files_only = False
# loads files from hub
cached_repo_dir = snapshot_download(
model_name_or_path,
allow_patterns=["*"],
local_files_only=local_files_only,
revision=revision,
)
return cached_repo_dir
def get_checkpoint_files(model_name_or_path, revision=None, force_offline=True):
if not force_offline:
# checks if online or not
if is_offline_mode():
print_rank0("Offline mode: forcing local_files_only=True")
local_files_only = True
else:
local_files_only = False
# loads files from hub
cached_repo_dir = snapshot_download(
model_name_or_path,
allow_patterns=["*"],
            local_files_only=local_files_only,
revision=revision,
)
else:
cached_repo_dir = model_name_or_path
# extensions: .bin | .pt
# creates a list of paths from all downloaded files in cache dir
file_list = [
str(entry)
for entry in Path(cached_repo_dir).rglob("*.[bp][it][n]")
if entry.is_file()
]
return file_list
model_name = args.name
infer_dtype = args.dtype
tp_presharded_mode = True if model_name in tp_presharded_models else False
# print(get_checkpoint_files(model_name))
print_rank0(f"*** Loading the model {model_name}")
tokenizer = AutoTokenizer.from_pretrained(model_name)
config = AutoConfig.from_pretrained(model_name)
# XXX: can't automatically derive dtype via config's `from_pretrained`
# dtype = torch.bfloat16 if model_name in ["bigscience/bloom", "bigscience/bigscience-small-testing"] else torch.float16
# use one of these args to `init_inference`
# 1. injection_policy is the slower version, but it's plain pytorch so it'll always work
# 2. replace_with_kernel_inject is the faster one (fast fused kernels)
kernel_inject = True
# kernel_inject = False
if kernel_inject:
# XXX: for now ds-inference only works with fp16
dtype = torch.float16
else:
dtype = torch.bfloat16
if args.benchmark:
torch.cuda.empty_cache()
gc.collect()
deepspeed.runtime.utils.see_memory_usage("pre-from-pretrained", force=True)
# Construct model with fake meta tensors, later will be replaced during ds-inference ckpt load
with deepspeed.OnDevice(dtype=dtype, device="meta"):
model = AutoModelForCausalLM.from_config(config, torch_dtype=torch.bfloat16)
if args.benchmark:
deepspeed.runtime.utils.see_memory_usage("post-from-pretrained", force=True)
model = model.eval()
if args.benchmark:
torch.cuda.empty_cache()
gc.collect()
deepspeed.runtime.utils.see_memory_usage("post-init-ds-zero-init", force=True)
### Deepspeed-Inference Loading
checkpoints_json = "checkpoints.json"
def write_checkpoints_json():
with io.open(checkpoints_json, "w", encoding="utf-8") as f:
# checkpoint_files = glob.glob(f"{checkpoint_dir}/*bin")
checkpoint_files = get_checkpoint_files(model_name)
# print("Checkpoint files:", checkpoint_files)
data = {"type": "BLOOM", "checkpoints": checkpoint_files, "version": 1.0}
json.dump(data, f)
if args.benchmark:
torch.cuda.empty_cache()
gc.collect()
deepspeed.runtime.utils.see_memory_usage("pre-ds-inference-init", force=True)
if kernel_inject:
kwargs = dict(replace_with_kernel_inject=True)
else:
kwargs = dict(
injection_policy={BloomBlock: ("self_attention.dense", "mlp.dense_4h_to_h")}
)
# TODO: this fails even if the model is present locally
# repo_root = get_repo_root(model_name)
repo_root = model_name
if tp_presharded_mode:
# tp presharded repos come with their own checkpoints config file
checkpoints_json = os.path.join(repo_root, "ds_inference_config.json")
else:
# for normal bloom repo we need to write the checkpoints config file
if rank == 0:
        write_checkpoints_json()
dist.barrier()
# checkpoints_json=None
model = deepspeed.init_inference(
model,
mp_size=world_size,
base_dir=repo_root,
dtype=getattr(torch, infer_dtype),
checkpoint=checkpoints_json,
**kwargs,
)
if args.benchmark:
torch.cuda.empty_cache()
gc.collect()
deepspeed.runtime.utils.see_memory_usage("post-ds-inference-init", force=True)
model = model.module
if args.benchmark:
t_ready = time.time()
### Generate
print_rank0(f"*** Starting to generate {num_tokens} tokens with bs={args.batch_size}")
input_sentences = [
"DeepSpeed is a machine learning framework",
"He is working on",
"He has a",
"He got all",
"Everyone is happy and I can",
"The new movie that got Oscar this year",
"In the far far distance from our galaxy,",
"Peace is the only way",
]
if args.batch_size > len(input_sentences):
# dynamically extend to support larger bs by repetition
input_sentences *= math.ceil(args.batch_size / len(input_sentences))
generate_kwargs = dict(max_new_tokens=num_tokens, do_sample=False)
print_rank0(f"Generate args {generate_kwargs}")
inputs = input_sentences[: args.batch_size]
def generate():
"""returns a list of zipped inputs, outputs and number of new tokens"""
input_tokens = tokenizer.batch_encode_plus(
inputs, return_tensors="pt", padding=True
)
for t in input_tokens:
if torch.is_tensor(input_tokens[t]):
input_tokens[t] = input_tokens[t].to(torch.cuda.current_device())
outputs = model.generate(**input_tokens, **generate_kwargs)
input_tokens_lengths = [x.shape[0] for x in input_tokens.input_ids]
output_tokens_lengths = [x.shape[0] for x in outputs]
total_new_tokens = [
o - i for i, o in zip(input_tokens_lengths, output_tokens_lengths)
]
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
return zip(inputs, outputs, total_new_tokens)
# warmup is a must if measuring speed as it's when all the optimizations are performed
# e.g. on 8x80 a100 the first pass of 100 tokens takes 23sec, and the next one is 4secs
print_rank0(f"*** Running generate warmup")
_ = generate()
print_rank0(f"*** Running generate")
t_generate_start = time.time()
generated = generate()
t_generate_span = time.time() - t_generate_start
for i, o, _ in generated:
print_rank0(f"{'-'*60}\nin={i}\nout={o}\n")
if args.benchmark:
torch.cuda.empty_cache()
gc.collect()
deepspeed.runtime.utils.see_memory_usage("end-of-run", force=True)
### Benchmark
# benchmark it!
if args.benchmark:
print_rank0(f"*** Running benchmark")
# warm up
for i in range(1):
_ = generate()
torch.cuda.synchronize()
# benchmark
t0 = time.time()
cycles = 5
total_new_tokens_generated = 0
for i in range(cycles):
generated = generate()
total_new_tokens_generated += sum(new_tokens for _, _, new_tokens in generated)
torch.cuda.synchronize()
    throughput = (time.time() - t0) / total_new_tokens_generated
print_rank0(
f"""
*** Performance stats:
Throughput per token including tokenize: {throughput*1000:.2f} msecs
Start to ready to generate: {t_ready - t_start:.3f} secs
Tokenize and generate {total_new_tokens_generated} (bs={args.batch_size}) tokens: {t_generate_span:.3f} secs
Start to finish: {t_ready - t_start + t_generate_span:.3f} secs
"""
)
| distill-bloom-deepspeed-main | distill_bloom/teacher-inference-script.py |
import io, json
import os
from pathlib import Path
import torch
import torch.distributed as dist
from huggingface_hub import snapshot_download
from transformers.utils import is_offline_mode
from transformers.models.bloom.modeling_bloom import BloomBlock as BloomBlock
class DeepSpeedInitWrapper(object):
r"""
This is a wrapper around DeepSpeed inference / training script initialisation.
It is used to initialise the DeepSpeed engine and load the necessary variables
to correctly load the model and run inference.
Args:
args (:obj:`argparse.Namespace`):
The parsed arguments from the command line. This contains all the arguments for
training and inference. The `model_path` argument is used to load the model from
the specified path.
"""
def __init__(self, args):
r"""
We need to store the rank of the current process since `write_checkpoints` is
called only on rank 0.
"""
self.rank = dist.get_rank()
self.checkpoints_json = "checkpoints.json"
self.repo_root = args.teacher_model_path
self.infer_dtype = "float16"
def init_deepspeed_inference(self):
r"""
This function is a wrapper around the first lines that are called inside
https://github.com/huggingface/transformers-bloom-inference/blob/main/bloom-inference-scripts/bloom-ds-inference.py
"""
tp_presharded_models = [
"microsoft/bloom-deepspeed-inference-int8",
"microsoft/bloom-deepspeed-inference-fp16",
]
        tp_presharded_mode = self.repo_root in tp_presharded_models
# use one of these args to `init_inference`
# 1. injection_policy is the slower version, but it's plain pytorch so it'll always work
# 2. replace_with_kernel_inject is the faster one (fast fused kernels)
kernel_inject = True
# kernel_inject = False
        if kernel_inject:
            # XXX: for now ds-inference only works with fp16
            self.dtype = torch.float16
            self.kwargs = dict(replace_with_kernel_inject=True)
        else:
            self.dtype = torch.bfloat16
            self.kwargs = dict(
                injection_policy={
                    BloomBlock: ("self_attention.dense", "mlp.dense_4h_to_h")
                }
            )
if tp_presharded_mode:
# tp presharded repos come with their own checkpoints config file
            self.checkpoints_json = os.path.join(self.repo_root, "ds_inference_config.json")
else:
# for normal bloom repo we need to write the checkpoints config file
if self.rank == 0:
                write_checkpoints_json(self.repo_root, self.rank, self.checkpoints_json)
# dist.barrier()
def print_rank0(*msg, rank=0):
if rank != 0:
return
print(*msg)
def get_checkpoint_files(model_name_or_path, rank=0,revision=None, force_offline=True):
if not force_offline:
# checks if online or not
if is_offline_mode():
print_rank0("Offline mode: forcing local_files_only=True", rank)
local_files_only = True
else:
local_files_only = False
# loads files from hub
cached_repo_dir = snapshot_download(
model_name_or_path,
allow_patterns=["*"],
            local_files_only=local_files_only,
revision=revision,
)
else:
cached_repo_dir = model_name_or_path
# extensions: .bin | .pt
# creates a list of paths from all downloaded files in cache dir
file_list = [
str(entry)
for entry in Path(cached_repo_dir).rglob("*.[bp][it][n]")
if entry.is_file()
]
return file_list
def write_checkpoints_json(model_name, rank=0, checkpoints_json="checkpoints.json"):
with io.open(checkpoints_json, "w", encoding="utf-8") as f:
# checkpoint_files = glob.glob(f"{checkpoint_dir}/*bin")
checkpoint_files = get_checkpoint_files(model_name, rank)
# print("Checkpoint files:", checkpoint_files)
data = {"type": "BLOOM", "checkpoints": checkpoint_files, "version": 1.0}
json.dump(data, f)
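# A minimal usage sketch (hypothetical argparse namespace; assumes
# deepspeed.init_distributed has already been called so dist.get_rank() works):
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument("--teacher_model_path", type=str, default="bigscience/bloom")
#     args = parser.parse_args()
#     wrapper = DeepSpeedInitWrapper(args)
#     wrapper.init_deepspeed_inference()
#     # wrapper.dtype, wrapper.kwargs and wrapper.checkpoints_json can then be
#     # forwarded to deepspeed.init_inference(...)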
| distill-bloom-deepspeed-main | distill_bloom/init_wrapper.py |
import torch.distributed as dist
from .utils import build_dataset_group
# NOTE: `build_train_valid_test_datasets` (used below for the --data-path option)
# is expected to come from the Megatron data utilities bundled with this repo;
# it is not defined in this module.
def build_train_val_test_dataset(args):
r"""
This function wraps all the dataset building functions from megatron.
"""
if args.train_samples:
train_samples = args.train_samples
else:
train_samples = args.train_iters * args.global_batch_size
eval_iters = (args.train_iters // args.eval_interval + 1) * args.eval_iters
test_iters = args.eval_iters
train_val_test_num_samples = [
train_samples,
eval_iters * args.global_batch_size,
test_iters * args.global_batch_size,
]
train_ds, valid_ds, test_ds = None, None, None
print("> building train, validation, and test datasets for GPT ...")
# Option 1 of data loading using --data-path
if args.data_path:
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
data_impl=args.data_impl,
splits_string=args.split,
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
)
# Option 2 of data loading using --(train|valid|test)-weighted-split-paths
elif args.train_weighted_split_paths:
assigned_train_valid_test = []
if args.train_weighted_split_paths is not None:
train_ds = []
assigned_train_valid_test.append("train")
if args.valid_weighted_split_paths is not None:
valid_ds = []
assigned_train_valid_test.append("valid")
if args.test_weighted_split_paths is not None:
test_ds = []
assigned_train_valid_test.append("test")
for s in assigned_train_valid_test:
data_groups = zip(
eval(f"args.{s}_weighted_split_paths"),
eval(f"args.{s}_weighted_split_weights"),
eval(f"args.{s}_weighted_split_splits"),
eval(f"args.{s}_weighted_split_names"),
)
for paths, weights, splits, name in data_groups:
d = build_dataset_group(
name,
paths,
weights,
splits,
args.data_impl,
train_val_test_num_samples,
args.seq_length,
args.seed,
(not args.mmap_warmup),
train_valid_test=s,
)
eval(f"{s}_ds").append(d)
else:
raise NotImplementedError("No dataloading argument passed")
print("> finished creating GPT datasets ...")
return train_ds, valid_ds, test_ds
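# A minimal sketch of the Megatron-style argument namespace this function expects
# (attribute names are taken from the code above; values are illustrative only):
#
#     from types import SimpleNamespace
#     args = SimpleNamespace(
#         train_samples=None, train_iters=1000, global_batch_size=256,
#         eval_interval=100, eval_iters=10,
#         data_path=["my-corpus_text_document"], data_impl="mmap",
#         split="969,30,1", seq_length=2048, seed=42, mmap_warmup=False,
#         train_weighted_split_paths=None, valid_weighted_split_paths=None,
#         test_weighted_split_paths=None,
#     )
#     train_ds, valid_ds, test_ds = build_train_val_test_dataset(args)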
| distill-bloom-deepspeed-main | distill_bloom/dataset/get_dataset.py |
import os
import time
import numpy as np
import torch
from .megatron import mpu
def print_rank_0(message):
"""If distributed is initialized, print only on rank 0."""
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print(message, flush=True)
else:
print(message, flush=True)
class GPTDataset(torch.utils.data.Dataset):
def __init__(
self,
name,
data_prefix,
documents,
indexed_dataset,
num_samples,
seq_length,
seed,
):
self.name = name
self.indexed_dataset = indexed_dataset
# Checks
assert np.min(documents) >= 0
assert np.max(documents) < indexed_dataset.sizes.shape[0]
# Build index mappings.
self.doc_idx, self.sample_idx, self.shuffle_idx = _build_index_mappings(
self.name,
data_prefix,
documents,
self.indexed_dataset.sizes,
num_samples,
seq_length,
seed,
)
def __len__(self):
        # -1 is due to data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
return self.sample_idx.shape[0] - 1
def __getitem__(self, idx):
# Get the shuffled index.
idx = self.shuffle_idx[idx]
# Start and end documents and offsets.
doc_index_f = self.sample_idx[idx][0]
doc_index_l = self.sample_idx[idx + 1][0]
offset_f = self.sample_idx[idx][1]
offset_l = self.sample_idx[idx + 1][1]
# If we are within the same document, just extract the chunk.
if doc_index_f == doc_index_l:
sample = self.indexed_dataset.get(
self.doc_idx[doc_index_f],
offset=offset_f,
length=offset_l - offset_f + 1,
)
else:
# Otherwise, get the rest of the initial document.
sample_list = [
self.indexed_dataset.get(self.doc_idx[doc_index_f], offset=offset_f)
]
# Loop over all in between documents and add the entire document.
for i in range(doc_index_f + 1, doc_index_l):
sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
# And finally add the relevant portion of last document.
sample_list.append(
self.indexed_dataset.get(self.doc_idx[doc_index_l], length=offset_l + 1)
)
sample = np.concatenate(sample_list)
return {"text": np.array(sample, dtype=np.int64)}
def _build_index_mappings(
name,
data_prefix,
documents,
sizes,
num_samples,
seq_length,
seed,
cutoff_last_epoch=0.95,
):
"""Build doc-idx, sample-idx, and shuffle-idx.
doc-idx: is an array (ordered) of documents to be used in training.
sample-idx: is the start document index and document offset for each
training sample.
shuffle-idx: maps the sample index into a random index into sample-idx.
"""
# Number of tokens in each epoch and number of required epochs.
tokens_per_epoch = _num_tokens(documents, sizes)
num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples)
# rng state
np_rng = np.random.RandomState(seed=seed)
# Filename of the index mappings.
_filename = data_prefix
_filename += "_{}_indexmap".format(name)
_filename += "_{}ns".format(num_samples)
_filename += "_{}sl".format(seq_length)
_filename += "_{}s".format(seed)
doc_idx_filename = _filename + "_doc_idx.npy"
sample_idx_filename = _filename + "_sample_idx.npy"
shuffle_idx_filename = _filename + "_shuffle_idx.npy"
# Build the indexed mapping if not exist.
if torch.distributed.get_rank() == 0:
if (
(not os.path.isfile(doc_idx_filename))
or (not os.path.isfile(sample_idx_filename))
or (not os.path.isfile(shuffle_idx_filename))
):
print_rank_0(
" > WARNING: could not find index map files, building "
"the indices on rank 0 ..."
)
            # For the last epoch, decide whether to include the entire epoch
# in the global shuffle or not.
# If we need only one epoch, then separating last epoch does
# not mean anything.
if num_epochs == 1:
separate_last_epoch = False
print(
" > only one epoch required, setting separate_last_epoch to False",
flush=True,
)
else:
# Get the number of samples for the last epoch
num_samples_from_epochs_minus_one = (
(num_epochs - 1) * tokens_per_epoch - 1
) // seq_length
last_epoch_num_samples = num_samples - num_samples_from_epochs_minus_one
assert last_epoch_num_samples >= 0, (
f"last epoch number of samples {last_epoch_num_samples} should be"
" non-negative."
)
num_samples_per_epoch = (tokens_per_epoch - 1) // seq_length
assert last_epoch_num_samples <= num_samples_per_epoch, (
f"last epoch number of samples {last_epoch_num_samples} exceeded"
f" max value {num_samples_per_epoch}."
)
# If we have less than cutoff_last_epoch * samples_per_epoch of the samples for the last epoch,
                # separate out the epoch and treat it differently.
separate_last_epoch = last_epoch_num_samples < int(
cutoff_last_epoch * num_samples_per_epoch
)
if separate_last_epoch:
string = (
" > last epoch number of samples ({}) is smaller "
"than {}% of number of samples per epoch ({}), "
"setting separate_last_epoch to True"
)
else:
string = (
" > last epoch number of samples ({}) is larger "
"than {}% of number of samples per epoch ({}), "
"setting separate_last_epoch to False"
)
print(
string.format(
last_epoch_num_samples,
cutoff_last_epoch * 100,
num_samples_per_epoch,
),
flush=True,
)
# doc-idx.
start_time = time.time()
doc_idx = _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch)
np.save(doc_idx_filename, doc_idx, allow_pickle=True)
print_rank_0(
" > elasped time to build and save doc-idx mapping "
"(seconds): {:4f}".format(time.time() - start_time)
)
# sample-idx.
start_time = time.time()
# Use C++ implementation for speed.
# First compile and then import.
from .megatron import helpers
assert doc_idx.dtype == np.int32
assert sizes.dtype == np.int32
sample_idx = helpers.build_sample_idx(
sizes, doc_idx, seq_length, num_epochs, tokens_per_epoch
)
# sample_idx = _build_sample_idx(sizes, doc_idx, seq_length,
# num_epochs, tokens_per_epoch)
np.save(sample_idx_filename, sample_idx, allow_pickle=True)
print_rank_0(
" > elasped time to build and save sample-idx mapping "
"(seconds): {:4f}".format(time.time() - start_time)
)
# shuffle-idx.
start_time = time.time()
            # -1 is due to data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
if separate_last_epoch:
num_samples_ = num_samples_from_epochs_minus_one
else:
num_samples_ = sample_idx.shape[0] - 1
shuffle_idx = _build_shuffle_idx(
num_samples_, sample_idx.shape[0] - 1, np_rng
)
np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True)
print_rank_0(
" > elasped time to build and save shuffle-idx mapping"
" (seconds): {:4f}".format(time.time() - start_time)
)
# This should be a barrier but nccl barrier assumes
# device_index=rank which is not the case for model
# parallel case
# counts = torch.cuda.LongTensor([1])
# torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
# torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
# assert counts[0].item() == (
# torch.distributed.get_world_size() //
# torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group()))
# Load mappings.
start_time = time.time()
print_rank_0(" > loading doc-idx mapping from {}".format(doc_idx_filename))
doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode="r")
print_rank_0(" > loading sample-idx mapping from {}".format(sample_idx_filename))
sample_idx = np.load(sample_idx_filename, allow_pickle=True, mmap_mode="r")
print_rank_0(" > loading shuffle-idx mapping from {}".format(shuffle_idx_filename))
shuffle_idx = np.load(shuffle_idx_filename, allow_pickle=True, mmap_mode="r")
print_rank_0(
" loaded indexed file in {:3.3f} seconds".format(time.time() - start_time)
)
print_rank_0(" total number of samples: {}".format(sample_idx.shape[0]))
print_rank_0(" total number of epochs: {}".format(num_epochs))
return doc_idx, sample_idx, shuffle_idx
def _num_tokens(documents, sizes):
"""Total number of tokens in the dataset."""
return np.sum(sizes[documents])
def _num_epochs(tokens_per_epoch, seq_length, num_samples):
"""Based on number of samples and sequence lenght, calculate how many
epochs will be needed."""
num_epochs = 0
total_tokens = 0
while True:
num_epochs += 1
total_tokens += tokens_per_epoch
# -1 is because we need to retrieve seq_length + 1 token each time
# but the last token will overlap with the first token of the next
# sample except for the last sample.
if ((total_tokens - 1) // seq_length) >= num_samples:
return num_epochs
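# e.g. with tokens_per_epoch=12, seq_length=4 and num_samples=5: after one epoch
# (12 - 1) // 4 = 2 samples are available, after two epochs (24 - 1) // 4 = 5,
# so _num_epochs(12, 4, 5) returns 2.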
def _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch):
"""Build an array with length = number-of-epochs * number-of-dcuments.
Each index is mapped to a corresponding document."""
if not separate_last_epoch or num_epochs == 1:
doc_idx = np.mgrid[0:num_epochs, 0 : len(documents)][1]
doc_idx[:] = documents
doc_idx = doc_idx.reshape(-1)
doc_idx = doc_idx.astype(np.int32)
np_rng.shuffle(doc_idx)
return doc_idx
doc_idx_first = _build_doc_idx(documents, num_epochs - 1, np_rng, False)
doc_idx_last = _build_doc_idx(documents, 1, np_rng, False)
return np.concatenate((doc_idx_first, doc_idx_last))
def _build_sample_idx(sizes, doc_idx, seq_length, num_epochs, tokens_per_epoch):
"""Sample index mapping is a 2D array with sizes
[number-of-samples + 1, 2] where [..., 0] contains
the index into `doc_idx` and [..., 1] is the
starting offset in that document."""
# Total number of samples. For -1 see comments in `_num_epochs`.
num_samples = (num_epochs * tokens_per_epoch - 1) // seq_length
sample_idx = np.zeros([num_samples + 1, 2], dtype=np.int32)
# Index into sample_idx.
sample_index = 0
# Index into doc_idx.
doc_idx_index = 0
    # Beginning offset for each document.
doc_offset = 0
# Start with first document and no offset.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
while sample_index <= num_samples:
# Start with a fresh sequence.
remaining_seq_length = seq_length + 1
while remaining_seq_length != 0:
# Get the document length.
doc_id = doc_idx[doc_idx_index]
doc_length = sizes[doc_id] - doc_offset
# And add it to the current sequence.
remaining_seq_length -= doc_length
# If we have more than a full sequence, adjust offset and set
# remaining length to zero so we return from the while loop.
# Note that -1 here is for the same reason we have -1 in
# `_num_epochs` calculations.
if remaining_seq_length <= 0:
doc_offset += remaining_seq_length + doc_length - 1
remaining_seq_length = 0
else:
                # Otherwise, start from the beginning of the next document.
doc_idx_index += 1
doc_offset = 0
# Record the sequence.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
return sample_idx
def _build_shuffle_idx(num_samples, total_size, np_rng):
"""Build the range [0, size) and shuffle."""
print(
" > building shuffle index with split [0, {}) and [{}, {}) ...".format(
num_samples, num_samples, total_size
),
flush=True,
)
dtype_ = np.uint32
if total_size >= (np.iinfo(np.uint32).max - 1):
dtype_ = np.int64
shuffle_idx_first = np.arange(start=0, stop=num_samples, step=1, dtype=dtype_)
np_rng.shuffle(shuffle_idx_first)
if num_samples == total_size:
return shuffle_idx_first
shuffle_idx_last = np.arange(
start=num_samples, stop=total_size, step=1, dtype=dtype_
)
np_rng.shuffle(shuffle_idx_last)
return np.concatenate((shuffle_idx_first, shuffle_idx_last))
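if __name__ == "__main__":
    # Minimal smoke test of the pure-Python index builders on toy data
    # (illustrative only; real callers go through _build_index_mappings).
    # Run as `python -m distill_bloom.dataset.gpt_dataset` so that the
    # relative imports at the top of this module resolve.
    rng = np.random.RandomState(seed=0)
    documents = np.arange(3, dtype=np.int32)  # three documents
    sizes = np.array([5, 3, 4], dtype=np.int32)  # tokens per document
    tokens_per_epoch = _num_tokens(documents, sizes)  # 12
    num_epochs = _num_epochs(tokens_per_epoch, seq_length=4, num_samples=5)  # 2
    doc_idx = _build_doc_idx(documents, num_epochs, rng, separate_last_epoch=False)
    sample_idx = _build_sample_idx(sizes, doc_idx, 4, num_epochs, tokens_per_epoch)
    shuffle_idx = _build_shuffle_idx(
        sample_idx.shape[0] - 1, sample_idx.shape[0] - 1, rng
    )
    print("doc_idx:", doc_idx)
    print("sample_idx:\n", sample_idx)
    print("shuffle_idx:", shuffle_idx)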
| distill-bloom-deepspeed-main | distill_bloom/dataset/gpt_dataset.py |
import math
import time
import numpy as np
import torch
from .gpt_dataset import GPTDataset
from .indexed_dataset import (IndexedCachedDataset, IndexedDataset,
                              MMapIndexedDataset, create_doc_idx,
                              data_file_path, index_file_path)
# Assumed module path for Megatron's BlendableDataset (used by
# build_dataset_group below); adjust if the repo keeps it elsewhere.
from .blendable_dataset import BlendableDataset
def print_rank_0(message):
"""If distributed is initialized, print only on rank 0."""
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print(message, flush=True)
else:
print(message, flush=True)
def infer_dataset_impl(path):
if IndexedDataset.exists(path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return "cached"
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return "mmap"
else:
return None
else:
print(f"Dataset does not exist: {path}")
        print(
            "Path should be a basename to which both .idx and .bin can be"
            " appended to get full filenames."
        )
return None
def get_train_valid_test_split_(splits_string, size):
r"""
Get dataset splits from comma or '/' separated string list.
`splits_string` expects an string of 3 sets of integers, summing up to `1000`.
Returns:
The proportion of the dataset to be used for training, validation and testing.
"""
splits = []
if splits_string.find(",") != -1:
splits = [float(s) for s in splits_string.split(",")]
elif splits_string.find("/") != -1:
splits = [float(s) for s in splits_string.split("/")]
else:
splits = [float(splits_string)]
while len(splits) < 3:
splits.append(0.0)
splits = splits[:3]
splits_sum = sum(splits)
assert splits_sum > 0.0
splits = [split / splits_sum for split in splits]
splits_index = [0]
for index, split in enumerate(splits):
splits_index.append(splits_index[index] + int(round(split * float(size))))
diff = splits_index[-1] - size
for index in range(1, len(splits_index)):
splits_index[index] -= diff
assert len(splits_index) == 4
assert splits_index[-1] == size
return splits_index
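# e.g. get_train_valid_test_split_("969,30,1", 1000) -> [0, 969, 999, 1000]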
def analyze_data_prefix(data_prefix):
# The data prefix should be in the format of:
# weight-1, data-prefix-1, weight-2, data-prefix-2, ..
assert len(data_prefix) % 2 == 0
num_datasets = len(data_prefix) // 2
weights = [0] * num_datasets
prefixes = [0] * num_datasets
for i in range(num_datasets):
weights[i] = float(data_prefix[2 * i])
prefixes[i] = (data_prefix[2 * i + 1]).strip()
# Normalize weights
weight_sum = 0.0
for weight in weights:
weight_sum += weight
assert weight_sum > 0.0
weights = [weight / weight_sum for weight in weights]
return prefixes, weights
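# e.g. analyze_data_prefix(["0.3", "pathA", "0.7", "pathB"])
# -> (["pathA", "pathB"], [0.3, 0.7])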
def get_split_by_range_(range_string, size):
"""Get dataset splits based on a range:
range_string is in the form START%:END% for e.g. 0.2:0.8
outputs an array of two values [start_index, end_index]
"""
# some checks that range is given in the correct form
splits = [float(i) for i in range_string.split(":")]
assert len(splits) == 2, "splits should be passed as start:end"
assert splits[0] <= 1 and splits[1] <= 1
splits_sum = sum(splits)
assert splits_sum > 0.0
splits_index = [round(s * float(size)) for s in splits]
assert len(splits_index) == 2
return splits_index
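# e.g. get_split_by_range_("0.2:0.8", 100) -> [20, 80]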
def get_datasets_weights_and_num_samples(data_prefix, train_valid_test_num_samples):
# Add 0.5% (the 1.005 factor) so in case the blending dataset does
# not uniformly distribute the number of samples, we still have
# samples left to feed to the network.
prefixes, weights = analyze_data_prefix(data_prefix)
datasets_train_valid_test_num_samples = []
for weight in weights:
datasets_train_valid_test_num_samples.append(
[
int(math.ceil(val * weight * 1.005))
for val in train_valid_test_num_samples
]
)
return prefixes, weights, datasets_train_valid_test_num_samples
def make_dataset(path, impl, skip_warmup=False):
if not IndexedDataset.exists(path):
print(f"Dataset does not exist: {path}")
        print(
            "Path should be a basename to which both .idx and .bin can be"
            " appended to get full filenames."
        )
return None
if impl == "infer":
impl = infer_dataset_impl(path)
if impl == "lazy" and IndexedDataset.exists(path):
return IndexedDataset(path)
elif impl == "cached" and IndexedDataset.exists(path):
return IndexedCachedDataset(path)
elif impl == "mmap" and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path, skip_warmup)
print(f"Unknown dataset implementation: {impl}")
return None
def get_indexed_dataset_(path, data_impl, skip_warmup):
"""Build indexed dataset."""
print_rank_0(" > building dataset index ...")
start_time = time.time()
indexed_dataset = make_dataset(path, data_impl, skip_warmup)
print_rank_0(
" > finished creating indexed dataset in {:4f} seconds".format(
time.time() - start_time
)
)
print_rank_0(" number of documents: {}".format(indexed_dataset.sizes.shape[0]))
return indexed_dataset
def build_dataset_group(
dataset_group_name,
paths,
weights,
splits,
data_impl,
train_valid_test_num_samples,
seq_length,
seed,
skip_warmup,
train_valid_test,
):
"""
Build a single dataset group corresponding to Option 2 of data loading see arguments.py
a dataset group is passed on the following form
GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2, WEIGHT2 START:END PATH2
or alternatively
GIVEN_NAME PATH1 # for a single dataset to be used fully
"""
assert train_valid_test in ["train", "valid", "test"]
# Single dataset.
if len(paths) == 1:
dataset = _build_single_datasets(
paths[0],
splits[0],
data_impl,
train_valid_test_num_samples,
seq_length,
seed,
skip_warmup,
dataset_group_name,
train_valid_test,
)
return dataset
# Blending dataset.
else:
data_prefix = []
        # data_prefix has the shape:
# ["WEIGHT1", "PATH1", "WEIGHT2", "PATH2", "WEIGHT3", "PATH3"]
for w, p in zip(weights, paths):
data_prefix += [w, p]
output = get_datasets_weights_and_num_samples(
data_prefix, train_valid_test_num_samples
)
prefixes, weights, datasets_train_valid_test_num_samples = output
# Build individual datasets.
datasets = []
for i in range(len(prefixes)):
ds = _build_single_datasets(
prefixes[i],
splits[i],
data_impl,
datasets_train_valid_test_num_samples[i],
seq_length,
seed,
skip_warmup,
dataset_group_name,
train_valid_test,
)
datasets.append(ds)
all_datasets = BlendableDataset(datasets, weights)
return all_datasets
def _build_single_datasets(
data_prefix,
range_string,
data_impl,
train_valid_test_num_samples,
seq_length,
seed,
skip_warmup,
dataset_group_name,
train_valid_test,
):
"""Build a single dataset"""
assert train_valid_test in ["train", "valid", "test"]
index = ["train", "valid", "test"].index(train_valid_test)
# Indexed dataset.
indexed_dataset = get_indexed_dataset_(data_prefix, data_impl, skip_warmup)
total_num_of_documents = indexed_dataset.sizes.shape[0]
# this corresponds to option2 for data loading on the form
# WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2, WEIGHT3 START:END PATH3
# splits here is an array of size 2 [start_index, end_index]
splits = get_split_by_range_(range_string=range_string, size=total_num_of_documents)
# Print stats about the splits.
print_rank_0(" > dataset split:")
print_rank_0(" {}:".format(dataset_group_name))
print_rank_0(
" document indices in [{}, {}) total of {} documents".format(
splits[0], splits[1], splits[1] - splits[0]
)
)
def build_dataset(name):
dataset = None
if splits[1] > splits[0]:
documents = np.arange(
start=splits[0], stop=splits[1], step=1, dtype=np.int32
)
dataset = GPTDataset(
name,
data_prefix,
documents,
indexed_dataset,
train_valid_test_num_samples[index],
seq_length,
seed,
)
return dataset
dataset = build_dataset(dataset_group_name)
return dataset
| distill-bloom-deepspeed-main | distill_bloom/dataset/utils.py |
import torch
class DistributedDataset(torch.utils.data.Dataset):
r"""
Wrapper for torch.utils.data.Dataset to make it distributed.
Args:
dataset (torch.utils.data.Dataset): Dataset to be distributed.
rank (int): Rank of the current process.
world_size (int): Number of processes in the distributed group.
"""
def __init__(self, dataset, rank, world_size):
self.dataset = dataset
self.current_dataset_index = 0
self.current_dataset = dataset[self.current_dataset_index]
self.rank = rank
self.world_size = world_size
def _update_dataset(self):
self.current_dataset_index += 1
if self.current_dataset_index >= len(self.dataset):
self.current_dataset_index = 0
self.current_dataset = self.dataset[self.current_dataset_index]
def __getitem__(self, index):
r"""
Loads a unique sample from the dataset.
First tries to load the sample from the current dataset.
If the current dataset is exhausted, it moves to the next dataset.
"""
try:
item = self.current_dataset[(index*self.world_size) + self.rank]
except IndexError:
self._update_dataset()
item = self.current_dataset[(index*self.world_size) + self.rank]
return item
def __len__(self):
r"""
Returns the length of the dataset. It corresponds to the total
lenght of all the datasets in the dataset list.
"""
total_length = list(map(lambda x: len(x), self.dataset))
return sum(total_length)
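# A minimal usage sketch (toy lists stand in for real datasets; in the actual
# pipeline these would be GPTDataset shards):
#
#     shards = [list(range(4)), list(range(4, 8))]
#     ds = DistributedDataset(shards, rank=0, world_size=2)
#     ds[0], ds[1]  # -> 0, 2 (rank 0 reads the even positions)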
class DistributedDataLoader(torch.utils.data.DataLoader):
r"""
Wrapper around torch.utils.data.DataLoader to support distributed training.
"""
def __init__(self, dataset, rank, world_size, **kwargs):
self.dataset = DistributedDataset(dataset, rank, world_size)
super().__init__(self.dataset, **kwargs)
| distill-bloom-deepspeed-main | distill_bloom/dataset/dataloader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# copied from fairseq/fairseq/data/indexed_dataset.py
# Removed IndexedRawTextDataset since it relied on Fairseq dictionary
# other slight modifications to remove fairseq dependencies
# Added document index to index file and made it accessible.
# An empty sentence no longer separates documents.
import os
import shutil
import stat
import struct
from functools import lru_cache
from itertools import accumulate
import numpy as np
import torch
def best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
def get_available_dataset_impl():
return ["lazy", "cached", "mmap"]
def infer_dataset_impl(path):
if IndexedDataset.exists(path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return "cached"
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return "mmap"
else:
return None
else:
print(f"Dataset does not exist: {path}")
        print(
            "Path should be a basename to which both .idx and .bin can be"
            " appended to get full filenames."
        )
return None
def make_builder(out_file, impl, dtype=None):
if impl == "mmap":
assert dtype is not None
return MMapIndexedDatasetBuilder(out_file, dtype=dtype)
else:
assert dtype is None
return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, skip_warmup=False):
if not IndexedDataset.exists(path):
print(f"Dataset does not exist: {path}")
        print(
            "Path should be a basename to which both .idx and .bin can be"
            " appended to get full filenames."
        )
return None
if impl == "infer":
impl = infer_dataset_impl(path)
if impl == "lazy" and IndexedDataset.exists(path):
return IndexedDataset(path)
elif impl == "cached" and IndexedDataset.exists(path):
return IndexedCachedDataset(path)
elif impl == "mmap" and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path, skip_warmup)
print(f"Unknown dataset implementation: {impl}")
return None
def dataset_exists(path, impl):
if impl == "mmap":
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
    6: float,  # np.float was removed in NumPy 1.24; builtin float is the same alias
7: np.double,
8: np.uint16,
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
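# e.g. code(np.int32) -> 4 and dtypes[4] is np.int32; this integer code is what
# gets stored in the index-file header so readers can recover the dtype.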
def index_file_path(prefix_path):
return prefix_path + ".idx"
def data_file_path(prefix_path):
return prefix_path + ".bin"
def create_doc_idx(sizes):
doc_idx = [0]
for i, s in enumerate(sizes):
if s == 0:
doc_idx.append(i + 1)
return doc_idx
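# e.g. create_doc_idx([3, 0, 2, 0]) -> [0, 2, 4]: zero-sized sentences mark
# document boundaries.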
class IndexedDataset(torch.utils.data.Dataset):
"""Loader for IndexedDataset"""
_HDR_MAGIC = b"TNTIDX\x00\x00"
def __init__(self, path):
super().__init__()
self.path = path
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = f.read(8)
assert struct.unpack("<Q", version) == (1,)
code, self.element_size = struct.unpack("<QQ", f.read(16))
self.dtype = dtypes[code]
self._len, self.s = struct.unpack("<QQ", f.read(16))
self.doc_count = struct.unpack("<Q", f.read(8))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
self.doc_idx = read_longs(f, self.doc_count)
def read_data(self, path):
self.data_file = open(data_file_path(path), "rb", buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError("index out of range")
def __del__(self):
if self.data_file:
self.data_file.close()
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if not self.data_file:
self.read_data(self.path)
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
return a
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
sizes = self.sizes[self.dim_offsets[start] : self.dim_offsets[stop]]
size = sum(sizes)
a = np.empty(size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[start] * self.element_size)
self.data_file.readinto(a)
offsets = list(accumulate(sizes))
sents = np.split(a, offsets[:-1])
return sents
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return os.path.exists(index_file_path(path)) and os.path.exists(
data_file_path(path)
)
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path):
super().__init__(path)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx : ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx : ptx + a.size])
return a
elif isinstance(idx, slice):
            # Hack just to make this work; can optimize later if necessary
sents = []
for i in range(*idx.indices(len(self))):
sents.append(self[i])
return sents
class IndexedDatasetBuilder(object):
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.uint16: 2,
np.int16: 2,
np.int32: 4,
np.int64: 8,
        float: 4,  # was np.float (removed in NumPy 1.24); size kept as upstream
np.double: 8,
}
@staticmethod
def write_header(fout, dtype, numdata, numsize, numdoc):
"""Writes header for cached indexed dataset to given file handle, return number of bytes written.
"""
startpos = fout.tell()
fout.write(IndexedDataset._HDR_MAGIC)
fout.write(struct.pack("<Q", 1))
fout.write(struct.pack("<Q", code(dtype)))
fout.write(struct.pack("<Q", IndexedDatasetBuilder.element_sizes[dtype]))
fout.write(struct.pack("<Q", numdata - 1))
fout.write(struct.pack("<Q", numsize))
fout.write(struct.pack("<Q", numdoc))
endpos = fout.tell()
return endpos - startpos
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, "wb")
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
self.doc_idx = [0]
def add_item(self, tensor):
        nbytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
        self.data_offsets.append(self.data_offsets[-1] + nbytes // self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def end_document(self):
self.doc_idx.append(len(self.sizes))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
doc_offset = len(self.sizes)
begin = self.data_offsets[-1]
for data_offset in index.data_offsets[1:]:
self.data_offsets.append(begin + data_offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
self.doc_idx.extend((doc_offset + index.doc_idx)[1:])
with open(data_file_path(another_file), "rb") as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, "wb")
IndexedDatasetBuilder.write_header(
index,
self.dtype,
len(self.data_offsets),
len(self.sizes),
len(self.doc_idx),
)
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
write_longs(index, self.doc_idx)
index.close()
def _warmup_mmap_file(path):
with open(path, "rb") as stream:
while stream.read(100 * 1024 * 1024):
pass
def exscan_from_cumsum_(arr):
# given an array holding the result of an inclusive scan (cumsum),
# convert to an exclusive scan (shift to the right)
# [10, 30, 35, 50] --> [0, 10, 30, 35]
if arr.size > 1:
arr[1:] = arr[:-1]
if arr.size > 0:
arr[0] = 0
def get_pointers_with_total(sizes, elemsize, dtype):
"""Return a numpy array of type np.dtype giving the byte offsets.
Multiplies values in the sizes array by elemsize (bytes),
and then computes an exclusive scan to get byte offsets.
Returns the total number of bytes as second item in a tuple.
"""
# scale values in sizes array by elemsize to get sizes in bytes
pointers = np.array(sizes, dtype=dtype)
pointers *= elemsize
np.cumsum(pointers, axis=0, out=pointers)
# get total number of bytes from all sizes (last element)
bytes_last = pointers[-1] if len(sizes) > 0 else 0
# convert to byte offsets
exscan_from_cumsum_(pointers)
return pointers, bytes_last
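# e.g. get_pointers_with_total([3, 2, 4], 2, np.int64)
# -> (array([0, 6, 10]), 18): per-sentence byte offsets plus the total bytes.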
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b"MMIDIDX\x00\x00"
@staticmethod
def write_header(fout, dtype, numsizes, numdocs):
"""Writes header for mmap indexed dataset to given file handle, return number of bytes written.
"""
startpos = fout.tell()
fout.write(MMapIndexedDataset.Index._HDR_MAGIC)
fout.write(struct.pack("<Q", 1))
fout.write(struct.pack("<B", code(dtype)))
fout.write(struct.pack("<Q", numsizes))
fout.write(struct.pack("<Q", numdocs))
endpos = fout.tell()
return endpos - startpos
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, "wb")
return self
@staticmethod
def _get_pointers(sizes, npdtype):
"""Return a numpy array of byte offsets given a list of sizes.
Multiplies values in the sizes array by dtype size (bytes),
and then computes an exclusive scan to get byte offsets.
"""
# compute element sizes in bytes
pointers, _ = get_pointers_with_total(
sizes, dtype().itemsize, npdtype
)
return pointers
def write(self, sizes, doc_idx):
MMapIndexedDataset.Index.write_header(
self._file, dtype, len(sizes), len(doc_idx)
)
sizes32 = np.array(sizes, dtype=np.int32)
self._file.write(sizes32.tobytes(order="C"))
del sizes32
pointers = self._get_pointers(sizes, np.int64)
self._file.write(pointers.tobytes(order="C"))
del pointers
doc_idx = np.array(doc_idx, dtype=np.int64)
self._file.write(doc_idx.tobytes(order="C"))
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path, skip_warmup=False):
with open(path, "rb") as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = struct.unpack("<Q", stream.read(8))
assert (1,) == version
(dtype_code,) = struct.unpack("<B", stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack("<Q", stream.read(8))[0]
self._doc_count = struct.unpack("<Q", stream.read(8))[0]
offset = stream.tell()
if not skip_warmup:
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
print(" reading sizes...")
self._sizes = np.frombuffer(
self._bin_buffer, dtype=np.int32, count=self._len, offset=offset
)
print(" reading pointers...")
self._pointers = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._len,
offset=offset + self._sizes.nbytes,
)
print(" reading document index...")
self._doc_idx = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._doc_count,
offset=offset + self._sizes.nbytes + self._pointers.nbytes,
)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@property
def doc_idx(self):
return self._doc_idx
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path, skip_warmup=False):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path, skip_warmup)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path, skip_warmup):
self._path = path
self._index = self.Index(index_file_path(self._path), skip_warmup)
if not skip_warmup:
print(" warming up data mmap file...")
_warmup_mmap_file(data_file_path(self._path))
print(" creating numpy buffer of mmap...")
self._bin_buffer_mmap = np.memmap(
data_file_path(self._path), mode="r", order="C"
)
print(" creating memory view of numpy buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
ptr, size = self._index[idx]
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
)
return np_array
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
ptr = self._index._pointers[start]
sizes = self._index._sizes[idx]
offsets = list(accumulate(sizes))
total_size = sum(sizes)
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr
)
sents = np.split(np_array, offsets[:-1])
return sents
def get(self, idx, offset=0, length=None):
"""Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
ptr, size = self._index[idx]
if length is None:
length = size - offset
ptr += offset * np.dtype(self._index.dtype).itemsize
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr
)
return np_array
@property
def sizes(self):
return self._index.sizes
def size(self, index):
return self._index.sizes[index]
@property
def doc_idx(self):
return self._index.doc_idx
def get_doc_idx(self):
return self._index._doc_idx
def set_doc_idx(self, doc_idx_):
self._index._doc_idx = doc_idx_
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return os.path.exists(index_file_path(path)) and os.path.exists(
data_file_path(path)
)
@property
def dtype(self):
return self._index.dtype
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, "wb")
self._dtype = dtype
self._sizes = []
self._doc_idx = [0]
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order="C"))
self._sizes.append(np_array.size)
def end_document(self):
self._doc_idx.append(len(self._sizes))
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
total_len = len(index.sizes) + len(self._sizes)
print(
f" concat {another_file} size={len(index.sizes)} for a total size of"
f" {total_len}"
)
offset = len(self._sizes)
self._sizes.extend(index.sizes)
self._doc_idx.extend((offset + index.doc_idx)[1:])
# Concatenate data
with open(data_file_path(another_file), "rb") as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes, self._doc_idx)
# To merge a set of binary files, one can simply concatenate them in order.
# We stat each binary file to determine its size, execute a scan to compute
# the byte offset where the calling rank should write its data, seek to proper
# spot, and copy each file.
def gather_files_dist_bin(outfile, filelist, distctx):
"""Concatenate binary files in filelist into a new file given by outfile"""
# lookup size of each of our binary files
filesizes = [os.stat(data_file_path(f))[stat.ST_SIZE] for f in filelist]
# compute total bytes of the merged file and the offset
# at which this rank will write data from its files
numbytes = sum(filesizes)
count = distctx.sum(numbytes)
offset = distctx.exscan(numbytes)
# We first write to a temporary file name. We rename to the final name
# if successful or delete the temporary file if not.
# This way if the final name appears, the user knows it's a valid file.
finalname = data_file_path(outfile)
finalnametmp = finalname + ".tmp"
# First delete the final file if it already exists
distctx.remove(finalname)
# Catch I/O errors from any process
err = None
try:
# Create shared output file and pre-truncate to its final size.
with distctx.open(finalnametmp, truncate=count) as fout:
# Seek to appropriate starting offset in the merged file.
fout.seek(offset)
# Copy in contents of each of our files.
for f in filelist:
with open(data_file_path(f), "rb") as fsrc:
shutil.copyfileobj(fsrc, fout)
except Exception as e:
err = e
# Check that all ranks wrote successfully.
    # This will raise an exception on all ranks if we detect
    # an exception on any rank.
distctx.allraise_if(err)
# Everyone wrote their part successfully.
# Rename the temporary file to the final file.
distctx.rename(finalnametmp, finalname)
def write_list_at_offset(fout, file_offset, vals, shift, elem_offset, dtype):
"""Write list of vals to fout starting at an offset given by file_offset, elem_offset, and dtype.
Copies list of values in vals to a numpy array of type dtype.
Adds a constant shift value to all elements.
Writes the numpy array to the file handle at given offset and scaled by size of the datatype.
offset = file_offset + elem_offset * dtype().itemsize
Parameters
----------
fout : file handle
Open file handle to which to write list of vals
file_offset : int
Byte offset within the file where the global list starts
vals : list[int]
List of values to be written
shift : int
Value to add to each element in vals before writing (use 0 for no change)
elem_offset : int
Zero-based element index where vals starts within the global list.
This value is scaled by dtype().itemsize to convert to a corresponding byte offset.
dtype : np.dtype
numpy datatype to be used when writing the list to the file
"""
# Make a copy of the vals list using the requested datatype.
npvals = np.array(vals, dtype=dtype)
# Shift values in the list by a constant value.
npvals += shift
# Seek to proper offset for this rank and write
# values into file, stored as given datatype.
fout.seek(file_offset + elem_offset * dtype().itemsize)
fout.write(npvals.tobytes(order="C"))
def gather_files_dist_check_dtype(filelist, dtype_rank_consistent, dtype_code, distctx):
# Verify that no rank has found an inconsistent value in its own set of files.
# This includes an allreduce to verify that dtype_rank_consistent is True everywhere.
distctx.allassert(
dtype_rank_consistent, "Some rank found inconsistent dtype values"
)
# Verify that at least one rank found a dtype value.
    # Because of the bcast, the value of first_dtype_code is the same on all ranks.
first_dtype_code = distctx.bcast_first(dtype_code)
assert (
first_dtype_code is not None
), "Failed to find a dtype value in any index file"
# Verify that the dtype is consistent on all ranks, if a rank has a dtype value.
distctx.allassert(
dtype_code == first_dtype_code or dtype_code is None,
"Different dtype values detected in index files",
)
# return the dtype
return dtypes[first_dtype_code]
def gather_files_dist_idx_cached(outfile, filelist, distctx):
# Read each index file and append items to our lists
sizes = []
data_offsets = [0]
dim_offsets = [0]
doc_idx = [0]
    dtype_rank_consistent = (
        True  # set to False if this rank finds inconsistent dtype values in its files
    )
dtype_value = None # the current dtype code to compare against, if any
for f in filelist:
# read index file for this file
index = IndexedDataset(f)
# append its size, data, dim, and doc entries to our lists
doc_offset = len(sizes)
sizes.extend(index.sizes)
data_offsets.extend(index.data_offsets[1:] + data_offsets[-1])
dim_offsets.extend(index.dim_offsets[1:] + dim_offsets[-1])
doc_idx.extend(index.doc_idx[1:] + doc_offset)
# check that the dtype in this index matches the dtype in our other files
dtype_code = code(index.dtype)
if dtype_value is None:
dtype_value = dtype_code
if dtype_value != dtype_code:
dtype_rank_consistent = False
# Check that we have consistent dtypes in all files from all ranks,
# and return the dtype being used.
dtype = gather_files_dist_check_dtype(
filelist, dtype_rank_consistent, dtype_value, distctx
)
# Capture the last value in the data array before we delete any items.
# Note this may be zero on any rank that has no items,
# but zero is the correct value in that case.
# We use this last value to compute a shift value that
    # is later added to each element in our data list.
data_shift = distctx.exscan(data_offsets[-1])
# Drop the zero entry from the lists that start with
# a "0" value unless we're rank 0.
if distctx.rank != 0:
del data_offsets[0]
del dim_offsets[0]
del doc_idx[0]
    # Compute total number of entries in data, size, dim,
# and doc_idx lists across all ranks. Also compute the offset
# of the calling rank for each list considering the number
# of entries for all ranks before the calling rank.
numdata = len(data_offsets)
numsize = len(sizes)
numdim = len(dim_offsets)
numdoc = len(doc_idx)
global_data_count = distctx.sum(numdata)
global_size_count = distctx.sum(numsize)
global_dim_count = distctx.sum(numdim)
global_doc_count = distctx.sum(numdoc)
global_data_offset = distctx.exscan(numdata)
global_size_offset = distctx.exscan(numsize)
global_dim_offset = distctx.exscan(numdim)
global_doc_offset = distctx.exscan(numdoc)
# We first write to a temporary file name. We rename to the final name
# if successful or delete the temporary file if not.
# This way if the final name appears, the user knows it's a valid file.
finalname = index_file_path(outfile)
finalnametmp = finalname + ".tmp"
# First delete the final file if it already exists
distctx.remove(finalname)
    # Catch any I/O errors to later determine whether all ranks wrote successfully.
err = None
try:
# Create shared output file
with distctx.open(finalnametmp) as fout:
# Have rank 0 write the file header
file_offset = 0
if distctx.rank == 0:
try:
file_offset = fout.tell()
file_offset += IndexedDatasetBuilder.write_header(
fout,
dtype,
global_data_count,
global_size_count,
global_doc_count,
)
except Exception as e:
err = e
distctx.allraise_if(err)
# Broadcast current file position from rank 0.
file_offset = distctx.bcast(file_offset, root=0)
# The dimension list records the offset within
# the sizes list for each sentence.
# We shift our dimension index values to account for the number of size values
# that come before the calling rank which is in global_size_offset.
write_list_at_offset(
fout,
file_offset,
dim_offsets,
global_size_offset,
global_dim_offset,
np.int64,
)
file_offset += global_dim_count * np.int64().itemsize
# The data index records the element offset to the start of each
# sentence within the binary data file. Note that this is an
            # element offset, not a byte offset. Each element is physically stored
# in the data file as dtype().itemsize bytes.
# We shift the data index values according to the number of elements that
# come before the calling rank, which is stored in data_shift.
write_list_at_offset(
fout,
file_offset,
data_offsets,
data_shift,
global_data_offset,
np.int64,
)
file_offset += global_data_count * np.int64().itemsize
# Each sentence is stored as a tensor.
# The tensor for each sentence can be multidimensional.
# The number of tensor dimensions per sentence is variable,
# and the size of each dimension of a sentence is arbitrary.
# The size list records a flattened list of the sizes
# for each dimension of a sentence.
# No shift value is needed.
write_list_at_offset(
fout, file_offset, sizes, 0, global_size_offset, np.int64
)
file_offset += global_size_count * np.int64().itemsize
# The document index records the offset within the sizes
# array for the first sentence of each document.
# We shift the document index values for number of size values that
# come before the calling rank which is in global_size_offset.
write_list_at_offset(
fout,
file_offset,
doc_idx,
global_size_offset,
global_doc_offset,
np.int64,
)
file_offset += global_doc_count * np.int64().itemsize
except Exception as e:
# if we encounter any exception while writing, store it for later
err = e
# Check that all ranks wrote successfully
distctx.allraise_if(err)
# Everyone wrote their part successfully.
# Rename the temporary file to the final file.
distctx.rename(finalnametmp, finalname)
def gather_files_dist_idx_mmap(outfile, filelist, distctx):
# Read each index file and append items to the size and doc_idx lists
sizes = []
doc_idx = [0]
    dtype_rank_consistent = (
        True  # set to False if this rank finds inconsistent dtype values in its files
    )
dtype_value = None # the current dtype code to compare against, if any
for f in filelist:
# read index file for this file
index = MMapIndexedDataset.Index(index_file_path(f))
# append its size and doc entries to our lists
docs_offset = len(sizes)
sizes.extend(index.sizes)
doc_idx.extend(index.doc_idx[1:] + docs_offset)
# check that the dtype in this index matches the dtype in our other files
dtype_code = code(index.dtype)
if dtype_value is None:
dtype_value = dtype_code
if dtype_value != dtype_code:
dtype_rank_consistent = False
# Check that we have consistent dtypes in all files from all ranks,
# and return the dtype being used.
dtype = gather_files_dist_check_dtype(
filelist, dtype_rank_consistent, dtype_value, distctx
)
    # Drop the leading "0" entry from the doc_idx list
    # unless we're rank 0
if distctx.rank != 0:
del doc_idx[0]
# Compute total number of size and document index
# values across all ranks. Also compute the offset
# of the calling rank for each value considering
# the values of sizes/docs for all ranks before the
# calling rank.
numsizes = len(sizes)
numdocs = len(doc_idx)
global_size_count = distctx.sum(numsizes)
global_docs_count = distctx.sum(numdocs)
global_size_offset = distctx.exscan(numsizes)
global_docs_offset = distctx.exscan(numdocs)
# Compute local byte offsets for each of our sentences given
# the token count and byte size of the vocab dtype.
pointers, pointers_bytes = get_pointers_with_total(
sizes, dtype().itemsize, np.int64
)
# Determine total number of bytes for all sentences on ranks
# before the calling rank.
pointer_offset = distctx.exscan(pointers_bytes)
# We first write to a temporary file name. We rename to the final name
# if successful or delete the temporary file if not.
# This way if the final name appears, the user knows it's a valid file.
finalname = index_file_path(outfile)
finalnametmp = finalname + ".tmp"
# First delete the final file if it already exists
distctx.remove(finalname)
    # Catch any I/O errors so we can later determine whether all ranks wrote successfully.
err = None
try:
# Create shared output file
with distctx.open(finalnametmp) as fout:
# Have rank 0 write the file header
file_offset = 0
if distctx.rank == 0:
try:
file_offset = fout.tell()
file_offset += MMapIndexedDataset.Index.write_header(
fout, dtype, global_size_count, global_docs_count
)
except Exception as e:
err = e
distctx.allraise_if(err)
# Broadcast current file position from rank 0.
file_offset = distctx.bcast(file_offset, root=0)
# The list of size values from each rank are
# concatenated and stored as int32.
write_list_at_offset(
fout, file_offset, sizes, 0, global_size_offset, np.int32
)
file_offset += global_size_count * np.int32().itemsize
# The pointer values store the byte offset to each sentence when in memory.
# A sentence has a variable number of tokens, given by
# its corresponding entry in the size array. Each token
# of a sentence is stored in units of type dtype, which consumes
# dtype().itemsize bytes (often a standard type that is just
# large enough to represent all elements of the vocabulary).
# Since the pointers array is the same length as the sizes array,
# we use global_size_offset and global_size_count to position
# within the file for writing the pointer values.
write_list_at_offset(
fout,
file_offset,
pointers,
pointer_offset,
global_size_offset,
np.int64,
)
file_offset += global_size_count * np.int64().itemsize
# The document index points to the position in the sizes
# array for the starting sentence of each document.
# A variable number of sentences can be in each document.
            # We shift the document index by the number of sentences that
            # come before the calling rank, which is stored in global_size_offset.
write_list_at_offset(
fout,
file_offset,
doc_idx,
global_size_offset,
global_docs_offset,
np.int64,
)
file_offset += global_docs_count * np.int64().itemsize
except Exception as e:
# if we encounter any exception while writing, store it for later
err = e
# Check that all ranks wrote successfully
distctx.allraise_if(err)
# Everyone wrote their part successfully.
# Rename the temporary file to the final file.
distctx.rename(finalnametmp, finalname)
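# For reference, the mmap index written above ends up with the on-disk
# layout below (a summary derived from the write order in this function):
#   [header][sizes: int32][pointers: int64][doc_idx: int64]
# with every rank writing its slice of each list at a rank-specific offset.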
# Verify that all files in filelist are of the same index type.
# Returns the identified type {cached, mmap} as a string.
def gather_files_dist_check_impltype(filelist, distctx):
# Sanity check for typos in file names.
# Check that a data file exists for each of our files.
all_files_exist = all([os.path.exists(data_file_path(f)) for f in filelist])
# Check that all ranks have all of their files.
distctx.allassert(all_files_exist, "Some rank is missing its input file")
# map type string to an integer for easier bcast, use 0 for unknown
implmap = {"cached": 1, "mmap": 2}
# check that all files in filelist are of the same type
sametype = True
ourtype = None
for f in filelist:
# read header of index file to determine its type
impl = infer_dataset_impl(f)
implval = implmap[impl] if impl in implmap else 0
# check that the type matches our other files
if ourtype is None:
ourtype = implval
if ourtype != implval:
sametype = False
# Check that all ranks have the same type,
# and that there is no unknown type.
# This checks that:
# - all of our own files (if any) are of the same type AND
# - either we have no files or the type of our files match the broadcast type AND
# - the broadcast type is of a known type: {cached, mmap}
bcasttype = distctx.bcast_first(ourtype)
matchtype = (
sametype and (ourtype is None or ourtype == bcasttype) and bcasttype != 0
)
distctx.allassert(matchtype, "Cannot merge dataset files of different types")
# map back to return index string name
    for key, value in implmap.items():
        if value == bcasttype:
            return key
# raise exception if key for bcasttype was not found
raise UnreachableCode
def gather_files_dist(filemain, filelist, distctx):
"""Collectively merge files into a new output file specified in filemain.
Each rank contributes a distinct list of zero or more files in filelist,
and each rank directly merges its set of files into filemain.
It is allowed for the input files in filelist to only be readable from the calling process.
In particular, the input files specified by the calling process may be in storage
that only the calling process can access, like /dev/shm or a node-local SSD.
The output file in filemain should be in a location that is writable by all processes.
NOTE: This uses parallel writes to a shared file to achieve high write bandwidth.
To do so, this implementation seeks beyond the end of the file to write at different
offsets from different processes via the seek() method on a python file handle.
The behavior of seek() is not well documented, but it seems to map to fseek()/lseek(),
and it works as desired on POSIX-compliant file systems like Lustre and GPFS."""
# Check that at least one input file is listed
filecount = distctx.sum(len(filelist))
assert filecount > 0, "All ranks have no input files to merge"
# Check that files are all of the same index type
indexstr = gather_files_dist_check_impltype(filelist, distctx)
# Concatenate the data files
gather_files_dist_bin(filemain, filelist, distctx)
# Combine index files into a single index file
if indexstr == "cached":
gather_files_dist_idx_cached(filemain, filelist, distctx)
elif indexstr == "mmap":
gather_files_dist_idx_mmap(filemain, filelist, distctx)
def get_start_end(count, rank, numranks):
"""Return (start, end) index values for calling rank to evenly divide count items among numranks.
Example usage:
start, end = get_start_end(len(itemlist), distctx.rank, distctx.numranks)
sublist = itemlist[start:end]
Parameters
----------
count : int
Total number of items to be divided
rank : int
Rank of the calling process, within range of [0, numranks)
numranks : int
Number of ranks by which to divide count items
Returns
----------
(start, end) : tuple(int)
Start and end index values that define the [start, end) range for rank
"""
num, remainder = divmod(count, numranks)
if rank < remainder:
start = (num + 1) * rank
end = start + num + 1
else:
start = (num + 1) * remainder + num * (rank - remainder)
end = start + num
return start, end
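# Worked example (hypothetical values): count=10 and numranks=3 gives
# rank 0 -> (0, 4), rank 1 -> (4, 7), rank 2 -> (7, 10); the first
# `remainder` ranks each receive one extra item.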
def merge_files_dist(filemain, filelist, distctx):
"""Merge list of indexed datasets into a single indexed dataset named in filemain.
Given a list of indexed datasets in filelist, and the set of processes defined
by the distributed environment in distctx, collectively merge files into
a new, single output indexed dataset named in filemain. This overwrites filemain
if it already exists. It does not delete the input datasets in filelist. The input
parameters filemain and filelist must be identical on all calling processes,
and all processes in distctx must call this method collectively.
It requires that all ranks be able to read any file in filelist, and all
ranks must be able to write to the single output file named in filemain."""
# TODO: if file sizes vary significantly, it might be better to consider
# file size when splitting the list to different ranks.
# evenly divide list of files among ranks
start, end = get_start_end(len(filelist), distctx.rank, distctx.numranks)
sublist = filelist[start:end]
# delegate merge to gather implementation
return gather_files_dist(filemain, sublist, distctx)
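# Minimal usage sketch (illustrative; `make_distctx()` stands in for however
# the caller constructs the distributed context object expected above, and the
# file names are hypothetical):
#   distctx = make_distctx()
#   merge_files_dist("merged_dataset", ["shard0", "shard1", "shard2"], distctx)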
| distill-bloom-deepspeed-main | distill_bloom/dataset/indexed_dataset.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .initialize import (get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size)
from .utils import VocabUtility
class _VocabParallelCrossEntropy(torch.autograd.Function):
@staticmethod
def forward(ctx, vocab_parallel_logits, target):
# Maximum value along vocab dimension across all GPUs.
logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
torch.distributed.all_reduce(
logits_max,
op=torch.distributed.ReduceOp.MAX,
group=get_tensor_model_parallel_group(),
)
# Subtract the maximum value.
vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1))
        # Get the partition's vocab indices
get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
partition_vocab_size = vocab_parallel_logits.size()[-1]
rank = get_tensor_model_parallel_rank()
world_size = get_tensor_model_parallel_world_size()
vocab_start_index, vocab_end_index = get_vocab_range(
partition_vocab_size, rank, world_size
)
        # Create a mask of out-of-partition vocab ids (True means the id must be masked).
target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
masked_target = target.clone() - vocab_start_index
masked_target[target_mask] = 0
# Get predicted-logits = logits[target].
        # For simplicity, we convert logits to a 2-D tensor with size
# [*, partition-vocab-size] and target to a 1-D tensor of size [*].
logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
masked_target_1d = masked_target.view(-1)
arange_1d = torch.arange(
start=0, end=logits_2d.size()[0], device=logits_2d.device
)
predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
predicted_logits_1d = predicted_logits_1d.clone().contiguous()
predicted_logits = predicted_logits_1d.view_as(target)
predicted_logits[target_mask] = 0.0
# All reduce is needed to get the chunks from other GPUs.
torch.distributed.all_reduce(
predicted_logits,
op=torch.distributed.ReduceOp.SUM,
group=get_tensor_model_parallel_group(),
)
# Sum of exponential of logits along vocab dimension across all GPUs.
exp_logits = vocab_parallel_logits
torch.exp(vocab_parallel_logits, out=exp_logits)
sum_exp_logits = exp_logits.sum(dim=-1)
torch.distributed.all_reduce(
sum_exp_logits,
op=torch.distributed.ReduceOp.SUM,
group=get_tensor_model_parallel_group(),
)
# Loss = log(sum(exp(logits))) - predicted-logit.
loss = torch.log(sum_exp_logits) - predicted_logits
# Store softmax, target-mask and masked-target for backward pass.
exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
return loss
@staticmethod
def backward(ctx, grad_output):
        # Retrieve tensors from the forward pass.
softmax, target_mask, masked_target_1d = ctx.saved_tensors
        # All the inputs have softmax as their gradient.
grad_input = softmax
# For simplicity, work with the 2D gradient.
partition_vocab_size = softmax.size()[-1]
grad_2d = grad_input.view(-1, partition_vocab_size)
# Add the gradient from matching classes.
arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device)
grad_2d[arange_1d, masked_target_1d] -= 1.0 - target_mask.view(-1).float()
# Finally elementwise multiplication with the output gradients.
grad_input.mul_(grad_output.unsqueeze(dim=-1))
return grad_input, None
def vocab_parallel_cross_entropy(vocab_parallel_logits, target):
"""Helper function for the cross entropy."""
return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target)
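# Minimal usage sketch (shapes are illustrative): given this rank's logits
# partition of shape [seq, batch, vocab // tp_world_size] and integer targets
# of shape [seq, batch], every rank returns the full per-token loss:
#   loss = vocab_parallel_cross_entropy(parallel_logits, target).mean()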
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/cross_entropy.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model and data parallel groups."""
import torch
from .utils import ensure_divisibility
# Intra-layer model parallel group that the current rank belongs to.
_TENSOR_MODEL_PARALLEL_GROUP = None
# Inter-layer model parallel group that the current rank belongs to.
_PIPELINE_MODEL_PARALLEL_GROUP = None
# Model parallel group (both intra- and pipeline) that the current rank belongs to.
_MODEL_PARALLEL_GROUP = None
# Embedding group.
_EMBEDDING_GROUP = None
# Data parallel group that the current rank belongs to.
_DATA_PARALLEL_GROUP = None
_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None
_VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
# These values enable us to change the mpu sizes on the fly.
_MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None
_MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
_MPU_TENSOR_MODEL_PARALLEL_RANK = None
_MPU_PIPELINE_MODEL_PARALLEL_RANK = None
# A list of global ranks for each pipeline group to ease calculation of the source
# rank when broadcasting from the first or last pipeline stage
_PIPELINE_GLOBAL_RANKS = None
def is_unitialized():
"""Useful for code segments that may be accessed with or without mpu initialization
"""
return _DATA_PARALLEL_GROUP is None
def initialize_model_parallel(
tensor_model_parallel_size_=1,
pipeline_model_parallel_size_=1,
virtual_pipeline_model_parallel_size_=None,
):
"""
Initialize model data parallel groups.
Arguments:
tensor_model_parallel_size: number of GPUs used to parallelize model tensor.
pipeline_model_parallel_size: number of GPUs used to parallelize model pipeline.
Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we
use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
the model pipeline. The present function will
create 8 tensor model-parallel groups, 4 pipeline model-parallel groups
and 8 data-parallel groups as:
8 data_parallel groups:
[g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15]
8 tensor model-parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15]
4 pipeline model-parallel groups:
[g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
    with a total of 16 GPUs, ranks 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
if torch.distributed.get_rank() == 0:
print(
"> initializing tensor model parallel with size {}".format(
tensor_model_parallel_size_
)
)
print(
"> initializing pipeline model parallel with size {}".format(
pipeline_model_parallel_size_
)
)
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
world_size = torch.distributed.get_world_size()
tensor_model_parallel_size = min(tensor_model_parallel_size_, world_size)
pipeline_model_parallel_size = min(pipeline_model_parallel_size_, world_size)
ensure_divisibility(
world_size, tensor_model_parallel_size * pipeline_model_parallel_size
)
data_parallel_size = world_size // (
tensor_model_parallel_size * pipeline_model_parallel_size
)
num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size
num_data_parallel_groups = world_size // data_parallel_size
if virtual_pipeline_model_parallel_size_ is not None:
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = 0
_VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = (
virtual_pipeline_model_parallel_size_
)
rank = torch.distributed.get_rank()
# Build the data-parallel groups.
global _DATA_PARALLEL_GROUP
assert _DATA_PARALLEL_GROUP is None, "data parallel group is already initialized"
all_data_parallel_group_ranks = []
for i in range(pipeline_model_parallel_size):
start_rank = i * num_pipeline_model_parallel_groups
end_rank = (i + 1) * num_pipeline_model_parallel_groups
for j in range(tensor_model_parallel_size):
ranks = range(start_rank + j, end_rank, tensor_model_parallel_size)
all_data_parallel_group_ranks.append(list(ranks))
group = torch.distributed.new_group(ranks)
if rank in ranks:
_DATA_PARALLEL_GROUP = group
# Build the model-parallel groups.
global _MODEL_PARALLEL_GROUP
assert _MODEL_PARALLEL_GROUP is None, "model parallel group is already initialized"
for i in range(data_parallel_size):
ranks = [
data_parallel_group_ranks[i]
for data_parallel_group_ranks in all_data_parallel_group_ranks
]
group = torch.distributed.new_group(ranks)
if rank in ranks:
_MODEL_PARALLEL_GROUP = group
# Build the tensor model-parallel groups.
global _TENSOR_MODEL_PARALLEL_GROUP
assert (
_TENSOR_MODEL_PARALLEL_GROUP is None
), "tensor model parallel group is already initialized"
for i in range(num_tensor_model_parallel_groups):
ranks = range(
i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size
)
group = torch.distributed.new_group(ranks)
if rank in ranks:
_TENSOR_MODEL_PARALLEL_GROUP = group
# Build the pipeline model-parallel groups and embedding groups
# (first and last rank in each pipeline model-parallel group).
global _PIPELINE_MODEL_PARALLEL_GROUP
global _PIPELINE_GLOBAL_RANKS
assert (
_PIPELINE_MODEL_PARALLEL_GROUP is None
), "pipeline model parallel group is already initialized"
global _EMBEDDING_GROUP
assert _EMBEDDING_GROUP is None, "embedding group is already initialized"
for i in range(num_pipeline_model_parallel_groups):
ranks = range(i, world_size, num_pipeline_model_parallel_groups)
group = torch.distributed.new_group(ranks)
if rank in ranks:
_PIPELINE_MODEL_PARALLEL_GROUP = group
_PIPELINE_GLOBAL_RANKS = ranks
# Setup embedding group (to exchange gradients between
# first and last stages).
if len(ranks) > 1:
embedding_ranks = [ranks[0], ranks[-1]]
else:
embedding_ranks = ranks
group = torch.distributed.new_group(embedding_ranks)
if rank in embedding_ranks:
_EMBEDDING_GROUP = group
def model_parallel_is_initialized():
"""Check if model and data parallel groups are initialized."""
if (
_TENSOR_MODEL_PARALLEL_GROUP is None
or _PIPELINE_MODEL_PARALLEL_GROUP is None
or _DATA_PARALLEL_GROUP is None
):
return False
return True
def get_model_parallel_group():
"""Get the model parallel group the caller rank belongs to."""
assert _MODEL_PARALLEL_GROUP is not None, "model parallel group is not initialized"
return _MODEL_PARALLEL_GROUP
def get_tensor_model_parallel_group():
"""Get the tensor model parallel group the caller rank belongs to."""
assert (
_TENSOR_MODEL_PARALLEL_GROUP is not None
), "intra_layer_model parallel group is not initialized"
return _TENSOR_MODEL_PARALLEL_GROUP
def get_pipeline_model_parallel_group():
"""Get the pipeline model parallel group the caller rank belongs to."""
assert (
_PIPELINE_MODEL_PARALLEL_GROUP is not None
), "pipeline_model parallel group is not initialized"
return _PIPELINE_MODEL_PARALLEL_GROUP
def get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
assert _DATA_PARALLEL_GROUP is not None, "data parallel group is not initialized"
return _DATA_PARALLEL_GROUP
def get_embedding_group():
"""Get the embedding group the caller rank belongs to."""
assert _EMBEDDING_GROUP is not None, "embedding group is not initialized"
return _EMBEDDING_GROUP
def set_tensor_model_parallel_world_size(world_size):
"""Set the tensor model parallel size"""
global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
_MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = world_size
def set_pipeline_model_parallel_world_size(world_size):
"""Set the pipeline model parallel size"""
global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
_MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size
def get_tensor_model_parallel_world_size():
"""Return world size for the tensor model parallel group."""
global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
if _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE is not None:
return _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())
def get_model_parallel_world_size():
assert (
get_pipeline_model_parallel_world_size() == 1
), "legacy get_model_parallel_world_size is only supported if PP is disabled"
return get_tensor_model_parallel_world_size()
def get_pipeline_model_parallel_world_size():
"""Return world size for the pipeline model parallel group."""
global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
if _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE is not None:
return _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
return torch.distributed.get_world_size(group=get_pipeline_model_parallel_group())
def set_tensor_model_parallel_rank(rank):
"""Set tensor model parallel rank."""
global _MPU_TENSOR_MODEL_PARALLEL_RANK
_MPU_TENSOR_MODEL_PARALLEL_RANK = rank
def set_pipeline_model_parallel_rank(rank):
"""Set pipeline model parallel rank."""
global _MPU_PIPELINE_MODEL_PARALLEL_RANK
_MPU_PIPELINE_MODEL_PARALLEL_RANK = rank
def get_tensor_model_parallel_rank():
"""Return my rank for the tensor model parallel group."""
global _MPU_TENSOR_MODEL_PARALLEL_RANK
if _MPU_TENSOR_MODEL_PARALLEL_RANK is not None:
return _MPU_TENSOR_MODEL_PARALLEL_RANK
return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
def get_model_parallel_rank():
assert (
get_pipeline_model_parallel_world_size() == 1
), "legacy get_model_parallel_rank is only supported if PP is disabled"
return get_tensor_model_parallel_rank()
def get_pipeline_model_parallel_rank():
"""Return my rank for the pipeline model parallel group."""
global _MPU_PIPELINE_MODEL_PARALLEL_RANK
if _MPU_PIPELINE_MODEL_PARALLEL_RANK is not None:
return _MPU_PIPELINE_MODEL_PARALLEL_RANK
return torch.distributed.get_rank(group=get_pipeline_model_parallel_group())
def is_pipeline_first_stage(ignore_virtual=False):
"""Return True if in the first pipeline model-parallel stage, False otherwise."""
if not ignore_virtual:
if (
get_virtual_pipeline_model_parallel_world_size() is not None
and get_virtual_pipeline_model_parallel_rank() != 0
):
return False
return get_pipeline_model_parallel_rank() == 0
def is_pipeline_last_stage(ignore_virtual=False):
"""Return True if in the last pipeline model-parallel stage, False otherwise."""
if not ignore_virtual:
virtual_pipeline_model_parallel_world_size = (
get_virtual_pipeline_model_parallel_world_size()
)
if (
virtual_pipeline_model_parallel_world_size is not None
and get_virtual_pipeline_model_parallel_rank()
!= (virtual_pipeline_model_parallel_world_size - 1)
):
return False
return get_pipeline_model_parallel_rank() == (
get_pipeline_model_parallel_world_size() - 1
)
def get_virtual_pipeline_model_parallel_rank():
"""Return the virtual pipeline-parallel rank."""
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
return _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
def set_virtual_pipeline_model_parallel_rank(rank):
"""Set the virtual pipeline-parallel rank."""
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = rank
def get_virtual_pipeline_model_parallel_world_size():
"""Return the virtual pipeline-parallel world size."""
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
return _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
def get_tensor_model_parallel_src_rank():
"""Calculate the global rank corresponding to the first local rank
in the tensor model parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_tensor_model_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
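# Worked example (hypothetical values): with a tensor-parallel world size of 4,
# global ranks 4..7 form one tensor-parallel group, so e.g. global rank 6
# resolves to source rank (6 // 4) * 4 == 4.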
def get_pipeline_model_parallel_first_rank():
assert (
_PIPELINE_GLOBAL_RANKS is not None
), "Pipeline parallel group is not initialized"
return _PIPELINE_GLOBAL_RANKS[0]
def get_pipeline_model_parallel_last_rank():
assert (
_PIPELINE_GLOBAL_RANKS is not None
), "Pipeline parallel group is not initialized"
last_rank_local = get_pipeline_model_parallel_world_size() - 1
return _PIPELINE_GLOBAL_RANKS[last_rank_local]
def get_pipeline_model_parallel_next_rank():
assert (
_PIPELINE_GLOBAL_RANKS is not None
), "Pipeline parallel group is not initialized"
rank_in_pipeline = get_pipeline_model_parallel_rank()
world_size = get_pipeline_model_parallel_world_size()
return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline + 1) % world_size]
def get_pipeline_model_parallel_prev_rank():
assert (
_PIPELINE_GLOBAL_RANKS is not None
), "Pipeline parallel group is not initialized"
rank_in_pipeline = get_pipeline_model_parallel_rank()
world_size = get_pipeline_model_parallel_world_size()
return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline - 1) % world_size]
def get_data_parallel_world_size():
"""Return world size for the data parallel group."""
return torch.distributed.get_world_size(group=get_data_parallel_group())
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
return torch.distributed.get_rank(group=get_data_parallel_group())
def destroy_model_parallel():
"""Set the groups to none."""
global _TENSOR_MODEL_PARALLEL_GROUP
_TENSOR_MODEL_PARALLEL_GROUP = None
global _PIPELINE_MODEL_PARALLEL_GROUP
_PIPELINE_MODEL_PARALLEL_GROUP = None
global _DATA_PARALLEL_GROUP
_DATA_PARALLEL_GROUP = None
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/initialize.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model parallel utility interface."""
from .cross_entropy import vocab_parallel_cross_entropy
from .data import broadcast_data
from .initialize import (destroy_model_parallel, get_data_parallel_group,
get_data_parallel_rank, get_data_parallel_world_size,
get_embedding_group, get_model_parallel_group,
get_model_parallel_rank,
get_model_parallel_world_size,
get_pipeline_model_parallel_first_rank,
get_pipeline_model_parallel_group,
get_pipeline_model_parallel_last_rank,
get_pipeline_model_parallel_next_rank,
get_pipeline_model_parallel_prev_rank,
get_pipeline_model_parallel_rank,
get_pipeline_model_parallel_world_size,
get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_src_rank,
get_tensor_model_parallel_world_size,
get_virtual_pipeline_model_parallel_rank,
initialize_model_parallel, is_pipeline_first_stage,
is_pipeline_last_stage, is_unitialized,
model_parallel_is_initialized,
set_pipeline_model_parallel_rank,
set_pipeline_model_parallel_world_size,
set_tensor_model_parallel_rank,
set_tensor_model_parallel_world_size,
set_virtual_pipeline_model_parallel_rank)
from .mappings import (copy_to_tensor_model_parallel_region,
gather_from_tensor_model_parallel_region,
reduce_from_tensor_model_parallel_region,
scatter_to_tensor_model_parallel_region)
from .utils import divide, split_tensor_along_last_dim
# from .layers import ColumnParallelLinear
# from .layers import RowParallelLinear
# from .layers import VocabParallelEmbedding
# from .layers import (set_tensor_model_parallel_attributes,
# set_defaults_if_not_set_tensor_model_parallel_attributes,
# copy_tensor_model_parallel_attributes)
# from .random import checkpoint
# from .random import get_cuda_rng_tracker
# from .random import init_checkpointed_activations_memory_buffer
# from .random import model_parallel_cuda_manual_seed
# from .random import reset_checkpointed_activations_memory_buffer
# from .random import gather_split_1d_tensor
# from .random import split_tensor_into_1d_equal_chunks
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/__init__.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import contextlib
import torch
from megatron import get_args
from megatron.memory import allocate_mem_buff
from torch import _C
from torch.cuda import _lazy_call
from torch.cuda import device as device_ctx_manager
from torch.utils.checkpoint import detach_variable
from .initialize import (get_data_parallel_rank,
get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size)
# Default name for the model parallel rng tracker.
_MODEL_PARALLEL_RNG_TRACKER_NAME = "model-parallel-rng"
# Whether to apply model parallelism to checkpointed hidden states.
_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = None
def init_checkpointed_activations_memory_buffer():
"""Initialize the memory buffer for the checkpointed activations."""
args = get_args()
upper_bound_sequence_length = max(
args.seq_length if args.seq_length is not None else 0,
args.decoder_seq_length if args.decoder_seq_length is not None else 0,
)
per_layer = (
args.micro_batch_size
* upper_bound_sequence_length
* args.hidden_size
// args.tensor_model_parallel_size
)
assert (
args.num_layers % args.checkpoint_num_layers == 0
), "number of layers is not divisible by checkpoint-num-layers"
num_checkpointer_layers = args.num_layers // args.checkpoint_num_layers
numel = per_layer * num_checkpointer_layers
dtype = torch.half
if not args.fp16:
dtype = torch.float
global _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER
assert (
_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is None
), "checkpointed activations memory buffer is already allocated."
_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = allocate_mem_buff(
"checkpointed activations", numel, dtype, track_usage=False
)
def reset_checkpointed_activations_memory_buffer():
"""Reset the memory used for checkpointing."""
if _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is not None:
_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER.reset()
def _set_cuda_rng_state(new_state, device=-1):
"""Sets the random number generator state of the current GPU.
    Arguments:
new_state (torch.ByteTensor): The desired state
This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
with a single change: the input state is not cloned. Cloning caused
major performance issues for +4 GPU cases.
"""
if hasattr(_C, "_cuda_setRNGState") and callable(_C._cuda_setRNGState):
# older PyTorch
def cb():
with device_ctx_manager(device):
_C._cuda_setRNGState(new_state)
else:
# newer PyTorch
if device == -1:
device = torch.device("cuda")
elif isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device("cuda", device)
def cb():
idx = device.index
if idx is None:
idx = torch.cuda.current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.set_state(new_state)
_lazy_call(cb)
def split_tensor_into_1d_equal_chunks(tensor):
"""Break a tensor into equal 1D chunks."""
data = tensor.view(-1)
partition_size = torch.numel(data) // get_tensor_model_parallel_world_size()
start_index = partition_size * get_tensor_model_parallel_rank()
end_index = start_index + partition_size
return data[start_index:end_index]
def gather_split_1d_tensor(tensor):
"""Opposite of above function, gather values from model parallel ranks."""
world_size = get_tensor_model_parallel_world_size()
numel = torch.numel(tensor)
numel_gathered = world_size * numel
gathered = torch.empty(
numel_gathered,
dtype=tensor.dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
chunks = [gathered[i * numel : (i + 1) * numel] for i in range(world_size)]
torch.distributed.all_gather(
chunks, tensor, group=get_tensor_model_parallel_group()
)
return gathered
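# Round-trip sketch (assumes tensor.numel() is divisible by the tensor-parallel
# world size): each rank keeps one contiguous 1D chunk, and the gather
# reassembles the flattened tensor on every rank:
#   chunk = split_tensor_into_1d_equal_chunks(t)
#   flat = gather_split_1d_tensor(chunk)  # equals t.view(-1) on every rank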
class CudaRNGStatesTracker:
"""Tracker for the cuda RNG states.
Using the `add` method, a cuda rng state is initialized based on
the input `seed` and is assigned to `name`. Later, by forking the
rng state, we can perform operations and return to our starting
cuda state.
"""
def __init__(self):
# Map from a string name to the cuda rng state.
self.states_ = {}
        # Seeds are just for bookkeeping, to ensure no seed is set twice.
self.seeds_ = set()
def reset(self):
"""Set to the initial state (no tracker)."""
self.states_ = {}
self.seeds_ = set()
def get_states(self):
"""Get rng states. Copy the dictionary so we have direct
pointers to the states, not just a pointer to the dictionary."""
states = {}
for name in self.states_:
states[name] = self.states_[name]
return states
def set_states(self, states):
"""Set the rng states. For efficiency purposes, we do not check
the size of seed for compatibility."""
self.states_ = states
def add(self, name, seed):
"""Track the rng state."""
# Check seed is not already used.
if seed in self.seeds_:
raise Exception("seed {} already exists".format(seed))
self.seeds_.add(seed)
# Check that state is not already defined.
if name in self.states_:
raise Exception("cuda rng state {} already exists".format(name))
# Get the current rng state.
orig_rng_state = torch.cuda.get_rng_state()
# Set the new state and store it.
torch.cuda.manual_seed(seed)
self.states_[name] = torch.cuda.get_rng_state()
# Reset rng state to what it was.
_set_cuda_rng_state(orig_rng_state)
@contextlib.contextmanager
def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
"""Fork the cuda rng state, perform operations, and exit with
the original state."""
# Check if we have added the state
if name not in self.states_:
print(name, self.states_)
raise Exception("cuda rng state {} is not added".format(name))
# Store current rng state.
orig_cuda_rng_state = torch.cuda.get_rng_state()
# Set rng state to the desired one
_set_cuda_rng_state(self.states_[name])
# Do the stuff we wanted to do.
try:
yield
finally:
# Update the current rng state for later use.
self.states_[name] = torch.cuda.get_rng_state()
# And set the state to the original state we started with.
_set_cuda_rng_state(orig_cuda_rng_state)
# RNG tracker object.
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
def get_cuda_rng_tracker():
"""Get cuda rng tracker."""
return _CUDA_RNG_STATE_TRACKER
def model_parallel_cuda_manual_seed(seed):
"""Initialize model parallel cuda seed.
This function should be called after the model parallel is
initialized. Also, no torch.cuda.manual_seed should be called
after this function. Basically, this is replacement for that
function.
    Two sets of RNG states are tracked:
default state: This is for data parallelism and is the same among a
set of model parallel GPUs but different across
                   different model parallel groups. This is used for
example for dropout in the non-tensor-model-parallel regions.
tensor-model-parallel state: This state is different among a set of model
parallel GPUs, but the same across data parallel
groups. This is used for example for dropout in
model parallel regions.
"""
# 2718 is just for fun and any POSITIVE value will work.
offset = seed + 2718
tensor_model_parallel_seed = offset + get_tensor_model_parallel_rank()
# Data parallel gets the original seed.
data_parallel_seed = seed
if torch.distributed.get_rank() == 0:
print(
"> initializing model parallel cuda seeds on global rank {}, "
"model parallel rank {}, and data parallel rank {} with "
"model parallel seed: {} and data parallel seed: {}".format(
torch.distributed.get_rank(),
get_tensor_model_parallel_rank(),
get_data_parallel_rank(),
tensor_model_parallel_seed,
data_parallel_seed,
),
flush=True,
)
_CUDA_RNG_STATE_TRACKER.reset()
# Set the default state.
torch.cuda.manual_seed(data_parallel_seed)
# and model parallel state.
_CUDA_RNG_STATE_TRACKER.add(
_MODEL_PARALLEL_RNG_TRACKER_NAME, tensor_model_parallel_seed
)
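# Worked example (hypothetical values): seed=1234 gives offset 1234 + 2718 == 3952,
# so tensor-parallel rank r draws its cuda seed from 3952 + r, while the default
# (data-parallel) state keeps seed 1234 on every rank.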
class CheckpointFunction(torch.autograd.Function):
"""This function is adapted from torch.utils.checkpoint with
two main changes:
1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
2) the states in the model parallel tracker are also properly
tracked/set/reset.
"""
@staticmethod
def forward(ctx, run_function, *args):
ctx.run_function = run_function
# Copy the rng states.
ctx.fwd_cpu_rng_state = torch.get_rng_state()
ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
with torch.no_grad():
outputs = run_function(*args)
# Divide hidden states across model parallel group and only keep
# the chunk corresponding to the current rank.
if _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is not None:
ctx.input_0_shape = args[0].data.shape
args[0].data = split_tensor_into_1d_equal_chunks(args[0].data)
args[0].data = _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER.add(args[0].data)
# Store everything.
ctx.save_for_backward(*args)
return outputs
@staticmethod
def backward(ctx, *args):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError(
"Checkpointing is not compatible with .grad(), "
"please use .backward() if possible"
)
inputs = ctx.saved_tensors
if _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is not None:
inputs[0].data = gather_split_1d_tensor(inputs[0].data)
inputs[0].data = inputs[0].data.view(ctx.input_0_shape)
# Store the current states.
bwd_cpu_rng_state = torch.get_rng_state()
bwd_cuda_rng_state = torch.cuda.get_rng_state()
bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
# Set the states to what it used to be before the forward pass.
torch.set_rng_state(ctx.fwd_cpu_rng_state)
_set_cuda_rng_state(ctx.fwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
# Compute the forward pass.
detached_inputs = detach_variable(inputs)
with torch.enable_grad():
outputs = ctx.run_function(*detached_inputs)
# Set the states back to what it was at the start of this function.
torch.set_rng_state(bwd_cpu_rng_state)
_set_cuda_rng_state(bwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
if isinstance(outputs, torch.Tensor):
outputs = (outputs,)
torch.autograd.backward(outputs, args)
grads = tuple(
inp.grad if isinstance(inp, torch.Tensor) else inp
for inp in detached_inputs
)
return (None,) + grads
def checkpoint(function, *args):
"""Checkpoint a model or part of the model.
This has been directly copied from torch.utils.checkpoint."""
return CheckpointFunction.apply(function, *args)
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/random.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def ensure_divisibility(numerator, denominator):
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, "{} is not divisible by {}".format(
numerator, denominator
)
def divide(numerator, denominator):
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
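# Example: divide(12, 4) == 3, while divide(12, 5) raises an AssertionError
# because 12 is not divisible by 5.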
def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False):
"""Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
        num_partitions: number of partitions to split the tensor into.
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
last_dim_size = divide(tensor.size()[last_dim], num_partitions)
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
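# Example (illustrative shapes): splitting a [2, 6] tensor into 3 partitions
# returns a tuple of three [2, 2] views (non-contiguous unless
# contiguous_split_chunks=True):
#   parts = split_tensor_along_last_dim(torch.ones(2, 6), 3)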
class VocabUtility:
"""Split the vocabulary into `world_size` chunks amd return the
first and last index of the vocabulary belonging to the `rank`
partition: Note that indecies in [fist, last)"""
@staticmethod
def vocab_range_from_per_partition_vocab_size(
per_partition_vocab_size, rank, world_size
):
index_f = rank * per_partition_vocab_size
index_l = index_f + per_partition_vocab_size
return index_f, index_l
@staticmethod
def vocab_range_from_global_vocab_size(global_vocab_size, rank, world_size):
per_partition_vocab_size = divide(global_vocab_size, world_size)
return VocabUtility.vocab_range_from_per_partition_vocab_size(
per_partition_vocab_size, rank, world_size
)
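# Worked example (hypothetical sizes): a global vocab of 50304 split across
# world_size=4 gives per-partition size 12576, so rank 1 owns ids
# [12576, 25152).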
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/utils.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import math
from functools import partial
import deepspeed.runtime.activation_checkpointing.checkpointing as ds_checkpointing
import torch
import torch.nn.functional as F
import torch.nn.init as init
from megatron import get_args, mpu
from torch.nn.parameter import Parameter
from ..model.fused_layer_norm import MixedFusedLayerNorm as LayerNorm
from .initialize import (get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size)
from .mappings import (copy_to_tensor_model_parallel_region,
gather_from_tensor_model_parallel_region,
reduce_from_tensor_model_parallel_region,
scatter_to_tensor_model_parallel_region)
from .random import get_cuda_rng_tracker
from .utils import VocabUtility, divide, split_tensor_along_last_dim
_MODEL_PARALLEL_ATTRIBUTE_DEFAULTS = {
"tensor_model_parallel": False,
"partition_dim": -1,
"partition_stride": 1,
}
def param_is_not_tensor_parallel_duplicate(param):
return (
hasattr(param, "tensor_model_parallel") and param.tensor_model_parallel
) or (get_tensor_model_parallel_rank() == 0)
def set_tensor_model_parallel_attributes(tensor, is_parallel, dim, stride):
# Make sure the attributes are not set.
for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
assert not hasattr(tensor, attribute)
# Set the attributes.
setattr(tensor, "tensor_model_parallel", is_parallel)
setattr(tensor, "partition_dim", dim)
setattr(tensor, "partition_stride", stride)
def set_defaults_if_not_set_tensor_model_parallel_attributes(tensor):
def maybe_set(attribute, value):
if not hasattr(tensor, attribute):
setattr(tensor, attribute, value)
for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
maybe_set(attribute, _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS[attribute])
def copy_tensor_model_parallel_attributes(destination_tensor, source_tensor):
def maybe_copy(attribute):
if hasattr(source_tensor, attribute):
setattr(destination_tensor, attribute, getattr(source_tensor, attribute))
for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
maybe_copy(attribute)
def _initialize_affine_weight_gpu(weight, init_method, partition_dim, stride=1):
"""Initialize affine weight for model parallel on GPU."""
set_tensor_model_parallel_attributes(
tensor=weight, is_parallel=True, dim=partition_dim, stride=stride
)
if ds_checkpointing.is_configured():
global get_cuda_rng_tracker
get_cuda_rng_tracker = ds_checkpointing.get_cuda_rng_tracker
with get_cuda_rng_tracker().fork():
init_method(weight)
def _initialize_affine_weight_cpu(
weight,
output_size,
input_size,
per_partition_size,
partition_dim,
init_method,
stride=1,
return_master_weight=False,
):
"""Initialize affine weight for model parallel.
Build the master weight on all processes and scatter
the relevant chunk."""
set_tensor_model_parallel_attributes(
tensor=weight, is_parallel=True, dim=partition_dim, stride=stride
)
# Initialize master weight
master_weight = torch.empty(
output_size, input_size, dtype=torch.float, requires_grad=False
)
init_method(master_weight)
args = get_args()
master_weight = master_weight.to(dtype=args.params_dtype)
# Split and copy
per_partition_per_stride_size = divide(per_partition_size, stride)
weight_list = torch.split(
master_weight, per_partition_per_stride_size, dim=partition_dim
)
rank = get_tensor_model_parallel_rank()
world_size = get_tensor_model_parallel_world_size()
my_weight_list = weight_list[rank::world_size]
with torch.no_grad():
torch.cat(my_weight_list, dim=partition_dim, out=weight)
if return_master_weight:
return master_weight
return None
def xavier_uniform_tensor_parallel_(tensor, gain=1.0, tp_degree=1):
r"""
This is a modified torch.nn.init.xavier_uniform_ with changes to support
partitioned on the vocab size dim embedding with tensor parallel.
Additional args:
- tp_degree: degree of tensor parallel
Note: the code assumes all partitions are equal in size
"""
# receptive_field_size=1 as dim==2, so we don't need init._calculate_fan_in_and_fan_out
fan_out, fan_in = tensor.shape
fan_out *= tp_degree # tp splits on num_embeddings dim
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return torch.nn.init._no_grad_uniform_(tensor, -a, a)
class VocabParallelEmbedding(torch.nn.Module):
"""Embedding parallelized in the vocabulary dimension.
This is mainly adapted from torch.nn.Embedding and all the default
values are kept.
Arguments:
num_embeddings: vocabulary size.
embedding_dim: size of hidden state.
init_method: method to initialize weights.
"""
def __init__(self, num_embeddings, embedding_dim, init_method=init.xavier_normal_):
super(VocabParallelEmbedding, self).__init__()
# Keep the input dimensions.
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
# Set the defaults for compatibility.
self.padding_idx = None
self.max_norm = None
self.norm_type = 2.0
self.scale_grad_by_freq = False
self.sparse = False
self._weight = None
self.tensor_model_parallel_size = get_tensor_model_parallel_world_size()
# Divide the weight matrix along the vocabulary dimension.
(
self.vocab_start_index,
self.vocab_end_index,
) = VocabUtility.vocab_range_from_global_vocab_size(
self.num_embeddings,
get_tensor_model_parallel_rank(),
self.tensor_model_parallel_size,
)
self.num_embeddings_per_partition = (
self.vocab_end_index - self.vocab_start_index
)
# Allocate weights and initialize.
args = get_args()
# only the first stage embedding runs this class' forward. The head's embedding does its own
# thing, so don't waste memory allocating LN weights.
if mpu.is_pipeline_first_stage() and (
args.use_bnb_optimizer or args.embed_layernorm
):
self.norm = LayerNorm(embedding_dim)
if args.use_bnb_optimizer:
# for BNB we ignore the passed init_method and use torch.nn.init.xavier_uniform_
# modified to calculate std on the unpartitioned embedding
init_method = partial(
xavier_uniform_tensor_parallel_,
tp_degree=self.tensor_model_parallel_size,
)
if args.use_cpu_initialization:
self.weight = Parameter(
torch.empty(
self.num_embeddings_per_partition,
self.embedding_dim,
dtype=args.params_dtype,
)
)
_initialize_affine_weight_cpu(
self.weight,
self.num_embeddings,
self.embedding_dim,
self.num_embeddings_per_partition,
0,
init_method,
)
else:
self.weight = Parameter(
torch.empty(
self.num_embeddings_per_partition,
self.embedding_dim,
device=torch.cuda.current_device(),
dtype=args.params_dtype,
)
)
_initialize_affine_weight_gpu(
self.weight, init_method, partition_dim=0, stride=1
)
if args.use_bnb_optimizer:
from bitsandbytes.optim import GlobalOptimManager
GlobalOptimManager.get_instance().override_config(
self.weight, "optim_bits", 32
)
GlobalOptimManager.get_instance().register_parameters(self.weight)
def forward(self, input_):
if torch.any(input_ >= self.num_embeddings):
raise ValueError(
"There is an input id in the input that is greater than the highest"
f" possible input id.\nInput: {input_}\nnum_embeddings:"
f" {self.num_embeddings}"
)
if self.tensor_model_parallel_size > 1:
# Build the mask.
input_mask = (input_ < self.vocab_start_index) | (
input_ >= self.vocab_end_index
)
# Mask the input.
masked_input = input_.clone() - self.vocab_start_index
masked_input[input_mask] = 0
else:
            # input_ is guaranteed to be in the range [0, self.vocab_end_index - self.vocab_start_index) thanks to the first check
masked_input = input_
# Get the embeddings.
output_parallel = F.embedding(
masked_input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
# Mask the output embedding.
if self.tensor_model_parallel_size > 1:
output_parallel[input_mask, :] = 0.0
# Reduce across all the model parallel GPUs.
output = reduce_from_tensor_model_parallel_region(output_parallel)
if hasattr(self, "norm"):
output = self.norm(output)
return output
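# Minimal usage sketch (illustrative sizes; assumes megatron args and the
# model-parallel groups have already been initialized):
#   emb = VocabParallelEmbedding(num_embeddings=50304, embedding_dim=1024)
#   hidden = emb(token_ids)  # [batch, seq] -> [batch, seq, 1024]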
class ColumnParallelLinear(torch.nn.Module):
"""Linear layer with column parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its second dimension as A = [A_1, ..., A_p].
Arguments:
input_size: first dimension of matrix A.
output_size: second dimension of matrix A.
bias: If true, add bias
        gather_output: If true, call all-gather on output and make Y available
to all GPUs, otherwise, every GPU will have its output
which is Y_i = XA_i
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
        skip_bias_add: This was added to enable performance optimizations where bias
                       can be fused with other elementwise operations. We skip
                       adding bias but instead return it.
"""
def __init__(
self,
input_size,
output_size,
bias=True,
gather_output=True,
init_method=init.xavier_normal_,
stride=1,
keep_master_weight_for_test=False,
skip_bias_add=False,
):
super(ColumnParallelLinear, self).__init__()
# Keep input parameters
self.input_size = input_size
self.output_size = output_size
self.gather_output = gather_output
# Divide the weight matrix along the last dimension.
world_size = get_tensor_model_parallel_world_size()
self.output_size_per_partition = divide(output_size, world_size)
self.skip_bias_add = skip_bias_add
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
# Initialize weight.
args = get_args()
if args.use_cpu_initialization:
self.weight = Parameter(
torch.empty(
self.output_size_per_partition,
self.input_size,
dtype=args.params_dtype,
)
)
self.master_weight = _initialize_affine_weight_cpu(
self.weight,
self.output_size,
self.input_size,
self.output_size_per_partition,
0,
init_method,
stride=stride,
return_master_weight=keep_master_weight_for_test,
)
else:
self.weight = Parameter(
torch.empty(
self.output_size_per_partition,
self.input_size,
device=torch.cuda.current_device(),
dtype=args.params_dtype,
)
)
_initialize_affine_weight_gpu(
self.weight, init_method, partition_dim=0, stride=stride
)
if bias:
if args.use_cpu_initialization:
self.bias = Parameter(
torch.empty(self.output_size_per_partition, dtype=args.params_dtype)
)
else:
self.bias = Parameter(
torch.empty(
self.output_size_per_partition,
device=torch.cuda.current_device(),
dtype=args.params_dtype,
)
)
set_tensor_model_parallel_attributes(self.bias, True, 0, stride)
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter("bias", None)
def forward(self, input_):
# Set up backprop all-reduce.
input_parallel = copy_to_tensor_model_parallel_region(input_)
# Matrix multiply.
bias = self.bias if not self.skip_bias_add else None
output_parallel = F.linear(input_parallel, self.weight, bias)
if self.gather_output:
# All-gather across the partitions.
output = gather_from_tensor_model_parallel_region(output_parallel)
else:
output = output_parallel
output_bias = self.bias if self.skip_bias_add else None
return output, output_bias
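# Minimal usage sketch (illustrative sizes; assumes initialized parallel state
# and megatron args):
#   fc = ColumnParallelLinear(1024, 4096, gather_output=False)
#   y_local, _ = fc(x)  # last dim is 4096 // tp_world_size on each rank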
class RowParallelLinear(torch.nn.Module):
"""Linear layer with row parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its first dimension and X along its second dimension as:
- -
| A_1 |
| . |
A = | . | X = [X_1, ..., X_p]
| . |
| A_p |
- -
Arguments:
input_size: first dimension of matrix A.
output_size: second dimension of matrix A.
bias: If true, add bias. Note that bias is not parallelized.
input_is_parallel: If true, we assume that the input is already
split across the GPUs and we do not split
again.
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
        skip_bias_add: This was added to enable performance optimizations where bias
                       can be fused with other elementwise operations. We skip
                       adding bias but instead return it.
"""
def __init__(
self,
input_size,
output_size,
bias=True,
input_is_parallel=False,
init_method=init.xavier_normal_,
stride=1,
keep_master_weight_for_test=False,
skip_bias_add=False,
):
super(RowParallelLinear, self).__init__()
# Keep input parameters
self.input_size = input_size
self.output_size = output_size
self.input_is_parallel = input_is_parallel
# Divide the weight matrix along the last dimension.
world_size = get_tensor_model_parallel_world_size()
self.input_size_per_partition = divide(input_size, world_size)
self.skip_bias_add = skip_bias_add
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
# Initialize weight.
args = get_args()
if args.use_cpu_initialization:
self.weight = Parameter(
torch.empty(
self.output_size,
self.input_size_per_partition,
dtype=args.params_dtype,
)
)
self.master_weight = _initialize_affine_weight_cpu(
self.weight,
self.output_size,
self.input_size,
self.input_size_per_partition,
1,
init_method,
stride=stride,
return_master_weight=keep_master_weight_for_test,
)
else:
self.weight = Parameter(
torch.empty(
self.output_size,
self.input_size_per_partition,
device=torch.cuda.current_device(),
dtype=args.params_dtype,
)
)
_initialize_affine_weight_gpu(
self.weight, init_method, partition_dim=1, stride=stride
)
if bias:
if args.use_cpu_initialization:
self.bias = Parameter(
torch.empty(self.output_size, dtype=args.params_dtype)
)
else:
self.bias = Parameter(
torch.empty(
self.output_size,
device=torch.cuda.current_device(),
dtype=args.params_dtype,
)
)
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter("bias", None)
self.bias_tp_auto_sync = args.sync_tp_duplicated_parameters
def forward(self, input_):
# Set up backprop all-reduce.
if self.input_is_parallel:
input_parallel = input_
else:
input_parallel = scatter_to_tensor_model_parallel_region(input_)
# Matrix multiply.
output_parallel = F.linear(input_parallel, self.weight)
# All-reduce across all the partitions.
output_ = reduce_from_tensor_model_parallel_region(output_parallel)
if self.bias_tp_auto_sync:
torch.distributed.all_reduce(
self.bias,
op=torch.distributed.ReduceOp.AVG,
group=mpu.get_tensor_model_parallel_group(),
)
if not self.skip_bias_add:
output = output_ + self.bias if self.bias is not None else output_
output_bias = None
else:
output = output_
output_bias = self.bias
return output, output_bias
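# Exposition sketch (added; not part of the original layer, single-process,
# CPU-only). Row parallelism splits the input along its last dimension and the
# weight along its input dimension; summing the partial products (what
# reduce_from_tensor_model_parallel_region does) reproduces the full Y = X A^T.
def _row_parallel_math_sketch(num_ranks=2):
    import torch
    import torch.nn.functional as F

    x = torch.randn(4, 6)        # (batch, input_size)
    weight = torch.randn(8, 6)   # (output_size, input_size), split along dim 1
    partial = [
        F.linear(xp, wp)         # each simulated rank's local matmul
        for xp, wp in zip(
            torch.chunk(x, num_ranks, dim=-1),
            torch.chunk(weight, num_ranks, dim=1),
        )
    ]
    assert torch.allclose(F.linear(x, weight), sum(partial), atol=1e-5)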
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/layers.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .initialize import (get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_src_rank)
_MAX_DATA_DIM = 5
def _check_data_types(keys, data, target_dtype):
"""Check that all the keys have the same target data type."""
for key in keys:
assert (
data[key].dtype == target_dtype
), "{} has data type {} which is different than {}".format(
key, data[key].dtype, target_dtype
)
def _build_key_size_numel_dictionaries(keys, data):
"""Build the size on rank 0 and broadcast."""
max_dim = _MAX_DATA_DIM
sizes = [0 for _ in range(max_dim) for _ in keys]
# Pack the sizes on rank zero.
if get_tensor_model_parallel_rank() == 0:
offset = 0
for key in keys:
assert data[key].dim() < max_dim, "you should increase MAX_DATA_DIM"
size = data[key].size()
for i, s in enumerate(size):
sizes[i + offset] = s
offset += max_dim
# Move to GPU and broadcast.
sizes_cuda = torch.cuda.LongTensor(sizes)
torch.distributed.broadcast(
sizes_cuda,
get_tensor_model_parallel_src_rank(),
group=get_tensor_model_parallel_group(),
)
# Move back to cpu and unpack.
sizes_cpu = sizes_cuda.cpu()
key_size = {}
key_numel = {}
total_numel = 0
offset = 0
for key in keys:
i = 0
size = []
numel = 1
while sizes_cpu[offset + i] > 0:
this_size = sizes_cpu[offset + i]
size.append(this_size)
numel *= this_size
i += 1
key_size[key] = size
key_numel[key] = numel
total_numel += numel
offset += max_dim
return key_size, key_numel, total_numel
def broadcast_data(keys, data, datatype):
"""Broadcast data from rank zero of each model parallel group to the
members of the same model parallel group.
Arguments:
        keys: list of keys in the data dictionary to be broadcast
data: data dictionary of string keys and cpu tensor values.
datatype: torch data type of all tensors in data associated
with keys.
"""
# Build (key, size) and (key, number of elements) dictionaries along
# with the total number of elements on all ranks.
key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys, data)
# Pack on rank zero.
if get_tensor_model_parallel_rank() == 0:
# Check that all keys have the same data type.
_check_data_types(keys, data, datatype)
# Flatten the data associated with the keys
flatten_data = torch.cat(
[data[key].contiguous().view(-1) for key in keys], dim=0
).cuda()
else:
flatten_data = torch.empty(
total_numel, device=torch.cuda.current_device(), dtype=datatype
)
# Broadcast
torch.distributed.broadcast(
flatten_data,
get_tensor_model_parallel_src_rank(),
group=get_tensor_model_parallel_group(),
)
# Unpack
output = {}
offset = 0
for key in keys:
size = key_size[key]
numel = key_numel[key]
output[key] = flatten_data.narrow(0, offset, numel).view(size)
offset += numel
return output
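# Exposition sketch (added; single-process, no distributed setup needed). The
# pack/unpack dance used by broadcast_data above: tensors are flattened into
# one contiguous buffer so a single broadcast suffices, then recovered with
# narrow + view using the sizes exchanged beforehand.
def _pack_unpack_sketch():
    import torch

    data = {"a": torch.arange(6).view(2, 3), "b": torch.arange(4)}
    flat = torch.cat([t.contiguous().view(-1) for t in data.values()])
    offset, unpacked = 0, {}
    for key, tensor in data.items():
        numel = tensor.numel()
        unpacked[key] = flat.narrow(0, offset, numel).view(tensor.size())
        offset += numel
    assert all(torch.equal(unpacked[k], data[k]) for k in data)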
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/data.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .initialize import (get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size)
from .utils import split_tensor_along_last_dim
def _reduce(input_):
"""All-reduce the the input tensor across model parallel group."""
# Bypass the function if we are using only 1 GPU.
if get_tensor_model_parallel_world_size() == 1:
return input_
# All-reduce.
torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group())
return input_
def _split(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
# Split along last dimension.
input_list = split_tensor_along_last_dim(input_, world_size)
# Note: torch.split does not create contiguous tensors by default.
rank = get_tensor_model_parallel_rank()
output = input_list[rank].contiguous()
return output
def _gather(input_):
"""Gather tensors and concatinate along the last dimension."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
# Size and dimension.
last_dim = input_.dim() - 1
rank = get_tensor_model_parallel_rank()
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank] = input_
torch.distributed.all_gather(
tensor_list, input_, group=get_tensor_model_parallel_group()
)
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=last_dim).contiguous()
return output
class _CopyToModelParallelRegion(torch.autograd.Function):
"""Pass the input to the model parallel region."""
@staticmethod
def symbolic(graph, input_):
return input_
@staticmethod
def forward(ctx, input_):
return input_
@staticmethod
def backward(ctx, grad_output):
return _reduce(grad_output)
class _ReduceFromModelParallelRegion(torch.autograd.Function):
"""All-reduce the input from the model parallel region."""
@staticmethod
def symbolic(graph, input_):
return _reduce(input_)
@staticmethod
def forward(ctx, input_):
return _reduce(input_)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class _ScatterToModelParallelRegion(torch.autograd.Function):
"""Split the input and keep only the corresponding chuck to the rank."""
@staticmethod
def symbolic(graph, input_):
return _split(input_)
@staticmethod
def forward(ctx, input_):
return _split(input_)
@staticmethod
def backward(ctx, grad_output):
return _gather(grad_output)
class _GatherFromModelParallelRegion(torch.autograd.Function):
"""Gather the input from model parallel region and concatinate."""
@staticmethod
def symbolic(graph, input_):
return _gather(input_)
@staticmethod
def forward(ctx, input_):
return _gather(input_)
@staticmethod
def backward(ctx, grad_output):
return _split(grad_output)
# -----------------
# Helper functions.
# -----------------
def copy_to_tensor_model_parallel_region(input_):
return _CopyToModelParallelRegion.apply(input_)
def reduce_from_tensor_model_parallel_region(input_):
return _ReduceFromModelParallelRegion.apply(input_)
def scatter_to_tensor_model_parallel_region(input_):
return _ScatterToModelParallelRegion.apply(input_)
def gather_from_tensor_model_parallel_region(input_):
return _GatherFromModelParallelRegion.apply(input_)
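# Exposition note (added): the four regions above form forward/backward
# conjugate pairs, which is what keeps autograd consistent under tensor
# parallelism:
#   copy     forward: identity    backward: all-reduce
#   reduce   forward: all-reduce  backward: identity
#   scatter  forward: split       backward: all-gather
#   gather   forward: all-gather  backward: split
# ColumnParallelLinear applies copy_to_... to its input (so input grads from
# all ranks are summed in backward), while RowParallelLinear applies
# reduce_from_... to its output (so partial products are summed exactly once,
# in forward).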
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/mappings.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import sys
import mpu
import torch
import torch.nn.functional as F
from commons import (IdentityLayer, initialize_distributed, print_separator,
set_random_seed)
from mpu.cross_entropy import vocab_parallel_cross_entropy
sys.path.append("../..")
def torch_cross_entropy(batch_size, seq_length, vocab_size, logits_scale, seed):
set_random_seed(seed)
identity = IdentityLayer(
(batch_size, seq_length, vocab_size), scale=logits_scale
).cuda()
logits = identity()
target = torch.cuda.LongTensor(size=(batch_size, seq_length)).random_(0, vocab_size)
loss = (
F.cross_entropy(
logits.view(-1, logits.size()[-1]), target.view(-1), reduction="none"
)
.view_as(target)
.mean()
)
loss.backward()
return loss, identity.weight.grad
def mpu_cross_entropy(batch_size, seq_length, vocab_size, logits_scale, seed):
set_random_seed(seed)
identity = IdentityLayer(
(batch_size, seq_length, vocab_size), scale=logits_scale
).cuda()
logits = identity()
logits_parallel = mpu.scatter_to_tensor_model_parallel_region(logits)
target = torch.cuda.LongTensor(size=(batch_size, seq_length)).random_(0, vocab_size)
loss = vocab_parallel_cross_entropy(logits_parallel, target).mean()
loss.backward()
return loss, identity.weight.grad
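# Exposition sketch (added; single-process. The real mpu kernel instead uses
# max-subtraction plus all-reduces, but the math is the same). Per token,
#   loss = logsumexp(all logits) - logit[target],
# where each rank only holds a vocab shard; the per-shard logsumexp values and
# the (locally owned) target logit are combined across ranks by all-reduces.
def _vocab_parallel_ce_sketch(num_ranks=2):
    import torch
    import torch.nn.functional as F

    logits = torch.randn(4, 10)                      # (tokens, vocab)
    target = torch.randint(0, 10, (4,))
    shards = torch.chunk(logits, num_ranks, dim=-1)  # simulated vocab shards
    # combining the per-shard logsumexp values is what the all-reduce achieves
    lse = torch.logsumexp(
        torch.stack([torch.logsumexp(s, dim=-1) for s in shards]), dim=0
    )
    target_logit = logits.gather(1, target.unsqueeze(1)).squeeze(1)
    loss = lse - target_logit
    ref = F.cross_entropy(logits, target, reduction="none")
    assert torch.allclose(loss, ref, atol=1e-5)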
def test_cross_entropy(tensor_model_parallel_size):
if torch.distributed.get_rank() == 0:
print(
"> testing cross entropy with model parallel size {} ...".format(
tensor_model_parallel_size
)
)
mpu.initialize_model_parallel(tensor_model_parallel_size)
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
batch_size = 13
seq_length = 17
vocab_size_per_partition = 11
logits_scale = 1000.0
vocab_size = vocab_size_per_partition * tensor_model_parallel_size
seed = 1234
loss_torch, grad_torch = torch_cross_entropy(
batch_size, seq_length, vocab_size, logits_scale, seed
)
loss_mpu, grad_mpu = mpu_cross_entropy(
batch_size, seq_length, vocab_size, logits_scale, seed
)
error = loss_torch.sub_(loss_mpu).abs().max()
print(
" max error in loss on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
error = grad_torch.sub_(grad_mpu).abs().max()
print(
" max error in grad on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
# Reset groups
mpu.destroy_tensor_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
if __name__ == "__main__":
initialize_distributed()
world_size = torch.distributed.get_world_size()
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
print_separator("test cross entropy")
test_cross_entropy(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/tests/test_cross_entropy.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import sys
import mpu
import torch
import torch.nn.init as init
from commons import initialize_distributed, print_separator, set_random_seed
from mpu import layers
from torch.nn.parameter import Parameter
sys.path.append("../..")
def test_parallel_embedding(tensor_model_parallel_size):
if torch.distributed.get_rank() == 0:
print(
"> testing parallel embedding with model parallel size {} ...".format(
tensor_model_parallel_size
)
)
mpu.initialize_model_parallel(tensor_model_parallel_size)
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
batch_size = 17
seq_length = 23
vocab_size = 48
hidden_size = 16
seed = 1236
set_random_seed(123)
input_data = (
torch.LongTensor(size=(batch_size, seq_length)).random_(0, vocab_size).cuda()
)
loss_weight = torch.randn([batch_size, seq_length, hidden_size]).cuda()
set_random_seed(seed)
embedding_original = torch.nn.Embedding(vocab_size, hidden_size).cuda()
output = embedding_original(input_data)
loss_original = torch.mul(output, loss_weight).sum()
loss_original.backward()
set_random_seed(seed)
embedding_parallel = layers.ParallelEmbedding(
vocab_size, hidden_size, init_method=init.normal_
).cuda()
output = embedding_parallel(input_data)
loss_parallel = torch.mul(output, loss_weight).sum()
loss_parallel.backward()
set_random_seed(seed)
embedding_vocab_parallel = layers.VocabParallelEmbedding(
vocab_size, hidden_size, init_method=init.normal_
).cuda()
output = embedding_vocab_parallel(input_data)
loss_vocab_parallel = torch.mul(output, loss_weight).sum()
loss_vocab_parallel.backward()
torch.distributed.barrier()
error = loss_parallel.sub(loss_original).abs()
print(
" error in loss (parallel) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-12, "error: {}".format(error)
torch.distributed.barrier()
error = loss_vocab_parallel.sub(loss_original).abs()
print(
" error in loss (vocab parallel) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-12, "error: {}".format(error)
weight_grad_orig = torch.split(
embedding_original.weight.grad, hidden_size // tensor_model_parallel_size, 1
)[mpu.get_tensor_model_parallel_rank()]
error = embedding_parallel.weight.grad.sub(weight_grad_orig).abs().max()
print(
" error in grad (parallel) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-12, "error: {}".format(error)
weight_grad_orig = torch.split(
embedding_original.weight.grad, vocab_size // tensor_model_parallel_size, 0
)[mpu.get_tensor_model_parallel_rank()]
error = embedding_vocab_parallel.weight.grad.sub(weight_grad_orig).abs().max()
print(
" error in grad (vocab parallel) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-12, "error: {}".format(error)
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def test_initialize_affine_weight(tensor_model_parallel_size):
mpu.initialize_model_parallel(tensor_model_parallel_size)
if torch.distributed.get_rank() == 0:
print(
"> testing initialize_affine_weight with model parallel size: {}".format(
tensor_model_parallel_size
)
)
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
seed = 12345
input_size_coeff = 13
input_size = input_size_coeff * tensor_model_parallel_size
output_size_coeff = 17
output_size = output_size_coeff * tensor_model_parallel_size
# ---------------
# Column parallel
# ---------------
weight = torch.empty(output_size_coeff, input_size)
set_random_seed(seed)
layers._initialize_affine_weight(
weight, output_size, input_size, output_size_coeff, 0, torch.nn.init.normal_
)
# Target.
set_random_seed(seed)
master_weight = torch.empty(output_size, input_size)
torch.nn.init.normal_(master_weight)
rank = mpu.get_tensor_model_parallel_rank()
my_weight = (
torch.split(master_weight, output_size_coeff, dim=0)[rank].contiguous().clone()
)
# Compare.
error = weight.sub(my_weight).abs().max()
torch.distributed.barrier()
print(
" column parallel max error (should be zero) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
# ------------
# Row parallel
# ------------
weight = torch.empty(output_size, input_size_coeff)
set_random_seed(seed)
mpu.layers._initialize_affine_weight(
weight, output_size, input_size, input_size_coeff, 1, torch.nn.init.normal_
)
# Target.
set_random_seed(seed)
master_weight = torch.empty(output_size, input_size)
torch.nn.init.normal_(master_weight)
rank = mpu.get_tensor_model_parallel_rank()
my_weight = (
torch.split(master_weight, input_size_coeff, dim=1)[rank].contiguous().clone()
)
# Compare.
error = weight.sub(my_weight).abs().max()
torch.distributed.barrier()
print(
" row parallel max error (should be zero) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(" >> passed the test :-)")
class IdentityLayer2D(torch.nn.Module):
def __init__(self, m, n):
super(IdentityLayer2D, self).__init__()
self.weight = Parameter(torch.Tensor(m, n))
torch.nn.init.xavier_normal_(self.weight)
def forward(self):
return self.weight
def test_column_parallel_linear(tensor_model_parallel_size):
mpu.initialize_model_parallel(tensor_model_parallel_size)
if torch.distributed.get_rank() == 0:
print(
"> testing ColumnParallelLinear with model parallel size: {}".format(
tensor_model_parallel_size
)
)
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
seed = 12345
set_random_seed(seed)
input_size_coeff = 13
input_size = input_size_coeff * tensor_model_parallel_size
output_size_coeff = 17
output_size = output_size_coeff * tensor_model_parallel_size
batch_size = 7
# Network
identity_layer = IdentityLayer2D(batch_size, input_size).cuda()
linear_layer = mpu.ColumnParallelLinear(
input_size, output_size, keep_master_weight_for_test=True
).cuda()
loss_weight = torch.randn([batch_size, output_size]).cuda()
# Forward
input_ = identity_layer()
output = linear_layer(input_)
loss = torch.mul(output, loss_weight).sum()
# Backward
loss.backward()
# Values.
dLdY = loss_weight
X = identity_layer.weight
A = linear_layer.master_weight.cuda()
dLdA = torch.matmul(dLdY.t(), X)
dLdb = torch.matmul(torch.ones(batch_size, 1).cuda().t(), dLdY).view(-1)
dLdX = torch.matmul(dLdY, A)
rank = mpu.get_tensor_model_parallel_rank()
my_dLdA = torch.split(dLdA, output_size_coeff, dim=0)[rank].contiguous().clone()
error = my_dLdA.sub(linear_layer.weight.grad).abs().max()
torch.distributed.barrier()
print(
" error in dLdA on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
my_dLdb = torch.split(dLdb, output_size_coeff, dim=0)[rank].contiguous().clone()
error = my_dLdb.sub(linear_layer.bias.grad).abs().max()
torch.distributed.barrier()
print(
" error in dLdb on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
error = dLdX.sub(identity_layer.weight.grad).abs().max()
torch.distributed.barrier()
print(
" error in dLdX on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(" >> passed the test :-)")
def test_row_parallel_linear(tensor_model_parallel_size):
mpu.initialize_model_parallel(tensor_model_parallel_size)
if torch.distributed.get_rank() == 0:
print(
"> testing RowParallelLinear with model parallel size: {}".format(
tensor_model_parallel_size
)
)
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
seed = 12345
set_random_seed(seed)
input_size_coeff = 13
input_size = input_size_coeff * tensor_model_parallel_size
output_size_coeff = 17
output_size = output_size_coeff * tensor_model_parallel_size
batch_size = 7
# Network
identity_layer = IdentityLayer2D(batch_size, input_size).cuda()
linear_layer = mpu.RowParallelLinear(
input_size, output_size, keep_master_weight_for_test=True
).cuda()
loss_weight = torch.randn([batch_size, output_size]).cuda()
# Forward
input_ = identity_layer()
output = linear_layer(input_)
loss = torch.mul(output, loss_weight).sum()
# Backward
loss.backward()
# Values.
dLdY = loss_weight
X = identity_layer.weight
A = linear_layer.master_weight.cuda()
dLdA = torch.matmul(dLdY.t(), X)
dLdb = torch.matmul(torch.ones(batch_size, 1).cuda().t(), dLdY).view(-1)
dLdX = torch.matmul(dLdY, A)
rank = mpu.get_tensor_model_parallel_rank()
my_dLdA = torch.split(dLdA, input_size_coeff, dim=1)[rank].contiguous().clone()
error = my_dLdA.sub(linear_layer.weight.grad).abs().max()
torch.distributed.barrier()
print(
" error in dLdA on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
error = dLdb.sub(linear_layer.bias.grad).abs().max()
torch.distributed.barrier()
print(
" error in dLdb on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
error = dLdX.sub(identity_layer.weight.grad).abs().max()
torch.distributed.barrier()
print(
" error in dLdX on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(" >> passed the test :-)")
class IdentityLayer3D(torch.nn.Module):
def __init__(self, m, n, k):
super(IdentityLayer3D, self).__init__()
self.weight = Parameter(torch.Tensor(m, n, k))
torch.nn.init.xavier_normal_(self.weight)
def forward(self):
return self.weight
def parallel_self_attention(
tensor_model_parallel_size,
num_att_heads_per_partition,
hidden_size_per_att_head,
dropout_prob,
batch_size,
sequence_length,
):
mpu.initialize_model_parallel(tensor_model_parallel_size)
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
seed = 12345
set_random_seed(seed)
num_att_heads = num_att_heads_per_partition * torch.distributed.get_world_size()
hidden_size = hidden_size_per_att_head * num_att_heads
# Network
identity_layer = IdentityLayer3D(batch_size, sequence_length, hidden_size).cuda()
attention_layer = mpu.BertParallelSelfAttention(
hidden_size, num_att_heads, dropout_prob
).cuda()
loss_weight = torch.randn([batch_size, sequence_length, hidden_size]).cuda()
attention_mask = torch.randn([batch_size, 1, 1, sequence_length]).cuda()
# Forward
input_ = identity_layer()
output = attention_layer(input_, attention_mask)
loss = torch.mul(output, loss_weight).sum()
# Backward
loss.backward()
rank = mpu.get_tensor_model_parallel_rank()
mpu.destroy_model_parallel()
return (
rank,
hidden_size,
tensor_model_parallel_size,
loss,
attention_layer,
identity_layer,
)
def test_parallel_self_attention(tensor_model_parallel_size):
if torch.distributed.get_rank() == 0:
print(
"> testing ParallelSelfAttention with model parallel size: {}".format(
tensor_model_parallel_size
)
)
num_att_heads_per_partition = 3
hidden_size_per_att_head = 7
dropout_prob = 0.0 # has to be zero
batch_size = 5
sequence_length = 13
(
rank_1,
        hidden_size_1,
tensor_model_parallel_size_1,
loss_1,
attention_layer_1,
identity_layer_1,
) = parallel_self_attention(
1,
num_att_heads_per_partition,
hidden_size_per_att_head,
dropout_prob,
batch_size,
sequence_length,
)
(
rank,
hidden_size,
tensor_model_parallel_size,
loss,
attention_layer,
identity_layer,
) = parallel_self_attention(
tensor_model_parallel_size,
num_att_heads_per_partition,
hidden_size_per_att_head,
dropout_prob,
batch_size,
sequence_length,
)
    assert hidden_size_1 == hidden_size
error = loss_1.sub(loss).abs().max()
torch.distributed.barrier()
print(
" loss error on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 5.0e-6
my_lin_grad_list = torch.split(
attention_layer_1.query_key_value.weight.grad,
hidden_size // tensor_model_parallel_size,
0,
)[rank::tensor_model_parallel_size]
my_lin_grad = torch.cat(my_lin_grad_list, dim=0)
error = my_lin_grad.sub(attention_layer.query_key_value.weight.grad).abs().max()
torch.distributed.barrier()
print(
" weight gradient error on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 5.0e-6
error = identity_layer_1.weight.grad.sub(identity_layer.weight.grad).abs().max()
torch.distributed.barrier()
print(
" input gradient error on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 5.0e-6
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(" >> passed the test :-)")
def parallel_transformer(
tensor_model_parallel_size,
num_att_heads_per_partition,
hidden_size_per_att_head,
batch_size,
sequence_length,
):
mpu.initialize_model_parallel(tensor_model_parallel_size)
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
seed = 12345
set_random_seed(seed)
num_att_heads = num_att_heads_per_partition * torch.distributed.get_world_size()
hidden_size = hidden_size_per_att_head * num_att_heads
intermediate_size = 4 * hidden_size
# Network
identity_layer = IdentityLayer3D(batch_size, sequence_length, hidden_size).cuda()
transformer_layer = mpu.BertParallelTransformerLayer(
hidden_size,
intermediate_size,
num_att_heads,
0.0,
0.0,
torch.nn.functional.relu,
1.0e-5,
).cuda()
loss_weight = torch.randn([batch_size, sequence_length, hidden_size]).cuda()
attention_mask = torch.randn([batch_size, 1, 1, sequence_length]).cuda()
# Forward
input_ = identity_layer()
output = transformer_layer(input_, attention_mask)
loss = torch.mul(output, loss_weight).sum()
# Backward
loss.backward()
rank = mpu.get_tensor_model_parallel_rank()
mpu.destroy_model_parallel()
return (
rank,
hidden_size,
tensor_model_parallel_size,
loss,
transformer_layer,
identity_layer,
)
def test_parallel_transformer_layer(tensor_model_parallel_size):
if torch.distributed.get_rank() == 0:
print(
"> testing ParallelTransformerLayer with model parallel size: {}".format(
tensor_model_parallel_size
)
)
num_att_heads_per_partition = 3
hidden_size_per_att_head = 7
batch_size = 5
sequence_length = 13
(
rank_1,
hidden_size_1,
tensor_model_parallel_size_1,
loss_1,
transformer_layer_1,
identity_layer_1,
) = parallel_transformer(
1,
num_att_heads_per_partition,
hidden_size_per_att_head,
batch_size,
sequence_length,
)
(
rank,
hidden_size,
tensor_model_parallel_size,
loss,
transformer_layer,
identity_layer,
) = parallel_transformer(
tensor_model_parallel_size,
num_att_heads_per_partition,
hidden_size_per_att_head,
batch_size,
sequence_length,
)
error = loss_1.sub(loss).abs().max()
torch.distributed.barrier()
print(
" loss error on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 5.0e-5, "error: {}".format(error)
error = identity_layer_1.weight.grad.sub(identity_layer.weight.grad).abs().max()
torch.distributed.barrier()
print(
" input gradient error on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 5.0e-5, "error: {}".format(error)
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(" >> passed the test :-)")
if __name__ == "__main__":
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
initialize_distributed()
world_size = torch.distributed.get_world_size()
print_separator("test initialize affine weight")
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
test_initialize_affine_weight(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
print_separator("test parallel embedding")
test_parallel_embedding(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
print_separator("test column-parallel linear")
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
test_column_parallel_linear(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
print_separator("test row-parallel linear")
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
test_row_parallel_linear(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
print_separator("test parallel self-attention")
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
test_parallel_self_attention(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
print_separator("test parallel transformer")
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
test_parallel_transformer_layer(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/tests/test_layers.py |
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/tests/__init__.py
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import random
import mpu
import numpy
import torch
class IdentityLayer(torch.nn.Module):
def __init__(self, size, scale=1.0):
super(IdentityLayer, self).__init__()
self.weight = torch.nn.Parameter(scale * torch.randn(size))
def forward(self):
return self.weight
def set_random_seed(seed):
"""Set random seed for reproducability."""
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
mpu.model_parallel_cuda_manual_seed(seed)
def initialize_distributed(backend="nccl"):
"""Initialize torch.distributed."""
# Get local rank in case it is provided.
parser = argparse.ArgumentParser()
parser.add_argument(
"--local_rank",
type=int,
default=None,
help="local rank passed from distributed launcher",
)
args = parser.parse_args()
local_rank = args.local_rank
# Get rank and world size.
rank = int(os.getenv("RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "1"))
print(
"> initializing torch.distributed with local rank: {}, "
"rank: {}, world size: {}".format(local_rank, rank, world_size)
)
# Set the device id.
device = rank % torch.cuda.device_count()
if local_rank is not None:
device = local_rank
torch.cuda.set_device(device)
# Call the init process.
init_method = "tcp://"
master_ip = os.getenv("MASTER_ADDR", "localhost")
master_port = os.getenv("MASTER_PORT", "6000")
init_method += master_ip + ":" + master_port
torch.distributed.init_process_group(
backend=backend, world_size=world_size, rank=rank, init_method=init_method
)
def print_separator(message):
torch.distributed.barrier()
filler_len = (78 - len(message)) // 2
filler = "-" * filler_len
string = "\n" + filler + " {} ".format(message) + filler
if torch.distributed.get_rank() == 0:
print(string, flush=True)
torch.distributed.barrier()
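# Launch sketch (added; assumed invocation, adapt to your launcher): these mpu
# tests expect the usual torch.distributed environment, e.g. for a
# single-node, 2-GPU run:
#   MASTER_ADDR=localhost MASTER_PORT=6000 \
#   python -m torch.distributed.launch --nproc_per_node=2 test_layers.py
# torch.distributed.launch exports RANK/WORLD_SIZE and passes --local_rank,
# which initialize_distributed above consumes.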
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/tests/commons.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import operator
import sys
import mpu
import torch
from commons import initialize_distributed, print_separator
from mpu import data as data_utils
sys.path.append("../..")
def test_broadcast_data(tensor_model_parallel_size):
if torch.distributed.get_rank() == 0:
print(
"> testing broadcast_data with model parallel size {} ...".format(
tensor_model_parallel_size
)
)
mpu.initialize_model_parallel(tensor_model_parallel_size)
torch.manual_seed(1234 + mpu.get_data_parallel_rank())
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
key_size_t = {
"key1": [7, 11],
"key2": [8, 2, 1],
"key3": [13],
"key4": [5, 1, 2],
"key5": [5, 12],
}
keys = list(key_size_t.keys())
data = {}
data_t = {}
for key in key_size_t:
data[key] = torch.LongTensor(size=key_size_t[key]).random_(0, 1000)
data_t[key] = data[key].clone()
data["keyX"] = torch.FloatTensor(size=(5,)).random_(0, 1000)
data_t["keyX"] = data["keyX"].clone()
if mpu.get_tensor_model_parallel_rank() != 0:
data = None
data_utils._check_data_types(keys, data_t, torch.int64)
key_size, key_numel, total_numel = data_utils._build_key_size_numel_dictionaries(
keys, data
)
for key in keys:
assert key_size[key] == key_size_t[key]
total_numel_t = 0
for key in keys:
target_size = functools.reduce(operator.mul, key_size_t[key], 1)
assert key_numel[key] == target_size
total_numel_t += target_size
assert total_numel == total_numel_t
data_b = data_utils.broadcast_data(keys, data, torch.int64)
for key in keys:
tensor = data_t[key].cuda()
assert data_b[key].sub(tensor).abs().max() == 0
# Reset groups
mpu.destroy_tensor_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
if __name__ == "__main__":
initialize_distributed()
world_size = torch.distributed.get_world_size()
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
print_separator("test test broadcast data")
test_broadcast_data(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/tests/test_data.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import mpu
import torch
from commons import initialize_distributed, print_separator
sys.path.append("../..")
def test_initialize_model_parallel(tensor_model_parallel_size):
if torch.distributed.get_rank() == 0:
print(
"> testing initialize_model_parallel with size {} ...".format(
tensor_model_parallel_size
)
)
tensor_model_parallel_size_ = min(
tensor_model_parallel_size, torch.distributed.get_world_size()
)
assert not mpu.model_parallel_is_initialized()
mpu.initialize_model_parallel(tensor_model_parallel_size_)
assert mpu.model_parallel_is_initialized()
# Checks.
def check(group, world_size, rank):
assert world_size == torch.distributed.get_world_size(group=group)
assert rank == torch.distributed.get_rank(group=group)
# Model parallel.
world_size = tensor_model_parallel_size_
rank = torch.distributed.get_rank() % tensor_model_parallel_size_
assert world_size == mpu.get_tensor_model_parallel_world_size()
assert rank == mpu.get_tensor_model_parallel_rank()
check(mpu.get_tensor_model_parallel_group(), world_size, rank)
# Data parallel.
world_size = torch.distributed.get_world_size() // tensor_model_parallel_size_
    rank = torch.distributed.get_rank() // tensor_model_parallel_size_
assert world_size == mpu.get_data_parallel_world_size()
assert rank == mpu.get_data_parallel_rank()
check(mpu.get_data_parallel_group(), world_size, rank)
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def test_get_tensor_model_parallel_src_rank(tensor_model_parallel_size_):
if torch.distributed.get_rank() == 0:
print(
"> testing get_tensor_model_parallel_src_rank with size {} ...".format(
tensor_model_parallel_size_
)
)
tensor_model_parallel_size = min(
tensor_model_parallel_size_, torch.distributed.get_world_size()
)
assert not mpu.model_parallel_is_initialized()
mpu.initialize_model_parallel(tensor_model_parallel_size)
assert mpu.model_parallel_is_initialized()
# Checks
src_rank = torch.distributed.get_rank() - mpu.get_tensor_model_parallel_rank()
assert mpu.get_tensor_model_parallel_src_rank() == src_rank
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
if __name__ == "__main__":
initialize_distributed()
world_size = torch.distributed.get_world_size()
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
print_separator("test initialize model parallel")
test_initialize_model_parallel(tensor_model_parallel_size)
print_separator("test model parallel source rank")
test_get_tensor_model_parallel_src_rank(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/tests/test_initialize.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import mpu
import torch
from commons import initialize_distributed, print_separator
sys.path.append("../..")
def test_set_cuda_rng_state(tensor_model_parallel_size):
if torch.distributed.get_rank() == 0:
print(
"> testing set_rng_state with size {} ...".format(
tensor_model_parallel_size
)
)
mpu.initialize_model_parallel(tensor_model_parallel_size)
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
size = 123
seed = 1234
torch.cuda.manual_seed(1234)
tensor = torch.cuda.FloatTensor(size)
# Get the state
rng_state = torch.cuda.get_rng_state()
rng_state_copy = rng_state.clone()
# Do some stuff.
for _ in range(5):
torch.randn(size, out=tensor)
result_1 = tensor.clone()
assert rng_state.sub(rng_state_copy).max() == 0
assert torch.cuda.get_rng_state().sub(rng_state_copy).max() > 0
# State should be different.
new_rng_state = torch.cuda.get_rng_state()
max_diff = new_rng_state.sub(rng_state).max()
print(
" max diff in rng state (should be non-zero) on global rank {}: {}".format(
torch.distributed.get_rank(), max_diff
)
)
assert max_diff > 0
# Reset the rng state and do the same stuff.
mpu.random._set_cuda_rng_state(rng_state)
for _ in range(5):
torch.randn(size, out=tensor)
mpu.random._set_cuda_rng_state(rng_state)
for _ in range(5):
torch.randn(size, out=tensor)
result_2 = tensor.clone()
# Results should be the same
error = result_2.sub(result_1).abs().max()
print(
" max error in generated tensors (should be zero) on "
"global rank {}: {}".format(torch.distributed.get_rank(), error)
)
assert error < 1.0e-6
# Input state should have remained intact.
error = rng_state.sub(rng_state_copy).max()
print(
" max error in rng state (should be zero) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error == 0
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def test_cuda_rng_tracker(tensor_model_parallel_size):
if torch.distributed.get_rank() == 0:
print(
"> testing cuda rng tracker with size {} ...".format(
tensor_model_parallel_size
)
)
mpu.initialize_model_parallel(tensor_model_parallel_size)
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
seed_1 = 1234
seed_2 = 4321
size = [12, 21]
tensor = torch.cuda.FloatTensor(size)
# Set to seed_1 and generate two tensors.
torch.cuda.manual_seed(seed_1)
torch.randn(size, out=tensor)
target_11 = tensor.clone()
torch.randn(size, out=tensor)
target_12 = tensor.clone()
# Set to seed_2 and generate two tensors.
torch.cuda.manual_seed(seed_2)
torch.randn(size, out=tensor)
target_21 = tensor.clone()
torch.randn(size, out=tensor)
target_22 = tensor.clone()
# Now if we interleave seed_1 and seed_2,
# we should still get the same tensors
torch.cuda.manual_seed(seed_1)
mpu.get_cuda_rng_tracker().add("test", seed_2)
torch.randn(size, out=tensor)
result_11 = tensor.clone()
with mpu.get_cuda_rng_tracker().fork("test"):
torch.randn(size, out=tensor)
result_21 = tensor.clone()
torch.randn(size, out=tensor)
result_12 = tensor.clone()
with mpu.get_cuda_rng_tracker().fork("test"):
torch.randn(size, out=tensor)
result_22 = tensor.clone()
diff = result_11.sub(result_21).abs().max()
diff = min(diff, result_12.sub(result_22).abs().max())
print(
" max diff in generated tensors (should be non-zero) on "
"global rank {}: {}".format(torch.distributed.get_rank(), diff)
)
assert diff > 1.0e-6
error = max(
result_11.sub(target_11).abs().max(), result_12.sub(target_12).abs().max()
)
error = max(error, result_21.sub(target_21).abs().max())
error = max(error, result_22.sub(target_22).abs().max())
print(
" max error in generated tensors (should be zero) on "
"global rank {}: {}".format(torch.distributed.get_rank(), error)
)
assert error < 1.0e-6
# Reset the tracker
mpu.get_cuda_rng_tracker().reset()
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def test_model_parallel_cuda_manual_seed(tensor_model_parallel_size):
if torch.distributed.get_rank() == 0:
print(
"> testing model parallel cuda manual seed with size {} ...".format(
tensor_model_parallel_size
)
)
mpu.initialize_model_parallel(tensor_model_parallel_size)
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
mpu.model_parallel_cuda_manual_seed(12345)
assert torch.cuda.initial_seed() == 12345
with mpu.get_cuda_rng_tracker().fork():
assert torch.cuda.initial_seed() == (
12345 + 2718 + mpu.get_tensor_model_parallel_rank()
)
# Reset the tracker
mpu.get_cuda_rng_tracker().reset()
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
if __name__ == "__main__":
initialize_distributed()
world_size = torch.distributed.get_world_size()
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
print_separator("test set rng state")
test_set_cuda_rng_state(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
print_separator("test cuda rng tracker")
test_cuda_rng_tracker(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
tensor_model_parallel_size = 1
while tensor_model_parallel_size <= world_size:
print_separator("test model parallel cuda manual seed")
test_model_parallel_cuda_manual_seed(tensor_model_parallel_size)
tensor_model_parallel_size *= 2
| distill-bloom-deepspeed-main | distill_bloom/dataset/megatron/mpu/tests/test_random.py |
# coding=utf-8
# Copyright 2020 Optuna, Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Logging utilities. """
import logging
import os
import sys
import threading
from functools import wraps
from logging import CRITICAL # NOQA
from logging import DEBUG # NOQA
from logging import ERROR # NOQA
from logging import FATAL # NOQA
from logging import INFO # NOQA
from logging import NOTSET # NOQA
from logging import WARN # NOQA
from logging import WARNING # NOQA
from typing import Optional
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
"""
    If the MEGATRON_DEEPSPEED_VERBOSITY env var is set to one of the valid choices,
    return that as the new default level; otherwise fall back to ``_default_log_level``.
"""
env_level_str = os.getenv("MEGATRON_DEEPSPEED_VERBOSITY", None)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option MEGATRON_DEEPSPEED_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys()) }"
)
return _default_log_level
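# Usage sketch (added): e.g. `MEGATRON_DEEPSPEED_VERBOSITY=info python train.py`
# makes INFO the library-wide default level before any explicit
# set_verbosity() call.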
def _get_library_name() -> str:
return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_default_handler = logging.StreamHandler() # Set sys.stderr as stream.
_default_handler.flush = sys.stderr.flush
# Apply our default configuration to the library root logger.
library_root_logger = _get_library_root_logger()
library_root_logger.addHandler(_default_handler)
library_root_logger.setLevel(_get_default_logging_level())
library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
global _default_handler
with _lock:
if not _default_handler:
return
library_root_logger = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler)
library_root_logger.setLevel(logging.NOTSET)
_default_handler = None
def get_log_levels_dict():
return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
"""
Return a logger with the specified name.
This function is not supposed to be directly accessed unless you are writing a custom transformers module.
"""
if name is None:
name = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(name)
def get_verbosity() -> int:
"""
Return the current level for the 🤗 Transformers's root logger as an int.
Returns:
:obj:`int`: The logging level.
.. note::
🤗 Transformers has following logging levels:
- 50: ``transformers.logging.CRITICAL`` or ``transformers.logging.FATAL``
- 40: ``transformers.logging.ERROR``
- 30: ``transformers.logging.WARNING`` or ``transformers.logging.WARN``
- 20: ``transformers.logging.INFO``
- 10: ``transformers.logging.DEBUG``
"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
"""
Set the verbosity level for the 🤗 Transformers's root logger.
Args:
verbosity (:obj:`int`):
Logging level, e.g., one of:
- ``transformers.logging.CRITICAL`` or ``transformers.logging.FATAL``
- ``transformers.logging.ERROR``
- ``transformers.logging.WARNING`` or ``transformers.logging.WARN``
- ``transformers.logging.INFO``
- ``transformers.logging.DEBUG``
"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
"""Set the verbosity to the :obj:`INFO` level."""
return set_verbosity(INFO)
def set_verbosity_warning():
"""Set the verbosity to the :obj:`WARNING` level."""
return set_verbosity(WARNING)
def set_verbosity_debug():
"""Set the verbosity to the :obj:`DEBUG` level."""
return set_verbosity(DEBUG)
def set_verbosity_error():
"""Set the verbosity to the :obj:`ERROR` level."""
return set_verbosity(ERROR)
def disable_default_handler() -> None:
"""Disable the default handler of the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
"""Enable the default handler of the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
"""adds a handler to the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
"""removes given handler from the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
"""
Disable propagation of the library log outputs. Note that log propagation is disabled by default.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = False
def enable_propagation() -> None:
"""
Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to
prevent double logging if the root logger has been configured.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
"""
Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows:
::
[LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
formatter = logging.Formatter(
"[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s"
)
handler.setFormatter(formatter)
def reset_format() -> None:
"""
Resets the formatting for HuggingFace Transformers's loggers.
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(None)
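# Usage sketch (added; assumes this module is importable, e.g. as
# `distill_bloom.arguments.logging`):
#
#     from distill_bloom.arguments import logging as ds_logging
#
#     ds_logging.set_verbosity_info()           # library-wide level
#     ds_logging.enable_explicit_format()       # [LEVEL|file:line] time >> msg
#     logger = ds_logging.get_logger(__name__)  # child of the library root logger
#     logger.info("now visible")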
| distill-bloom-deepspeed-main | distill_bloom/arguments/logging.py |
# Arguments for distillation
import argparse
import collections
import os
import re
import time
import deepspeed
from .logging import log_levels
def parse_args(extra_args_provider=None, defaults={}, ignore_unknown_args=False):
r"""
    Helper function to parse all arguments necessary to perform teacher / student distillation
"""
parser = argparse.ArgumentParser(description="Main Arguments", allow_abbrev=False)
# HF model arguments
parser = _add_hf_model_args(parser)
# Regularization arguments
parser = _add_regularization_args(parser)
# Dataset paths
parser = _add_data_args(parser)
# DeepSpeed args
parser = deepspeed.add_config_arguments(parser)
# Training args
parser = _add_training_args(parser)
# Validation args
parser = _add_validation_args(parser)
# Initialization args
parser = _add_initialization_args(parser)
    # Distributed args
parser = _add_distributed_args(parser)
# Mixed precision args
parser = _add_mixed_precision_args(parser)
# Parse args
args = parser.parse_args()
    # Shared error message for the two mutually exclusive data loading modes.
    message = (
        "The two data loading modes are mutually exclusive: use either "
        "--data-path/--split (mode 1) or the --*-weighted-split-paths "
        "arguments (mode 2), not both."
    )
    if args.data_path:
        assert args.train_weighted_split_paths is None, message
setattr(args, "valid_weighted_split_names", None)
setattr(args, "valid_weighted_split_weights", None)
setattr(args, "valid_weighted_split_splits", None)
setattr(args, "test_weighted_split_names", None)
setattr(args, "test_weighted_split_weights", None)
setattr(args, "test_weighted_split_splits", None)
        # args.split defaults to None; it is set here so we can check that it
        # does not overlap with the 2nd mode of data loading
if args.split is None:
args.split = "969, 30, 1"
if (
args.train_weighted_split_paths
or args.valid_weighted_split_paths
or args.test_weighted_split_paths
):
assert args.data_path is None and args.split is None, message
return args
def _add_hf_model_args(parser, log_levels=log_levels):
r"""
A wrapper function to add arguments for loading HF models
"""
group = parser.add_argument_group(title="network parameters")
# Teacher & student paths
group.add_argument(
"--teacher-model-path", type=str, help="path to load the teacher weights from"
)
group.add_argument(
"--student-model-path", type=str, help="path to load the teacher weights from"
)
group.add_argument(
"--kill-switch-path",
type=str,
help=(
"path to look for a kill switch, which if found will automatically exit the"
" program"
),
)
# TODO: assess if we need those arguments in the future
group.add_argument(
"--log-level",
type=str,
choices=list(log_levels.keys()),
help=(
"Logger log level to use on the main process. Possible choices are the log"
" levels as strings: 'debug', 'info', 'warning', 'error' and 'critical',"
" plus a 'passive' level which doesn't set anything and lets the"
" application set the level."
),
)
group.add_argument(
"--log-level-replica",
type=str,
choices=list(log_levels.keys()),
help="Logger log level to use on replicas. Same choices as ``log_level``",
)
return parser
def _add_regularization_args(parser):
r"""
Network regularization arguments - modify them at your own risk
"""
group = parser.add_argument_group(title="regularization")
group.add_argument(
"--attention-dropout",
type=float,
default=0.1,
help="Post attention dropout probability.",
)
group.add_argument(
"--hidden-dropout",
type=float,
default=0.1,
help="Dropout probability for hidden state transformer.",
)
group.add_argument(
"--weight-decay",
type=float,
default=0.01,
help="Weight decay coefficient for L2 regularization.",
)
group.add_argument(
"--clip-grad",
type=float,
default=1.0,
help="Gradient clipping based on global L2 norm.",
)
group.add_argument(
"--adam-beta1",
type=float,
default=0.9,
help=(
"First coefficient for computing running averages "
"of gradient and its square"
),
)
group.add_argument(
"--adam-beta2",
type=float,
default=0.999,
help=(
"Second coefficient for computing running averages "
"of gradient and its square"
),
)
group.add_argument(
"--adam-eps",
type=float,
default=1e-08,
help="Term added to the denominator to improvenumerical stability",
)
group.add_argument(
"--sgd-momentum", type=float, default=0.9, help="Momentum factor for sgd"
)
return parser
def _add_data_args(parser):
r"""
Wrapper function to add arguments for loading data - this function is directly copied from Megatron-DeepSpeed
"""
group = parser.add_argument_group(title="data and dataloader")
# option 1 for data loading (mutually exclusive with option2)
group.add_argument(
"--data-path",
nargs="*",
default=None,
help=(
"Path to the training dataset. Accepted format:"
"1) a single data path, 2) multiple datasets in the"
"form: dataset1-weight dataset1-path dataset2-weight "
"dataset2-path ..."
),
)
group.add_argument(
"--split",
type=str,
default=None,
help=(
"Comma-separated list of proportions for training,"
" validation, and test split. For example the split "
"`90,5,5` will use 90%% of data for training, 5%% for "
"validation and 5%% for test."
),
)
# option 2 for data loading (mutually exclusive with option1)
# helper class to parse the --xxx-weighted-split-paths
# note here two args are set: extra valid dataset paths and names
class parse_data_paths(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if option_string == "--train-weighted-split-paths":
                assert len(values) == 1, (
                    "Only 1 dataset group is allowed to be passed for the "
                    "argument --train-weighted-split-paths"
                )
            # make sure string given in the correct format
            err_message = (
                "Each data group should be input on the following format "
                '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2" '
                "where START < END"
            )
for v in values:
# each prefix consists of several datasets separated by commas
prefix = ":".join(v.split(":")[1:]) # remove GIVEN_NAME
datasets = prefix.split(",")
# check if each dataset is formatted like `WEIGHT START:END PATH`
for d in datasets:
assert len(d.split()) == 3, err_message
start, end = d.split()[1].split(":")
assert float(start) < float(end), err_message
names = [v.split(":")[0] for v in values]
prefixes = [":".join(v.split(":")[1:]).strip() for v in values]
weights = [[d.split()[0] for d in p.split(",")] for p in prefixes]
splits = [[d.split()[1] for d in p.split(",")] for p in prefixes]
paths = [[d.split()[2] for d in p.split(",")] for p in prefixes]
# # to keep consistency with Option 1 of data loading (through --data-path)
# # paths will contain strings on the following form
# # "WEIGHTS1 PATH1 WEIGHTS2 PATH2 WEIGHTS3 PATH3" for each dataset group
# # while data will be parsed in additional arguments below
# paths_option1_style = []
# for p, w in zip(paths, weights):
# paths_option1_style.append(" ".join([f"{w_i} {p_i}" for p_i, w_i in zip(p,w)]))
# setattr(args, self.dest, paths_option1_style)
setattr(args, self.dest, paths)
setattr(args, self.dest.replace("paths", "weights"), weights)
setattr(args, self.dest.replace("paths", "splits"), splits)
setattr(args, self.dest.replace("paths", "names"), names)
group.add_argument(
"--train-weighted-split-paths",
nargs="*",
default=None,
help=(
"Weights, splits and paths to groups of datasets"
"Accepted format: ONE dataset groups could be"
"submitted in the following form between double quotes"
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"'
'e.g.: "NAME_ABC: 0.6 0:0.6 A, 0.3 0:1 B, 0.1 0:1 C" '
"WEIGHT is used to up and down sample each dataset A,B,C in the group"
"START:END indicates the split portion of the dataset"
),
action=parse_data_paths,
)
group.add_argument(
"--valid-weighted-split-paths",
nargs="*",
default=None,
help=(
"Weights, splits and paths to groups of datasets"
"Accepted format: one or many dataset groups could be"
"submitted in the following form each between double quotes"
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"'
'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
'"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" '
"validation will be run on each of those groups independently"
),
action=parse_data_paths,
)
group.add_argument(
"--test-weighted-split-paths",
nargs="*",
default=None,
help=(
"Weights, splits and paths to groups of datasets"
"Accepted format: one or many dataset groups could be"
"submitted in the following form each between double quotes"
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"'
'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
'"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" '
"test will be run on each of those groups independently"
),
action=parse_data_paths,
)
class parse_data_paths_path(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
expected_option_strings = [
"--train-weighted-split-paths-path",
"--valid-weighted-split-paths-path",
"--test-weighted-split-paths-path",
]
assert (
option_string in expected_option_strings
), f"Expected {option_string} to be in {expected_option_strings}"
with open(values, "r") as fi:
lines = fi.readlines()
assert (
len(lines) == 1
), f"Got multiple lines {len(lines)} instead of 1 expected"
assert (
lines[0][-2:] == '"\n' and lines[0][0] == '"'
), f"Invalid input format, got {lines}"
values = lines[0][1:-2].split('" "')
weighted_split_paths_dest = re.sub(r"_path$", "", self.dest)
weighted_split_paths_option = re.sub(
r"-path$", "", self.option_strings[0]
)
setattr(args, weighted_split_paths_dest, values)
parse_data_paths(
option_strings=[weighted_split_paths_option],
dest=weighted_split_paths_dest,
)(parser, args, values, option_string=weighted_split_paths_option)
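# Illustrative file format for the action above: the file passed to
# --xxx-weighted-split-paths-path must contain exactly one line, wrapped
# in double quotes, e.g.
#   "NAME_ABC: 0.6 0:0.6 A, 0.3 0:1 B, 0.1 0:1 C"
# Multiple groups are separated by '" "'; the quotes are stripped and the
# result is forwarded to the parse_data_paths action.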
group.add_argument(
"--train-weighted-split-paths-path",
type=str,
action=parse_data_paths_path,
default=None,
)
group.add_argument(
"--valid-weighted-split-paths-path",
type=str,
action=parse_data_paths_path,
default=None,
)
group.add_argument(
"--test-weighted-split-paths-path",
type=str,
action=parse_data_paths_path,
default=None,
)
group.add_argument(
"--log-path", type=str, default=None, help="Path to the save arguments file."
)
group.add_argument(
"--vocab-file", type=str, default=None, help="Path to the vocab file."
)
group.add_argument(
"--merge-file", type=str, default=None, help="Path to the BPE merge file."
)
group.add_argument(
"--vocab-extra-ids",
type=int,
default=0,
help=(
"Number of additional vocabulary tokens. "
"They are used for span masking in the T5 model"
),
)
group.add_argument(
"--seq-length",
type=int,
default=None,
help="Maximum sequence length to process.",
)
group.add_argument(
"--encoder-seq-length",
type=int,
default=None,
help=(
"Maximum encoder sequence length to process."
"This should be exclusive of --seq-length"
),
)
group.add_argument(
"--decoder-seq-length",
type=int,
default=None,
help="Maximum decoder sequence length to process.",
)
group.add_argument(
"--retriever-seq-length",
type=int,
default=256,
help="Maximum sequence length for the biencoder model for retriever",
)
group.add_argument(
"--sample-rate",
type=float,
default=1.0,
help="sample rate for training data. Supposed to be 0 < sample_rate < 1",
)
group.add_argument(
"--mask-prob",
type=float,
default=0.15,
help="Probability of replacing a token with mask.",
)
group.add_argument(
"--short-seq-prob",
type=float,
default=0.1,
help="Probability of producing a short sequence.",
)
group.add_argument("--mmap-warmup", action="store_true", help="Warm up mmap files.")
group.add_argument(
"--num-workers", type=int, default=2, help="Dataloader number of workers."
)
group.add_argument(
"--valid-num-workers",
type=int,
default=2,
help="Dataloader number of workers for validation.",
)
group.add_argument(
"--tokenizer-type",
type=str,
default=None,
choices=[
"BertWordPieceLowerCase",
"BertWordPieceCase",
"GPT2BPETokenizer",
"PretrainedFromHF",
],
help="What type of tokenizer to use.",
)
group.add_argument(
"--tokenizer-name-or-path",
type=str,
default=None,
help="Name or path of the huggingface tokenizer.",
)
group.add_argument(
"--data-impl",
type=str,
default="infer",
choices=["lazy", "cached", "mmap", "infer"],
help="Implementation of indexed datasets.",
)
group.add_argument(
"--reset-position-ids",
action="store_true",
help="Reset posistion ids after end-of-document token.",
)
group.add_argument(
"--reset-attention-mask",
action="store_true",
help=(
"Reset self attention maske after end-of-document token. Attention between"
" tokens from different documents is null."
),
)
group.add_argument(
"--eod-mask-loss",
action="store_true",
help="Mask loss for the end of document tokens.",
)
group.add_argument(
"--loss-on-targets-only",
action="store_true",
help="Mask loss on input sequence.",
)
group.add_argument(
"--reweight-loss-based-on-position-frequency",
action="store_true",
help=(
"Some objectives require us to sample loss_mask. This might introduce bias"
" towards specific positions. This option tries to un-bias the loss by"
" reweighting loss on specific positions based on how frequently we train"
" on that position.This is mostly used for prefix_lm training"
),
)
group.add_argument(
"--noise-density",
type=float,
default=None,
help="Span corruption noise density",
)
group.add_argument(
"--mean-noise-span-length",
type=int,
default=None,
help="Span corruption mean noise span length",
)
return parser
def _add_training_args(parser):
group = parser.add_argument_group(title="training")
group.add_argument(
"--micro-batch-size",
type=int,
default=None,
help=(
"Batch size per model instance (local batch size). "
"Global batch size is local batch size times data "
"parallel size times number of micro batches."
),
)
group.add_argument(
"--batch-size",
type=int,
default=None,
help="Old batch size parameter, do not use. Use --micro-batch-size instead",
)
group.add_argument(
"--global-batch-size",
type=int,
default=None,
help=(
"Training batch size. If set, it should be a "
"multiple of micro-batch-size times data-parallel-size. "
"If this value is None, then "
"use micro-batch-size * data-parallel-size as the "
"global batch size. This choice will result in 1 for "
"number of micro-batches."
),
)
group.add_argument(
"--rampup-batch-size",
nargs="*",
default=None,
help=(
"Batch size ramp up with the following values:"
" --rampup-batch-size <start batch size> "
" <batch size increment> "
" <ramp-up samples> "
"For example: "
" --rampup-batch-size 16 8 300000 "
" --global-batch-size 1024 "
"will start with global batch size 16 and over "
" (1024 - 16) / 8 = 126 intervals will increase "
"the batch size linearly to 1024. In each interval "
"we will use approximately 300000 / 126 = 2380 samples."
),
)
group.add_argument(
"--checkpoint-activations",
action="store_true",
help=(
"Checkpoint activation to allow for training "
"with larger models, sequences, and batch sizes."
),
)
group.add_argument(
"--distribute-checkpointed-activations",
action="store_true",
help="If set, distribute checkpointed activations across model parallel group.",
)
group.add_argument(
"--checkpoint-num-layers",
type=int,
default=1,
help="chunk size (number of layers) for checkpointing.",
)
group.add_argument(
"--train-iters",
type=int,
default=None,
help=(
"Total number of iterations to train over all "
"training runs. Note that either train-iters or "
"train-samples should be provided."
),
)
group.add_argument(
"--train-samples",
type=int,
default=None,
help=(
"Total number of samples to train over all "
"training runs. Note that either train-iters or "
"train-samples should be provided."
),
)
group.add_argument(
"--train-tokens",
type=int,
default=None,
help="Total number of tokens to train over all training runs.",
)
group.add_argument(
"--log-interval", type=int, default=100, help="Report loss and timing interval."
)
group.add_argument(
"--exit-interval",
type=int,
default=None,
help="Exit the program after the iteration is divisible by this value.",
)
group.add_argument(
"--exit-duration-in-mins",
type=int,
default=None,
help="Exit the program after this many minutes.",
)
group.add_argument(
"--tensorboard-dir",
type=str,
default=None,
help="Write TensorBoard logs to this directory.",
)
group.add_argument(
"--no-masked-softmax-fusion",
action="store_false",
help="Disable fusion of query_key_value scaling, masking, and softmax.",
dest="masked_softmax_fusion",
)
group.add_argument(
"--no-bias-gelu-fusion",
action="store_false",
help="Disable bias and gelu fusion.",
dest="bias_gelu_fusion",
)
group.add_argument(
"--no-bias-dropout-fusion",
action="store_false",
help="Disable bias and dropout fusion.",
dest="bias_dropout_fusion",
)
group.add_argument(
"--optimizer",
type=str,
default="adam",
choices=["adam", "sgd"],
help="Optimizer function",
)
group.add_argument(
"--use-bnb-optimizer",
action="store_true",
help=(
"Use bitsandbytes optimizer for efficient training,"
"please refer https://github.com/facebookresearch/bitsandbytes."
),
dest="use_bnb_optimizer",
)
group.add_argument(
"--dataloader-type",
type=str,
default=None,
choices=["single", "cyclic"],
help="Single pass vs multiple pass data loader",
)
group.add_argument(
"--cpu-optimizer", action="store_true", help="Run optimizer on CPU"
)
group.add_argument(
"--cpu_torch_adam",
action="store_true",
help="Use Torch Adam as optimizer on CPU.",
)
group.add_argument(
"--codecarbon-dir",
type=str,
default=None,
help="Write CodeCarbon logs to this directory.",
)
group.add_argument(
"--eval-only",
type=bool,
required=False,
help=(
"If set to True, no train step will be performed."
"and only the evaluation on the `valid` and `test` sets "
"will be performed"
),
)
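# Caveat: argparse's type=bool converts any non-empty string to True, so
# "--eval-only False" still evaluates to True; only omitting the flag
# leaves the default (None).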
group.add_argument(
"--skip-train-iteration-range",
type=str,
nargs="+",
default=None,
help=(
"Iteration ranges to skip. The values are one or more dash-separated"
" ranges. e.g., 101-200 251-300."
),
)
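# Illustrative example (semantics inferred from the help text above):
#   --skip-train-iteration-range 101-200 251-300
# skips training iterations 101..200 and 251..300.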
group.add_argument(
"--inference",
action="store_true",
help=(
"Very basic inference mode: not allocating optim/lr - requires ZERO_STAGE=0"
),
)
group.add_argument(
"--abort-on-unmet-fused-kernel-constraints",
action="store_true",
help=(
"If set to True, the program will abort if the constraints for loading a"
" fused kernel aren't met"
),
)
group.add_argument(
"--pp-partition-method",
type=str,
default=None,
help=(
"Use to override the pipeline stages partitioning method. e.g.,"
" 'type:transformer|embedding'"
),
)
return parser
def _add_validation_args(parser):
group = parser.add_argument_group(title="validation")
group.add_argument(
"--eval-iters",
type=int,
default=100,
help="Number of iterations to run for evaluationvalidation/test for.",
)
group.add_argument(
"--eval-interval",
type=int,
default=1000,
help="Interval between running evaluation on validation set.",
)
return parser
def _add_initialization_args(parser):
group = parser.add_argument_group(title="initialization")
group.add_argument(
"--seed",
type=int,
default=1234,
help="Random seed used for python, numpy, pytorch, and cuda.",
)
group.add_argument(
"--init-method-std",
type=float,
default=0.02,
help=(
"Standard deviation of the zero mean normal "
"distribution used for weight initialization."
),
)
group.add_argument(
"--init-method-xavier-uniform",
action="store_true",
help="Enable Xavier uniform parameter initialization",
)
return parser
def _add_distributed_args(parser):
group = parser.add_argument_group(title="distributed")
group.add_argument(
"--tensor-model-parallel-size",
type=int,
default=1,
help="Degree of tensor model parallelism.",
)
group.add_argument(
"--student-tensor-model-parallel-size",
type=int,
default=1,
help="Degree of tensor model parallelism.",
)
group.add_argument(
"--pipeline-model-parallel-size",
type=int,
default=1,
help="Degree of pipeline model parallelism.",
)
group.add_argument(
"--student-pipeline-model-parallel-size",
type=int,
default=1,
help="Degree of pipeline model parallelism.",
)
group.add_argument(
"--model-parallel-size",
type=int,
default=None,
help=(
"Old model parallel argument, do not use. Use "
"--tensor-model-parallel-size instead."
),
)
group.add_argument(
"--num-layers-per-virtual-pipeline-stage",
type=int,
default=None,
help="Number of layers per virtual pipeline stage",
)
group.add_argument(
"--distributed-backend",
default="nccl",
choices=["nccl", "gloo"],
help="Which backend to use for distributed training.",
)
group.add_argument(
"--DDP-impl",
default="local",
choices=["local", "torch"],
help="which DistributedDataParallel implementation to use.",
)
group.add_argument(
"--use-contiguous-buffers-in-ddp",
action="store_true",
help=(
"If set, use contiguous buffer in DDP. Note that "
"this option only works woth local DDP."
),
)
group.add_argument(
"--no-scatter-gather-tensors-in-pipeline",
action="store_false",
help="Use scatter/gather to optimize communication of tensors in pipeline",
dest="scatter_gather_tensors_in_pipeline",
)
group.add_argument(
"--local_rank",
type=int,
default=None,
help="local rank passed from distributed launcher.",
)
group.add_argument(
"--lazy-mpu-init",
type=bool,
required=False,
help=(
"If set to True, initialize_megatron() "
"skips DDP initialization and returns function to "
"complete it instead.Also turns on "
"--use-cpu-initialization flag. This is for "
"external DDP manager."
),
)
group.add_argument(
"--use-cpu-initialization",
action="store_true",
default=None,
help="If set, affine parallel weights initialization uses CPU",
)
return parser
def _add_mixed_precision_args(parser):
group = parser.add_argument_group(title="mixed precision")
group.add_argument("--fp16", action="store_true", help="Run model in fp16 mode.")
group.add_argument(
"--bf16", action="store_true", help="Run model in bfloat16 mode."
)
return parser
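# Hypothetical usage sketch (the composing builder lives elsewhere in the
# package; only the helper names defined in this file are real):
#
#   import argparse
#   parser = argparse.ArgumentParser(description="distill-bloom arguments")
#   for add_args in (
#       _add_regularization_args,
#       _add_data_args,
#       _add_training_args,
#       _add_validation_args,
#       _add_initialization_args,
#       _add_distributed_args,
#       _add_mixed_precision_args,
#   ):
#       parser = add_args(parser)
#   args = parser.parse_args()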
| distill-bloom-deepspeed-main | distill_bloom/arguments/arguments.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
from setuptools import find_packages
extras = {}
extras["quality"] = ["black ~= 23.1", "ruff >= 0.0.241", "hf-doc-builder >= 0.3.0", "urllib3 < 2.0.0"]
extras["docs"] = []
extras["test_prod"] = ["pytest", "pytest-xdist", "pytest-subtests", "parameterized"]
extras["test_dev"] = ["datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed", "tqdm"]
extras["testing"] = extras["test_prod"] + extras["test_dev"]
extras["rich"] = ["rich"]
extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard"]
extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"]
extras["sagemaker"] = [
"sagemaker", # boto3 is a required package in sagemaker
]
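# Typical installs enabled by the extras above, for reference:
#   pip install -e ".[dev]"            # quality + testing + rich
#   pip install "accelerate[testing]"  # test_prod + test_dev dependencies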
setup(
name="accelerate",
version="0.20.0.dev0",
description="Accelerate",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="deep learning",
license="Apache",
author="The HuggingFace team",
author_email="sylvain@huggingface.co",
url="https://github.com/huggingface/accelerate",
package_dir={"": "src"},
packages=find_packages("src"),
entry_points={
"console_scripts": [
"accelerate=accelerate.commands.accelerate_cli:main",
"accelerate-config=accelerate.commands.config:main",
"accelerate-launch=accelerate.commands.launch:main",
]
},
python_requires=">=3.7.0",
install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.6.0"],
extras_require=extras,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
# Release checklist
# 1. Change the version in __init__.py and setup.py.
# 2. Commit these changes with the message: "Release: VERSION"
# 3. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' "
# Push the tag to git: git push --tags origin main
# 4. Run the following commands in the top-level directory:
# python setup.py bdist_wheel
# python setup.py sdist
# 5. Upload the package to the pypi test server first:
# twine upload dist/* -r pypitest
# twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
# 6. Check that you can install it in a virtualenv by running:
# pip install -i https://testpypi.python.org/pypi accelerate
# accelerate env
# accelerate test
# 7. Upload the final version to actual pypi:
# twine upload dist/* -r pypi
# 8. Add release notes to the tag in github once everything is looking hunky-dory.
# 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master
| accelerate-wip-main | setup.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from transformers import AutoModelForCausalLM, AutoTokenizer
from accelerate.big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from accelerate.hooks import remove_hook_from_submodules
from accelerate.test_utils import require_cuda, require_mps, require_multi_gpu, require_torch_min_version, slow
from accelerate.utils import offload_state_dict
class ModelForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = nn.Linear(4, 5)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x)))
class ModelForTestTiedWeights(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(4, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = nn.Linear(4, 4)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x)))
class BiggerModelForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 4)
self.linear2 = nn.Linear(4, 5)
self.batchnorm = nn.BatchNorm1d(5)
self.linear3 = nn.Linear(5, 6)
self.linear4 = nn.Linear(6, 5)
def forward(self, x):
return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))
# To test preload_module_classes
class ModuleWithUnusedSubModules(nn.Module):
def __init__(self, input_dim, output_dim):
super().__init__()
self.linear = nn.Linear(input_dim, output_dim)
def forward(self, x):
return x @ self.linear.weight.t() + self.linear.bias
class ModelWithUnusedSubModulesForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = ModuleWithUnusedSubModules(3, 4)
self.linear2 = ModuleWithUnusedSubModules(4, 5)
self.batchnorm = nn.BatchNorm1d(5)
self.linear3 = ModuleWithUnusedSubModules(5, 6)
self.linear4 = ModuleWithUnusedSubModules(6, 5)
def forward(self, x):
return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))
@require_torch_min_version(version="1.9.0")
class BigModelingTester(unittest.TestCase):
def test_init_empty_weights(self):
# base use
with init_empty_weights():
module = nn.Linear(4, 5)
self.assertEqual(module.weight.device, torch.device("meta"))
# base use with buffers, they are not touched
with init_empty_weights():
module = nn.BatchNorm1d(4)
self.assertEqual(module.weight.device, torch.device("meta"))
self.assertEqual(module.running_mean.device, torch.device("cpu"))
# Use with include_buffers=True
with init_empty_weights(include_buffers=True):
module = nn.BatchNorm1d(4)
self.assertEqual(module.weight.device, torch.device("meta"))
self.assertEqual(module.running_mean.device, torch.device("meta"))
# Double check we didn't break PyTorch
module = nn.BatchNorm1d(4)
self.assertEqual(module.weight.device, torch.device("cpu"))
self.assertEqual(module.running_mean.device, torch.device("cpu"))
def test_init_empty_weights_very_large_model(self):
# This is a 100 billion parameters model.
with init_empty_weights():
_ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
@require_cuda
def test_init_on_device_cuda(self):
device = torch.device("cuda:0")
with init_on_device(device):
model = nn.Linear(10, 10)
self.assertEqual(model.weight.device, device)
self.assertEqual(model.weight.device, device)
@require_mps
def test_init_on_device_mps(self):
device = torch.device("mps:0")
with init_on_device(device):
model = nn.Linear(10, 10)
self.assertEqual(model.weight.device, device)
self.assertEqual(model.weight.device, device)
def test_cpu_offload(self):
model = ModelForTest()
x = torch.randn(2, 3)
expected = model(x)
device = torch.device(0 if torch.cuda.is_available() else "cpu")
cpu_offload(model, execution_device=device)
output = model(x)
self.assertTrue(
torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}"
)
# Clean up for next test.
remove_hook_from_submodules(model)
cpu_offload(model, execution_device=device, offload_buffers=True)
output = model(x)
self.assertTrue(
torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}"
)
def test_cpu_offload_with_unused_submodules(self):
model = ModelWithUnusedSubModulesForTest()
x = torch.randn(2, 3)
expected = model(x)
device = torch.device(0 if torch.cuda.is_available() else "cpu")
cpu_offload(model, execution_device=device, preload_module_classes=["ModuleWithUnusedSubModules"])
output = model(x)
self.assertTrue(
torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}"
)
# Clean up for next test.
remove_hook_from_submodules(model)
cpu_offload(
model,
execution_device=device,
offload_buffers=True,
preload_module_classes=["ModuleWithUnusedSubModules"],
)
output = model(x)
self.assertTrue(
torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}"
)
@slow
@require_cuda
def test_cpu_offload_gpt2(self):
tokenizer = AutoTokenizer.from_pretrained("gpt2")
inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0)
gpt2 = AutoModelForCausalLM.from_pretrained("gpt2")
cpu_offload(gpt2, execution_device=0)
outputs = gpt2.generate(inputs["input_ids"])
self.assertEqual(
tokenizer.decode(outputs[0].tolist()),
"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo",
)
def test_disk_offload(self):
model = ModelForTest()
x = torch.randn(2, 3)
expected = model(x)
device = torch.device(0 if torch.cuda.is_available() else "cpu")
with TemporaryDirectory() as tmp_dir:
disk_offload(model, tmp_dir, execution_device=device)
output = model(x)
self.assertTrue(
torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}"
)
# Clean up for next test.
remove_hook_from_submodules(model)
with TemporaryDirectory() as tmp_dir:
disk_offload(model, tmp_dir, execution_device=device, offload_buffers=True)
output = model(x)
self.assertTrue(
torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}"
)
def test_disk_offload_with_unused_submodules(self):
model = ModelWithUnusedSubModulesForTest()
x = torch.randn(2, 3)
expected = model(x)
device = torch.device(0 if torch.cuda.is_available() else "cpu")
with TemporaryDirectory() as tmp_dir:
disk_offload(
model, tmp_dir, execution_device=device, preload_module_classes=["ModuleWithUnusedSubModules"]
)
output = model(x)
self.assertTrue(
torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}"
)
# Clean up for next test.
remove_hook_from_submodules(model)
with TemporaryDirectory() as tmp_dir:
disk_offload(
model,
tmp_dir,
execution_device=device,
offload_buffers=True,
preload_module_classes=["ModuleWithUnusedSubModules"],
)
output = model(x)
self.assertTrue(
torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}"
)
@slow
@require_cuda
def test_disk_offload_gpt2(self):
tokenizer = AutoTokenizer.from_pretrained("gpt2")
inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0)
gpt2 = AutoModelForCausalLM.from_pretrained("gpt2")
with TemporaryDirectory() as tmp_dir:
disk_offload(gpt2, tmp_dir, execution_device=0)
outputs = gpt2.generate(inputs["input_ids"])
self.assertEqual(
tokenizer.decode(outputs[0].tolist()),
"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo",
)
@require_cuda
def test_dispatch_model(self):
model = ModelForTest()
device_map = {"linear1": "disk", "batchnorm": "cpu", "linear2": 0}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
dispatch_model(model, device_map, offload_dir=tmp_dir)
output = model(x)
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
@require_cuda
def test_dispatch_model_tied_weights(self):
model = ModelForTestTiedWeights()
model.linear1.weight = model.linear2.weight
device_map = {"linear1": 0, "batchnorm": 0, "linear2": 0}
dispatch_model(model, device_map)
self.assertIs(model.linear2.weight, model.linear1.weight)
@require_multi_gpu
def test_dispatch_model_multi_gpu(self):
model = BiggerModelForTest()
device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 1}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
dispatch_model(model, device_map, offload_dir=tmp_dir)
output = model(x)
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
@slow
@require_multi_gpu
def test_dispatch_model_gpt2_on_two_gpus(self):
tokenizer = AutoTokenizer.from_pretrained("gpt2")
inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0)
gpt2 = AutoModelForCausalLM.from_pretrained("gpt2")
# Dispatch on GPUs 0 and 1
device_map = {
"transformer.wte": 0,
"transformer.wpe": 0,
"transformer.ln_f": 1,
"lm_head": 0,
}
for i in range(12):
device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1
gpt2 = dispatch_model(gpt2, device_map)
outputs = gpt2.generate(inputs["input_ids"])
self.assertEqual(
tokenizer.decode(outputs[0].tolist()),
"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo",
)
# Dispatch with a bit of CPU offload
gpt2 = AutoModelForCausalLM.from_pretrained("gpt2")
for i in range(4):
device_map[f"transformer.h.{i}"] = "cpu"
gpt2 = dispatch_model(gpt2, device_map)
outputs = gpt2.generate(inputs["input_ids"])
self.assertEqual(
tokenizer.decode(outputs[0].tolist()),
"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo",
)
# Dispatch with a bit of CPU and disk offload
gpt2 = AutoModelForCausalLM.from_pretrained("gpt2")
for i in range(2):
device_map[f"transformer.h.{i}"] = "disk"
with TemporaryDirectory() as tmp_dir:
state_dict = {
k: p for k, p in gpt2.state_dict().items() if "transformer.h.0" in k or "transformer.h.1" in k
}
offload_state_dict(tmp_dir, state_dict)
gpt2 = dispatch_model(gpt2, device_map, offload_dir=tmp_dir)
outputs = gpt2.generate(inputs["input_ids"])
self.assertEqual(
tokenizer.decode(outputs[0].tolist()),
"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo",
)
@require_cuda
def test_dispatch_model_with_unused_submodules(self):
model = ModelWithUnusedSubModulesForTest()
device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 0}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
dispatch_model(
model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"]
)
output = model(x)
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
@require_multi_gpu
def test_dispatch_model_with_unused_submodules_multi_gpu(self):
model = ModelWithUnusedSubModulesForTest()
device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 1}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
dispatch_model(
model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"]
)
output = model(x)
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
@require_cuda
def test_load_checkpoint_and_dispatch(self):
model = ModelForTest()
device_map = {"linear1": "cpu", "batchnorm": "cpu", "linear2": 0}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
checkpoint = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), checkpoint)
new_model = ModelForTest()
new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map)
# CPU-offloaded weights are on the meta device while waiting for the forward pass.
self.assertEqual(new_model.linear1.weight.device, torch.device("meta"))
self.assertEqual(new_model.linear2.weight.device, torch.device(0))
output = new_model(x)
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
@require_multi_gpu
def test_load_checkpoint_and_dispatch_multi_gpu(self):
model = BiggerModelForTest()
device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 1}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
checkpoint = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), checkpoint)
new_model = BiggerModelForTest()
new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map)
# CPU-offloaded weights are on the meta device while waiting for the forward pass.
self.assertEqual(new_model.linear1.weight.device, torch.device("meta"))
self.assertEqual(new_model.linear2.weight.device, torch.device("meta"))
self.assertEqual(new_model.linear3.weight.device, torch.device(0))
self.assertEqual(new_model.linear4.weight.device, torch.device(1))
output = new_model(x)
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
@require_cuda
def test_load_checkpoint_and_dispatch_with_unused_submodules(self):
model = ModelWithUnusedSubModulesForTest()
device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 0}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
checkpoint = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), checkpoint)
new_model = ModelWithUnusedSubModulesForTest()
new_model = load_checkpoint_and_dispatch(
new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"]
)
# CPU-offloaded weights are on the meta device while waiting for the forward pass.
self.assertEqual(new_model.linear1.linear.weight.device, torch.device("meta"))
self.assertEqual(new_model.linear2.linear.weight.device, torch.device("meta"))
self.assertEqual(new_model.linear3.linear.weight.device, torch.device(0))
self.assertEqual(new_model.linear4.linear.weight.device, torch.device(0))
output = new_model(x)
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
@require_multi_gpu
def test_load_checkpoint_and_dispatch_multi_gpu_with_unused_submodules(self):
model = ModelWithUnusedSubModulesForTest()
device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 1}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
checkpoint = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), checkpoint)
new_model = ModelWithUnusedSubModulesForTest()
new_model = load_checkpoint_and_dispatch(
new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"]
)
# CPU-offloaded weights are on the meta device while waiting for the forward pass.
self.assertEqual(new_model.linear1.linear.weight.device, torch.device("meta"))
self.assertEqual(new_model.linear2.linear.weight.device, torch.device("meta"))
self.assertEqual(new_model.linear3.linear.weight.device, torch.device(0))
self.assertEqual(new_model.linear4.linear.weight.device, torch.device(1))
output = new_model(x)
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
@require_cuda
def test_cpu_offload_with_hook(self):
model1 = torch.nn.Linear(4, 5)
model1, hook1 = cpu_offload_with_hook(model1)
self.assertEqual(model1.weight.device, torch.device("cpu"))
inputs = torch.randn(3, 4)
outputs = model1(inputs)
self.assertEqual(outputs.device, torch.device(0))
self.assertEqual(model1.weight.device, torch.device(0))
hook1.offload()
self.assertEqual(model1.weight.device, torch.device("cpu"))
model2 = torch.nn.Linear(5, 5)
model2, hook2 = cpu_offload_with_hook(model2, prev_module_hook=hook1)
self.assertEqual(model2.weight.device, torch.device("cpu"))
outputs = model1(inputs)
self.assertEqual(outputs.device, torch.device(0))
self.assertEqual(model1.weight.device, torch.device(0))
outputs = model2(outputs)
self.assertEqual(outputs.device, torch.device(0))
self.assertEqual(model1.weight.device, torch.device("cpu"))
self.assertEqual(model2.weight.device, torch.device(0))
hook2.offload()
self.assertEqual(model2.weight.device, torch.device("cpu"))
@slow
@require_multi_gpu
def test_dispatch_model_bnb(self):
"""Tests that `dispatch_model` quantizes int8 layers"""
from huggingface_hub import hf_hub_download
from transformers import AutoConfig, AutoModel
from transformers.utils.bitsandbytes import replace_8bit_linear
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
model = replace_8bit_linear(model, modules_to_not_convert=["lm_head"])
# TODO: @younesbelkada remove this block on the next `transformers` release
for p in model.parameters():
p.requires_grad = False
model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin")
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
# device_map="auto",
device_map="balanced",
)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)
self.assertTrue(model.h[-1].self_attention.query_key_value.weight.dtype == torch.int8)
self.assertTrue(model.h[-1].self_attention.query_key_value.weight.device.index == 1)
@slow
def test_dispatch_model_int8_simple(self):
"""Tests that `dispatch_model` quantizes int8 layers"""
from huggingface_hub import hf_hub_download
from transformers import AutoConfig, AutoModel
from transformers.utils.bitsandbytes import replace_8bit_linear
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
model = replace_8bit_linear(model, modules_to_not_convert=["lm_head"])
# TODO: @younesbelkada remove this block on the next `transformers` release
for p in model.parameters():
p.requires_grad = False
model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin")
# test with auto
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map="auto",
)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
model = replace_8bit_linear(model, modules_to_not_convert=["lm_head"])
for p in model.parameters():
p.requires_grad = False
# test with str device map
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map={"": torch.device("cuda:0")},
)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
model = replace_8bit_linear(model, modules_to_not_convert=["lm_head"])
# TODO: @younesbelkada remove this block on the next `transformers` release
for p in model.parameters():
p.requires_grad = False
# test with torch.device device map
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map={"": "cuda:0"},
)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)
@slow
@unittest.skip("Un-skip in the next transformers release")
def test_dispatch_model_fp4_simple(self):
"""Tests that `dispatch_model` quantizes fp4 layers"""
from huggingface_hub import hf_hub_download
from transformers import AutoConfig, AutoModel, BitsAndBytesConfig
from transformers.utils.bitsandbytes import replace_with_bnb_linear
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin")
# test with auto
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map="auto",
)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
# test with str device map
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map={"": torch.device("cuda:0")},
)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
# test with torch.device device map
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map={"": "cuda:0"},
)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8)
self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)
| accelerate-wip-main | tests/test_big_modeling.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
def test_accelerated_optimizer_pickling(self):
model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), 0.1)
accelerator = Accelerator()
optimizer = accelerator.prepare(optimizer)
try:
pickle.loads(pickle.dumps(optimizer))
except Exception as e:
self.fail(f"Accelerated optimizer pickling failed with {e}")
AcceleratorState._reset_state()
| accelerate-wip-main | tests/test_optimizer.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate.data_loader import (
BatchSamplerShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
# For testing, an iterable dataset of random length
def __init__(self, p_stop=0.01, max_length=1000):
self.p_stop = p_stop
self.max_length = max_length
def __iter__(self):
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
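# Builds one BatchSamplerShard per simulated process, materializes each
# shard, and compares it against the hand-written `expected` lists; when
# batches are not split, shard lengths must also match the expected lengths.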
batch_sampler_shards = [
BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
for i in range(2)
]
batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
self.assertListEqual(batch_sampler_lists, expected)
def test_batch_sampler_shards_with_no_splits(self):
# Check the shards when the dataset is a round multiple of total batch size.
batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
# Expected shouldn't change
self.check_batch_sampler_shards(batch_sampler, expected)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batches.
batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batches.
batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
# Check the shards when the dataset is very small.
batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
expected = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(batch_sampler, expected)
batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
expected = [[], []]
self.check_batch_sampler_shards(batch_sampler, expected)
def test_batch_sampler_shards_with_splits(self):
# Check the shards when the dataset is a round multiple of batch size.
batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
# Expected shouldn't change
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
# Check the shards when the dataset is not a round multiple of batch size.
batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
# Check the shards when the dataset is very small.
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
expected = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
expected = [[], []]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
def test_batch_sampler_shards_with_no_splits_no_even(self):
# Check the shards when the dataset is a round multiple of total batch size.
batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
# Expected shouldn't change
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batches.
batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batches.
batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
# Check the shards when the dataset is very small.
batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
expected = [[[0, 1]], []]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
expected = [[], []]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
def test_batch_sampler_shards_with_splits_no_even(self):
# Check the shards when the dataset is a round multiple of batch size.
batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
# Expected shouldn't change
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
# Check the shards when the dataset is not a round multiple of batch size.
batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
# Check the shards when the dataset is very small.
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
expected = [[[0, 1]], []]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
expected = [[], []]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
def test_batch_sampler_with_varying_batch_size(self):
batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
self.assertEqual(len(batch_sampler_shards[0]), 3)
self.assertEqual(len(batch_sampler_shards[1]), 2)
self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
def check_iterable_dataset_shards(
self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
):
random.seed(seed)
reference = list(dataset)
iterable_dataset_shards = [
IterableDatasetShard(
dataset,
batch_size=batch_size,
drop_last=drop_last,
num_processes=num_processes,
process_index=i,
split_batches=split_batches,
)
for i in range(num_processes)
]
iterable_dataset_lists = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(seed)
iterable_dataset_lists.append(list(iterable_dataset_shard))
shard_batch_size = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
first_list = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(l), len(first_list))
self.assertTrue(len(l) % shard_batch_size == 0)
observed = []
for idx in range(0, len(first_list), shard_batch_size):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(reference) < len(observed):
reference += reference
self.assertListEqual(observed, reference[: len(observed)])
def test_iterable_dataset_shard(self):
seed = 42
dataset = RandomIterableDataset()
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
# Edge case with a very small dataset
dataset = RandomIterableDataset(max_length=2)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
def test_skip_batch_sampler(self):
batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
def test_skip_data_loader(self):
dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def test_skip_first_batches(self):
dataloader = DataLoader(list(range(16)), batch_size=4)
new_dataloader = skip_first_batches(dataloader, num_batches=2)
self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
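# Illustrative sketch (not part of the original test suite): the mid-epoch
# resume pattern the three skip tests above verify. The import path and the
# toy dataset are assumptions for this example; `skip_first_batches` drops
# the first `num_batches` batches so a crashed run can pick up where it left off.
def _demo_resume_mid_epoch():
    from torch.utils.data import DataLoader

    from accelerate.data_loader import skip_first_batches

    dataloader = DataLoader(list(range(16)), batch_size=4)
    # Pretend a previous run already consumed the first two batches.
    resumed = skip_first_batches(dataloader, num_batches=2)
    for batch in resumed:
        print(batch.tolist())  # [8, 9, 10, 11] then [12, 13, 14, 15]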
| accelerate-wip-main | tests/test_data_loader.py |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
fp16 = True
ec2_instance_type = "ml.p3.2xlarge"
iam_role_name = "accelerate_sagemaker_execution_role"
profile = "hf-sm"
region = "us-east-1"
num_machines = 1
base_job_name = "accelerate-sagemaker-1"
pytorch_version = "1.6"
transformers_version = "4.4"
training_script = "train.py"
success_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
fail_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class SageMakerLaunch(unittest.TestCase):
def test_args_convert(self):
# Valid argument values are converted to their inferred Python types.
converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args["model_name_or_path"], str)
assert isinstance(converted_args["do_train"], bool)
assert isinstance(converted_args["epochs"], int)
assert isinstance(converted_args["learning_rate"], float)
assert isinstance(converted_args["max_steps"], float)
with pytest.raises(ValueError):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
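# Illustrative sketch (not part of the original test suite): what the
# assertions above check, shown end to end. `_convert_nargs_to_dict` parses
# CLI-style nargs and infers Python types from the string values; the
# argument list below is an assumed example.
def _demo_nargs_conversion():
    from accelerate.utils.launch import _convert_nargs_to_dict

    args = ["--epochs", "3", "--learning_rate", "5e-5", "--do_train", "False"]
    converted = _convert_nargs_to_dict(args)
    print(type(converted["epochs"]).__name__)  # int
    print(type(converted["learning_rate"]).__name__)  # float
    print(type(converted["do_train"]).__name__)  # bool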
| accelerate-wip-main | tests/test_sagemaker.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import unittest
from collections import UserDict, namedtuple
import torch
from accelerate.test_utils.testing import require_cuda, require_torch_min_version
from accelerate.test_utils.training import RegressionModel
from accelerate.utils import (
convert_outputs_to_fp32,
extract_model_from_parallel,
find_device,
patch_environment,
recursively_apply,
send_to_device,
)
ExampleNamedTuple = namedtuple("ExampleNamedTuple", "a b c")
class UtilsTester(unittest.TestCase):
def test_send_to_device(self):
tensor = torch.randn(5, 2)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
result1 = send_to_device(tensor, device)
self.assertTrue(torch.equal(result1.cpu(), tensor))
result2 = send_to_device((tensor, [tensor, tensor], 1), device)
self.assertIsInstance(result2, tuple)
self.assertTrue(torch.equal(result2[0].cpu(), tensor))
self.assertIsInstance(result2[1], list)
self.assertTrue(torch.equal(result2[1][0].cpu(), tensor))
self.assertTrue(torch.equal(result2[1][1].cpu(), tensor))
self.assertEqual(result2[2], 1)
result2 = send_to_device({"a": tensor, "b": [tensor, tensor], "c": 1}, device)
self.assertIsInstance(result2, dict)
self.assertTrue(torch.equal(result2["a"].cpu(), tensor))
self.assertIsInstance(result2["b"], list)
self.assertTrue(torch.equal(result2["b"][0].cpu(), tensor))
self.assertTrue(torch.equal(result2["b"][1].cpu(), tensor))
self.assertEqual(result2["c"], 1)
result3 = send_to_device(ExampleNamedTuple(a=tensor, b=[tensor, tensor], c=1), device)
self.assertIsInstance(result3, ExampleNamedTuple)
self.assertTrue(torch.equal(result3.a.cpu(), tensor))
self.assertIsInstance(result3.b, list)
self.assertTrue(torch.equal(result3.b[0].cpu(), tensor))
self.assertTrue(torch.equal(result3.b[1].cpu(), tensor))
self.assertEqual(result3.c, 1)
result4 = send_to_device(UserDict({"a": tensor, "b": [tensor, tensor], "c": 1}), device)
self.assertIsInstance(result4, UserDict)
self.assertTrue(torch.equal(result4["a"].cpu(), tensor))
self.assertIsInstance(result4["b"], list)
self.assertTrue(torch.equal(result4["b"][0].cpu(), tensor))
self.assertTrue(torch.equal(result4["b"][1].cpu(), tensor))
self.assertEqual(result4["c"], 1)
def test_honor_type(self):
with self.assertRaises(TypeError) as cm:
_ = recursively_apply(torch.tensor, (torch.tensor(1), 1), error_on_other_type=True)
self.assertEqual(
str(cm.exception),
"Unsupported types (<class 'int'>) passed to `tensor`. Only nested list/tuple/dicts of objects that are valid for `is_torch_tensor` should be passed.",
)
def test_patch_environment(self):
with patch_environment(aa=1, BB=2):
self.assertEqual(os.environ.get("AA"), "1")
self.assertEqual(os.environ.get("BB"), "2")
self.assertNotIn("AA", os.environ)
self.assertNotIn("BB", os.environ)
def test_can_undo_convert_outputs(self):
model = RegressionModel()
model._original_forward = model.forward
model.forward = convert_outputs_to_fp32(model.forward)
model = extract_model_from_parallel(model, keep_fp32_wrapper=False)
_ = pickle.dumps(model)
@require_cuda
def test_can_undo_fp16_conversion(self):
model = RegressionModel()
model._original_forward = model.forward
model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)
model.forward = convert_outputs_to_fp32(model.forward)
model = extract_model_from_parallel(model, keep_fp32_wrapper=False)
_ = pickle.dumps(model)
@require_cuda
@require_torch_min_version(version="2.0")
def test_dynamo(self):
model = RegressionModel()
model._original_forward = model.forward
model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)
model.forward = convert_outputs_to_fp32(model.forward)
model.forward = torch.compile(model.forward, backend="inductor")
inputs = torch.randn(4, 10).cuda()
_ = model(inputs)
def test_extract_model(self):
model = RegressionModel()
# could also do a test with DistributedDataParallel, but difficult to run on CPU or single GPU
distributed_model = torch.nn.parallel.DataParallel(model)
model_unwrapped = extract_model_from_parallel(distributed_model)
self.assertEqual(model, model_unwrapped)
@require_torch_min_version(version="2.0")
def test_dynamo_extract_model(self):
model = RegressionModel()
compiled_model = torch.compile(model)
# could also do a test with DistributedDataParallel, but difficult to run on CPU or single GPU
distributed_model = torch.nn.parallel.DataParallel(model)
distributed_compiled_model = torch.compile(distributed_model)
compiled_model_unwrapped = extract_model_from_parallel(distributed_compiled_model)
self.assertEqual(compiled_model._orig_mod, compiled_model_unwrapped._orig_mod)
def test_find_device(self):
self.assertEqual(find_device([1, "a", torch.tensor([1, 2, 3])]), torch.device("cpu"))
self.assertEqual(find_device({"a": 1, "b": torch.tensor([1, 2, 3])}), torch.device("cpu"))
self.assertIsNone(find_device([1, "a"]))
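# Illustrative sketch (not part of the original test suite): `send_to_device`
# walks arbitrarily nested containers, as the tests above assert. The batch
# layout below is an assumed example.
def _demo_send_to_device():
    import torch

    from accelerate.utils import send_to_device

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    batch = {"inputs": torch.randn(2, 3), "labels": [torch.tensor([0, 1]), 7]}
    moved = send_to_device(batch, device)
    # Tensors are moved recursively; non-tensor leaves (the int 7) pass through.
    print(moved["inputs"].device, moved["labels"][1])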
| accelerate-wip-main | tests/test_utils.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
def test_cpu(self):
debug_launcher(test_script.main)
def test_ops(self):
debug_launcher(test_ops.main)
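# Illustrative sketch (not part of the original test suite): `debug_launcher`
# spawns a throwaway multi-process CPU group, which is how the two tests above
# exercise distributed code without GPUs. Assumes a fork-capable platform; the
# worker body is an assumed example.
def _demo_debug_launcher():
    from accelerate import Accelerator, debug_launcher

    def worker():
        accelerator = Accelerator()
        print(f"process {accelerator.process_index} of {accelerator.num_processes}")

    debug_launcher(worker, num_processes=2)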
| accelerate-wip-main | tests/test_cpu.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = nn.Linear(4, 5)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
def test_memory_implicit(self):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(batch_size):
nonlocal batch_sizes
batch_sizes.append(batch_size)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
def test_memory_explicit(self):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(batch_size, arg1):
nonlocal batch_sizes
batch_sizes.append(batch_size)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arg1
bs, arg1 = mock_training_loop_function("hello")
self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
self.assertListEqual([bs, arg1], [8, "hello"])
def test_start_zero(self):
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(batch_size):
pass
with self.assertRaises(RuntimeError) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
def test_approach_zero(self):
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(batch_size):
if batch_size > 0:
raise_fake_out_of_memory()
with self.assertRaises(RuntimeError) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
def test_verbose_guard(self):
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(batch_size, arg1, arg2):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(TypeError) as cm:
mock_training_loop_function(128, "hello", "world")
self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
def test_any_other_error(self):
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(batch_size):
raise ValueError("Oops, we had an error!")
with self.assertRaises(ValueError) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!", cm.exception.args[0])
@require_cuda
def test_release_memory(self):
starting_memory = torch.cuda.memory_allocated()
model = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
model = release_memory(model)
self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
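# Illustrative sketch (not part of the original test suite): the decorator
# halves the batch size on every CUDA OOM, exactly the 128 -> 64 -> 32 -> 16
# -> 8 sequence asserted above. The loop body is an assumed placeholder.
def _demo_find_executable_batch_size():
    from accelerate.utils.memory import find_executable_batch_size

    @find_executable_batch_size(starting_batch_size=64)
    def train(batch_size):
        # A real loop would build its dataloaders with `batch_size` here; any
        # CUDA OOM raised inside triggers a retry at half the batch size.
        return batch_size

    final_batch_size = train()
    print(f"training ran with batch_size={final_batch_size}")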
| accelerate-wip-main | tests/test_memory_utils.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
require_torch_min_version,
)
from accelerate.utils import get_launch_prefix, patch_environment
@require_huggingface_suite
@require_torch_min_version(version="1.8.0")
class MetricTester(unittest.TestCase):
def setUp(self):
mod_file = inspect.getfile(accelerate.test_utils)
self.test_file_path = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
)
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
self.test_metrics = test_metrics
@require_cpu
def test_metric_cpu_noop(self):
debug_launcher(self.test_metrics.main, num_processes=1)
@require_cpu
def test_metric_cpu_multi(self):
debug_launcher(self.test_metrics.main)
@require_single_gpu
def test_metric_gpu(self):
self.test_metrics.main()
@require_multi_gpu
def test_metric_gpu_multi(self):
print(f"Found {torch.cuda.device_count()} devices.")
cmd = get_launch_prefix() + [f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd, env=os.environ.copy())
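# Illustrative sketch (not part of the original test suite): how the
# multi-GPU test above builds its launch command. `get_launch_prefix` returns
# the torch distributed launcher invocation; "my_script.py" is an assumed
# placeholder script path.
def _demo_launch_command():
    import torch

    from accelerate.utils import get_launch_prefix

    cmd = get_launch_prefix() + [f"--nproc_per_node={torch.cuda.device_count()}", "my_script.py"]
    print(" ".join(cmd))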
| accelerate-wip-main | tests/test_metrics.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler, get_launch_prefix
@dataclass
class MockClass(KwargsHandler):
a: int = 0
b: bool = False
c: float = 3.0
class DataLoaderTester(unittest.TestCase):
def test_kwargs_handler(self):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs(), {})
self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
@require_cuda
def test_grad_scaler_kwargs(self):
# Non-default values set on the handler are passed through to the GradScaler.
scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
AcceleratorState._reset_state()
accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
print(accelerator.use_fp16)
scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale, 1024.0)
self.assertEqual(scaler._growth_factor, 2.0)
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor, 0.5)
self.assertEqual(scaler._growth_interval, 2000)
self.assertEqual(scaler._enabled, True)
@require_multi_gpu
def test_ddp_kwargs(self):
cmd = get_launch_prefix()
cmd += [f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
model = torch.nn.Linear(100, 200)
model = accelerator.prepare(model)
# Check the values changed in kwargs
error_msg = ""
observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
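# Illustrative sketch (not part of the original test suite): the same
# handler pattern as above, applied to the gradient scaler. Values set on
# the handler override the scaler defaults when `Accelerator` builds it.
# Assumes a CUDA device, since fp16 mixed precision requires one here.
def _demo_grad_scaler_kwargs():
    from accelerate import Accelerator, GradScalerKwargs

    scaler_kwargs = GradScalerKwargs(init_scale=1024, growth_factor=2)
    accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_kwargs])
    print(accelerator.scaler._init_scale)  # 1024.0 instead of the default 65536.0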
| accelerate-wip-main | tests/test_kwargs_handlers.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import partial
import torch
from accelerate import Accelerator, debug_launcher
from accelerate.state import AcceleratorState, GradientState
from accelerate.test_utils import require_cpu, require_huggingface_suite
from accelerate.utils import GradientAccumulationPlugin
def one_cycle_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):
accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
# Optimizer has stepped
scheduler.step()
if step_scheduler_with_optimizer or (num_processes == 1):
assert (
scheduler.scheduler.last_epoch == num_processes
), f"Last Epoch ({scheduler.scheduler.last_epoch}) != Num Processes ({num_processes})"
else:
assert (
scheduler.scheduler.last_epoch != num_processes
), f"Last Epoch ({scheduler.scheduler.last_epoch}) == Num Processes ({num_processes})"
def lambda_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):
accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
# Optimizer has stepped
optimizer._is_overflow = False
scheduler.step()
expected_lr = 1 - (num_processes if (step_scheduler_with_optimizer and not split_batches) else 1) / 10
assert (
scheduler.get_last_lr()[0] == expected_lr
), f"Wrong lr found at first step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}"
# Optimizer has not stepped
optimizer._is_overflow = True
scheduler.step()
if not step_scheduler_with_optimizer:
expected_lr = 1 - 2 / 10
assert (
scheduler.get_last_lr()[0] == expected_lr
), f"Wrong lr found at second step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}"
def accumulation_test(num_processes: int = 2):
"""
With this test, an observed batch size of 64 should result in negligible
differences in the scheduler after going through the correct number of steps.
Tests gradient accumulation over one, two, and four steps.
"""
from transformers import get_linear_schedule_with_warmup
steps = [1, 2, 4]
for num_steps in steps:
plugin = GradientAccumulationPlugin(num_steps=num_steps, adjust_scheduler=num_steps > 1)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=0, num_training_steps=20)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
for i in range(10 * num_steps):
with accelerator.accumulate(model):
optimizer.step()
scheduler.step()
if i == (10 * num_steps - 2):
assert (
scheduler.get_last_lr()[0] != 0
), f"Wrong lr found at second-to-last step, expected non-zero, got {scheduler.get_last_lr()[0]}. num_steps: {num_steps}"
assert (
scheduler.get_last_lr()[0] == 0
), f"Wrong lr found at last step, expected 0, got {scheduler.get_last_lr()[0]}"
GradientState._reset_state()
@require_cpu
class SchedulerTester(unittest.TestCase):
def test_lambda_scheduler_steps_with_optimizer_single_process(self):
debug_launcher(partial(lambda_test, num_processes=1), num_processes=1)
debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1)
def test_one_cycle_scheduler_steps_with_optimizer_single_process(self):
debug_launcher(partial(one_cycle_test, num_processes=1), num_processes=1)
debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1)
def test_lambda_scheduler_not_step_with_optimizer_single_process(self):
debug_launcher(partial(lambda_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)
def test_one_cycle_scheduler_not_step_with_optimizer_single_process(self):
debug_launcher(partial(one_cycle_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)
def test_lambda_scheduler_steps_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(lambda_test)
debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1)
def test_one_cycle_scheduler_steps_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(one_cycle_test)
debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1)
def test_lambda_scheduler_not_step_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(partial(lambda_test, step_scheduler_with_optimizer=False))
def test_one_cycle_scheduler_not_step_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(partial(one_cycle_test, step_scheduler_with_optimizer=False))
@require_huggingface_suite
def test_accumulation(self):
AcceleratorState._reset_state(True)
debug_launcher(partial(accumulation_test, num_processes=1))
debug_launcher(accumulation_test)
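# Illustrative sketch (not part of the original test suite): the stepping
# rule `lambda_test` checks. With `step_scheduler_with_optimizer=True` (the
# default), a prepared scheduler advances `num_processes` steps per call so
# the learning-rate schedule stays aligned across processes.
def _demo_prepared_scheduler():
    import torch

    from accelerate import Accelerator

    accelerator = Accelerator(step_scheduler_with_optimizer=True)
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)
    model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
    scheduler.step()
    print(scheduler.get_last_lr())  # [0.9] on a single process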
| accelerate-wip-main | tests/test_scheduler.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import unittest
from collections import OrderedDict
import torch
import torch.nn as nn
from accelerate import init_empty_weights
from accelerate.test_utils import require_cuda, require_huggingface_suite, require_multi_gpu, require_safetensors
from accelerate.test_utils.testing import require_torch_min_version
from accelerate.utils.modeling import (
check_device_map,
clean_device_map,
compute_module_sizes,
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
)
class ModelForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = nn.Linear(4, 5)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x)))
def sequential_model(num_layers):
layers = OrderedDict([(f"linear{i}", nn.Linear(1000, 1000)) for i in range(1, num_layers + 1)])
return nn.Sequential(layers)
@require_torch_min_version(version="1.9.0")
class ModelingUtilsTester(unittest.TestCase):
def check_set_module_tensor_for_device(self, model, device1, device2):
self.assertEqual(model.linear1.weight.device, torch.device(device1))
with self.subTest("Access by submodule and direct name for a parameter"):
set_module_tensor_to_device(model.linear1, "weight", device2)
self.assertEqual(model.linear1.weight.device, torch.device(device2))
if torch.device(device2) == torch.device("meta"):
with self.assertRaises(ValueError):
# We need a `value` to set the weight back on device1
set_module_tensor_to_device(model.linear1, "weight", device1)
set_module_tensor_to_device(model.linear1, "weight", device1, value=torch.randn(4, 3))
else:
set_module_tensor_to_device(model.linear1, "weight", device1)
self.assertEqual(model.linear1.weight.device, torch.device(device1))
with self.subTest("Access by module and full name for a parameter"):
set_module_tensor_to_device(model, "linear1.weight", device2)
self.assertEqual(model.linear1.weight.device, torch.device(device2))
if torch.device(device2) == torch.device("meta"):
with self.assertRaises(ValueError):
# We need a `value` to set the weight back on device1
set_module_tensor_to_device(model, "linear1.weight", device1)
set_module_tensor_to_device(model, "linear1.weight", device1, value=torch.randn(4, 3))
else:
set_module_tensor_to_device(model, "linear1.weight", device1)
self.assertEqual(model.linear1.weight.device, torch.device(device1))
self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1))
with self.subTest("Access by submodule and direct name for a buffer"):
set_module_tensor_to_device(model.batchnorm, "running_mean", device2)
self.assertEqual(model.batchnorm.running_mean.device, torch.device(device2))
if torch.device(device2) == torch.device("meta"):
with self.assertRaises(ValueError):
# We need a `value` to set the weight back on device1
set_module_tensor_to_device(model.batchnorm, "running_mean", device1)
set_module_tensor_to_device(model.batchnorm, "running_mean", device1, value=torch.randn(4))
else:
set_module_tensor_to_device(model.batchnorm, "running_mean", device1)
self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1))
with self.subTest("Access by module and full name for a parameter"):
set_module_tensor_to_device(model, "batchnorm.running_mean", device2)
self.assertEqual(model.batchnorm.running_mean.device, torch.device(device2))
if torch.device(device2) == torch.device("meta"):
with self.assertRaises(ValueError):
# We need a `value` to set the weight back on CPU
set_module_tensor_to_device(model, "batchnorm.running_mean", device1)
set_module_tensor_to_device(model, "batchnorm.running_mean", device1, value=torch.randn(4))
else:
set_module_tensor_to_device(model, "batchnorm.running_mean", device1)
self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1))
def test_set_module_tensor_to_meta_and_cpu(self):
model = ModelForTest()
self.check_set_module_tensor_for_device(model, "cpu", "meta")
@require_cuda
def test_set_module_tensor_to_cpu_and_gpu(self):
model = ModelForTest()
self.check_set_module_tensor_for_device(model, "cpu", 0)
@require_cuda
def test_set_module_tensor_to_meta_and_gpu(self):
model = ModelForTest().to(0)
self.check_set_module_tensor_for_device(model, 0, "meta")
@require_multi_gpu
def test_set_module_tensor_between_gpus(self):
model = ModelForTest().to(0)
self.check_set_module_tensor_for_device(model, 0, 1)
def test_set_module_tensor_sets_dtype(self):
model = ModelForTest()
set_module_tensor_to_device(model, "linear1.weight", "cpu", value=model.linear1.weight, dtype=torch.float16)
self.assertEqual(model.linear1.weight.dtype, torch.float16)
def test_named_tensors(self):
model = nn.BatchNorm1d(4)
named_tensors = named_module_tensors(model)
self.assertListEqual(
[name for name, _ in named_tensors],
["weight", "bias", "running_mean", "running_var", "num_batches_tracked"],
)
named_tensors = named_module_tensors(model, include_buffers=False)
self.assertListEqual([name for name, _ in named_tensors], ["weight", "bias"])
model = ModelForTest()
named_tensors = named_module_tensors(model)
self.assertListEqual([name for name, _ in named_tensors], [])
named_tensors = named_module_tensors(model, recurse=True)
self.assertListEqual(
[name for name, _ in named_tensors],
[
"linear1.weight",
"linear1.bias",
"batchnorm.weight",
"batchnorm.bias",
"linear2.weight",
"linear2.bias",
"batchnorm.running_mean",
"batchnorm.running_var",
"batchnorm.num_batches_tracked",
],
)
named_tensors = named_module_tensors(model, include_buffers=False, recurse=True)
self.assertListEqual(
[name for name, _ in named_tensors],
["linear1.weight", "linear1.bias", "batchnorm.weight", "batchnorm.bias", "linear2.weight", "linear2.bias"],
)
def test_find_tied_parameters(self):
model = sequential_model(4)
self.assertListEqual(find_tied_parameters(model), [])
model.linear2.weight = model.linear1.weight
self.assertListEqual(find_tied_parameters(model), [["linear1.weight", "linear2.weight"]])
model.linear4.weight = model.linear1.weight
self.assertListEqual(find_tied_parameters(model), [["linear1.weight", "linear2.weight", "linear4.weight"]])
model = sequential_model(5)
model.linear1.weight = model.linear4.weight
model.linear2.weight = model.linear3.weight
model.linear5.weight = model.linear2.weight
tied_params = sorted(find_tied_parameters(model), key=lambda x: len(x))
self.assertListEqual(
tied_params, [["linear1.weight", "linear4.weight"], ["linear2.weight", "linear3.weight", "linear5.weight"]]
)
model = nn.Sequential(OrderedDict([("block1", sequential_model(4)), ("block2", sequential_model(4))]))
model.block1.linear1.weight = model.block2.linear1.weight
self.assertListEqual(find_tied_parameters(model), [["block1.linear1.weight", "block2.linear1.weight"]])
def test_retie_parameters(self):
model = sequential_model(2)
retie_parameters(model, [["linear1.weight", "linear2.weight"]])
self.assertIs(model.linear1.weight, model.linear2.weight)
model = sequential_model(3)
retie_parameters(model, [["linear1.weight", "linear2.weight", "linear3.weight"]])
self.assertIs(model.linear1.weight, model.linear2.weight)
self.assertIs(model.linear1.weight, model.linear3.weight)
model = sequential_model(5)
retie_parameters(
model, [["linear1.weight", "linear4.weight"], ["linear2.weight", "linear3.weight", "linear5.weight"]]
)
self.assertIs(model.linear1.weight, model.linear4.weight)
self.assertIs(model.linear2.weight, model.linear3.weight)
self.assertIs(model.linear2.weight, model.linear5.weight)
model = nn.Sequential(OrderedDict([("block1", sequential_model(4)), ("block2", sequential_model(4))]))
retie_parameters(model, [["block1.linear1.weight", "block2.linear1.weight"]])
self.assertIs(model.block1.linear1.weight, model.block2.linear1.weight)
def test_compute_module_sizes(self):
model = ModelForTest()
expected_sizes = {"": 236, "linear1": 64, "linear1.weight": 48, "linear1.bias": 16}
expected_sizes.update({"linear2": 100, "linear2.weight": 80, "linear2.bias": 20})
expected_sizes.update({"batchnorm": 72, "batchnorm.weight": 16, "batchnorm.bias": 16})
expected_sizes.update(
{"batchnorm.running_mean": 16, "batchnorm.running_var": 16, "batchnorm.num_batches_tracked": 8}
)
module_sizes = compute_module_sizes(model)
self.assertDictEqual(module_sizes, expected_sizes)
model.half()
expected_sizes = {k: s // 2 for k, s in expected_sizes.items()}
# This one is not converted to half.
expected_sizes["batchnorm.num_batches_tracked"] = 8
# This impacts batchnorm and total
expected_sizes["batchnorm"] += 4
expected_sizes[""] += 4
module_sizes = compute_module_sizes(model)
self.assertDictEqual(module_sizes, expected_sizes)
def test_check_device_map(self):
model = ModelForTest()
check_device_map(model, {"": 0})
with self.assertRaises(ValueError):
check_device_map(model, {"linear1": 0, "linear2": 1})
check_device_map(model, {"linear1": 0, "linear2": 1, "batchnorm": 1})
def shard_test_model(self, model, tmp_dir):
module_index = {
"linear1": "checkpoint_part1.bin",
"batchnorm": "checkpoint_part2.bin",
"linear2": "checkpoint_part3.bin",
}
index = {}
for name, _ in model.state_dict().items():
module = name.split(".")[0]
index[name] = module_index[module]
with open(os.path.join(tmp_dir, "weight_map.index.json"), "w") as f:
json.dump(index, f)
for module, fname in module_index.items():
state_dict = {k: v for k, v in model.state_dict().items() if k.startswith(module)}
full_fname = os.path.join(tmp_dir, fname)
torch.save(state_dict, full_fname)
def test_load_checkpoint_in_model(self):
# Check with whole checkpoint
model = ModelForTest()
with tempfile.TemporaryDirectory() as tmp_dir:
fname = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), fname)
load_checkpoint_in_model(model, fname)
# Check with sharded index
model = ModelForTest()
with tempfile.TemporaryDirectory() as tmp_dir:
self.shard_test_model(model, tmp_dir)
index_file = os.path.join(tmp_dir, "weight_map.index.json")
load_checkpoint_in_model(model, index_file)
# Check with sharded checkpoint
model = ModelForTest()
with tempfile.TemporaryDirectory() as tmp_dir:
self.shard_test_model(model, tmp_dir)
load_checkpoint_in_model(model, tmp_dir)
@require_cuda
def test_load_checkpoint_in_model_one_gpu(self):
device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": "cpu"}
# Check with whole checkpoint
model = ModelForTest()
with tempfile.TemporaryDirectory() as tmp_dir:
fname = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), fname)
load_checkpoint_in_model(model, fname, device_map=device_map)
self.assertEqual(model.linear1.weight.device, torch.device(0))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
# Check with sharded index
model = ModelForTest()
with tempfile.TemporaryDirectory() as tmp_dir:
self.shard_test_model(model, tmp_dir)
index_file = os.path.join(tmp_dir, "weight_map.index.json")
load_checkpoint_in_model(model, index_file, device_map=device_map)
self.assertEqual(model.linear1.weight.device, torch.device(0))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
# Check with sharded checkpoint folder
model = ModelForTest()
with tempfile.TemporaryDirectory() as tmp_dir:
self.shard_test_model(model, tmp_dir)
load_checkpoint_in_model(model, tmp_dir, device_map=device_map)
self.assertEqual(model.linear1.weight.device, torch.device(0))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
@require_cuda
def test_load_checkpoint_in_model_disk_offload(self):
device_map = {"linear1": "cpu", "batchnorm": "disk", "linear2": "cpu"}
model = ModelForTest()
with tempfile.TemporaryDirectory() as tmp_dir:
fname = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), fname)
load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir)
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
# Buffers are not offloaded by default
self.assertEqual(model.batchnorm.running_mean.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
model = ModelForTest()
with tempfile.TemporaryDirectory() as tmp_dir:
fname = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), fname)
load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir, offload_buffers=True)
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
@require_multi_gpu
def test_load_checkpoint_in_model_two_gpu(self):
device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": 1}
# Check with whole checkpoint
model = ModelForTest()
with tempfile.TemporaryDirectory() as tmp_dir:
fname = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), fname)
load_checkpoint_in_model(model, fname, device_map=device_map)
self.assertEqual(model.linear1.weight.device, torch.device(0))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device(1))
# Check with sharded index
model = ModelForTest()
with tempfile.TemporaryDirectory() as tmp_dir:
self.shard_test_model(model, tmp_dir)
index_file = os.path.join(tmp_dir, "weight_map.index.json")
load_checkpoint_in_model(model, index_file, device_map=device_map)
self.assertEqual(model.linear1.weight.device, torch.device(0))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device(1))
# Check with sharded checkpoint
model = ModelForTest()
with tempfile.TemporaryDirectory() as tmp_dir:
self.shard_test_model(model, tmp_dir)
load_checkpoint_in_model(model, tmp_dir, device_map=device_map)
self.assertEqual(model.linear1.weight.device, torch.device(0))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device(1))
def test_clean_device_map(self):
# Regroup everything if all is on the same device
self.assertDictEqual(clean_device_map({"a": 0, "b": 0, "c": 0}), {"": 0})
# Regroups children of level 1 on the same device
self.assertDictEqual(
clean_device_map({"a.x": 0, "a.y": 0, "b.x": 1, "b.y": 1, "c": 1}), {"a": 0, "b": 1, "c": 1}
)
# Regroups children of level 2 on the same device
self.assertDictEqual(
clean_device_map({"a.x": 0, "a.y": 0, "b.x.0": 1, "b.x.1": 1, "b.y.0": 2, "b.y.1": 2, "c": 2}),
{"a": 0, "b.x": 1, "b.y": 2, "c": 2},
)
def test_infer_auto_device_map(self):
model = ModelForTest()
# model has size 236: linear1 64, batchnorm 72, linear2 100
device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200})
# only linear1 fits on device 0 as we keep memory available for the maximum layer in case of offload
self.assertDictEqual(device_map, {"linear1": 0, "batchnorm": 1, "linear2": 1})
device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 172, 2: 200})
# On device 1, we don't care about keeping size available for the max layer, so even if there is just the
# size available for batchnorm + linear2, they fit here.
self.assertDictEqual(device_map, {"linear1": 0, "batchnorm": 1, "linear2": 1})
model.linear1.weight = model.linear2.weight
device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200})
# By tying weights, the whole model fits on device 0
self.assertDictEqual(device_map, {"": 0})
# When splitting a bigger model, the split is done at the layer level
model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest())
device_map = infer_auto_device_map(model, max_memory={0: 500, 1: 500})
self.assertDictEqual(device_map, {"0": 0, "1.linear1": 0, "1.batchnorm": 0, "1.linear2": 1, "2": 1})
# With no_split_module_classes, it's done at that module level
model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest())
device_map = infer_auto_device_map(
model, max_memory={0: 500, 1: 500}, no_split_module_classes=["ModelForTest"]
)
self.assertDictEqual(device_map, {"0": 0, "1": 1, "2": 1})
def test_infer_auto_device_map_with_tied_weights(self):
model = nn.Sequential(
OrderedDict([("layer1", ModelForTest()), ("layer2", ModelForTest()), ("layer3", ModelForTest())])
)
model.layer3.linear2.weight = model.layer1.linear2.weight
device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500})
expected = {"layer1": 0, "layer3.linear2": 0, "layer2": 1, "layer3.linear1": 1, "layer3.batchnorm": 1}
self.assertDictEqual(device_map, expected)
# With three weights tied together
model.layer2.linear2.weight = model.layer1.linear2.weight
device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500})
expected = {
"layer1": 0,
"layer2.linear2": 0,
"layer3.linear2": 0,
"layer2.linear1": 1,
"layer2.batchnorm": 1,
"layer3.linear1": 1,
"layer3.batchnorm": 1,
}
self.assertDictEqual(device_map, expected)
# With two groups of weights tied together
model.layer2.linear1.weight = model.layer1.linear1.weight
device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500})
expected = {
"layer1": 0,
"layer2.linear1": 0,
"layer2.linear2": 0,
"layer3.linear2": 0,
"layer2.batchnorm": 1,
"layer3.linear1": 1,
"layer3.batchnorm": 1,
}
self.assertDictEqual(device_map, expected)
@require_huggingface_suite
def test_infer_auto_device_map_on_t0pp(self):
from transformers import AutoConfig, AutoModelForSeq2SeqLM
config = AutoConfig.from_pretrained("bigscience/T0pp")
with init_empty_weights():
model = AutoModelForSeq2SeqLM.from_config(config)
model.tie_weights()
special_dtypes = {n: torch.float32 for n, _ in model.named_parameters() if "wo" in n}
max_memory = {0: 10**10, 1: 10**10, "cpu": 10**10}
device_map = infer_auto_device_map(
model,
no_split_module_classes=["T5Block"],
dtype=torch.float16,
max_memory=max_memory,
special_dtypes=special_dtypes,
)
# The 3 tied weights should all be on device 0
self.assertEqual(device_map["shared"], 0)
self.assertEqual(device_map["encoder.embed_tokens"], 0)
self.assertEqual(device_map["decoder.embed_tokens"], 0)
@require_cuda
def test_get_balanced_memory(self):
model = ModelForTest()
# model has size 236: linear1 64, batchnorm 72, linear2 100
max_memory = get_balanced_memory(model, max_memory={0: 200, 1: 200})
self.assertDictEqual({0: 200, 1: 200}, max_memory)
max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 300})
self.assertDictEqual({0: 215, 1: 300}, max_memory)
# Last device always gets max memory to give more buffer and avoid accidental CPU offload
max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500})
self.assertDictEqual({0: 215, 1: 500}, max_memory)
# Last device always gets max memory to give more buffer, even if CPU is provided
max_memory = get_balanced_memory(model, max_memory={0: 300, "cpu": 1000})
self.assertDictEqual({0: 300, "cpu": 1000}, max_memory)
# A device whose max memory is set to 0 is excluded from the balancing.
max_memory = get_balanced_memory(model, max_memory={0: 0, 1: 300, 2: 300})
self.assertDictEqual({0: 0, 1: 215, 2: 300}, max_memory)
@require_cuda
@require_safetensors
def test_load_state_dict(self):
from safetensors.torch import save_file
state_dict = {k: torch.randn(4, 5) for k in ["a", "b", "c"]}
device_maps = [{"a": "cpu", "b": 0, "c": "disk"}, {"a": 0, "b": 0, "c": "disk"}, {"a": 0, "b": 0, "c": 0}]
for device_map in device_maps:
with tempfile.TemporaryDirectory() as tmp_dir:
checkpoint_file = os.path.join(tmp_dir, "model.safetensors")
save_file(state_dict, checkpoint_file, metadata={"format": "pt"})
loaded_state_dict = load_state_dict(checkpoint_file, device_map=device_map)
for param, device in device_map.items():
device = device if device != "disk" else "cpu"
self.assertEqual(loaded_state_dict[param].device, torch.device(device))
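# Illustrative sketch (not part of the original test suite): the big-model
# loading pipeline these utilities compose. `checkpoint_path` is a
# placeholder for a state dict file or a sharded folder like the ones
# `shard_test_model` writes; device 0 in `max_memory` assumes a CUDA device.
def _demo_big_model_loading(checkpoint_path):
    from accelerate import init_empty_weights
    from accelerate.utils.modeling import infer_auto_device_map, load_checkpoint_in_model

    with init_empty_weights():
        model = ModelForTest()  # parameters live on the meta device, no RAM used
    device_map = infer_auto_device_map(model, max_memory={0: 200, "cpu": 1000})
    load_checkpoint_in_model(model, checkpoint_path, device_map=device_map)
    return model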
| accelerate-wip-main | tests/test_modeling_utils.py |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
@require_cuda
def test_accelerator_can_be_reinstantiated(self):
_ = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(ValueError):
_ = Accelerator(cpu=True)
def test_mutable_states(self):
accelerator = Accelerator()
state = GradientState()
assert state.num_steps == 1
accelerator.gradient_accumulation_steps = 4
assert state.num_steps == 4
assert state.sync_gradients is True
accelerator.sync_gradients = False
assert state.sync_gradients is False
GradientState._reset_state()
def test_prepared_objects_are_referenced(self):
accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
(
prepared_model,
prepared_optimizer,
prepared_scheduler,
prepared_train_dl,
prepared_valid_dl,
) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
self.assertTrue(prepared_model in accelerator._models)
self.assertTrue(prepared_optimizer in accelerator._optimizers)
self.assertTrue(prepared_scheduler in accelerator._schedulers)
self.assertTrue(prepared_train_dl in accelerator._dataloaders)
self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
def test_free_memory_dereferences_prepared_components(self):
accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
accelerator.free_memory()
self.assertTrue(len(accelerator._models) == 0)
self.assertTrue(len(accelerator._optimizers) == 0)
self.assertTrue(len(accelerator._schedulers) == 0)
self.assertTrue(len(accelerator._dataloaders) == 0)
def test_env_var_device(self):
"""Tests that setting the torch device with ACCELERATE_TORCH_DEVICE overrides default device."""
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*args, **kwargs):
pass
with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
accelerator = Accelerator()
self.assertEqual(str(accelerator.state.device), "cuda:64")
def test_save_load_model(self):
accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
model_signature = get_signature(model)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(tmpdirname)
# make sure random weights don't match
load_random_weights(model)
self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
# make sure loaded weights match
accelerator.load_state(tmpdirname)
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
def test_save_load_model_with_hooks(self):
accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
model_signature = get_signature(model)
# saving hook
def save_config(models, weights, output_dir):
config = {"class_name": models[0].__class__.__name__}
with open(os.path.join(output_dir, "data.json"), "w") as f:
json.dump(config, f)
# loading hook
def load_config(models, input_dir):
with open(os.path.join(input_dir, "data.json"), "r") as f:
config = json.load(f)
models[0].class_name = config["class_name"]
save_hook = accelerator.register_save_state_pre_hook(save_config)
load_hook = accelerator.register_load_state_pre_hook(load_config)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(tmpdirname)
# make sure random weights don't match with hooks
load_random_weights(model)
self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
# random class name to verify correct one is loaded
model.class_name = "random"
# make sure loaded weights match with hooks
accelerator.load_state(tmpdirname)
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(tmpdirname)
# make sure random weights don't match with hooks removed
load_random_weights(model)
self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
# random class name to verify correct one is loaded
model.class_name = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(tmpdirname)
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__)
@slow
def test_accelerator_bnb(self):
"""Tests that the accelerator can be used with the BNB library."""
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m",
load_in_8bit=True,
device_map={"": 0},
)
accelerator = Accelerator()
# This should work
model = accelerator.prepare(model)
@slow
def test_accelerator_bnb_cpu_error(self):
"""Tests that the accelerator can be used with the BNB library. This should fail as we are trying to load a model
that is loaded between cpu and gpu"""
from transformers import AutoModelForCausalLM
accelerator = Accelerator()
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m",
)
device_map = infer_auto_device_map(model)
device_map["lm_head"] = "cpu"
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
)
# This should not work and get value error
with self.assertRaises(ValueError):
model = accelerator.prepare(model)
@slow
@require_multi_gpu
def test_accelerator_bnb_multi_gpu(self):
"""Tests that the accelerator can be used with the BNB library."""
from transformers import AutoModelForCausalLM
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m",
)
device_map = infer_auto_device_map(model)
device_map["lm_head"] = 1
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m",
load_in_8bit=True,
device_map=device_map,
)
accelerator = Accelerator()
# This should not work and get value error
with self.assertRaises(ValueError):
_ = accelerator.prepare(model)
@require_cuda
def test_accelerator_cpu_flag_prepare(self):
model = torch.nn.Linear(10, 10)
sgd = torch.optim.SGD(model.parameters(), lr=0.01)
accelerator = Accelerator(cpu=True)
_ = accelerator.prepare(sgd)
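# Illustrative sketch (not part of the original test suite): the
# checkpointing round trip tested above, reduced to its core calls.
# `tmpdir` is an assumed writable directory.
def _demo_save_and_resume(tmpdir):
    import torch

    from accelerate import Accelerator

    accelerator = Accelerator()
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    model, optimizer = accelerator.prepare(model, optimizer)
    accelerator.save_state(tmpdir)  # writes model, optimizer and RNG states
    accelerator.load_state(tmpdir)  # restores them in place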
| accelerate-wip-main | tests/test_accelerator.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import logging
import os
import re
import subprocess
import tempfile
import unittest
import zipfile
from pathlib import Path
from typing import Optional
from unittest import mock
# The offline logs written by each tracker are parsed directly in the tests below
from accelerate import Accelerator
from accelerate.test_utils.testing import (
MockingTestCase,
TempDirTestCase,
require_comet_ml,
require_tensorboard,
require_wandb,
skip,
)
from accelerate.tracking import CometMLTracker, GeneralTracker
from accelerate.utils import is_comet_ml_available
if is_comet_ml_available():
from comet_ml import OfflineExperiment
logger = logging.getLogger(__name__)
@require_tensorboard
class TensorBoardTrackingTest(unittest.TestCase):
def test_init_trackers(self):
project_name = "test_project_with_config"
with tempfile.TemporaryDirectory() as dirpath:
accelerator = Accelerator(log_with="tensorboard", logging_dir=dirpath)
config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
accelerator.init_trackers(project_name, config)
accelerator.end_training()
for child in Path(f"{dirpath}/{project_name}").glob("*/**"):
log = list(filter(lambda x: x.is_file(), child.iterdir()))[0]
self.assertNotEqual(str(log), "")
def test_log(self):
project_name = "test_project_with_log"
with tempfile.TemporaryDirectory() as dirpath:
accelerator = Accelerator(log_with="tensorboard", project_dir=dirpath)
accelerator.init_trackers(project_name)
values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
accelerator.log(values, step=0)
accelerator.end_training()
# Logged values are stored in the outermost tfevents file and can be read in as a TFRecord
# Names are randomly generated each time
log = list(filter(lambda x: x.is_file(), Path(f"{dirpath}/{project_name}").iterdir()))[0]
self.assertNotEqual(str(log), "")
def test_project_dir(self):
with self.assertRaisesRegex(ValueError, "Logging with `tensorboard` requires a `logging_dir`"):
_ = Accelerator(log_with="tensorboard")
with tempfile.TemporaryDirectory() as dirpath:
_ = Accelerator(log_with="tensorboard", project_dir=dirpath)
with tempfile.TemporaryDirectory() as dirpath:
_ = Accelerator(log_with="tensorboard", logging_dir=dirpath)
@require_wandb
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
class WandBTrackingTest(TempDirTestCase, MockingTestCase):
def setUp(self):
super().setUp()
# wandb lets us override where logs are stored via the WANDB_DIR env var
self.add_mocks(mock.patch.dict(os.environ, {"WANDB_DIR": self.tmpdir}))
@staticmethod
def parse_log(log: str, section: str, record: bool = True):
"""
Parses wandb log for `section` and returns a dictionary of
all items in that section. Section names are taken from the
output of `wandb sync --view --verbose`, matching items that
start with "Record" in that result.
"""
# Big thanks to the W&B team for helping us parse their logs
pattern = rf"{section} ([\S\s]*?)\n\n"
if record:
pattern = rf"Record: {pattern}"
cleaned_record = re.findall(pattern, log)[0]
# A config
if section == "config" or section == "history":
cleaned_record = re.findall(r'"([a-zA-Z0-9_.,]+)', cleaned_record)
return {key: val for key, val in zip(cleaned_record[0::2], cleaned_record[1::2])}
# Everything else
else:
return dict(re.findall(r'(\w+): "([^\s]+)"', cleaned_record))
@skip
def test_wandb(self):
project_name = "test_project_with_config"
accelerator = Accelerator(log_with="wandb")
config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
kwargs = {"wandb": {"tags": ["my_tag"]}}
accelerator.init_trackers(project_name, config, kwargs)
values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
accelerator.log(values, step=0)
accelerator.end_training()
# The latest offline log is stored at wandb/latest-run/*.wandb
for child in Path(f"{self.tmpdir}/wandb/latest-run").glob("*"):
if child.is_file() and child.suffix == ".wandb":
content = subprocess.check_output(
["wandb", "sync", "--view", "--verbose", str(child)], env=os.environ.copy()
).decode("utf8", "ignore")
break
# Check HPS through careful parsing and cleaning
logged_items = self.parse_log(content, "config")
self.assertEqual(logged_items["num_iterations"], "12")
self.assertEqual(logged_items["learning_rate"], "0.01")
self.assertEqual(logged_items["some_boolean"], "false")
self.assertEqual(logged_items["some_string"], "some_value")
self.assertEqual(logged_items["some_string"], "some_value")
# Run tags
logged_items = self.parse_log(content, "run", False)
self.assertEqual(logged_items["tags"], "my_tag")
# Actual logging
logged_items = self.parse_log(content, "history")
self.assertEqual(logged_items["total_loss"], "0.1")
self.assertEqual(logged_items["iteration"], "1")
self.assertEqual(logged_items["my_text"], "some_value")
self.assertEqual(logged_items["_step"], "0")
# Comet has a special `OfflineExperiment` we need to use for testing
def offline_init(self, run_name: str, tmpdir: str):
self.run_name = run_name
self.writer = OfflineExperiment(project_name=run_name, offline_directory=tmpdir)
logger.info(f"Initialized offline CometML project {self.run_name}")
logger.info("Make sure to log any initial configurations with `self.store_init_configuration` before training!")
@require_comet_ml
@mock.patch.object(CometMLTracker, "__init__", offline_init)
class CometMLTest(unittest.TestCase):
@staticmethod
def get_value_from_key(log_list, key: str, is_param: bool = False):
"Extracts `key` from Comet `log`"
for log in log_list:
j = json.loads(log)["payload"]
if is_param and "param" in j.keys():
if j["param"]["paramName"] == key:
return j["param"]["paramValue"]
if "log_other" in j.keys():
if j["log_other"]["key"] == key:
return j["log_other"]["val"]
if "metric" in j.keys():
if j["metric"]["metricName"] == key:
return j["metric"]["metricValue"]
def test_init_trackers(self):
with tempfile.TemporaryDirectory() as d:
tracker = CometMLTracker("test_project_with_config", d)
accelerator = Accelerator(log_with=tracker)
config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
accelerator.init_trackers(None, config)
accelerator.end_training()
log = os.listdir(d)[0] # Comet is nice, it's just a zip file here
# We parse the raw logs
p = os.path.join(d, log)
archive = zipfile.ZipFile(p, "r")
log = archive.open("messages.json").read().decode("utf-8")
list_of_json = log.split("\n")[:-1]
self.assertEqual(self.get_value_from_key(list_of_json, "num_iterations", True), 12)
self.assertEqual(self.get_value_from_key(list_of_json, "learning_rate", True), 0.01)
self.assertEqual(self.get_value_from_key(list_of_json, "some_boolean", True), False)
self.assertEqual(self.get_value_from_key(list_of_json, "some_string", True), "some_value")
def test_log(self):
with tempfile.TemporaryDirectory() as d:
tracker = CometMLTracker("test_project_with_config", d)
accelerator = Accelerator(log_with=tracker)
accelerator.init_trackers(None)
values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
accelerator.log(values, step=0)
accelerator.end_training()
log = os.listdir(d)[0] # Comet is nice, it's just a zip file here
# We parse the raw logs
p = os.path.join(d, log)
archive = zipfile.ZipFile(p, "r")
log = archive.open("messages.json").read().decode("utf-8")
list_of_json = log.split("\n")[:-1]
self.assertEqual(self.get_value_from_key(list_of_json, "curr_step", True), 0)
self.assertEqual(self.get_value_from_key(list_of_json, "total_loss"), 0.1)
self.assertEqual(self.get_value_from_key(list_of_json, "iteration"), 1)
self.assertEqual(self.get_value_from_key(list_of_json, "my_text"), "some_value")
class MyCustomTracker(GeneralTracker):
"Basic tracker that writes to a csv for testing"
_col_names = [
"total_loss",
"iteration",
"my_text",
"learning_rate",
"num_iterations",
"some_boolean",
"some_string",
]
name = "my_custom_tracker"
requires_logging_directory = False
def __init__(self, dir: str):
self.f = open(f"{dir}/log.csv", "w+")
self.writer = csv.DictWriter(self.f, fieldnames=self._col_names)
self.writer.writeheader()
@property
def tracker(self):
return self.writer
def store_init_configuration(self, values: dict):
logger.info("Call init")
self.writer.writerow(values)
def log(self, values: dict, step: Optional[int]):
logger.info("Call log")
self.writer.writerow(values)
def finish(self):
self.f.close()
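# A minimal usage sketch for the tracker above (assumes `log_dir` is an existing,
# writable directory; mirrors what `CustomTrackerTestCase` does below):
#
#     tracker = MyCustomTracker(log_dir)
#     accelerator = Accelerator(log_with=tracker)
#     accelerator.init_trackers("Some name", config={"learning_rate": 1e-2})
#     accelerator.log({"total_loss": 0.1}, step=0)
#     accelerator.end_training()  # calls `tracker.finish()` and closes the csv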
class CustomTrackerTestCase(unittest.TestCase):
def test_init_trackers(self):
with tempfile.TemporaryDirectory() as d:
tracker = MyCustomTracker(d)
accelerator = Accelerator(log_with=tracker)
config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
accelerator.init_trackers("Some name", config)
accelerator.end_training()
with open(f"{d}/log.csv", "r") as f:
data = csv.DictReader(f)
data = next(data)
truth = {
"total_loss": "",
"iteration": "",
"my_text": "",
"learning_rate": "0.01",
"num_iterations": "12",
"some_boolean": "False",
"some_string": "some_value",
}
self.assertDictEqual(data, truth)
def test_log(self):
with tempfile.TemporaryDirectory() as d:
tracker = MyCustomTracker(d)
accelerator = Accelerator(log_with=tracker)
accelerator.init_trackers("Some name")
values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
accelerator.log(values, step=0)
accelerator.end_training()
with open(f"{d}/log.csv", "r") as f:
data = csv.DictReader(f)
data = next(data)
truth = {
"total_loss": "0.1",
"iteration": "1",
"my_text": "some_value",
"learning_rate": "",
"num_iterations": "",
"some_boolean": "",
"some_string": "",
}
self.assertDictEqual(data, truth)
| accelerate-wip-main | tests/test_tracking.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu, require_torch_min_version
class ModelForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = nn.Linear(4, 5)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
def pre_forward(self, module, *args, **kwargs):
return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
def post_forward(self, module, output):
return output + 1
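# Conceptually, `add_hook_to_module` wraps a module's forward roughly like this
# (a simplified sketch, not the exact implementation in `accelerate.hooks`):
#
#     def wrapped_forward(*args, **kwargs):
#         args, kwargs = hook.pre_forward(module, *args, **kwargs)
#         output = module._old_forward(*args, **kwargs)
#         return hook.post_forward(module, output)
#
# which is why the tests below can check for `_old_forward` and for the unchanged
# name and signature of `forward` after attaching and removing hooks.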
@require_torch_min_version(version="1.9.0")
class HooksModelTester(unittest.TestCase):
def test_add_and_remove_hooks(self):
test_model = ModelForTest()
test_hook = ModelHook()
add_hook_to_module(test_model, test_hook)
self.assertEqual(test_model._hf_hook, test_hook)
self.assertTrue(hasattr(test_model, "_old_forward"))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__, "forward")
self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
remove_hook_from_module(test_model)
self.assertFalse(hasattr(test_model, "_hf_hook"))
self.assertFalse(hasattr(test_model, "_old_forward"))
def test_append_and_remove_hooks(self):
test_model = ModelForTest()
test_hook = ModelHook()
add_hook_to_module(test_model, test_hook)
add_hook_to_module(test_model, test_hook, append=True)
self.assertIsInstance(test_model._hf_hook, SequentialHook)
self.assertEqual(len(test_model._hf_hook.hooks), 2)
self.assertTrue(hasattr(test_model, "_old_forward"))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__, "forward")
self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
remove_hook_from_module(test_model)
self.assertFalse(hasattr(test_model, "_hf_hook"))
self.assertFalse(hasattr(test_model, "_old_forward"))
def test_pre_forward_hook_is_executed(self):
test_model = ModelForTest()
x = torch.randn(2, 3)
expected = test_model(x + 1)
expected2 = test_model(x + 2)
test_hook = PreForwardHook()
add_hook_to_module(test_model, test_hook)
output1 = test_model(x)
self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
# Attaching a hook to a model when it already has one replaces, does not chain
test_hook = PreForwardHook()
add_hook_to_module(test_model, test_hook)
output1 = test_model(x)
self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
# You need to use the sequential hook to chain two or more hooks
test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
add_hook_to_module(test_model, test_hook)
output2 = test_model(x)
assert torch.allclose(output2, expected2, atol=1e-5)
def test_post_forward_hook_is_executed(self):
test_model = ModelForTest()
x = torch.randn(2, 3)
output = test_model(x)
test_hook = PostForwardHook()
add_hook_to_module(test_model, test_hook)
output1 = test_model(x)
self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
# Attaching a hook to a model when it already has one replaces, does not chain
test_hook = PostForwardHook()
add_hook_to_module(test_model, test_hook)
output1 = test_model(x)
self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
# You need to use the sequential hook to chain two or more hooks
test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
add_hook_to_module(test_model, test_hook)
output2 = test_model(x)
assert torch.allclose(output2, output + 2, atol=1e-5)
def test_no_grad_in_hook(self):
test_model = ModelForTest()
x = torch.randn(2, 3)
output = test_model(x)
test_hook = PostForwardHook()
add_hook_to_module(test_model, test_hook)
output1 = test_model(x)
self.assertTrue(torch.allclose(output1, output + 1))
self.assertTrue(output1.requires_grad)
test_hook.no_grad = True
output1 = test_model(x)
self.assertFalse(output1.requires_grad)
@require_multi_gpu
def test_align_devices_as_model_parallelism(self):
model = ModelForTest()
# Everything is on CPU
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
# This will move each submodule on different devices
add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))
self.assertEqual(model.linear1.weight.device, torch.device(0))
self.assertEqual(model.batchnorm.weight.device, torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
self.assertEqual(model.linear2.weight.device, torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
x = torch.randn(2, 3)
output = model(x)
self.assertEqual(output.device, torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
x = torch.randn(2, 3).to(0)
output = model(x)
self.assertEqual(output.device, torch.device(0))
def test_align_devices_as_cpu_offload(self):
model = ModelForTest()
# Everything is on CPU
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
# This will move each submodule on different devices
hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.linear1.weight.device, torch.device("meta"))
self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
self.assertEqual(model.linear2.weight.device, torch.device("meta"))
# Buffers are not included in the offload by default, so are on the execution device
device = torch.device(hook_kwargs["execution_device"])
self.assertEqual(model.batchnorm.running_mean.device, device)
x = torch.randn(2, 3)
output = model(x)
self.assertEqual(output.device, device)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.linear1)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.linear2)
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
# Now test with buffers included in the offload
hook_kwargs = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.linear1.weight.device, torch.device("meta"))
self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
self.assertEqual(model.linear2.weight.device, torch.device("meta"))
self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
x = torch.randn(2, 3)
output = model(x)
self.assertEqual(output.device, device)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.linear1)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.linear2)
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
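# Note on the mechanism (simplified): with `offload=True` the AlignDevicesHook
# replaces each parameter with a meta-device placeholder and materializes the
# real weights on the execution device just before `forward`, releasing them
# afterwards. Buffers stay on the execution device unless `offload_buffers=True`,
# which is exactly what the device assertions above and below verify.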
def test_attach_align_device_hook_as_cpu_offload(self):
model = ModelForTest()
# Everything is on CPU
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
# This will move each submodule on different devices
execution_device = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(model, execution_device=execution_device, offload=True)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.linear1.weight.device, torch.device("meta"))
self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
self.assertEqual(model.linear2.weight.device, torch.device("meta"))
# Buffers are not included in the offload by default, so are on the execution device
device = torch.device(execution_device)
self.assertEqual(model.batchnorm.running_mean.device, device)
x = torch.randn(2, 3)
output = model(x)
self.assertEqual(output.device, device)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(model)
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
# Now test with buffers included in the offload
attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.linear1.weight.device, torch.device("meta"))
self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
self.assertEqual(model.linear2.weight.device, torch.device("meta"))
self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
x = torch.randn(2, 3)
output = model(x)
self.assertEqual(output.device, device)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(model)
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
model = ModelForTest()
# Everything is on CPU
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
# This will move each submodule on different devices
execution_device = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.linear1.weight.device, torch.device("meta"))
self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
self.assertEqual(model.linear2.weight.device, torch.device("meta"))
# Buffers are not included in the offload by default, so are on the execution device
device = torch.device(execution_device)
self.assertEqual(model.batchnorm.running_mean.device, device)
x = torch.randn(2, 3)
output = model(x)
self.assertEqual(output.device, device)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(model)
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
# Now test with buffers included in the offload
attach_align_device_hook(
model,
execution_device=execution_device,
offload=True,
weights_map=model.state_dict(),
offload_buffers=True,
)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.linear1.weight.device, torch.device("meta"))
self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
self.assertEqual(model.linear2.weight.device, torch.device("meta"))
self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
x = torch.randn(2, 3)
output = model(x)
self.assertEqual(output.device, device)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(model)
self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| accelerate-wip-main | tests/test_hooks.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import get_launch_prefix, patch_environment
class MultiGPUTester(unittest.TestCase):
def setUp(self):
mod_file = inspect.getfile(accelerate.test_utils)
self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
self.data_loop_file_path = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
)
self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
def test_multi_gpu(self):
print(f"Found {torch.cuda.device_count()} devices.")
cmd = get_launch_prefix() + [f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd, env=os.environ.copy())
@require_multi_gpu
def test_multi_gpu_ops(self):
print(f"Found {torch.cuda.device_count()} devices.")
cmd = get_launch_prefix() + [f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}")
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd, env=os.environ.copy())
@require_multi_gpu
def test_pad_across_processes(self):
cmd = get_launch_prefix() + [f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd, env=os.environ.copy())
@require_multi_gpu
def test_distributed_data_loop(self):
"""
This TestCase checks the behaviour that occurs during distributed training or evaluation,
when the batch size does not evenly divide the dataset size.
"""
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
cmd = get_launch_prefix() + [f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
accelerator = Accelerator()
shape = (accelerator.state.process_index + 2, 10)
tensor = torch.randint(0, 10, shape).to(accelerator.device)
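# With N processes, process i holds a tensor of shape (i + 2, 10); the largest is
# (N + 1, 10), so after `pad_across_processes` every process should hold that
# shape, zero-padded at the end (or at the front when `pad_first=True`).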
error_msg = ""
tensor1 = accelerator.pad_across_processes(tensor)
if tensor1.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
if tensor2.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensor2[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensor2[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| accelerate-wip-main | tests/test_multigpu.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_multi_gpu,
require_single_gpu,
test_sync,
)
from accelerate.utils import get_launch_prefix, patch_environment
class SyncScheduler(unittest.TestCase):
def setUp(self):
mod_file = inspect.getfile(accelerate.test_utils)
self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_sync.py"])
@require_cpu
def test_gradient_sync_cpu_noop(self):
debug_launcher(test_sync.main, num_processes=1)
@require_cpu
def test_gradient_sync_cpu_multi(self):
debug_launcher(test_sync.main)
@require_single_gpu
def test_gradient_sync_gpu(self):
test_sync.main()
@require_multi_gpu
def test_gradient_sync_gpu_multi(self):
print(f"Found {torch.cuda.device_count()} devices.")
cmd = get_launch_prefix() + [f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd, env=os.environ.copy())
| accelerate-wip-main | tests/test_grad_sync.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A simple launcher script for TPU training
Inspired by https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py
::
>>> python xla_spawn.py --num_cores=NUM_CORES_YOU_HAVE
YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
arguments of your training script)
"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
)
)
# Optional arguments for the launch helper
parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
# positional
parser.add_argument(
"training_script",
type=str,
help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
),
)
# rest from the training program
parser.add_argument("training_script_args", nargs=REMAINDER)
return parser.parse_args()
def main():
args = parse_args()
# Import training_script as a module.
script_fpath = Path(args.training_script)
sys.path.append(str(script_fpath.parent.resolve()))
mod_name = script_fpath.stem
mod = importlib.import_module(mod_name)
# Patch sys.argv
sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| accelerate-wip-main | tests/xla_spawn.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
is_torch_version,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = nn.Linear(4, 5)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
def test_offload_state_dict(self):
model = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(tmp_dir, model.state_dict())
index_file = os.path.join(tmp_dir, "index.json")
self.assertTrue(os.path.isfile(index_file))
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
weight_file = os.path.join(tmp_dir, f"{key}.dat")
self.assertTrue(os.path.isfile(weight_file))
# TODO: add tests checking that the weights are properly loaded
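# The on-disk layout produced by `offload_state_dict` looks roughly like
# (illustrative):
#
#     tmp_dir/
#         index.json            # maps each key to its dtype and shape
#         linear1.weight.dat    # raw, memory-mapped tensor bytes
#         linear1.bias.dat
#         ...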
def test_offload_weight(self):
dtypes = [torch.float16, torch.float32]
if is_torch_version(">=", "1.10"):
dtypes.append(torch.bfloat16)
for dtype in dtypes:
weight = torch.randn(2, 3, dtype=dtype)
with TemporaryDirectory() as tmp_dir:
index = offload_weight(weight, "weight", tmp_dir, {})
weight_file = os.path.join(tmp_dir, "weight.dat")
self.assertTrue(os.path.isfile(weight_file))
self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
new_weight = load_offloaded_weight(weight_file, index["weight"])
self.assertTrue(torch.equal(weight, new_weight))
def test_offload_weights_loader(self):
model = ModelForTest()
state_dict = model.state_dict()
cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(tmp_dir, disk_part)
weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
# Every key is there with the right value
self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(param, weight_map[key]))
cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(tmp_dir, disk_part)
weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
# Every key is there with the right value
self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(param, weight_map[key]))
with TemporaryDirectory() as tmp_dir:
offload_state_dict(tmp_dir, state_dict)
# Duplicates are removed
weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
# Every key is there with the right value
self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(param, weight_map[key]))
def test_extract_submodules_state_dict(self):
state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})
state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| accelerate-wip-main | tests/test_offload.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
"""
Test case for verifying the `accelerate launch` CLI operates correctly.
If a `default_config.yaml` file is located in the cache, it is temporarily
moved aside for the duration of the tests.
"""
mod_file = inspect.getfile(accelerate.test_utils)
test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
base_cmd = ["accelerate", "launch"]
config_folder = Path.home() / ".cache/huggingface/accelerate"
config_file = "default_config.yaml"
config_path = config_folder / config_file
changed_path = config_folder / "_default_config.yaml"
test_config_path = Path("tests/test_configs")
@classmethod
def setUpClass(cls):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path)
@classmethod
def tearDownClass(cls):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path)
def test_no_config(self):
cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())
def test_config_compatibility(self):
for config in sorted(self.test_config_path.glob("**/*.yaml")):
with self.subTest(config_file=config):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
)
def test_accelerate_test(self):
execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
"""
Test case for verifying the `accelerate tpu-config` CLI passes the right `gcloud` command.
"""
tpu_name = "test-tpu"
tpu_zone = "us-central1-a"
command = "ls"
cmd = ["accelerate", "tpu-config"]
base_output = "cd /usr/share"
command_file = "tests/test_samples/test_command_file.sh"
gcloud = "Running gcloud compute tpus tpu-vm ssh"
@staticmethod
def clean_output(output):
return "".join(output).rstrip()
def test_base(self):
output = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
return_stdout=True,
)
self.assertEqual(
self.clean_output(output),
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
)
def test_base_backward_compatibility(self):
output = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
],
return_stdout=True,
)
self.assertEqual(
self.clean_output(output),
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
)
def test_with_config_file(self):
output = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
)
self.assertEqual(
self.clean_output(output),
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
)
def test_with_config_file_and_command(self):
output = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
return_stdout=True,
)
self.assertEqual(
self.clean_output(output),
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
)
def test_with_config_file_and_multiple_command(self):
output = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
'echo "Hello World"',
"--debug",
],
return_stdout=True,
)
self.assertEqual(
self.clean_output(output),
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
)
def test_with_config_file_and_command_file(self):
output = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
return_stdout=True,
)
self.assertEqual(
self.clean_output(output),
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
)
def test_with_config_file_and_command_file_backward_compatibility(self):
output = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
],
return_stdout=True,
)
self.assertEqual(
self.clean_output(output),
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
)
def test_accelerate_install(self):
output = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
return_stdout=True,
)
self.assertEqual(
self.clean_output(output),
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
)
def test_accelerate_install_version(self):
output = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
],
return_stdout=True,
)
self.assertEqual(
self.clean_output(output),
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
)
| accelerate-wip-main | tests/test_cli.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
def setUp(self):
mod_file = inspect.getfile(accelerate.test_utils)
self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])
@require_tpu
def test_tpu(self):
distributed_args = f"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
cmd = [sys.executable] + distributed_args
execute_subprocess_async(cmd, env=os.environ.copy())
| accelerate-wip-main | tests/test_tpu.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
"""
This TestCase checks that all of the `complete_*` scripts contain all of the
information found in the `by_feature` scripts, line for line. If one fails,
then a complete example does not contain all of the features in the features
scripts, and should be updated.
Each example script should be a single test (such as `test_nlp_example`),
and should run `one_complete_example` twice: once with `parser_only=True`,
and once with `parser_only=False`. This is so that when the test
failures are returned to the user, they understand if the discrepancy lies in
the `main` function, or the `training_loop` function. Otherwise it will be
unclear.
Also, if there are any expected differences between the base script used and
`complete_nlp_example.py` (the canonical base script), these should be included in
`special_strings`. These would be differences in how something is logged, print statements,
etc. (such as calls to `Accelerator.log()`)
"""
def one_complete_example(
self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
):
"""
Tests a single `complete` example against all of the implemented `by_feature` scripts
Args:
complete_file_name (`str`):
The filename of a complete example
parser_only (`bool`):
Whether to look at the main training function, or the argument parser
secondary_filename (`str`, *optional*):
A potential secondary base file to strip all script information not relevant for checking,
such as "cv_example.py" when testing "complete_cv_example.py"
special_strings (`list`, *optional*):
A list of strings to potentially remove before checking no differences are left. These should be
diffs that are file specific, such as different logging variations between files.
"""
self.maxDiff = None
by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
examples_path = os.path.abspath("examples")
for item in os.listdir(by_feature_path):
if item not in EXCLUDE_EXAMPLES:
item_path = os.path.join(by_feature_path, item)
if os.path.isfile(item_path) and ".py" in item_path:
with self.subTest(
tested_script=complete_file_name,
feature_script=item,
tested_section="main()" if parser_only else "training_function()",
):
diff = compare_against_test(
os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
)
diff = "\n".join(diff)
if special_strings is not None:
for string in special_strings:
diff = diff.replace(string, "")
self.assertEqual(diff, "")
def test_nlp_examples(self):
self.one_complete_example("complete_nlp_example.py", True)
self.one_complete_example("complete_nlp_example.py", False)
def test_cv_examples(self):
cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
special_strings = [
" " * 16 + "{\n\n",
" " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
" " * 20 + '"f1": eval_metric["f1"],\n\n',
" " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
" " * 20 + '"epoch": epoch,\n\n',
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
clear_on_setup = False
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._tmpdir = tempfile.mkdtemp()
cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
write_basic_config(save_location=cls.configPath)
cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(cls._tmpdir)
def test_checkpointing_by_epoch(self):
testargs = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))
def test_checkpointing_by_steps(self):
testargs = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
_ = run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))
def test_load_states_by_epoch(self):
testargs = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
""".split()
output = run_command(self._launch_args + testargs, return_stdout=True)
self.assertNotIn("epoch 0:", output)
self.assertIn("epoch 1:", output)
def test_load_states_by_steps(self):
testargs = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
""".split()
output = run_command(self._launch_args + testargs, return_stdout=True)
if torch.cuda.is_available():
num_processes = torch.cuda.device_count()
else:
num_processes = 1
if num_processes > 1:
self.assertNotIn("epoch 0:", output)
self.assertIn("epoch 1:", output)
else:
self.assertIn("epoch 0:", output)
self.assertIn("epoch 1:", output)
@slow
def test_cross_validation(self):
testargs = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
output = run_command(self._launch_args + testargs, return_stdout=True)
results = ast.literal_eval(re.findall("({.+})", output)[-1])
self.assertGreaterEqual(results["accuracy"], 0.75)
def test_multi_process_metrics(self):
testargs = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs)
@require_trackers
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def test_tracking(self):
with tempfile.TemporaryDirectory() as tmpdir:
testargs = f"""
examples/by_feature/tracking.py
--with_tracking
--logging_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))
def test_gradient_accumulation(self):
testargs = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs)
def test_local_sgd(self):
testargs = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs)
| accelerate-wip-main | tests/test_examples.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, get_launch_prefix, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
"Generates a tuple of dummy DataLoaders to test with"
def get_dataset(n_batches):
x = torch.randn(batch_size * n_batches, 1)
return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))
train_dataset = get_dataset(n_train_batches)
valid_dataset = get_dataset(n_valid_batches)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
"Trains for `num_epochs`"
rands = []
for epoch in range(num_epochs):
# Train quickly
model.train()
for batch in dataloader:
x, y = batch
outputs = model(x)
loss = torch.nn.functional.mse_loss(outputs, y)
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
rands.append(random.random()) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class DummyModel(nn.Module):
"Simple model to do y=mx+b"
def __init__(self):
super().__init__()
self.a = nn.Parameter(torch.randn(1))
self.b = nn.Parameter(torch.randn(1))
def forward(self, x):
return x * self.a + self.b
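# Typical checkpoint round-trip exercised by the tests below (paths are
# illustrative; with `automatic_checkpoint_naming=True` states land under
# `project_dir/checkpoints/checkpoint_<i>`):
#
#     accelerator.save_state()                                # -> checkpoint_0
#     ...train...
#     accelerator.load_state(".../checkpoints/checkpoint_0")  # restores model,
#                                                             # optimizer, scheduler
#                                                             # and RNG states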
class CheckpointTest(unittest.TestCase):
def test_with_save_limit(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
model = DummyModel()
optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
train_dataloader, valid_dataloader = dummy_dataloaders()
project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
# Train baseline
accelerator = Accelerator(project_config=project_config)
model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader
)
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
def test_can_resume_training_with_folder(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
model = DummyModel()
optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
train_dataloader, valid_dataloader = dummy_dataloaders()
# Train baseline
accelerator = Accelerator()
model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader
)
# Save initial
initial = os.path.join(tmpdir, "initial")
accelerator.save_state(initial)
(a, b) = model.a.item(), model.b.item()
opt_state = optimizer.state_dict()
ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
(a1, b1) = model.a.item(), model.b.item()
opt_state1 = optimizer.state_dict()
# Train partially
set_seed(42)
model = DummyModel()
optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
train_dataloader, valid_dataloader = dummy_dataloaders()
accelerator = Accelerator()
model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader
)
accelerator.load_state(initial)
(a2, b2) = model.a.item(), model.b.item()
opt_state2 = optimizer.state_dict()
self.assertEqual(a, a2)
self.assertEqual(b, b2)
self.assertEqual(opt_state, opt_state2)
test_rands = train(2, model, train_dataloader, optimizer, accelerator)
# Save everything
checkpoint = os.path.join(tmpdir, "checkpoint")
accelerator.save_state(checkpoint)
# Load everything back in and make sure all states work
accelerator.load_state(checkpoint)
test_rands += train(1, model, train_dataloader, optimizer, accelerator)
(a3, b3) = model.a.item(), model.b.item()
opt_state3 = optimizer.state_dict()
self.assertEqual(a1, a3)
self.assertEqual(b1, b3)
self.assertEqual(opt_state1, opt_state3)
self.assertEqual(ground_truth_rands, test_rands)
def test_can_resume_training(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
model = DummyModel()
optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
train_dataloader, valid_dataloader = dummy_dataloaders()
project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader
)
# Save initial
accelerator.save_state()
(a, b) = model.a.item(), model.b.item()
opt_state = optimizer.state_dict()
ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
(a1, b1) = model.a.item(), model.b.item()
opt_state1 = optimizer.state_dict()
# Train partially
set_seed(42)
model = DummyModel()
optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
train_dataloader, valid_dataloader = dummy_dataloaders()
project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader
)
accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
(a2, b2) = model.a.item(), model.b.item()
opt_state2 = optimizer.state_dict()
self.assertEqual(a, a2)
self.assertEqual(b, b2)
self.assertEqual(opt_state, opt_state2)
test_rands = train(2, model, train_dataloader, optimizer, accelerator)
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
test_rands += train(1, model, train_dataloader, optimizer, accelerator)
(a3, b3) = model.a.item(), model.b.item()
opt_state3 = optimizer.state_dict()
self.assertEqual(a1, a3)
self.assertEqual(b1, b3)
self.assertEqual(opt_state1, opt_state3)
self.assertEqual(ground_truth_rands, test_rands)
def test_invalid_registration(self):
t = torch.tensor([1, 2, 3])
t1 = torch.tensor([2, 3, 4])
net = DummyModel()
opt = torch.optim.Adam(net.parameters())
accelerator = Accelerator()
with self.assertRaises(ValueError) as ve:
accelerator.register_for_checkpointing(t, t1, net, opt)
message = str(ve.exception)
self.assertTrue("Item at index 0" in message)
self.assertTrue("Item at index 1" in message)
self.assertFalse("Item at index 2" in message)
self.assertFalse("Item at index 3" in message)
def test_with_scheduler(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
model = DummyModel()
optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
train_dataloader, valid_dataloader = dummy_dataloaders()
project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
# Save initial
accelerator.save_state()
scheduler_state = scheduler.state_dict()
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
self.assertNotEqual(scheduler_state, scheduler.state_dict())
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
self.assertEqual(scheduler_state, scheduler.state_dict())
def test_checkpoint_deletion(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
model = DummyModel()
project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
# Train baseline
accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
model = accelerator.prepare(model)
# Save 11 states; with total_limit=2 only the two most recent should remain:
for _ in range(11):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
@require_cuda
def test_map_location(self):
cmd = get_launch_prefix()
cmd += [f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
savedir = "/tmp/accelerate/state_checkpointing"
model = DummyModel()
optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
train_dataloader, valid_dataloader = dummy_dataloaders()
project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
param_device = group["params"][0].device
break
assert param_device.type == accelerator.device.type
model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
param_device = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
param_device = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| accelerate-wip-main | tests/test_state_checkpointing.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import io
import itertools
import json
import os
import tempfile
from copy import deepcopy
from pathlib import Path
import torch
from parameterized import parameterized
from torch.utils.data import DataLoader
from transformers import AutoModel, AutoModelForCausalLM, get_scheduler
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
from transformers.utils import is_torch_bf16_available
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.scheduler import AcceleratedScheduler
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_deepspeed,
require_multi_gpu,
slow,
)
from accelerate.test_utils.training import RegressionDataset
from accelerate.utils.dataclasses import DeepSpeedPlugin
from accelerate.utils.deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
)
from accelerate.utils.other import patch_environment
set_seed(42)
T5_SMALL = "t5-small"
T5_TINY = "patrickvonplaten/t5-tiny-random"
GPT2_TINY = "sshleifer/tiny-gpt2"
ZERO2 = "zero2"
ZERO3 = "zero3"
FP16 = "fp16"
BF16 = "bf16"
CUSTOM_OPTIMIZER = "custom_optimizer"
CUSTOM_SCHEDULER = "custom_scheduler"
DS_OPTIMIZER = "deepspeed_optimizer"
DS_SCHEDULER = "deepspeed_scheduler"
stages = [ZERO2, ZERO3]
optims = [CUSTOM_OPTIMIZER, DS_OPTIMIZER]
schedulers = [CUSTOM_SCHEDULER, DS_SCHEDULER]
if is_torch_bf16_available():
dtypes = [FP16, BF16]
else:
dtypes = [FP16]
def parameterized_custom_name_func(func, param_num, param):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
return f"{func.__name__}_{param_based_name}"
# Cartesian product of ZeRO stages with mixed-precision dtypes to test
params = list(itertools.product(stages, dtypes))
optim_scheduler_params = list(itertools.product(optims, schedulers))
@require_deepspeed
@require_cuda
class DeepSpeedConfigIntegration(AccelerateTestCase):
def setUp(self):
super().setUp()
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self.test_file_dir_str = str(path.parents[0])
self.ds_config_file = dict(
zero2=f"{self.test_file_dir_str}/ds_config_zero2.json",
zero3=f"{self.test_file_dir_str}/ds_config_zero3.json",
)
        # use self.get_config_dict(stage) to access these so the originals are not modified
with io.open(self.ds_config_file[ZERO2], "r", encoding="utf-8") as f:
config_zero2 = json.load(f)
with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
config_zero3 = json.load(f)
# The following setting slows things down, so don't enable it by default unless needed by a test.
# It's in the file as a demo for users since we want everything to work out of the box even if slower.
config_zero3["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = False
self.ds_config_dict = dict(zero2=config_zero2, zero3=config_zero3)
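        # Mimic a single-process distributed environment so Accelerator can initialize
        # DeepSpeed without a real launcher.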
self.dist_env = dict(
ACCELERATE_USE_DEEPSPEED="true",
MASTER_ADDR="localhost",
MASTER_PORT="10999",
RANK="0",
LOCAL_RANK="0",
WORLD_SIZE="1",
)
def get_config_dict(self, stage):
# As some tests modify the dict, always make a copy
return deepcopy(self.ds_config_dict[stage])
@parameterized.expand(stages, name_func=parameterized_custom_name_func)
def test_deepspeed_plugin(self, stage):
# Test zero3_init_flag will be set to False when ZeRO stage != 3
deepspeed_plugin = DeepSpeedPlugin(
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
zero3_init_flag=True,
)
self.assertFalse(deepspeed_plugin.zero3_init_flag)
deepspeed_plugin.deepspeed_config = None
# Test zero3_init_flag will be set to True only when ZeRO stage == 3
deepspeed_plugin = DeepSpeedPlugin(
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=3,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
zero3_init_flag=True,
)
self.assertTrue(deepspeed_plugin.zero3_init_flag)
deepspeed_plugin.deepspeed_config = None
# Test config files are loaded correctly
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[stage], zero3_init_flag=True)
if stage == ZERO2:
self.assertFalse(deepspeed_plugin.zero3_init_flag)
elif stage == ZERO3:
self.assertTrue(deepspeed_plugin.zero3_init_flag)
# Test `gradient_accumulation_steps` is set to 1 if unavailable in config file
with tempfile.TemporaryDirectory() as dirpath:
ds_config = self.get_config_dict(stage)
del ds_config["gradient_accumulation_steps"]
with open(os.path.join(dirpath, "ds_config.json"), "w") as out_file:
json.dump(ds_config, out_file)
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=os.path.join(dirpath, "ds_config.json"))
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 1)
deepspeed_plugin.deepspeed_config = None
# Test `ValueError` is raised if `zero_optimization` is unavailable in config file
with tempfile.TemporaryDirectory() as dirpath:
ds_config = self.get_config_dict(stage)
del ds_config["zero_optimization"]
with open(os.path.join(dirpath, "ds_config.json"), "w") as out_file:
json.dump(ds_config, out_file)
with self.assertRaises(ValueError) as cm:
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=os.path.join(dirpath, "ds_config.json"))
self.assertTrue(
"Please specify the ZeRO optimization config in the DeepSpeed config." in str(cm.exception)
)
deepspeed_plugin.deepspeed_config = None
# Test `deepspeed_config_process`
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[stage])
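        # These kwargs mirror the "auto" placeholders in the config file;
        # deepspeed_config_process fills them in and raises on any mismatch with
        # values that are set explicitly.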
kwargs = {
"fp16.enabled": True,
"bf16.enabled": False,
"optimizer.params.lr": 5e-5,
"optimizer.params.weight_decay": 0.0,
"scheduler.params.warmup_min_lr": 0.0,
"scheduler.params.warmup_max_lr": 5e-5,
"scheduler.params.warmup_num_steps": 0,
"train_micro_batch_size_per_gpu": 16,
"gradient_clipping": 1.0,
"train_batch_size": 16,
"zero_optimization.reduce_bucket_size": 5e5,
"zero_optimization.stage3_prefetch_bucket_size": 5e5,
"zero_optimization.stage3_param_persistence_threshold": 5e5,
"zero_optimization.stage3_gather_16bit_weights_on_model_save": False,
}
deepspeed_plugin.deepspeed_config_process(**kwargs)
for ds_key_long, value in kwargs.items():
config, ds_key = deepspeed_plugin.hf_ds_config.find_config_node(ds_key_long)
if config.get(ds_key) is not None:
self.assertEqual(config.get(ds_key), value)
# Test mismatches
mismatches = {
"optimizer.params.lr": 1e-5,
"optimizer.params.weight_decay": 1e-5,
"gradient_accumulation_steps": 2,
}
with self.assertRaises(ValueError) as cm:
new_kwargs = deepcopy(kwargs)
new_kwargs.update(mismatches)
deepspeed_plugin.deepspeed_config_process(**new_kwargs)
for key in mismatches.keys():
self.assertTrue(
key in str(cm.exception),
f"{key} is not in the exception message:\n{cm.exception}",
)
        # Test `ValueError` is raised if a config file field with an `auto` value is missing from `kwargs`
deepspeed_plugin.deepspeed_config["optimizer"]["params"]["lr"] = "auto"
with self.assertRaises(ValueError) as cm:
del kwargs["optimizer.params.lr"]
deepspeed_plugin.deepspeed_config_process(**kwargs)
self.assertTrue("`optimizer.params.lr` not found in kwargs." in str(cm.exception))
@parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)
def test_accelerate_state_deepspeed(self, dtype):
AcceleratorState._reset_state(True)
deepspeed_plugin = DeepSpeedPlugin(
gradient_accumulation_steps=1,
gradient_clipping=1.0,
            zero_stage=2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
zero3_init_flag=True,
)
with mockenv_context(**self.dist_env):
state = Accelerator(mixed_precision=dtype, deepspeed_plugin=deepspeed_plugin).state
self.assertTrue(state.deepspeed_plugin.deepspeed_config[dtype]["enabled"])
def test_init_zero3(self):
deepspeed_plugin = DeepSpeedPlugin(
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=3,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
zero3_init_flag=True,
)
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin) # noqa: F841
from transformers.deepspeed import is_deepspeed_zero3_enabled
self.assertTrue(is_deepspeed_zero3_enabled())
@parameterized.expand(optim_scheduler_params, name_func=parameterized_custom_name_func)
def test_prepare_deepspeed(self, optim_type, scheduler_type):
# 1. Testing with one of the ZeRO Stages is enough to test the `_prepare_deepspeed` function.
# Here we test using ZeRO Stage 2 with FP16 enabled.
from deepspeed.runtime.engine import DeepSpeedEngine
kwargs = {
"optimizer.params.lr": 5e-5,
"optimizer.params.weight_decay": 0.0,
"scheduler.params.warmup_min_lr": 0.0,
"scheduler.params.warmup_max_lr": 5e-5,
"scheduler.params.warmup_num_steps": 0,
"train_micro_batch_size_per_gpu": 16,
"gradient_clipping": 1.0,
"train_batch_size": 16,
"zero_optimization.reduce_bucket_size": 5e5,
"zero_optimization.stage3_prefetch_bucket_size": 5e5,
"zero_optimization.stage3_param_persistence_threshold": 5e5,
"zero_optimization.stage3_gather_16bit_weights_on_model_save": False,
}
if optim_type == CUSTOM_OPTIMIZER and scheduler_type == CUSTOM_SCHEDULER:
# Test custom optimizer + custom scheduler
deepspeed_plugin = DeepSpeedPlugin(
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=False,
zero3_init_flag=False,
)
with mockenv_context(**self.dist_env):
accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin)
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)
model = AutoModel.from_pretrained(GPT2_TINY)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
lr_scheduler = get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
dummy_optimizer = DummyOptim(params=model.parameters())
dummy_lr_scheduler = DummyScheduler(dummy_optimizer)
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
self.assertTrue(
"You cannot create a `DummyOptim` without specifying an optimizer in the config file."
in str(cm.exception)
)
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
self.assertTrue(
"You cannot create a `DummyScheduler` without specifying a scheduler in the config file."
in str(cm.exception)
)
with self.assertRaises(ValueError) as cm:
model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
self.assertTrue(
"When using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders "
"or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file"
"or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`."
in str(cm.exception)
)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
self.assertTrue(accelerator.deepspeed_config["zero_allow_untested_optimizer"])
                self.assertEqual(accelerator.deepspeed_config["train_batch_size"], 16)
self.assertEqual(type(model), DeepSpeedEngine)
self.assertEqual(type(optimizer), DeepSpeedOptimizerWrapper)
self.assertEqual(type(lr_scheduler), AcceleratedScheduler)
self.assertEqual(type(accelerator.deepspeed_engine_wrapped), DeepSpeedEngineWrapper)
elif optim_type == DS_OPTIMIZER and scheduler_type == DS_SCHEDULER:
# Test DeepSpeed optimizer + DeepSpeed scheduler
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision="fp16")
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False)
model = AutoModel.from_pretrained(GPT2_TINY)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
lr_scheduler = get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
dummy_optimizer = DummyOptim(params=model.parameters())
dummy_lr_scheduler = DummyScheduler(dummy_optimizer)
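                # DeepSpeed requires train_batch_size == train_micro_batch_size_per_gpu
                # * gradient_accumulation_steps * world_size.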
kwargs["train_batch_size"] = (
kwargs["train_micro_batch_size_per_gpu"]
* deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]
* accelerator.num_processes
)
accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
self.assertTrue(
"You cannot specify an optimizer in the config file and in the code at the same time"
in str(cm.exception)
)
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
self.assertTrue(
"You cannot specify a scheduler in the config file and in the code at the same time"
in str(cm.exception)
)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
                self.assertEqual(type(model), DeepSpeedEngine)
                self.assertEqual(type(optimizer), DeepSpeedOptimizerWrapper)
                self.assertEqual(type(lr_scheduler), DeepSpeedSchedulerWrapper)
                self.assertEqual(type(accelerator.deepspeed_engine_wrapped), DeepSpeedEngineWrapper)
elif optim_type == CUSTOM_OPTIMIZER and scheduler_type == DS_SCHEDULER:
# Test custom optimizer + DeepSpeed scheduler
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision="fp16")
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False)
model = AutoModel.from_pretrained(GPT2_TINY)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
lr_scheduler = get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
dummy_optimizer = DummyOptim(params=model.parameters())
dummy_lr_scheduler = DummyScheduler(dummy_optimizer)
kwargs["train_batch_size"] = (
kwargs["train_micro_batch_size_per_gpu"]
* deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]
* accelerator.num_processes
)
accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)
del accelerator.state.deepspeed_plugin.deepspeed_config["optimizer"]
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
                self.assertEqual(type(model), DeepSpeedEngine)
                self.assertEqual(type(optimizer), DeepSpeedOptimizerWrapper)
                self.assertEqual(type(lr_scheduler), DeepSpeedSchedulerWrapper)
                self.assertEqual(type(accelerator.deepspeed_engine_wrapped), DeepSpeedEngineWrapper)
elif optim_type == DS_OPTIMIZER and scheduler_type == CUSTOM_SCHEDULER:
# Test deepspeed optimizer + custom scheduler
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision="fp16")
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False)
model = AutoModel.from_pretrained(GPT2_TINY)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
lr_scheduler = get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
dummy_optimizer = DummyOptim(params=model.parameters())
dummy_lr_scheduler = DummyScheduler(dummy_optimizer)
kwargs["train_batch_size"] = (
kwargs["train_micro_batch_size_per_gpu"]
* deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]
* accelerator.num_processes
)
accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)
del accelerator.state.deepspeed_plugin.deepspeed_config["scheduler"]
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
self.assertTrue(
"You can only specify `accelerate.utils.DummyScheduler` in the code when using `accelerate.utils.DummyOptim`."
in str(cm.exception)
)
def test_save_checkpoints(self):
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=self.ds_config_file[ZERO3],
zero3_init_flag=True,
)
del deepspeed_plugin.deepspeed_config["bf16"]
kwargs = {
"optimizer.params.lr": 5e-5,
"optimizer.params.weight_decay": 0.0,
"scheduler.params.warmup_min_lr": 0.0,
"scheduler.params.warmup_max_lr": 5e-5,
"scheduler.params.warmup_num_steps": 0,
"train_micro_batch_size_per_gpu": 16,
"gradient_clipping": 1.0,
"train_batch_size": 16,
"zero_optimization.reduce_bucket_size": 5e5,
"zero_optimization.stage3_prefetch_bucket_size": 5e5,
"zero_optimization.stage3_param_persistence_threshold": 5e5,
"zero_optimization.stage3_gather_16bit_weights_on_model_save": False,
}
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision="fp16")
kwargs["train_batch_size"] = (
kwargs["train_micro_batch_size_per_gpu"]
* deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]
* accelerator.num_processes
)
accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)
model = AutoModelForCausalLM.from_pretrained("gpt2")
dummy_optimizer = DummyOptim(params=model.parameters())
dummy_lr_scheduler = DummyScheduler(dummy_optimizer)
model, _, train_dataloader, eval_dataloader, _ = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
with self.assertRaises(ValueError) as cm:
accelerator.get_state_dict(model)
msg = (
"Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. "
"To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or "
"set `zero3_save_16bit_model` to True when using `accelerate config`. "
"To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights."
)
self.assertTrue(msg in str(cm.exception))
def test_autofill_dsconfig(self):
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=self.ds_config_file[ZERO3],
zero3_init_flag=True,
)
del deepspeed_plugin.deepspeed_config["bf16"]
del deepspeed_plugin.deepspeed_config["fp16"]
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)
model = AutoModelForCausalLM.from_pretrained("gpt2")
dummy_optimizer = DummyOptim(params=model.parameters(), lr=5e-5, weight_decay=1e-4)
dummy_lr_scheduler = DummyScheduler(dummy_optimizer, warmup_num_steps=10, total_num_steps=1000)
hidden_size = model.config.hidden_size
model, _, train_dataloader, eval_dataloader, _ = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
self.assertEqual(accelerator.deepspeed_config["train_micro_batch_size_per_gpu"], 16)
self.assertEqual(accelerator.deepspeed_config["train_batch_size"], 16)
self.assertEqual(accelerator.deepspeed_config["optimizer"]["params"]["lr"], 5e-5)
self.assertEqual(accelerator.deepspeed_config["optimizer"]["params"]["weight_decay"], 1e-4)
self.assertEqual(accelerator.deepspeed_config["scheduler"]["params"]["warmup_min_lr"], 0.0)
self.assertEqual(accelerator.deepspeed_config["scheduler"]["params"]["warmup_max_lr"], 5e-5)
self.assertEqual(accelerator.deepspeed_config["scheduler"]["params"]["warmup_num_steps"], 10)
self.assertEqual(accelerator.deepspeed_config["gradient_clipping"], 1.0)
self.assertEqual(
accelerator.deepspeed_config["zero_optimization"]["reduce_bucket_size"], hidden_size * hidden_size
)
self.assertEqual(
accelerator.deepspeed_config["zero_optimization"]["stage3_prefetch_bucket_size"],
0.9 * hidden_size * hidden_size,
)
self.assertEqual(
accelerator.deepspeed_config["zero_optimization"]["stage3_param_persistence_threshold"],
10 * hidden_size,
)
self.assertFalse(
accelerator.deepspeed_config["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"]
)
@parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)
def test_autofill_dsconfig_from_ds_plugin(self, dtype):
ds_config = self.ds_config_dict["zero3"]
if dtype == BF16:
del ds_config["fp16"]
else:
del ds_config["bf16"]
ds_config[dtype]["enabled"] = "auto"
ds_config["zero_optimization"]["stage"] = "auto"
ds_config["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = "auto"
ds_config["zero_optimization"]["offload_optimizer"]["device"] = "auto"
ds_config["zero_optimization"]["offload_param"]["device"] = "auto"
ds_config["gradient_accumulation_steps"] = "auto"
ds_config["gradient_clipping"] = "auto"
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=ds_config,
zero3_init_flag=True,
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
)
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype)
deepspeed_plugin = accelerator.state.deepspeed_plugin
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_clipping"], 1.0)
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 1)
self.assertEqual(deepspeed_plugin.deepspeed_config["zero_optimization"]["stage"], 2)
self.assertEqual(
deepspeed_plugin.deepspeed_config["zero_optimization"]["offload_optimizer"]["device"], "cpu"
)
self.assertEqual(deepspeed_plugin.deepspeed_config["zero_optimization"]["offload_param"]["device"], "cpu")
self.assertTrue(
deepspeed_plugin.deepspeed_config["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"]
)
self.assertTrue(deepspeed_plugin.deepspeed_config[dtype]["enabled"])
AcceleratorState._reset_state(True)
diff_dtype = "bf16" if dtype == "fp16" else "fp16"
with mockenv_context(**self.dist_env):
with self.assertRaises(ValueError) as cm:
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=diff_dtype)
self.assertTrue(
f"`--mixed_precision` arg cannot be set to `{diff_dtype}` when `{dtype}` is set in the DeepSpeed config file."
in str(cm.exception)
)
def test_ds_config_assertions(self):
ambiguous_env = self.dist_env.copy()
ambiguous_env[
"ACCELERATE_CONFIG_DS_FIELDS"
] = "gradient_accumulation_steps,gradient_clipping,zero_stage,offload_optimizer_device,offload_param_device,zero3_save_16bit_model,mixed_precision"
with mockenv_context(**ambiguous_env):
with self.assertRaises(ValueError) as cm:
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=self.ds_config_file[ZERO3],
zero3_init_flag=True,
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=ZERO2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
)
_ = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=FP16)
self.assertTrue(
"If you are using an accelerate config file, remove others config variables mentioned in the above specified list."
in str(cm.exception)
)
@parameterized.expand(stages, name_func=parameterized_custom_name_func)
def test_ds_config(self, stage):
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=self.ds_config_file[stage],
zero3_init_flag=True,
)
self.assertEqual(deepspeed_plugin.zero_stage, int(stage.replace("zero", "")))
def test_basic_run(self):
mod_file = inspect.getfile(accelerate.test_utils)
test_file_path = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_performance.py"]
)
with tempfile.TemporaryDirectory() as dirpath:
cmd = [
"accelerate",
"launch",
"--num_processes=1",
"--num_machines=1",
"--machine_rank=0",
"--mixed_precision=fp16",
"--use_deepspeed",
"--gradient_accumulation_steps=1",
"--zero_stage=2",
"--offload_optimizer_device=none",
"--offload_param_device=none",
test_file_path,
"--model_name_or_path=distilbert-base-uncased",
"--num_epochs=1",
f"--output_dir={dirpath}",
]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd, env=os.environ.copy())
@require_deepspeed
@require_multi_gpu
@slow
class DeepSpeedIntegrationTest(TempDirTestCase):
def setUp(self):
super().setUp()
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self.test_file_dir_str = str(path.parents[0])
self.ds_config_file = dict(
zero2=f"{self.test_file_dir_str}/ds_config_zero2.json",
zero3=f"{self.test_file_dir_str}/ds_config_zero3.json",
)
self.stages = [1, 2, 3]
self.zero3_offload_config = False
self.performance_lower_bound = 0.82
self.peak_memory_usage_upper_bound = {
"multi_gpu_fp16": 3200,
"deepspeed_stage_1_fp16": 1600,
"deepspeed_stage_2_fp16": 2500,
"deepspeed_stage_3_zero_init_fp16": 2800,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "deepspeed_stage_3_cpu_offload_fp16": 1900,
}
self.n_train = 160
self.n_val = 160
mod_file = inspect.getfile(accelerate.test_utils)
self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
def test_performance(self):
self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--mixed_precision=fp16",
"--use_deepspeed",
"--gradient_accumulation_steps=1",
"--gradient_clipping=1",
"--zero3_init_flag=True",
"--zero3_save_16bit_model=True",
]
for stage in self.stages:
if stage == 1:
continue
cmd_stage = cmd.copy()
cmd_stage.extend([f"--zero_stage={stage}"])
cmd_stage.extend(["--offload_optimizer_device=none", "--offload_param_device=none"])
if self.zero3_offload_config:
with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
ds_config = json.load(f)
del ds_config["bf16"]
del ds_config["optimizer"]["params"]["torch_adam"]
del ds_config["optimizer"]["params"]["adam_w_mode"]
ds_config["fp16"]["enabled"] = True
ds_config_path = os.path.join(self.tmpdir, "ds_config.json")
with open(ds_config_path, "w") as out_file:
json.dump(ds_config, out_file)
cmd_stage.extend([f"--deepspeed_config_file={ds_config_path}"])
cmd_stage.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
f"--performance_lower_bound={self.performance_lower_bound}",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_stage, env=os.environ.copy())
def test_checkpointing(self):
self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--mixed_precision=fp16",
"--use_deepspeed",
"--gradient_accumulation_steps=1",
"--gradient_clipping=1",
"--zero3_init_flag=True",
"--zero3_save_16bit_model=True",
]
for stage in self.stages:
if stage == 1:
continue
cmd_stage = cmd.copy()
cmd_stage.extend([f"--zero_stage={stage}"])
cmd_stage.extend(["--offload_optimizer_device=none", "--offload_param_device=none"])
if self.zero3_offload_config:
with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
ds_config = json.load(f)
del ds_config["bf16"]
del ds_config["optimizer"]["params"]["torch_adam"]
del ds_config["optimizer"]["params"]["adam_w_mode"]
ds_config["fp16"]["enabled"] = True
ds_config_path = os.path.join(self.tmpdir, "ds_config.json")
with open(ds_config_path, "w") as out_file:
json.dump(ds_config, out_file)
cmd_stage.extend([f"--deepspeed_config_file={ds_config_path}"])
cmd_stage.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
"--partial_train_epoch=1",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_stage, env=os.environ.copy())
cmd_stage = cmd_stage[:-1]
resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
cmd_stage.extend(
[
f"--resume_from_checkpoint={resume_from_checkpoint}",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_stage, env=os.environ.copy())
def test_peak_memory_usage(self):
self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
cmd_stage = cmd.copy()
if "fp16" in spec:
cmd_stage.extend(["--mixed_precision=fp16"])
if "multi_gpu" in spec:
continue
else:
cmd_stage.extend(
[
"--use_deepspeed",
"--gradient_accumulation_steps=1",
"--gradient_clipping=1",
"--zero3_init_flag=True",
"--zero3_save_16bit_model=True",
]
)
for i in range(3):
if f"stage_{i+1}" in spec:
cmd_stage.extend([f"--zero_stage={i+1}"])
break
cmd_stage.extend(["--offload_optimizer_device=none", "--offload_param_device=none"])
if "cpu_offload" in spec:
with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
ds_config = json.load(f)
del ds_config["bf16"]
del ds_config["fp16"]
del ds_config["optimizer"]["params"]["torch_adam"]
del ds_config["optimizer"]["params"]["adam_w_mode"]
ds_config_path = os.path.join(self.tmpdir, "ds_config.json")
with open(ds_config_path, "w") as out_file:
json.dump(ds_config, out_file)
cmd_stage.extend([f"--deepspeed_config_file={ds_config_path}"])
cmd_stage.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
f"--peak_memory_upper_bound={peak_mem_upper_bound}",
f"--n_train={self.n_train}",
f"--n_val={self.n_val}",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_stage, env=os.environ.copy())
| accelerate-wip-main | tests/deepspeed/test_deepspeed.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
def setUp(self):
super().setUp()
self.dist_env = dict(
ACCELERATE_USE_FSDP="true",
MASTER_ADDR="localhost",
MASTER_PORT="10999",
RANK="0",
LOCAL_RANK="0",
WORLD_SIZE="1",
)
def test_sharding_strategy(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
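        # FSDP_SHARDING_STRATEGY is 1-indexed in the env var so it maps directly onto
        # the ShardingStrategy enum values.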
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
env = self.dist_env.copy()
env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
with mockenv_context(**env):
fsdp_plugin = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
def test_backward_prefetch(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
env = self.dist_env.copy()
env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
with mockenv_context(**env):
fsdp_plugin = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch)
else:
self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
def test_state_dict_type(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
env = self.dist_env.copy()
env["FSDP_STATE_DICT_TYPE"] = state_dict_type
with mockenv_context(**env):
fsdp_plugin = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
def test_auto_wrap_policy(self):
model = AutoModel.from_pretrained(BERT_BASE_CASED)
for policy in FSDP_AUTO_WRAP_POLICY:
env = self.dist_env.copy()
env["FSDP_AUTO_WRAP_POLICY"] = policy
if policy == "TRANSFORMER_BASED_WRAP":
env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
elif policy == "SIZE_BASED_WRAP":
env["FSDP_MIN_NUM_PARAMS"] = "2000"
with mockenv_context(**env):
fsdp_plugin = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(model)
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy)
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)
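        # Asking to wrap T5Layer inside a BERT model should fail: the transformer
        # layer class cannot be found.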
env = self.dist_env.copy()
env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
with mockenv_context(**env):
fsdp_plugin = FullyShardedDataParallelPlugin()
with self.assertRaises(Exception) as cm:
fsdp_plugin.set_auto_wrap_policy(model)
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))
env = self.dist_env.copy()
env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
env["FSDP_MIN_NUM_PARAMS"] = "0"
with mockenv_context(**env):
fsdp_plugin = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(model)
self.assertIsNone(fsdp_plugin.auto_wrap_policy)
def test_mixed_precision(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
env = self.dist_env.copy()
env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
with mockenv_context(**env):
accelerator = Accelerator()
if mp_dtype == "fp16":
dtype = torch.float16
elif mp_dtype == "bf16":
dtype = torch.bfloat16
mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
if mp_dtype == FP16:
self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
elif mp_dtype == BF16:
self.assertIsNone(accelerator.scaler)
AcceleratorState._reset_state(True)
def test_cpu_offload(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
env = self.dist_env.copy()
env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
with mockenv_context(**env):
fsdp_plugin = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
def setUp(self):
super().setUp()
self.performance_lower_bound = 0.82
self.performance_configs = [
"fsdp_shard_grad_op_transformer_based_wrap",
"fsdp_full_shard_transformer_based_wrap",
]
self.peak_memory_usage_upper_bound = {
"multi_gpu_fp16": 3200,
"fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
"fsdp_full_shard_transformer_based_wrap_fp16": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
self.n_train = 160
self.n_val = 160
mod_file = inspect.getfile(accelerate.test_utils)
self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
def test_performance(self):
self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
cmd_config = cmd.copy()
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in config:
cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no")
else:
cmd_config.append("--mixed_precision=fp16")
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True")
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000")
cmd_config.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
f"--performance_lower_bound={self.performance_lower_bound}",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_config, env=os.environ.copy())
def test_checkpointing(self):
self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--use_fsdp",
"--mixed_precision=fp16",
"--fsdp_transformer_layer_cls_to_wrap=BertLayer",
]
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
cmd_config = cmd.copy()
cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
if strategy != "FULL_SHARD":
continue
state_dict_config_index = len(cmd_config)
for state_dict_type in FSDP_STATE_DICT_TYPE:
cmd_config = cmd_config[:state_dict_config_index]
if state_dict_type == "SHARDED_STATE_DICT":
continue
cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
cmd_config.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
"--partial_train_epoch=1",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_config, env=os.environ.copy())
cmd_config = cmd_config[:-1]
resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
cmd_config.extend(
[
f"--resume_from_checkpoint={resume_from_checkpoint}",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_config, env=os.environ.copy())
def test_peak_memory_usage(self):
self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"])
else:
cmd_config.extend(["--mixed_precision=no"])
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"])
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in spec:
cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True")
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000")
cmd_config.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
f"--peak_memory_upper_bound={peak_mem_upper_bound}",
f"--n_train={self.n_train}",
f"--n_val={self.n_val}",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_config, env=os.environ.copy())
| accelerate-wip-main | tests/fsdp/test_fsdp.py |
# Copyright 2022 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to close stale issues. Taken in part from the AllenNLP repository.
https://github.com/allenai/allennlp.
"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/accelerate")
open_issues = repo.get_issues(state="open")
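    # Close an issue once the stale bot's warning has gone unanswered for over a week;
    # otherwise post the warning after 23+ days of inactivity on issues at least 30
    # days old, unless an exempt label is set.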
for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
last_comment = comments[0] if len(comments) > 0 else None
current_time = dt.utcnow()
days_since_updated = (current_time - issue.updated_at).days
days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
            # Close the issue: it has seen 7+ days of inactivity since the bot's mention.
issue.edit(state="closed")
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored."
)
if __name__ == "__main__":
main()
| accelerate-wip-main | utils/stale.py |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
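# A tabulate format that emits pipe-separated rows with no horizontal rules, which
# renders cleanly inside Slack code blocks.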
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
"emoji": True,
},
}
]
total_num_failed = 0
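# Each *.log file is expected to be a pytest report-log style JSON-lines file:
# one JSON object per test event.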
for log in Path().glob("*.log"):
section_num_failed = 0
with open(log, "r") as f:
for line in f:
line = json.loads(line)
if line.get("nodeid", "") != "":
test = line["nodeid"]
if line.get("duration", None) is not None:
duration = f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
failed = []
log.unlink()
message = ""
all_files2failed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
failed_table = []
files2failed = {}
for test in failed_tests:
data = test[0].split("::")
data[0] = data[0].split("/")[-1]
if data[0] not in files2failed:
files2failed[data[0]] = [data[1:]]
else:
files2failed[data[0]] += [data[1:]]
failed_table.append(data)
files = [test[0] for test in failed_table]
individual_files = list(set(files))
            # Count the number of failed tests per file
table = []
for file in individual_files:
table.append([file, len(files2failed[file])])
failed_table = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_files2failed.append(files2failed)
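    # Slack section blocks cap text at roughly 3000 characters; truncate and point
    # readers at the full report in the Action results.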
if len(message) > 3000:
err = "Too many failed tests, please see the full report in the Action results."
offset = len(err) + 10
message = message[: 3000 - offset] + f"\n...\n```\n{err}"
print(f"### {message}")
else:
message = "No failed tests! 🤗"
print(f"## {message}")
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
md_report = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
action_button = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
ts = response.data["ts"]
for failed_file in all_files2failed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
test_class = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
test_class = row[0]
else:
test_failures[i][0] = ""
payload = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| accelerate-wip-main | utils/log_reports.py |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""
Creates a set of `DataLoader`s for the `glue` dataset,
using "bert-base-cased" as the tokenizer.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want to pad to round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"],
shuffle=False,
collate_fn=collate_fn,
batch_size=EVAL_BATCH_SIZE,
drop_last=(accelerator.mixed_precision == "fp8"),
)
return train_dataloader, eval_dataloader
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
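# A minimal usage sketch (assuming the `accelerate` CLI is installed and
# `accelerate config` has been run once on the machine):
#
#   accelerate launch examples/nlp_example.py --mixed_precision fp16
#
# Plain `python examples/nlp_example.py` also works for single-device training.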
| accelerate-wip-main | examples/nlp_example.py |
import argparse
import runhouse as rh
import torch
from nlp_example import training_function
from accelerate.utils import PrepareForLaunch, patch_environment
def launch_train(*args):
num_processes = torch.cuda.device_count()
print(f"Device count: {num_processes}")
with patch_environment(
world_size=num_processes, master_addr="127.0.0.1", master_port="29500", mixed_precision=args[1].mixed_precision
):
launcher = PrepareForLaunch(training_function, distributed_type="MULTI_GPU")
torch.multiprocessing.start_processes(launcher, args=args, nprocs=num_processes, start_method="spawn")
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/main/rh_primitives/cluster.html#hardware-setup
# for cloud access setup instructions (if using on-demand hardware), and for API specifications.
# on-demand GPU
# gpu = rh.cluster(name='rh-cluster', instance_type='V100:1', provider='cheapest', use_spot=False) # single GPU
gpu = rh.cluster(name="rh-cluster", instance_type="V100:4", provider="cheapest", use_spot=False) # multi GPU
gpu.up_if_not()
# on-prem GPU
# gpu = rh.cluster(
# ips=["ip_addr"], ssh_creds={ssh_user:"<username>", ssh_private_key:"<key_path>"}, name="rh-cluster"
# )
# Set up remote function
reqs = [
"pip:./",
"transformers",
"datasets",
"evaluate",
"tqdm",
"scipy",
"scikit-learn",
"tensorboard",
"torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117",
]
launch_train_gpu = rh.function(fn=launch_train, system=gpu, reqs=reqs, name="train_bert_glue")
# Define train args/config, run train function
train_args = argparse.Namespace(cpu=False, mixed_precision="fp16")
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
launch_train_gpu(config, train_args, stream_logs=True)
# Alternatively, we can just run as instructed in the README (but only because there's already a wrapper CLI):
# gpu.install_packages(reqs)
# gpu.run(['accelerate launch --multi_gpu accelerate/examples/nlp_example.py'])
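# For intuition, `patch_environment` is a context manager that temporarily sets
# the given environment variables (upper-cased) and restores the previous state
# on exit. A minimal sketch with illustrative values:
#
#   with patch_environment(world_size=2, master_addr="127.0.0.1"):
#       assert os.environ["WORLD_SIZE"] == "2"  # unset/restored on exit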
| accelerate-wip-main | examples/multigpu_remote_launcher.py |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a ResNet50 on the Oxford-IIIT Pet Dataset
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Function to get the label from the filename
def extract_label(fname):
stem = fname.split(os.path.sep)[-1]
return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
def __init__(self, file_names, image_transform=None, label_to_id=None):
self.file_names = file_names
self.image_transform = image_transform
self.label_to_id = label_to_id
def __len__(self):
return len(self.file_names)
def __getitem__(self, idx):
fname = self.file_names[idx]
raw_image = PIL.Image.open(fname)
image = raw_image.convert("RGB")
if self.image_transform is not None:
image = self.image_transform(image)
label = extract_label(fname)
if self.label_to_id is not None:
label = self.label_to_id[label]
return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
accelerator = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", logging_dir=args.logging_dir
)
else:
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
image_size = config["image_size"]
if not isinstance(image_size, (list, tuple)):
image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit"):
if args.checkpointing_steps == "epoch":
checkpointing_steps = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
checkpointing_steps = int(args.checkpointing_steps)
else:
raise ValueError(
f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
)
else:
checkpointing_steps = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
run = os.path.split(__file__)[-1].split(".")[0]
accelerator.init_trackers(run, config)
# Grab all the image filenames
file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
# Build the label correspondences
all_labels = [extract_label(fname) for fname in file_names]
id_to_label = list(set(all_labels))
id_to_label.sort()
label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
# Set the seed before splitting the data.
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Split our filenames between train and validation
random_perm = np.random.permutation(len(file_names))
cut = int(0.8 * len(file_names))
train_split = random_perm[:cut]
eval_split = random_perm[cut:]
# For training we use a simple RandomResizedCrop
train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
train_dataset = PetsDataset(
[file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
)
# For evaluation, we use a deterministic Resize
eval_tfm = Compose([Resize(image_size), ToTensor()])
eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
# Instantiate dataloaders.
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Freezing the base model
for param in model.parameters():
param.requires_grad = False
for param in model.get_classifier().parameters():
param.requires_grad = True
# We normalize the batches of images to be a bit faster.
mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
# Instantiate optimizer
optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
# Instantiate learning rate scheduler
lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# We need to keep track of how many total steps we have iterated over
overall_step = 0
# We also need to keep track of the starting epoch so files are named properly
starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
accelerator.load_state(args.resume_from_checkpoint)
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
resume_step = None
else:
resume_step = int(training_difference.replace("step_", ""))
starting_epoch = resume_step // len(train_dataloader)
resume_step -= starting_epoch * len(train_dataloader)
# Now we train the model
for epoch in range(starting_epoch, num_epochs):
model.train()
if args.with_tracking:
total_loss = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
active_dataloader = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch = {k: v.to(accelerator.device) for k, v in batch.items()}
inputs = (batch["image"] - mean) / std
outputs = model(inputs)
loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(checkpointing_steps, int):
output_dir = f"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
model.eval()
accurate = 0
num_elems = 0
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch = {k: v.to(accelerator.device) for k, v in batch.items()}
inputs = (batch["image"] - mean) / std
with torch.no_grad():
outputs = model(inputs)
predictions = outputs.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
accurate_preds = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
eval_metric = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(train_dataloader),
"epoch": epoch,
},
step=overall_step,
)
if checkpointing_steps == "epoch":
output_dir = f"epoch_{epoch}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
if args.with_tracking:
accelerator.end_training()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--output_dir",
type=str,
default=".",
help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to load in all available experiment trackers from the environment and use them for logging.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help="Location on where to store experiment tracking logs`",
)
args = parser.parse_args()
config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(config, args)
if __name__ == "__main__":
main()
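# A minimal checkpointing sketch (paths and flags below are illustrative):
#
#   accelerate launch examples/complete_cv_example.py --data_dir images \
#       --checkpointing_steps epoch --output_dir checkpoints
#   accelerate launch examples/complete_cv_example.py --data_dir images \
#       --resume_from_checkpoint checkpoints/epoch_0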
| accelerate-wip-main | examples/complete_cv_example.py |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# This example also demonstrates the checkpointing and sharding capabilities
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
accelerator = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", logging_dir=args.logging_dir
)
else:
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
if hasattr(args.checkpointing_steps, "isdigit"):
if args.checkpointing_steps == "epoch":
checkpointing_steps = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
checkpointing_steps = int(args.checkpointing_steps)
else:
raise ValueError(
f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
)
else:
checkpointing_steps = None
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
run = os.path.split(__file__)[-1].split(".")[0]
accelerator.init_trackers(run, config)
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
metric = evaluate.load("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
set_seed(seed)
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# We need to keep track of how many total steps we have iterated over
overall_step = 0
# We also need to keep track of the starting epoch so files are named properly
starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
accelerator.load_state(args.resume_from_checkpoint)
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
resume_step = None
else:
resume_step = int(training_difference.replace("step_", ""))
starting_epoch = resume_step // len(train_dataloader)
resume_step -= starting_epoch * len(train_dataloader)
# Now we train the model
for epoch in range(starting_epoch, num_epochs):
model.train()
if args.with_tracking:
total_loss = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
active_dataloader = train_dataloader
for step, batch in enumerate(active_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(checkpointing_steps, int):
output_dir = f"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(train_dataloader),
"epoch": epoch,
},
step=epoch,
)
if checkpointing_steps == "epoch":
output_dir = f"epoch_{epoch}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
if args.with_tracking:
accelerator.end_training()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to load in all available experiment trackers from the environment and use them for logging.",
)
parser.add_argument(
"--output_dir",
type=str,
default=".",
help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help="Location on where to store experiment tracking logs`",
)
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
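# A minimal tracking sketch (assuming at least one supported tracker, e.g.
# tensorboard, is installed so `log_with="all"` can pick it up):
#
#   accelerate launch examples/complete_nlp_example.py --with_tracking \
#       --checkpointing_steps 100 --output_dir checkpoints --logging_dir logs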
| accelerate-wip-main | examples/complete_nlp_example.py |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a ResNet50 on the Oxford-IIIT Pet Dataset
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Function to get the label from the filename
def extract_label(fname):
stem = fname.split(os.path.sep)[-1]
return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
def __init__(self, file_names, image_transform=None, label_to_id=None):
self.file_names = file_names
self.image_transform = image_transform
self.label_to_id = label_to_id
def __len__(self):
return len(self.file_names)
def __getitem__(self, idx):
fname = self.file_names[idx]
raw_image = PIL.Image.open(fname)
image = raw_image.convert("RGB")
if self.image_transform is not None:
image = self.image_transform(image)
label = extract_label(fname)
if self.label_to_id is not None:
label = self.label_to_id[label]
return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
image_size = config["image_size"]
if not isinstance(image_size, (list, tuple)):
image_size = (image_size, image_size)
# Grab all the image filenames
file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
# Build the label correspondences
all_labels = [extract_label(fname) for fname in file_names]
id_to_label = list(set(all_labels))
id_to_label.sort()
label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
# Set the seed before splitting the data.
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Split our filenames between train and validation
random_perm = np.random.permutation(len(file_names))
cut = int(0.8 * len(file_names))
train_split = random_perm[:cut]
eval_split = random_perm[cut:]
# For training we use a simple RandomResizedCrop
train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
train_dataset = PetsDataset(
[file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
)
# For evaluation, we use a deterministic Resize
eval_tfm = Compose([Resize(image_size), ToTensor()])
eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
# Instantiate dataloaders.
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Freezing the base model
for param in model.parameters():
param.requires_grad = False
for param in model.get_classifier().parameters():
param.requires_grad = True
# We normalize the batches of images to be a bit faster.
mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
# Instantiate optimizer
optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
# Instantiate learning rate scheduler
lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch = {k: v.to(accelerator.device) for k, v in batch.items()}
inputs = (batch["image"] - mean) / std
outputs = model(inputs)
loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
accurate = 0
num_elems = 0
for _, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch = {k: v.to(accelerator.device) for k, v in batch.items()}
inputs = (batch["image"] - mean) / std
with torch.no_grad():
outputs = model(inputs)
predictions = outputs.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
accurate_preds = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
eval_metric = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(config, args)
if __name__ == "__main__":
main()
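# Note on the expected data layout: `extract_label` assumes Oxford-IIIT Pet
# filenames of the form `<label>_<index>.jpg`, so for example:
#
#   assert extract_label("Abyssinian_1.jpg") == "Abyssinian"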
| accelerate-wip-main | examples/cv_example.py |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""
Creates a set of `DataLoader`s for the `glue` dataset,
using "bert-base-cased" as the tokenizer.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
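# The reassignment above swaps in lightweight mocked dataloaders so tests can
# exercise this script without downloading GLUE, e.g. (illustrative command):
#
#   TESTING_MOCKED_DATALOADERS=1 python examples/by_feature/gradient_accumulation.py --cpu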
def training_function(config, args):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
config["num_epochs"] = 2
# New Code #
gradient_accumulation_steps = int(args.gradient_accumulation_steps)
# Initialize accelerator
accelerator = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
)
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs),
)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also do not currently support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(model):
output = model(**batch)
loss = output.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
# New Code #
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="The number of minibatches to be ran before gradients are accumulated.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
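# For intuition, the `accelerator.accumulate(model)` context used above is
# roughly equivalent to this manual pattern (a simplified sketch that ignores
# the gradient-synchronization skipping Accelerate also performs):
#
#   loss = model(**batch).loss / gradient_accumulation_steps
#   accelerator.backward(loss)
#   if (step + 1) % gradient_accumulation_steps == 0:
#       optimizer.step()
#       optimizer.zero_grad()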
| accelerate-wip-main | examples/by_feature/gradient_accumulation.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
# New Code #
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""
Creates a set of `DataLoader`s for the `glue` dataset,
using "bert-base-cased" as the tokenizer.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
config["num_epochs"] = 2
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=batch_size)
def inner_training_loop(batch_size):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(seed)
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs),
)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
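# For intuition, `find_executable_batch_size` re-runs the decorated function,
# halving `batch_size` each time a CUDA out-of-memory error is raised (a
# simplified description of the utility's retry loop):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def inner_training_loop(batch_size):  # tried with 128, then 64, 32, ...
#       ...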
| accelerate-wip-main | examples/by_feature/memory.py |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different from, but complementary
# to, gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""
Creates a set of `DataLoader`s for the `glue` dataset,
using "bert-base-cased" as the tokenizer.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
config["num_epochs"] = 2
# New Code #
gradient_accumulation_steps = int(args.gradient_accumulation_steps)
local_sgd_steps = int(args.local_sgd_steps)
# Initialize accelerator
accelerator = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
)
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs),
)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
with LocalSGD(
accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
) as local_sgd:
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also do not currently support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(model):
output = model(**batch)
loss = output.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
# New Code #
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="The number of minibatches to be ran before gradients are accumulated.",
)
parser.add_argument(
"--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
| accelerate-wip-main | examples/by_feature/local_sgd.py |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to combine both the gradient accumulation
# and automatic batch size finder utilities of Accelerate to perform
# automatic gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""
Creates a set of `DataLoader`s for the `glue` dataset,
using "bert-base-cased" as the tokenizer.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
        # When using mixed precision we want the sequence lengths to be round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
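        # Why multiples of 8/16: NVIDIA Tensor Cores process matrices in fixed-size tiles, so
        # sequence lengths that are multiples of 8 (fp16/bf16) or 16 (fp8) keep the matmuls on
        # the fast path instead of falling back to padded, partially-filled tiles.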
return tokenizer.pad(
examples,
padding="longest",
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
config["num_epochs"] = 2
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
observed_batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
# New Code #
# We use the `find_executable_batch_size` decorator, passing in the desired observed batch size
# to train on. If a CUDA OOM error occurs, it will retry this loop cutting the batch size in
# half each time. From this, we can calculate the number of gradient accumulation steps needed
# and modify the Accelerator object as a result
@find_executable_batch_size(starting_batch_size=int(observed_batch_size))
def inner_training_loop(batch_size):
        # Since we need to modify the `accelerator` object defined outside this function,
        # we bring it into the local scope with `nonlocal`
nonlocal accelerator
# We can calculate the number of gradient accumulation steps based on the current
# batch size vs the starting batch size
num_gradient_accumulation_steps = observed_batch_size // batch_size
# And then set it in the Accelerator directly:
accelerator.gradient_accumulation_steps = num_gradient_accumulation_steps
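        # Worked example (hypothetical numbers): with observed_batch_size=256, two OOM retries
        # would leave batch_size=64, giving num_gradient_accumulation_steps = 256 // 64 = 4, so
        # each optimizer step still reflects 256 samples.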
# Next we need to free all of the stored model references in the Accelerator each time
accelerator.free_memory()
        # And set the seed so our results are reproducible each reset
set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs),
)
# Prepare everything
        # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# And perform gradient accumulation
with accelerator.accumulate(model):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
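    # What `find_executable_batch_size` does: it first calls `inner_training_loop(batch_size=256)`;
    # if that raises a CUDA out-of-memory error it frees the cached memory and retries with half
    # the batch size (128, 64, ...) until a run succeeds, or raises once no batch size fits.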
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
# New Code #
    # We modify the starting batch size to be an observed batch size of 256, to guarantee an initial CUDA OOM
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 256}
training_function(config, args)
if __name__ == "__main__":
main()
| accelerate-wip-main | examples/by_feature/automatic_gradient_accumulation.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)
on a text file or a dataset without using HuggingFace Trainer.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=text-generation
"""
# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
import argparse
import json
import logging
import math
import os
import random
from itertools import chain
from pathlib import Path
import datasets
import torch
import transformers
from datasets import load_dataset
from huggingface_hub import Repository
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
SchedulerType,
default_data_collator,
get_scheduler,
)
from transformers.utils import get_full_repo_name
from transformers.utils.versions import require_version
from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import DummyOptim, DummyScheduler, set_seed
logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--validation_split_percentage",
default=5,
help="The percentage of the train set used as validation set in case there's no validation split",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=False,
)
parser.add_argument(
"--config_name",
type=str,
default=None,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--model_type",
type=str,
default=None,
help="Model type to use if training from scratch.",
choices=MODEL_TYPES,
)
parser.add_argument(
"--block_size",
type=int,
default=None,
help=(
"Optional input sequence length after tokenization. The training dataset will be truncated in block of"
" this size for training. Default to the model max input length for single sentence inputs (take into"
" account special tokens)."
),
)
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=None,
help="The number of processes to use for the preprocessing.",
)
parser.add_argument(
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
# New Code #
# Whether to load the best model at the end of training
parser.add_argument(
"--load_best_model",
action="store_true",
help="Whether to load the best model at the end of training",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to enable experiment trackers for logging.",
)
parser.add_argument(
"--report_to",
type=str,
default="all",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
"Only applicable when `--with_tracking` is passed."
),
)
args = parser.parse_args()
# Sanity checks
if args.dataset_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file."
if args.push_to_hub:
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
return args
# New Code #
def checkpoint_model(checkpoint_folder, ckpt_id, model, epoch, last_global_step, **kwargs):
"""Utility function for checkpointing model + optimizer dictionaries
The main purpose for this is to be able to resume training from that instant again
"""
checkpoint_state_dict = {
"epoch": epoch,
"last_global_step": last_global_step,
}
# Add extra kwargs too
checkpoint_state_dict.update(kwargs)
success = model.save_checkpoint(checkpoint_folder, ckpt_id, checkpoint_state_dict)
status_msg = f"checkpointing: checkpoint_folder={checkpoint_folder}, ckpt_id={ckpt_id}"
if success:
logging.info(f"Success {status_msg}")
else:
logging.warning(f"Failure {status_msg}")
return
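# Note: `save_checkpoint` / `load_checkpoint` used by the helpers above and below are methods of
# the DeepSpeed engine, so these utilities assume `model` is the DeepSpeed-wrapped model returned
# by `accelerator.prepare` when a DeepSpeed config is in use.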
# New Code #
def load_training_checkpoint(model, load_dir, tag=None, **kwargs):
"""Utility function for checkpointing model + optimizer dictionaries
The main purpose for this is to be able to resume training from that instant again
"""
_, checkpoint_state_dict = model.load_checkpoint(load_dir, tag=tag, **kwargs)
epoch = checkpoint_state_dict["epoch"]
last_global_step = checkpoint_state_dict["last_global_step"]
del checkpoint_state_dict
return (epoch, last_global_step)
# New Code #
def evaluate(args, model, eval_dataloader, accelerator, eval_dataset):
model.eval()
losses = []
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(**batch)
loss = outputs.loss
losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))
losses = torch.cat(losses)
try:
eval_loss = torch.mean(losses)
perplexity = math.exp(eval_loss)
except OverflowError:
perplexity = float("inf")
return perplexity, eval_loss
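# Perplexity is exp(mean token-level cross-entropy): e.g. a mean eval loss of 2.3 corresponds to
# a perplexity of exp(2.3) ≈ 10. The OverflowError guard covers early training, where the loss can
# still be large enough (around 710) for `math.exp` to overflow.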
def main():
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
# in the environment
accelerator = (
Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator()
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
gitignore.write("step_*\n")
if "epoch_*" not in gitignore:
gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[:{args.validation_split_percentage}%]",
)
raw_datasets["train"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[{args.validation_split_percentage}%:]",
)
else:
data_files = {}
dataset_args = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks
raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{args.validation_split_percentage}%]",
**dataset_args,
)
raw_datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{args.validation_split_percentage}%:]",
**dataset_args,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
with accelerator.main_process_first():
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on dataset",
)
if args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
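    # Worked example: if one mapped batch of tokenized texts concatenates to 2,500 tokens and
    # block_size is 1,024, `group_texts` emits two blocks of 1,024 tokens and drops the trailing
    # 452 tokens (2,500 - 2 * 1,024).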
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
with accelerator.main_process_first():
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=args.preprocessing_num_workers,
load_from_cache_file=not args.overwrite_cache,
desc=f"Grouping texts in chunks of {block_size}",
)
train_dataset = lm_datasets["train"]
eval_dataset = lm_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size
)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
# New Code #
    # Creates a DummyOptim if an `optimizer` section is specified in the DeepSpeed config file, else creates an AdamW optimizer
optimizer_cls = (
torch.optim.AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)
# On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
if accelerator.distributed_type == DistributedType.TPU:
model.tie_weights()
# Scheduler and math around the number of training steps.
# New Code
# Get gradient accumulation steps from deepspeed config if available
if accelerator.state.deepspeed_plugin is not None:
args.gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# New Code #
    # Creates a DummyScheduler if a `scheduler` section is specified in the DeepSpeed config file, else creates a scheduler of type `args.lr_scheduler_type`
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
else:
lr_scheduler = DummyScheduler(
optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps
)
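    # In short: `DummyOptim` / `DummyScheduler` are placeholders. When the DeepSpeed config file
    # already defines an `optimizer` / `scheduler` section, the real objects are built by DeepSpeed
    # inside `accelerator.prepare`, and the dummies merely carry the hyper-parameters through.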
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Figure out how many steps we should save the Accelerator states
if hasattr(args.checkpointing_steps, "isdigit"):
checkpointing_steps = args.checkpointing_steps
if args.checkpointing_steps.isdigit():
checkpointing_steps = int(args.checkpointing_steps)
else:
checkpointing_steps = None
# We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
if args.with_tracking:
experiment_config = vars(args)
# TensorBoard cannot log Enums, need the raw value
experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
accelerator.init_trackers("clm_no_trainer", experiment_config)
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
starting_epoch = 0
best_metric = None
best_metric_checkpoint = None
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
# New Code #
# Loads the DeepSpeed checkpoint from the specified path
_, last_global_step = load_training_checkpoint(
model,
args.resume_from_checkpoint,
**{"load_optimizer_states": True, "load_lr_scheduler_states": True},
)
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
resume_step = last_global_step
starting_epoch = resume_step // len(train_dataloader)
resume_step -= starting_epoch * len(train_dataloader)
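        # Worked example (hypothetical numbers): with last_global_step=1200 and 500 batches per
        # epoch, starting_epoch = 1200 // 500 = 2 and resume_step = 1200 - 2 * 500 = 200, so
        # training resumes at epoch 2, batch 200.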
for epoch in range(starting_epoch, args.num_train_epochs):
model.train()
if args.with_tracking:
total_loss = 0
for step, batch in enumerate(train_dataloader):
# We need to skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == starting_epoch:
if resume_step is not None and step < resume_step:
completed_steps += 1
continue
outputs = model(**batch)
loss = outputs.loss
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if (step + 1) % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
output_dir = f"step_{completed_steps }"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= args.max_train_steps:
break
perplexity, eval_loss = evaluate(args, model, eval_dataloader, accelerator, eval_dataset)
logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")
if args.with_tracking:
accelerator.log(
{
"perplexity": perplexity,
"eval_loss": eval_loss,
"train_loss": total_loss.item() / len(train_dataloader),
"epoch": epoch,
"step": completed_steps,
},
step=completed_steps,
)
# New Code #
# Save the DeepSpeed checkpoint to the specified path
checkpoint_model(args.output_dir, epoch, model, epoch, completed_steps)
# New Code #
# Tracks the best checkpoint and best metric
if best_metric is None or best_metric > perplexity:
best_metric = perplexity
best_metric_checkpoint = os.path.join(args.output_dir, str(epoch))
accelerator.print(f"New best metric: {best_metric} at epoch {epoch}")
accelerator.print(f"best_metric_checkpoint: {best_metric_checkpoint}")
# New Code #
# Loads the best checkpoint after the training is finished
if args.load_best_model:
_, last_global_step = load_training_checkpoint(
model,
"/".join(best_metric_checkpoint.split("/")[:-1]),
tag=best_metric_checkpoint.split("/")[-1],
**{"load_optimizer_states": True, "load_lr_scheduler_states": True},
)
# New Code #
# Evaluates using the best checkpoint
perplexity, eval_loss = evaluate(args, model, eval_dataloader, accelerator, eval_dataset)
logger.info(f"Best model metrics: perplexity: {perplexity} eval_loss: {eval_loss}")
if perplexity != best_metric:
raise AssertionError(
f"Best metric {best_metric} does not match the metric {perplexity} of the loaded best model."
)
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
# New Code #
# Saves the whole/unpartitioned fp16 model when in ZeRO Stage-3 to the output directory if
# `stage3_gather_16bit_weights_on_model_save` is True in DeepSpeed Config file or
# `zero3_save_16bit_model` is True in DeepSpeed Plugin.
# For Zero Stages 1 and 2, models are saved as usual in the output directory.
# The model name saved is `pytorch_model.bin`
unwrapped_model.save_pretrained(
args.output_dir,
is_main_process=accelerator.is_main_process,
save_function=accelerator.save,
state_dict=accelerator.get_state_dict(model),
)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
json.dump({"perplexity": perplexity, "eval_loss": eval_loss.item()}, f)
if __name__ == "__main__":
main()
| accelerate-wip-main | examples/by_feature/deepspeed_with_config_support.py |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""
Creates a set of `DataLoader`s for the `glue` dataset,
using "bert-base-cased" as the tokenizer.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want the sequence lengths to be round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
config["num_epochs"] = 2
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
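    # Worked example (hypothetical): if config["batch_size"] were 64 on a GPU, we would get
    # gradient_accumulation_steps = 64 // 16 = 4 with a per-step batch of 16, so each optimizer
    # update would still reflect 64 samples.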
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
)
# Prepare everything
    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
samples_seen = 0
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather((predictions, batch["labels"]))
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(eval_dataloader) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
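            # Worked example (hypothetical numbers): with 1,001 eval samples on 8 processes, the
            # sampler pads to 1,008 so every process sees 126 samples; the final gathered batch then
            # holds 240 predictions but only 1001 - 768 = 233 real ones, so we keep the first 233
            # and drop the 7 padded duplicates.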
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
| accelerate-wip-main | examples/by_feature/multi_process_metrics.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)
on a text file or a dataset without using HuggingFace Trainer.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=text-generation
"""
# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
import argparse
import json
import logging
import math
import os
import random
from itertools import chain
from pathlib import Path
import datasets
import torch
import transformers
from datasets import load_dataset
from huggingface_hub import Repository
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
SchedulerType,
default_data_collator,
get_scheduler,
)
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version
from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import MegatronLMDummyScheduler, set_seed
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.23.0.dev0")
logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--validation_split_percentage",
default=5,
help="The percentage of the train set used as validation set in case there's no validation split",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=False,
)
parser.add_argument(
"--config_name",
type=str,
default=None,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--model_type",
type=str,
default=None,
help="Model type to use if training from scratch.",
choices=MODEL_TYPES,
)
parser.add_argument(
"--block_size",
type=int,
default=None,
help=(
"Optional input sequence length after tokenization. The training dataset will be truncated in block of"
" this size for training. Default to the model max input length for single sentence inputs (take into"
" account special tokens)."
),
)
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=None,
help="The number of processes to use for the preprocessing.",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to enable experiment trackers for logging.",
)
parser.add_argument(
"--report_to",
type=str,
default="all",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
"Only applicable when `--with_tracking` is passed."
),
)
args = parser.parse_args()
# Sanity checks
if args.dataset_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file."
if args.push_to_hub:
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
return args
def main():
args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_clm_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
# in the environment
accelerator_log_kwargs = {}
if args.with_tracking:
accelerator_log_kwargs["log_with"] = args.report_to
accelerator_log_kwargs["logging_dir"] = args.output_dir
accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
gitignore.write("step_*\n")
if "epoch_*" not in gitignore:
gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[:{args.validation_split_percentage}%]",
)
raw_datasets["train"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[{args.validation_split_percentage}%:]",
)
else:
data_files = {}
dataset_args = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks
raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{args.validation_split_percentage}%]",
**dataset_args,
)
raw_datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{args.validation_split_percentage}%:]",
**dataset_args,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
with accelerator.main_process_first():
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on dataset",
)
if args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
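    # e.g. with block_size=1024, a batch totalling 2,500 tokens yields two full blocks (2,048 tokens) and drops the trailing 452.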
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder; we could add padding instead if the model supported it. You can
        # customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
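        # For causal LM, labels are a copy of the inputs; the model shifts them by one position internally.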
result["labels"] = result["input_ids"].copy()
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
with accelerator.main_process_first():
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=args.preprocessing_num_workers,
load_from_cache_file=not args.overwrite_cache,
desc=f"Grouping texts in chunks of {block_size}",
)
train_dataset = lm_datasets["train"]
eval_dataset = lm_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size
)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "layer_norm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
# New Code
# For Megatron-LM, we need to use `MegatronLMDummyScheduler` instead of regular schedulers
if accelerator.distributed_type == DistributedType.MEGATRON_LM:
lr_scheduler = MegatronLMDummyScheduler(
optimizer=optimizer,
total_num_steps=args.max_train_steps,
warmup_num_steps=args.num_warmup_steps,
)
else:
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
if accelerator.distributed_type == DistributedType.TPU:
model.tie_weights()
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# Figure out how many steps we should save the Accelerator states
checkpointing_steps = args.checkpointing_steps
if checkpointing_steps is not None and checkpointing_steps.isdigit():
checkpointing_steps = int(checkpointing_steps)
# We need to initialize the trackers we use, and also store our configuration.
    # The trackers are initialized automatically on the main process.
if args.with_tracking:
experiment_config = vars(args)
# TensorBoard cannot log Enums, need the raw value
experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
accelerator.init_trackers("clm_no_trainer", experiment_config)
# Train!
# New Code
# For Megatron-LM, we need to get `global_batch_size` from megatron_lm_plugin
# as it handles the specifics related to data parallelism, tensor model parallelism and pipeline parallelism
if accelerator.distributed_type == DistributedType.MEGATRON_LM:
total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size
else:
total_batch_size = (
args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
accelerator.load_state(args.resume_from_checkpoint)
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
resume_step = None
else:
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps
starting_epoch = resume_step // len(train_dataloader)
resume_step -= starting_epoch * len(train_dataloader)
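            # resume_step is now the batch index within starting_epoch at which training resumes.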
# update the progress_bar if load from checkpoint
progress_bar.update(starting_epoch * num_update_steps_per_epoch)
completed_steps = starting_epoch * num_update_steps_per_epoch
for epoch in range(starting_epoch, args.num_train_epochs):
model.train()
if args.with_tracking:
total_loss = 0
for step, batch in enumerate(train_dataloader):
# We need to skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == starting_epoch:
if resume_step is not None and step < resume_step:
if step % args.gradient_accumulation_steps == 0:
progress_bar.update(1)
completed_steps += 1
continue
with accelerator.accumulate(model):
outputs = model(**batch)
loss = outputs.loss
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
output_dir = f"step_{completed_steps }"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= args.max_train_steps:
break
model.eval()
losses = []
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(**batch)
loss = outputs.loss
# New Code
# For Megatron-LM, the losses are already averaged across the data parallel group
if accelerator.distributed_type == DistributedType.MEGATRON_LM:
losses.append(loss)
else:
losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))
try:
if accelerator.distributed_type == DistributedType.MEGATRON_LM:
losses = torch.tensor(losses)
else:
losses = torch.cat(losses)
eval_loss = torch.mean(losses)
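            # Perplexity is exp(mean cross-entropy); math.exp raises OverflowError for very large losses.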
perplexity = math.exp(eval_loss)
except OverflowError:
perplexity = float("inf")
logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")
if args.with_tracking:
accelerator.log(
{
"perplexity": perplexity,
"eval_loss": eval_loss,
"train_loss": total_loss.item() / len(train_dataloader),
"epoch": epoch,
"step": completed_steps,
},
step=completed_steps,
)
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.checkpointing_steps == "epoch":
output_dir = f"epoch_{epoch}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
    # This causes an issue with Megatron-LM when using `wandb` at the end of the main function.
    # Everything works fine in spite of commenting this out (wandb finishes/closes the run without error).
# if args.with_tracking:
# accelerator.end_training()
if args.output_dir is not None:
accelerator.wait_for_everyone()
# New Code
# For Megatron-LM, we need to save the model using `accelerator.save_state`
if accelerator.distributed_type == DistributedType.MEGATRON_LM:
accelerator.save_state(args.output_dir)
else:
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
json.dump({"perplexity": perplexity}, f)
if __name__ == "__main__":
main()
| accelerate-wip-main | examples/by_feature/megatron_lm_gpt_pretraining.py |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import os
import evaluate
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
# - FSDP
#
# This example also demonstrates the checkpointing and sharding capabilities
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
# New Code #
# Converting Bytes to Megabytes
def b2mb(x):
return int(x / 2**20)
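# e.g. b2mb(5 * 2**20) == 5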
# New Code #
# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
def __enter__(self):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
self.begin = torch.cuda.memory_allocated()
return self
def __exit__(self, *exc):
gc.collect()
torch.cuda.empty_cache()
self.end = torch.cuda.memory_allocated()
self.peak = torch.cuda.max_memory_allocated()
self.used = b2mb(self.end - self.begin)
self.peaked = b2mb(self.peak - self.begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
config["num_epochs"] = 2
# Initialize accelerator
if args.with_tracking:
accelerator = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="wandb", logging_dir=args.logging_dir
)
else:
accelerator = Accelerator()
accelerator.print(accelerator.distributed_type)
if hasattr(args.checkpointing_steps, "isdigit"):
if args.checkpointing_steps == "epoch":
checkpointing_steps = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
checkpointing_steps = int(args.checkpointing_steps)
else:
raise ValueError(
f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
)
else:
checkpointing_steps = None
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
experiment_config = vars(args)
accelerator.init_trackers("fsdp_glue_no_trainer", experiment_config)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
datasets = load_dataset("glue", "mrpc")
metric = evaluate.load("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
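    # The effective batch size is preserved: MAX_GPU_BATCH_SIZE per forward pass x gradient_accumulation_steps.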
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we pad to a multiple of 8 (16 for fp8) for efficient tensor-core use
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
set_seed(seed)
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, return_dict=True)
# New Code #
# For FSDP feature, it is highly recommended and efficient to prepare the model before creating optimizer
model = accelerator.prepare(model)
accelerator.print(model)
# Instantiate optimizer
# New Code #
# For FSDP feature, at present it doesn't support multiple parameter groups,
# so we need to create a single parameter group for the whole model
optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr, weight_decay=2e-4)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=10,
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
)
# New Code #
# For FSDP feature, prepare everything except the model as we have already prepared the model
# before creating the optimizer
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
overall_step = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
accelerator.load_state(args.resume_from_checkpoint)
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
num_epochs -= int(training_difference.replace("epoch_", ""))
resume_step = None
else:
resume_step = int(training_difference.replace("step_", ""))
num_epochs -= resume_step // len(train_dataloader)
# If resuming by step, we also need to know exactly how far into the DataLoader we went
resume_step = (num_epochs * len(train_dataloader)) - resume_step
# Now we train the model
for epoch in range(num_epochs):
# New Code #
# context manager to track the peak memory usage during the training epoch
with TorchTracemalloc() as tracemalloc:
model.train()
if args.with_tracking:
total_loss = 0
for step, batch in enumerate(train_dataloader):
# We need to skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == 0:
if resume_step is not None and step < resume_step:
                        overall_step += 1
                        continue
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# accelerator.print(lr_scheduler.get_lr())
overall_step += 1
if isinstance(checkpointing_steps, int):
output_dir = f"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
# New Code #
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + b2mb(tracemalloc.begin)
)
)
# Logging the peak memory usage of the GPU to the tracker
if args.with_tracking:
accelerator.log(
{
"train_total_peak_memory": tracemalloc.peaked + b2mb(tracemalloc.begin),
},
step=epoch,
)
# New Code #
# context manager to track the peak memory usage during the evaluation
with TorchTracemalloc() as tracemalloc:
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
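            # gather_for_metrics gathers across processes and drops the duplicate samples added to even out batches.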
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(train_dataloader),
},
step=epoch,
)
if checkpointing_steps == "epoch":
output_dir = f"epoch_{epoch}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
# New Code #
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the eval : {}".format(b2mb(tracemalloc.begin)))
accelerator.print("Memory consumed at the end of the eval (end-begin): {}".format(tracemalloc.used))
accelerator.print("Peak Memory consumed during the eval (max-begin): {}".format(tracemalloc.peaked))
accelerator.print(
"Total Peak Memory consumed during the eval (max): {}".format(tracemalloc.peaked + b2mb(tracemalloc.begin))
)
# Logging the peak memory usage of the GPU to the tracker
if args.with_tracking:
accelerator.log(
{
"eval_total_peak_memory": tracemalloc.peaked + b2mb(tracemalloc.begin),
},
step=epoch,
)
if args.with_tracking:
accelerator.end_training()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to load in all available experiment trackers from the environment and use them for logging.",
)
parser.add_argument(
"--output_dir",
type=str,
default=".",
help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help="Location on where to store experiment tracking logs`",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 1, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
| accelerate-wip-main | examples/by_feature/fsdp_with_peak_mem_tracking.py |