puccife committed
Commit
b05a8bf
1 Parent(s): db97750

End of training

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. .gitattributes +4 -0
  2. .gitignore +2 -0
  3. README.md +21 -0
  4. checkpoint-1000/latest +1 -0
  5. checkpoint-1000/pytorch_model/mp_rank_00_model_states.pt +3 -0
  6. checkpoint-1000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  7. checkpoint-1000/random_states_0.pkl +3 -0
  8. checkpoint-1000/scheduler.bin +3 -0
  9. checkpoint-1000/zero_to_fp32.py +483 -0
  10. checkpoint-10000/latest +1 -0
  11. checkpoint-10000/pytorch_model/mp_rank_00_model_states.pt +3 -0
  12. checkpoint-10000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  13. checkpoint-10000/random_states_0.pkl +3 -0
  14. checkpoint-10000/scheduler.bin +3 -0
  15. checkpoint-10000/zero_to_fp32.py +483 -0
  16. checkpoint-10500/latest +1 -0
  17. checkpoint-10500/pytorch_model/mp_rank_00_model_states.pt +3 -0
  18. checkpoint-10500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  19. checkpoint-10500/random_states_0.pkl +3 -0
  20. checkpoint-10500/scheduler.bin +3 -0
  21. checkpoint-10500/zero_to_fp32.py +483 -0
  22. checkpoint-11000/latest +1 -0
  23. checkpoint-11000/pytorch_model/mp_rank_00_model_states.pt +3 -0
  24. checkpoint-11000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  25. checkpoint-11000/random_states_0.pkl +3 -0
  26. checkpoint-11000/scheduler.bin +3 -0
  27. checkpoint-11000/zero_to_fp32.py +483 -0
  28. checkpoint-11500/latest +1 -0
  29. checkpoint-11500/pytorch_model/mp_rank_00_model_states.pt +3 -0
  30. checkpoint-11500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  31. checkpoint-11500/random_states_0.pkl +3 -0
  32. checkpoint-11500/scheduler.bin +3 -0
  33. checkpoint-11500/zero_to_fp32.py +483 -0
  34. checkpoint-12000/latest +1 -0
  35. checkpoint-12000/pytorch_model/mp_rank_00_model_states.pt +3 -0
  36. checkpoint-12000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  37. checkpoint-12000/random_states_0.pkl +3 -0
  38. checkpoint-12000/scheduler.bin +3 -0
  39. checkpoint-12000/zero_to_fp32.py +483 -0
  40. checkpoint-12500/latest +1 -0
  41. checkpoint-12500/pytorch_model/mp_rank_00_model_states.pt +3 -0
  42. checkpoint-12500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  43. checkpoint-12500/random_states_0.pkl +3 -0
  44. checkpoint-12500/scheduler.bin +3 -0
  45. checkpoint-12500/zero_to_fp32.py +483 -0
  46. checkpoint-13000/latest +1 -0
  47. checkpoint-13000/pytorch_model/mp_rank_00_model_states.pt +3 -0
  48. checkpoint-13000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  49. checkpoint-13000/random_states_0.pkl +3 -0
  50. checkpoint-13000/scheduler.bin +3 -0
.gitattributes CHANGED
@@ -32,3 +32,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ image_0.png filter=lfs diff=lfs merge=lfs -text
+ image_1.png filter=lfs diff=lfs merge=lfs -text
+ image_2.png filter=lfs diff=lfs merge=lfs -text
+ image_3.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
+ step_*
+ epoch_*
README.md ADDED
@@ -0,0 +1,21 @@
+
+ ---
+ license: creativeml-openrail-m
+ base_model: runwayml/stable-diffusion-v1-5
+ tags:
+ - stable-diffusion
+ - stable-diffusion-diffusers
+ - text-to-image
+ - diffusers
+ - lora
+ inference: true
+ ---
+
+ # LoRA text2image fine-tuning - https://huggingface.co/puccife/pokemon-lora
+ These are LoRA adaptation weights for runwayml/stable-diffusion-v1-5, fine-tuned on the lambdalabs/pokemon-blip-captions dataset. Some example images are shown below.
+
+ ![img_0](./image_0.png)
+ ![img_1](./image_1.png)
+ ![img_2](./image_2.png)
+ ![img_3](./image_3.png)
+
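For convenience, a minimal inference sketch for these weights with diffusers; the prompt, dtype, and device are illustrative, and the exact LoRA-loading call depends on the installed diffusers version:

import torch
from diffusers import StableDiffusionPipeline

# Load the base model named in the card, then attach the LoRA attention weights
# from this repository (repo id taken from the heading above).
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
pipe.unet.load_attn_procs("puccife/pokemon-lora")

# Illustrative prompt in the spirit of the pokemon-blip-captions dataset.
image = pipe("a cartoon drawing of a green turtle pokemon",
             num_inference_steps=30).images[0]
image.save("sample.png")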
checkpoint-1000/latest ADDED
@@ -0,0 +1 @@
+ pytorch_model
checkpoint-1000/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ea8554d9a5721b9e4c5d7725baca320fa1cdf3e4b387b2decb11510103d8bdc
+ size 1658715
checkpoint-1000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:073f0ce839e8f1fa58b3503883ad29b3df483231d06a45787543c8615d7a2bc2
+ size 9587033
checkpoint-1000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:486b3958ddef4c2ab6047a13e016ec3281cb9bac573f5a581c066308a3dc6325
+ size 15755
checkpoint-1000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6fc56ed5838dfc2c048c4167b1c493d0ef89be43cb4db1f937708dc3f5f82d4
+ size 563
checkpoint-1000/zero_to_fp32.py ADDED
@@ -0,0 +1,483 @@
+ #!/usr/bin/env python
+ '''Copyright The Microsoft DeepSpeed Team'''
+
+ # This script extracts fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints. It gets
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+ # application.
+ #
+ # example: python zero_to_fp32.py . pytorch_model.bin
+
+ import argparse
+ import torch
+ import glob
+ import math
+ import os
+ import re
+ from collections import OrderedDict
+
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+ # DeepSpeed data structures it has to be available in the current python environment.
+ from deepspeed.utils import logger
+ from deepspeed.checkpoint.constants import (DS_VERSION,
+                                             OPTIMIZER_STATE_DICT,
+                                             SINGLE_PARTITION_OF_FP32_GROUPS,
+                                             FP32_FLAT_GROUPS,
+                                             ZERO_STAGE,
+                                             PARTITION_COUNT,
+                                             PARAM_SHAPES,
+                                             BUFFER_NAMES)
+
+ debug = 0
+
+ # load to cpu
+ device = torch.device('cpu')
+
+
+ def atoi(text):
+     return int(text) if text.isdigit() else text
+
+
+ def natural_keys(text):
+     '''
+     alist.sort(key=natural_keys) sorts in human order
+     http://nedbatchelder.com/blog/200712/human_sorting.html
+     (See Toothy's implementation in the comments)
+     '''
+     return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+ def get_model_state_file(checkpoint_dir, zero_stage):
+     if not os.path.isdir(checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+     # there should be only one file
+     if zero_stage == 2:
+         file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+     elif zero_stage == 3:
+         file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+     if not os.path.exists(file):
+         raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+     return file
+
+
+ def get_optim_files(checkpoint_dir):
+     # XXX: need to test that this simple glob rule works for multi-node setup too
+     optim_files = sorted(glob.glob(os.path.join(checkpoint_dir,
+                                                 "*_optim_states.pt")),
+                          key=natural_keys)
+
+     if len(optim_files) == 0:
+         raise FileNotFoundError(
+             f"can't find '*_optim_states.pt' files in directory '{checkpoint_dir}'")
+
+     return optim_files
+
+
+ def parse_model_state(file):
+     state_dict = torch.load(file, map_location=device)
+
+     if BUFFER_NAMES not in state_dict:
+         raise ValueError(f"{file} is not a model state checkpoint")
+     buffer_names = state_dict[BUFFER_NAMES]
+     if debug:
+         print("Found buffers:", buffer_names)
+
+     # recover just the buffers while restoring them to fp32 if they were saved in fp16
+     buffers = {
+         k: v.float()
+         for k,
+         v in state_dict["module"].items() if k in buffer_names
+     }
+     param_shapes = state_dict[PARAM_SHAPES]
+
+     ds_version = state_dict.get(DS_VERSION, None)
+
+     return buffers, param_shapes, ds_version
+
+
+ def parse_optim_states(files, ds_checkpoint_dir):
+
+     total_files = len(files)
+     state_dicts = []
+     for f in files:
+         state_dicts.append(torch.load(f, map_location=device))
+
+     if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+         raise ValueError(f"{files[0]} is not a zero checkpoint")
+     zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+     world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+     # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+     # parameters can be different from data parallelism for non-expert parameters. So we can just
+     # use the max of the partition_count to get the dp world_size.
+
+     if type(world_size) is list:
+         world_size = max(world_size)
+
+     if world_size != total_files:
+         raise ValueError(
+             f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+             "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+         )
+
+     # the groups are named differently in each stage
+     if zero_stage == 2:
+         fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+     elif zero_stage == 3:
+         fp32_groups_key = FP32_FLAT_GROUPS
+     else:
+         raise ValueError(f"unknown zero stage {zero_stage}")
+
+     if zero_stage == 2:
+         fp32_flat_groups = [
+             state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key]
+             for i in range(len(state_dicts))
+         ]
+     elif zero_stage == 3:
+         # if there is more than one param group, there will be multiple flattened tensors - one
+         # flattened tensor per group - for simplicity merge them into a single tensor
+         #
+         # XXX: could make the script more memory efficient for when there are multiple groups - it
+         # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+         fp32_flat_groups = [
+             torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key],
+                       0) for i in range(len(state_dicts))
+         ]
+
+     return zero_stage, world_size, fp32_flat_groups
+
+
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+     """
+     Returns fp32 state_dict reconstructed from ds checkpoint
+
+     Args:
+         - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+     """
+     print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+     optim_files = get_optim_files(ds_checkpoint_dir)
+     zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+     print(
+         f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+     model_file = get_model_state_file(ds_checkpoint_dir, zero_stage)
+     buffers, param_shapes, ds_version = parse_model_state(model_file)
+     print(f'Parsing checkpoint created by deepspeed=={ds_version}')
+
+     if zero_stage == 2:
+         return _get_fp32_state_dict_from_zero2_checkpoint(world_size,
+                                                           param_shapes,
+                                                           fp32_flat_groups,
+                                                           buffers)
+     elif zero_stage == 3:
+         return _get_fp32_state_dict_from_zero3_checkpoint(world_size,
+                                                           param_shapes,
+                                                           fp32_flat_groups,
+                                                           buffers)
+
+
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size,
+                                                param_shapes,
+                                                fp32_flat_groups,
+                                                buffers):
+
+     # Reconstruction protocol:
+     #
+     # XXX: document this
+
+     if debug:
+         for i in range(world_size):
+             for j in range(len(fp32_flat_groups[0])):
+                 print(
+                     f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+     # XXX: memory usage doubles here (zero2)
+     num_param_groups = len(fp32_flat_groups[0])
+     merged_single_partition_of_fp32_groups = []
+     for i in range(num_param_groups):
+         merged_partitions = [sd[i] for sd in fp32_flat_groups]
+         full_single_fp32_vector = torch.cat(merged_partitions, 0)
+         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+     avail_numel = sum([
+         full_single_fp32_vector.numel()
+         for full_single_fp32_vector in merged_single_partition_of_fp32_groups
+     ])
+
+     if debug:
+         wanted_params = sum([len(shapes) for shapes in param_shapes])
+         wanted_numel = sum(
+             [sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+         # not asserting if there is a mismatch due to possible padding
+         print(f"Have {avail_numel} numels to process.")
+         print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     state_dict = OrderedDict()
+
+     # buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     total_numel = 0
+     total_params = 0
+     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+         offset = 0
+         avail_numel = full_single_fp32_vector.numel()
+         for name, shape in shapes.items():
+
+             unpartitioned_numel = shape.numel()
+             total_numel += unpartitioned_numel
+             total_params += 1
+
+             if debug:
+                 print(
+                     f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} "
+                 )
+             state_dict[name] = full_single_fp32_vector.narrow(
+                 0,
+                 offset,
+                 unpartitioned_numel).view(shape)
+             offset += unpartitioned_numel
+
+         # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+         # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+         # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+         # live optimizer object, so we are checking that the numbers are within the right range
+         align_to = 2 * world_size
+
+         def zero2_align(x):
+             return align_to * math.ceil(x / align_to)
+
+         if debug:
+             print(f"original offset={offset}, avail_numel={avail_numel}")
+
+         offset = zero2_align(offset)
+         avail_numel = zero2_align(avail_numel)
+
+         if debug:
+             print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+         # Sanity check
+         if offset != avail_numel:
+             raise ValueError(
+                 f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(
+         f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
+     )
+
+     return state_dict
+
+
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+     remainder = unpartitioned_numel % world_size
+     padding_numel = (world_size - remainder) if remainder else 0
+     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+     return partitioned_numel, padding_numel
+
+
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size,
+                                                param_shapes,
+                                                fp32_flat_groups,
+                                                buffers):
+
+     # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+     # param, re-consolidating each param, while dealing with padding if any
+
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     # merge list of dicts, preserving order
+     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+     if debug:
+         for i in range(world_size):
+             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+     wanted_params = len(param_shapes)
+     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+     # not asserting if there is a mismatch due to possible padding
+     print(f"Have {avail_numel} numels to process.")
+     print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     state_dict = OrderedDict()
+
+     # buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     offset = 0
+     total_numel = 0
+     total_params = 0
+     for name, shape in param_shapes.items():
+
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+         total_params += 1
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"{total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+         # XXX: memory usage doubles here
+         state_dict[name] = torch.cat(
+             tuple(fp32_flat_groups[i].narrow(0,
+                                              offset,
+                                              partitioned_numel)
+                   for i in range(world_size)),
+             0).narrow(0,
+                       0,
+                       unpartitioned_numel).view(shape)
+         offset += partitioned_numel
+
+     offset *= world_size
+
+     # Sanity check
+     if offset != avail_numel:
+         raise ValueError(
+             f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(
+         f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
+     )
+
+     return state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+     you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+     the checkpoint.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+         model = model.cpu() # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+     """
+
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+     print(f"Saving fp32 state dict to {output_file}")
+     torch.save(state_dict, output_file)
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model to cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info(f"Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info(f"Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "checkpoint_dir",
+         type=str,
+         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument(
+         "output_file",
+         type=str,
+         help=
+         "path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)"
+     )
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
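A minimal sketch of how this bundled script might be applied to one of the checkpoints in this commit (the output filename is illustrative; it assumes deepspeed and torch are installed and that checkpoint-1000/ is on sys.path so the script can be imported):

# Equivalent to: python checkpoint-1000/zero_to_fp32.py checkpoint-1000 pytorch_model.bin
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

# Reads checkpoint-1000/latest ("pytorch_model") to locate the shard folder, then writes
# a single consolidated fp32 state_dict that can be loaded without DeepSpeed.
convert_zero_checkpoint_to_fp32_state_dict("checkpoint-1000", "pytorch_model.bin")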
checkpoint-10000/latest ADDED
@@ -0,0 +1 @@
+ pytorch_model
checkpoint-10000/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5f10b166783c2641e5b09f9522000a7c3d7ada82373b4596ed4b675e1689b84
+ size 1658715
checkpoint-10000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0685e1d849ecd60892d4d041c2b804730d5e55b1a56ac2890d56478698572b72
+ size 9587033
checkpoint-10000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f28d81a443fff379339ae9953859ce543669621e9af72a6005daf8539b4da8f
+ size 15755
checkpoint-10000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7df8009b1d7a590cdad565d258616d6ce9236a93375c108feb6bee1096afbe2
+ size 563
checkpoint-10000/zero_to_fp32.py ADDED
@@ -0,0 +1,483 @@
[contents identical to checkpoint-1000/zero_to_fp32.py above]
checkpoint-10500/latest ADDED
@@ -0,0 +1 @@
+ pytorch_model
checkpoint-10500/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0de6e7f373d558adf41859c73beed5bf46a459b1836b7e47911c26eb1e0101a
+ size 1658715
checkpoint-10500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c49cedf239e4c67ae7f161fc5c4e380dfb5cab32b82acee1b5e1281f1d89217d
+ size 9587033
checkpoint-10500/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1a777e1ef5736e0f9ae909fe479469924ab00773e092b0a69dfd3392777e350
+ size 15755
checkpoint-10500/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7a5f01e08e0842669df5a6731ac935be1d37f50d5b761afdae51da6ed8196b3
+ size 563
checkpoint-10500/zero_to_fp32.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ '''Copyright The Microsoft DeepSpeed Team'''
3
+
4
+ # This script extracts fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints. It gets
5
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
6
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
7
+ # application.
8
+ #
9
+ # example: python zero_to_fp32.py . pytorch_model.bin
10
+
11
+ import argparse
12
+ import torch
13
+ import glob
14
+ import math
15
+ import os
16
+ import re
17
+ from collections import OrderedDict
18
+
19
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
20
+ # DeepSpeed data structures it has to be available in the current python environment.
21
+ from deepspeed.utils import logger
22
+ from deepspeed.checkpoint.constants import (DS_VERSION,
23
+ OPTIMIZER_STATE_DICT,
24
+ SINGLE_PARTITION_OF_FP32_GROUPS,
25
+ FP32_FLAT_GROUPS,
26
+ ZERO_STAGE,
27
+ PARTITION_COUNT,
28
+ PARAM_SHAPES,
29
+ BUFFER_NAMES)
30
+
31
+ debug = 0
32
+
33
+ # load to cpu
34
+ device = torch.device('cpu')
35
+
36
+
37
+ def atoi(text):
38
+ return int(text) if text.isdigit() else text
39
+
40
+
41
+ def natural_keys(text):
42
+ '''
43
+ alist.sort(key=natural_keys) sorts in human order
44
+ http://nedbatchelder.com/blog/200712/human_sorting.html
45
+ (See Toothy's implementation in the comments)
46
+ '''
47
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
48
+
49
+
50
+ def get_model_state_file(checkpoint_dir, zero_stage):
51
+ if not os.path.isdir(checkpoint_dir):
52
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
53
+
54
+ # there should be only one file
55
+ if zero_stage == 2:
56
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
57
+ elif zero_stage == 3:
58
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
59
+
60
+ if not os.path.exists(file):
61
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
62
+
63
+ return file
64
+
65
+
66
+ def get_optim_files(checkpoint_dir):
67
+ # XXX: need to test that this simple glob rule works for multi-node setup too
68
+ optim_files = sorted(glob.glob(os.path.join(checkpoint_dir,
69
+ "*_optim_states.pt")),
70
+ key=natural_keys)
71
+
72
+ if len(optim_files) == 0:
73
+ raise FileNotFoundError(
74
+ f"can't find '*_optim_states.pt' files in directory '{checkpoint_dir}'")
75
+
76
+ return optim_files
77
+
78
+
79
+ def parse_model_state(file):
80
+ state_dict = torch.load(file, map_location=device)
81
+
82
+ if BUFFER_NAMES not in state_dict:
83
+ raise ValueError(f"{file} is not a model state checkpoint")
84
+ buffer_names = state_dict[BUFFER_NAMES]
85
+ if debug:
86
+ print("Found buffers:", buffer_names)
87
+
88
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
89
+ buffers = {
90
+ k: v.float()
91
+ for k,
92
+ v in state_dict["module"].items() if k in buffer_names
93
+ }
94
+ param_shapes = state_dict[PARAM_SHAPES]
95
+
96
+ ds_version = state_dict.get(DS_VERSION, None)
97
+
98
+ return buffers, param_shapes, ds_version
99
+
100
+
101
+ def parse_optim_states(files, ds_checkpoint_dir):
102
+
103
+ total_files = len(files)
104
+ state_dicts = []
105
+ for f in files:
106
+ state_dicts.append(torch.load(f, map_location=device))
107
+
108
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
109
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
110
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
111
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
112
+
113
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
114
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
115
+ # use the max of the partition_count to get the dp world_size.
116
+
117
+ if type(world_size) is list:
118
+ world_size = max(world_size)
119
+
120
+ if world_size != total_files:
121
+ raise ValueError(
122
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
123
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
124
+ )
125
+
126
+ # the groups are named differently in each stage
127
+ if zero_stage == 2:
128
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
129
+ elif zero_stage == 3:
130
+ fp32_groups_key = FP32_FLAT_GROUPS
131
+ else:
132
+ raise ValueError(f"unknown zero stage {zero_stage}")
133
+
134
+ if zero_stage == 2:
135
+ fp32_flat_groups = [
136
+ state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key]
137
+ for i in range(len(state_dicts))
138
+ ]
139
+ elif zero_stage == 3:
140
+ # if there is more than one param group, there will be multiple flattened tensors - one
141
+ # flattened tensor per group - for simplicity merge them into a single tensor
142
+ #
143
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
144
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
145
+
146
+ fp32_flat_groups = [
147
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key],
148
+ 0) for i in range(len(state_dicts))
149
+ ]
150
+
151
+ return zero_stage, world_size, fp32_flat_groups
152
+
153
+
154
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
155
+ """
156
+ Returns fp32 state_dict reconstructed from ds checkpoint
157
+
158
+ Args:
159
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
160
+
161
+ """
162
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
163
+
164
+ optim_files = get_optim_files(ds_checkpoint_dir)
165
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
166
+ print(
167
+ f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
168
+
169
+ model_file = get_model_state_file(ds_checkpoint_dir, zero_stage)
170
+ buffers, param_shapes, ds_version = parse_model_state(model_file)
171
+ print(f'Parsing checkpoint created by deepspeed=={ds_version}')
172
+
173
+ if zero_stage == 2:
174
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size,
175
+ param_shapes,
176
+ fp32_flat_groups,
177
+ buffers)
178
+ elif zero_stage == 3:
179
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size,
180
+ param_shapes,
181
+ fp32_flat_groups,
182
+ buffers)
183
+
184
+
185
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size,
186
+ param_shapes,
187
+ fp32_flat_groups,
188
+ buffers):
189
+
190
+ # Reconstruction protocol:
191
+ #
192
+ # XXX: document this
193
+
194
+ if debug:
195
+ for i in range(world_size):
196
+ for j in range(len(fp32_flat_groups[0])):
197
+ print(
198
+ f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
199
+
200
+ # XXX: memory usage doubles here (zero2)
201
+ num_param_groups = len(fp32_flat_groups[0])
202
+ merged_single_partition_of_fp32_groups = []
203
+ for i in range(num_param_groups):
204
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
205
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
206
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
207
+ avail_numel = sum([
208
+ full_single_fp32_vector.numel()
209
+ for full_single_fp32_vector in merged_single_partition_of_fp32_groups
210
+ ])
211
+
212
+ if debug:
213
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
214
+ wanted_numel = sum(
215
+ [sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
216
+ # not asserting if there is a mismatch due to possible padding
217
+ print(f"Have {avail_numel} numels to process.")
218
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
219
+
220
+ state_dict = OrderedDict()
221
+
222
+ # buffers
223
+ state_dict.update(buffers)
224
+ if debug:
225
+ print(f"added {len(buffers)} buffers")
226
+
227
+ # params
228
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
229
+ # out-of-core computing solution
230
+ total_numel = 0
231
+ total_params = 0
232
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
233
+ offset = 0
234
+ avail_numel = full_single_fp32_vector.numel()
235
+ for name, shape in shapes.items():
236
+
237
+ unpartitioned_numel = shape.numel()
238
+ total_numel += unpartitioned_numel
239
+ total_params += 1
240
+
241
+ if debug:
242
+ print(
243
+ f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} "
244
+ )
245
+ state_dict[name] = full_single_fp32_vector.narrow(
246
+ 0,
247
+ offset,
248
+ unpartitioned_numel).view(shape)
249
+ offset += unpartitioned_numel
250
+
251
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
252
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
253
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
254
+ # live optimizer object, so we are checking that the numbers are within the right range
255
+ align_to = 2 * world_size
256
+
257
+ def zero2_align(x):
258
+ return align_to * math.ceil(x / align_to)
259
+
260
+ if debug:
261
+ print(f"original offset={offset}, avail_numel={avail_numel}")
262
+
263
+ offset = zero2_align(offset)
264
+ avail_numel = zero2_align(avail_numel)
265
+
266
+ if debug:
267
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
268
+
269
+ # Sanity check
270
+ if offset != avail_numel:
271
+ raise ValueError(
272
+ f"consumed {offset} numels out of {avail_numel} - something is wrong")
273
+
274
+ print(
275
+ f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
276
+ )
277
+
278
+ return state_dict
279
+
280
+
281
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
282
+ remainder = unpartitioned_numel % world_size
283
+ padding_numel = (world_size - remainder) if remainder else 0
284
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
285
+ return partitioned_numel, padding_numel
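# (Editor's note — worked example, not part of the original script: with
# unpartitioned_numel=10 and world_size=4, remainder=2, so padding_numel=2 and
# partitioned_numel=math.ceil(10/4)=3; each of the 4 ranks stores 3 elements,
# and 3*4 == 10 real elements + 2 padding elements.)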
286
+
287
+
288
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size,
289
+ param_shapes,
290
+ fp32_flat_groups,
291
+ buffers):
292
+
293
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
294
+ # param, re-consolidating each param, while dealing with padding if any
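# (Editor's note — illustrative only: with world_size=2 and a 5-element param,
# zero3_partitioned_param_info gives partitioned_numel=3 and padding_numel=1;
# torch.cat of each rank's 3-element narrow() yields 6 elements, and
# .narrow(0, 0, 5).view(shape) drops the trailing padding element.)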
295
+
296
+ avail_numel = fp32_flat_groups[0].numel() * world_size
297
+ # merge list of dicts, preserving order
298
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
299
+
300
+ if debug:
301
+ for i in range(world_size):
302
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
303
+
304
+ wanted_params = len(param_shapes)
305
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
306
+ # not asserting if there is a mismatch due to possible padding
307
+ print(f"Have {avail_numel} numels to process.")
308
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
309
+
310
+ state_dict = OrderedDict()
311
+
312
+ # buffers
313
+ state_dict.update(buffers)
314
+ if debug:
315
+ print(f"added {len(buffers)} buffers")
316
+
317
+ # params
318
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
319
+ # out-of-core computing solution
320
+ offset = 0
321
+ total_numel = 0
322
+ total_params = 0
323
+ for name, shape in param_shapes.items():
324
+
325
+ unpartitioned_numel = shape.numel()
326
+ total_numel += unpartitioned_numel
327
+ total_params += 1
328
+
329
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
330
+
331
+ if debug:
332
+ print(
333
+ f"{total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
334
+ )
335
+
336
+ # XXX: memory usage doubles here
337
+ state_dict[name] = torch.cat(
338
+ tuple(fp32_flat_groups[i].narrow(0,
339
+ offset,
340
+ partitioned_numel)
341
+ for i in range(world_size)),
342
+ 0).narrow(0,
343
+ 0,
344
+ unpartitioned_numel).view(shape)
345
+ offset += partitioned_numel
346
+
347
+ offset *= world_size
348
+
349
+ # Sanity check
350
+ if offset != avail_numel:
351
+ raise ValueError(
352
+ f"consumed {offset} numels out of {avail_numel} - something is wrong")
353
+
354
+ print(
355
+ f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
356
+ )
357
+
358
+ return state_dict
359
+
360
+
361
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
362
+ """
363
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
364
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
365
+ via a model hub.
366
+
367
+ Args:
368
+ - ``checkpoint_dir``: path to the desired checkpoint folder
369
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the 'latest' file, e.g. ``global_step14``
370
+
371
+ Returns:
372
+ - pytorch ``state_dict``
373
+
374
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
375
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
376
+ the checkpoint.
377
+
378
+ A typical usage might be ::
379
+
380
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
381
+ # do the training and checkpoint saving
382
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
383
+ model = model.cpu() # move to cpu
384
+ model.load_state_dict(state_dict)
385
+ # submit to model hub or save the model to share with others
386
+
387
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
388
+ application, i.e. you will need to re-initialize the deepspeed engine, since
389
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
390
+
391
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
392
+
393
+ """
394
+ if tag is None:
395
+ latest_path = os.path.join(checkpoint_dir, 'latest')
396
+ if os.path.isfile(latest_path):
397
+ with open(latest_path, 'r') as fd:
398
+ tag = fd.read().strip()
399
+ else:
400
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
401
+
402
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
403
+
404
+ if not os.path.isdir(ds_checkpoint_dir):
405
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
406
+
407
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
408
+
409
+
410
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
411
+ """
412
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
413
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
414
+
415
+ Args:
416
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
417
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
418
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g. ``global_step14``
419
+ """
420
+
421
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
422
+ print(f"Saving fp32 state dict to {output_file}")
423
+ torch.save(state_dict, output_file)
424
+
425
+
426
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
427
+ """
428
+ 1. Put the provided model to cpu
429
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
430
+ 3. Load it into the provided model
431
+
432
+ Args:
433
+ - ``model``: the model object to update
434
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
435
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g. ``global_step14``
436
+
437
+ Returns:
438
+ - ``model``: modified model
439
+
440
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
441
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
442
+ conveniently placed for you in the checkpoint folder.
443
+
444
+ A typical usage might be ::
445
+
446
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
447
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
448
+ # submit to model hub or save the model to share with others
449
+
450
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
451
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
452
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
453
+
454
+ """
455
+ logger.info(f"Extracting fp32 weights")
456
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
457
+
458
+ logger.info(f"Overwriting model with fp32 weights")
459
+ model = model.cpu()
460
+ model.load_state_dict(state_dict, strict=False)
461
+
462
+ return model
463
+
464
+
465
+ if __name__ == "__main__":
466
+
467
+ parser = argparse.ArgumentParser()
468
+ parser.add_argument(
469
+ "checkpoint_dir",
470
+ type=str,
471
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
472
+ parser.add_argument(
473
+ "output_file",
474
+ type=str,
475
+ help=
476
+ "path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)"
477
+ )
478
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
479
+ args = parser.parse_args()
480
+
481
+ debug = args.debug
482
+
483
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
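[Editor's note: every checkpoint folder in this commit ships the same zero_to_fp32.py helper, so any of the ZeRO checkpoints above can be consolidated into a plain fp32 state_dict. A minimal sketch, assuming the checkpoint-11000 folder from this repository and an arbitrary output filename:

    python checkpoint-11000/zero_to_fp32.py checkpoint-11000 checkpoint-11000/pytorch_model.bin

or, following the docstrings above, from Python (assuming ``model`` is the corresponding model object):

    from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
    model = load_state_dict_from_zero_checkpoint(model, "checkpoint-11000")

Both paths read the tag from the folder's ``latest`` file (here: pytorch_model), rebuild the fp32 weights from the optimizer- and model-state files under checkpoint-11000/pytorch_model/, and produce a state dict that no longer needs DeepSpeed to be loaded.]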
checkpoint-11000/latest ADDED
@@ -0,0 +1 @@
 
 
1
+ pytorch_model
checkpoint-11000/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df6204753389087e67a43bc4ea649f8bc1c30ae200717abae5eda4a7d9701eee
3
+ size 1658715
checkpoint-11000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d6ce28a128ab3ac09da5a05984903a0e42b44c15843dd7296169e4a8538da91
3
+ size 9587033
checkpoint-11000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:647faa48ce1f3264acbc2a922f34464074ab2bf30d205e5e5276efba2d8bcc19
3
+ size 15755
checkpoint-11000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ebacb4ed8a15ff7b68ec3810624b871157aefd7fe207d8f34e23076140a7db0
3
+ size 563
checkpoint-11000/zero_to_fp32.py ADDED
@@ -0,0 +1,483 @@
[483 added lines — identical copy of zero_to_fp32.py, same as the copies already included with the earlier checkpoints; omitted here.]
checkpoint-11500/latest ADDED
@@ -0,0 +1 @@
 
 
1
+ pytorch_model
checkpoint-11500/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e1d12c9a8be83666b79559d72a70b76e6a40d80cc20fd2a48f271328947e87d
3
+ size 1658715
checkpoint-11500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7eb713d3a2b5dc7fc0eb6e14cb9dd26d34dabc71f4b6b6462ca8e7c3ef473c2f
3
+ size 9587033
checkpoint-11500/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:54c8e5a8a7bc9cfd0d4d594c2e44964410a0f28ad38f9769c8f822dafd158250
3
+ size 15755
checkpoint-11500/scheduler.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:572aa286bd2ae5f7669ee0b35bad5d72d1e67f8bb6694660a8f0846042807f34
3
+ size 563
checkpoint-11500/zero_to_fp32.py ADDED
@@ -0,0 +1,483 @@
[483 added lines — identical copy of zero_to_fp32.py, same as the copies already included with the earlier checkpoints; omitted here.]
checkpoint-12000/latest ADDED
@@ -0,0 +1 @@
 
 
1
+ pytorch_model
checkpoint-12000/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:841bd99babe37443f766f4a49a6873e052db52612d595dcc6794550a78267fd1
3
+ size 1658715
checkpoint-12000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec6e43e63f47fcd341efc2453b13a0242ab8da0721af2792255dc35e970ad187
3
+ size 9587033
checkpoint-12000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6aec726d1bf777e26cb70842c8b2d729b944efe361537f9e28908a05207c1c6a
3
+ size 15755
checkpoint-12000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb97c28306c268f1f9b8b7ddc17b35630a1d0b654045981dfa88592a2430aab5
3
+ size 563
checkpoint-12000/zero_to_fp32.py ADDED
@@ -0,0 +1,483 @@
1
+ #!/usr/bin/env python
2
+ '''Copyright The Microsoft DeepSpeed Team'''
3
+
4
+ # This script extracts fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints. It gets
5
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
6
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
7
+ # application.
8
+ #
9
+ # example: python zero_to_fp32.py . pytorch_model.bin
10
+
11
+ import argparse
12
+ import torch
13
+ import glob
14
+ import math
15
+ import os
16
+ import re
17
+ from collections import OrderedDict
18
+
19
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
20
+ # DeepSpeed data structures it has to be available in the current python environment.
21
+ from deepspeed.utils import logger
22
+ from deepspeed.checkpoint.constants import (DS_VERSION,
23
+ OPTIMIZER_STATE_DICT,
24
+ SINGLE_PARTITION_OF_FP32_GROUPS,
25
+ FP32_FLAT_GROUPS,
26
+ ZERO_STAGE,
27
+ PARTITION_COUNT,
28
+ PARAM_SHAPES,
29
+ BUFFER_NAMES)
30
+
31
+ debug = 0
32
+
33
+ # load to cpu
34
+ device = torch.device('cpu')
35
+
36
+
37
+ def atoi(text):
38
+ return int(text) if text.isdigit() else text
39
+
40
+
41
+ def natural_keys(text):
42
+ '''
43
+ alist.sort(key=natural_keys) sorts in human order
44
+ http://nedbatchelder.com/blog/200712/human_sorting.html
45
+ (See Toothy's implementation in the comments)
46
+ '''
47
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
48
+
49
+
50
+ def get_model_state_file(checkpoint_dir, zero_stage):
51
+ if not os.path.isdir(checkpoint_dir):
52
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
53
+
54
+ # there should be only one file
55
+ if zero_stage == 2:
56
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
57
+ elif zero_stage == 3:
58
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
59
+
60
+ if not os.path.exists(file):
61
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
62
+
63
+ return file
64
+
65
+
66
+ def get_optim_files(checkpoint_dir):
67
+ # XXX: need to test that this simple glob rule works for multi-node setup too
68
+ optim_files = sorted(glob.glob(os.path.join(checkpoint_dir,
69
+ "*_optim_states.pt")),
70
+ key=natural_keys)
71
+
72
+ if len(optim_files) == 0:
73
+ raise FileNotFoundError(
74
+ f"can't find '*_optim_states.pt' files in directory '{checkpoint_dir}'")
75
+
76
+ return optim_files
77
+
78
+
79
+ def parse_model_state(file):
80
+ state_dict = torch.load(file, map_location=device)
81
+
82
+ if BUFFER_NAMES not in state_dict:
83
+ raise ValueError(f"{file} is not a model state checkpoint")
84
+ buffer_names = state_dict[BUFFER_NAMES]
85
+ if debug:
86
+ print("Found buffers:", buffer_names)
87
+
88
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
89
+ buffers = {
90
+ k: v.float()
91
+ for k,
92
+ v in state_dict["module"].items() if k in buffer_names
93
+ }
94
+ param_shapes = state_dict[PARAM_SHAPES]
95
+
96
+ ds_version = state_dict.get(DS_VERSION, None)
97
+
98
+ return buffers, param_shapes, ds_version
99
+
100
+
101
+ def parse_optim_states(files, ds_checkpoint_dir):
102
+
103
+ total_files = len(files)
104
+ state_dicts = []
105
+ for f in files:
106
+ state_dicts.append(torch.load(f, map_location=device))
107
+
108
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
109
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
110
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
111
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
112
+
113
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
114
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
115
+ # use the max of the partition_count to get the dp world_size.
116
+
117
+ if type(world_size) is list:
118
+ world_size = max(world_size)
119
+
120
+ if world_size != total_files:
121
+ raise ValueError(
122
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
123
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
124
+ )
125
+
126
+ # the groups are named differently in each stage
127
+ if zero_stage == 2:
128
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
129
+ elif zero_stage == 3:
130
+ fp32_groups_key = FP32_FLAT_GROUPS
131
+ else:
132
+ raise ValueError(f"unknown zero stage {zero_stage}")
133
+
134
+ if zero_stage == 2:
135
+ fp32_flat_groups = [
136
+ state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key]
137
+ for i in range(len(state_dicts))
138
+ ]
139
+ elif zero_stage == 3:
140
+ # if there is more than one param group, there will be multiple flattened tensors - one
141
+ # flattened tensor per group - for simplicity merge them into a single tensor
142
+ #
143
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
144
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
145
+
146
+ fp32_flat_groups = [
147
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key],
148
+ 0) for i in range(len(state_dicts))
149
+ ]
150
+
151
+ return zero_stage, world_size, fp32_flat_groups
152
+
153
+
154
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
155
+ """
156
+ Returns fp32 state_dict reconstructed from ds checkpoint
157
+
158
+ Args:
159
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
160
+
161
+ """
162
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
163
+
164
+ optim_files = get_optim_files(ds_checkpoint_dir)
165
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
166
+ print(
167
+ f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
168
+
169
+ model_file = get_model_state_file(ds_checkpoint_dir, zero_stage)
170
+ buffers, param_shapes, ds_version = parse_model_state(model_file)
171
+ print(f'Parsing checkpoint created by deepspeed=={ds_version}')
172
+
173
+ if zero_stage == 2:
174
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size,
175
+ param_shapes,
176
+ fp32_flat_groups,
177
+ buffers)
178
+ elif zero_stage == 3:
179
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size,
180
+ param_shapes,
181
+ fp32_flat_groups,
182
+ buffers)
183
+
184
+
185
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size,
186
+ param_shapes,
187
+ fp32_flat_groups,
188
+ buffers):
189
+
190
+ # Reconstruction protocol:
191
+ #
192
+ # XXX: document this
193
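+ # A sketch of what the code below does: each rank's checkpoint holds, per param
+ # group, one flat 1-D fp32 tensor containing that rank's partition of the group.
+ # The partitions of every group are concatenated across ranks into a single flat
+ # vector, and each param is then sliced out by its recorded shape via narrow()/view().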
+
194
+ if debug:
195
+ for i in range(world_size):
196
+ for j in range(len(fp32_flat_groups[0])):
197
+ print(
198
+ f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
199
+
200
+ # XXX: memory usage doubles here (zero2)
201
+ num_param_groups = len(fp32_flat_groups[0])
202
+ merged_single_partition_of_fp32_groups = []
203
+ for i in range(num_param_groups):
204
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
205
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
206
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
207
+ avail_numel = sum([
208
+ full_single_fp32_vector.numel()
209
+ for full_single_fp32_vector in merged_single_partition_of_fp32_groups
210
+ ])
211
+
212
+ if debug:
213
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
214
+ wanted_numel = sum(
215
+ [sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
216
+ # not asserting if there is a mismatch due to possible padding
217
+ print(f"Have {avail_numel} numels to process.")
218
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
219
+
220
+ state_dict = OrderedDict()
221
+
222
+ # buffers
223
+ state_dict.update(buffers)
224
+ if debug:
225
+ print(f"added {len(buffers)} buffers")
226
+
227
+ # params
228
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
229
+ # out-of-core computing solution
230
+ total_numel = 0
231
+ total_params = 0
232
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
233
+ offset = 0
234
+ avail_numel = full_single_fp32_vector.numel()
235
+ for name, shape in shapes.items():
236
+
237
+ unpartitioned_numel = shape.numel()
238
+ total_numel += unpartitioned_numel
239
+ total_params += 1
240
+
241
+ if debug:
242
+ print(
243
+ f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} "
244
+ )
245
+ state_dict[name] = full_single_fp32_vector.narrow(
246
+ 0,
247
+ offset,
248
+ unpartitioned_numel).view(shape)
249
+ offset += unpartitioned_numel
250
+
251
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
252
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
253
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
254
+ # live optimizer object, so we are checking that the numbers are within the right range
255
+ align_to = 2 * world_size
256
+
257
+ def zero2_align(x):
258
+ return align_to * math.ceil(x / align_to)
259
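+ # Worked example with hypothetical numbers: for world_size=4, align_to is 8, so
+ # zero2_align(1000001) == 1000008 while zero2_align(1000000) stays 1000000; offset
+ # and avail_numel are only compared after both are rounded up this way.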
+
260
+ if debug:
261
+ print(f"original offset={offset}, avail_numel={avail_numel}")
262
+
263
+ offset = zero2_align(offset)
264
+ avail_numel = zero2_align(avail_numel)
265
+
266
+ if debug:
267
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
268
+
269
+ # Sanity check
270
+ if offset != avail_numel:
271
+ raise ValueError(
272
+ f"consumed {offset} numels out of {avail_numel} - something is wrong")
273
+
274
+ print(
275
+ f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
276
+ )
277
+
278
+ return state_dict
279
+
280
+
281
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
282
+ remainder = unpartitioned_numel % world_size
283
+ padding_numel = (world_size - remainder) if remainder else 0
284
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
285
+ return partitioned_numel, padding_numel
286
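+ # Worked example with hypothetical numbers: unpartitioned_numel=10 and world_size=4
+ # give partitioned_numel=3 (math.ceil(10/4)) and padding_numel=2, i.e. each of the
+ # 4 ranks stores 3 elements and the last 2 stored elements are padding.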
+
287
+
288
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size,
289
+ param_shapes,
290
+ fp32_flat_groups,
291
+ buffers):
292
+
293
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
294
+ # param, re-consolidating each param, while dealing with padding if any
295
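+ # A sketch of the shapes involved (hypothetical numbers): for a 5-element param with
+ # world_size=2, each rank's flat tensor contributes a 3-element slice at the current
+ # offset; torch.cat of the two slices yields 6 elements, narrow(0, 0, 5) drops the
+ # padding, and view(shape) restores the original parameter shape.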
+
296
+ avail_numel = fp32_flat_groups[0].numel() * world_size
297
+ # merge list of dicts, preserving order
298
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
299
+
300
+ if debug:
301
+ for i in range(world_size):
302
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
303
+
304
+ wanted_params = len(param_shapes)
305
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
306
+ # not asserting if there is a mismatch due to possible padding
307
+ print(f"Have {avail_numel} numels to process.")
308
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
309
+
310
+ state_dict = OrderedDict()
311
+
312
+ # buffers
313
+ state_dict.update(buffers)
314
+ if debug:
315
+ print(f"added {len(buffers)} buffers")
316
+
317
+ # params
318
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
319
+ # out-of-core computing solution
320
+ offset = 0
321
+ total_numel = 0
322
+ total_params = 0
323
+ for name, shape in param_shapes.items():
324
+
325
+ unpartitioned_numel = shape.numel()
326
+ total_numel += unpartitioned_numel
327
+ total_params += 1
328
+
329
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
330
+
331
+ if debug:
332
+ print(
333
+ f"{total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
334
+ )
335
+
336
+ # XXX: memory usage doubles here
337
+ state_dict[name] = torch.cat(
338
+ tuple(fp32_flat_groups[i].narrow(0,
339
+ offset,
340
+ partitioned_numel)
341
+ for i in range(world_size)),
342
+ 0).narrow(0,
343
+ 0,
344
+ unpartitioned_numel).view(shape)
345
+ offset += partitioned_numel
346
+
347
+ offset *= world_size
348
+
349
+ # Sanity check
350
+ if offset != avail_numel:
351
+ raise ValueError(
352
+ f"consumed {offset} numels out of {avail_numel} - something is wrong")
353
+
354
+ print(
355
+ f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
356
+ )
357
+
358
+ return state_dict
359
+
360
+
361
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
362
+ """
363
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
364
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
365
+ via a model hub.
366
+
367
+ Args:
368
+ - ``checkpoint_dir``: path to the desired checkpoint folder
369
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
370
+
371
+ Returns:
372
+ - pytorch ``state_dict``
373
+
374
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
375
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
376
+ the checkpoint.
377
+
378
+ A typical usage might be ::
379
+
380
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
381
+ # do the training and checkpoint saving
382
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
383
+ model = model.cpu() # move to cpu
384
+ model.load_state_dict(state_dict)
385
+ # submit to model hub or save the model to share with others
386
+
387
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
388
+ application. i.e. you will need to re-initialize the deepspeed engine, since
389
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
390
+
391
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
392
+
393
+ """
394
+ if tag is None:
395
+ latest_path = os.path.join(checkpoint_dir, 'latest')
396
+ if os.path.isfile(latest_path):
397
+ with open(latest_path, 'r') as fd:
398
+ tag = fd.read().strip()
399
+ else:
400
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
401
+
402
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
403
+
404
+ if not os.path.isdir(ds_checkpoint_dir):
405
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
406
+
407
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
408
+
409
+
410
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
411
+ """
412
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
413
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
414
+
415
+ Args:
416
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
417
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
418
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
419
+ """
420
+
421
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
422
+ print(f"Saving fp32 state dict to {output_file}")
423
+ torch.save(state_dict, output_file)
424
+
425
+
426
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
427
+ """
428
+ 1. Put the provided model to cpu
429
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
430
+ 3. Load it into the provided model
431
+
432
+ Args:
433
+ - ``model``: the model object to update
434
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
435
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
436
+
437
+ Returns:
438
+ - ``model``: modified model
439
+
440
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
441
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
442
+ conveniently placed for you in the checkpoint folder.
443
+
444
+ A typical usage might be ::
445
+
446
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
447
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
448
+ # submit to model hub or save the model to share with others
449
+
450
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
451
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
452
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
453
+
454
+ """
455
+ logger.info(f"Extracting fp32 weights")
456
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
457
+
458
+ logger.info(f"Overwriting model with fp32 weights")
459
+ model = model.cpu()
460
+ model.load_state_dict(state_dict, strict=False)
461
+
462
+ return model
463
+
464
+
465
+ if __name__ == "__main__":
466
+
467
+ parser = argparse.ArgumentParser()
468
+ parser.add_argument(
469
+ "checkpoint_dir",
470
+ type=str,
471
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
472
+ parser.add_argument(
473
+ "output_file",
474
+ type=str,
475
+ help=
476
+ "path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)"
477
+ )
478
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
479
+ args = parser.parse_args()
480
+
481
+ debug = args.debug
482
+
483
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
checkpoint-12500/latest ADDED
@@ -0,0 +1 @@
 
 
1
+ pytorch_model
checkpoint-12500/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbfbadfdb5fd62ef9d1951ee73de5732cadd9eb5bdd3470ce06ed4f39c212efc
3
+ size 1658715
checkpoint-12500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d34b48d84e669ca90522d8c948b2137fda008358bc53287118bb1b206db2fa99
3
+ size 9587033
checkpoint-12500/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a7bf0b971bc677e452904f7f6250af2f795ffa2fc31fa999844cf2d6d410f95
3
+ size 15755
checkpoint-12500/scheduler.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12f72d3ef74a91f7d1ba0b76edaf21ac75a55a8f2923a5f4b6f5597235cb5b3c
3
+ size 563
checkpoint-12500/zero_to_fp32.py ADDED
@@ -0,0 +1,483 @@
1
+ #!/usr/bin/env python
2
+ '''Copyright The Microsoft DeepSpeed Team'''
3
+
4
+ # This script extracts fp32 consolidated weights from ZeRO 2 and 3 DeepSpeed checkpoints. It gets
5
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
6
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
7
+ # application.
8
+ #
9
+ # example: python zero_to_fp32.py . pytorch_model.bin
10
+
11
+ import argparse
12
+ import torch
13
+ import glob
14
+ import math
15
+ import os
16
+ import re
17
+ from collections import OrderedDict
18
+
19
+ # While this script doesn't use deepspeed to recover data, the checkpoints are pickled with
20
+ # DeepSpeed data structures, so deepspeed has to be available in the current python environment.
21
+ from deepspeed.utils import logger
22
+ from deepspeed.checkpoint.constants import (DS_VERSION,
23
+ OPTIMIZER_STATE_DICT,
24
+ SINGLE_PARTITION_OF_FP32_GROUPS,
25
+ FP32_FLAT_GROUPS,
26
+ ZERO_STAGE,
27
+ PARTITION_COUNT,
28
+ PARAM_SHAPES,
29
+ BUFFER_NAMES)
30
+
31
+ debug = 0
32
+
33
+ # load to cpu
34
+ device = torch.device('cpu')
35
+
36
+
37
+ def atoi(text):
38
+ return int(text) if text.isdigit() else text
39
+
40
+
41
+ def natural_keys(text):
42
+ '''
43
+ alist.sort(key=natural_keys) sorts in human order
44
+ http://nedbatchelder.com/blog/200712/human_sorting.html
45
+ (See Toothy's implementation in the comments)
46
+ '''
47
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
48
+
49
+
50
+ def get_model_state_file(checkpoint_dir, zero_stage):
51
+ if not os.path.isdir(checkpoint_dir):
52
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
53
+
54
+ # there should be only one file
55
+ if zero_stage == 2:
56
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
57
+ elif zero_stage == 3:
58
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
59
+
60
+ if not os.path.exists(file):
61
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
62
+
63
+ return file
64
+
65
+
66
+ def get_optim_files(checkpoint_dir):
67
+ # XXX: need to test that this simple glob rule works for multi-node setup too
68
+ optim_files = sorted(glob.glob(os.path.join(checkpoint_dir,
69
+ "*_optim_states.pt")),
70
+ key=natural_keys)
71
+
72
+ if len(optim_files) == 0:
73
+ raise FileNotFoundError(
74
+ f"can't find '*_optim_states.pt' files in directory '{checkpoint_dir}'")
75
+
76
+ return optim_files
77
+
78
+
79
+ def parse_model_state(file):
80
+ state_dict = torch.load(file, map_location=device)
81
+
82
+ if BUFFER_NAMES not in state_dict:
83
+ raise ValueError(f"{file} is not a model state checkpoint")
84
+ buffer_names = state_dict[BUFFER_NAMES]
85
+ if debug:
86
+ print("Found buffers:", buffer_names)
87
+
88
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
89
+ buffers = {
90
+ k: v.float()
91
+ for k,
92
+ v in state_dict["module"].items() if k in buffer_names
93
+ }
94
+ param_shapes = state_dict[PARAM_SHAPES]
95
+
96
+ ds_version = state_dict.get(DS_VERSION, None)
97
+
98
+ return buffers, param_shapes, ds_version
99
+
100
+
101
+ def parse_optim_states(files, ds_checkpoint_dir):
102
+
103
+ total_files = len(files)
104
+ state_dicts = []
105
+ for f in files:
106
+ state_dicts.append(torch.load(f, map_location=device))
107
+
108
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
109
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
110
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
111
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
112
+
113
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
114
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
115
+ # use the max of the partition_count to get the dp world_size.
116
+
117
+ if type(world_size) is list:
118
+ world_size = max(world_size)
119
+
120
+ if world_size != total_files:
121
+ raise ValueError(
122
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
123
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
124
+ )
125
+
126
+ # the groups are named differently in each stage
127
+ if zero_stage == 2:
128
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
129
+ elif zero_stage == 3:
130
+ fp32_groups_key = FP32_FLAT_GROUPS
131
+ else:
132
+ raise ValueError(f"unknown zero stage {zero_stage}")
133
+
134
+ if zero_stage == 2:
135
+ fp32_flat_groups = [
136
+ state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key]
137
+ for i in range(len(state_dicts))
138
+ ]
139
+ elif zero_stage == 3:
140
+ # if there is more than one param group, there will be multiple flattened tensors - one
141
+ # flattened tensor per group - for simplicity merge them into a single tensor
142
+ #
143
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
144
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
145
+
146
+ fp32_flat_groups = [
147
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key],
148
+ 0) for i in range(len(state_dicts))
149
+ ]
150
+
151
+ return zero_stage, world_size, fp32_flat_groups
152
+
153
+
154
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
155
+ """
156
+ Returns fp32 state_dict reconstructed from ds checkpoint
157
+
158
+ Args:
159
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
160
+
161
+ """
162
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
163
+
164
+ optim_files = get_optim_files(ds_checkpoint_dir)
165
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
166
+ print(
167
+ f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
168
+
169
+ model_file = get_model_state_file(ds_checkpoint_dir, zero_stage)
170
+ buffers, param_shapes, ds_version = parse_model_state(model_file)
171
+ print(f'Parsing checkpoint created by deepspeed=={ds_version}')
172
+
173
+ if zero_stage == 2:
174
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size,
175
+ param_shapes,
176
+ fp32_flat_groups,
177
+ buffers)
178
+ elif zero_stage == 3:
179
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size,
180
+ param_shapes,
181
+ fp32_flat_groups,
182
+ buffers)
183
+
184
+
185
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size,
186
+ param_shapes,
187
+ fp32_flat_groups,
188
+ buffers):
189
+
190
+ # Reconstruction protocol:
191
+ #
192
+ # XXX: document this
193
+
194
+ if debug:
195
+ for i in range(world_size):
196
+ for j in range(len(fp32_flat_groups[0])):
197
+ print(
198
+ f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
199
+
200
+ # XXX: memory usage doubles here (zero2)
201
+ num_param_groups = len(fp32_flat_groups[0])
202
+ merged_single_partition_of_fp32_groups = []
203
+ for i in range(num_param_groups):
204
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
205
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
206
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
207
+ avail_numel = sum([
208
+ full_single_fp32_vector.numel()
209
+ for full_single_fp32_vector in merged_single_partition_of_fp32_groups
210
+ ])
211
+
212
+ if debug:
213
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
214
+ wanted_numel = sum(
215
+ [sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
216
+ # not asserting if there is a mismatch due to possible padding
217
+ print(f"Have {avail_numel} numels to process.")
218
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
219
+
220
+ state_dict = OrderedDict()
221
+
222
+ # buffers
223
+ state_dict.update(buffers)
224
+ if debug:
225
+ print(f"added {len(buffers)} buffers")
226
+
227
+ # params
228
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
229
+ # out-of-core computing solution
230
+ total_numel = 0
231
+ total_params = 0
232
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
233
+ offset = 0
234
+ avail_numel = full_single_fp32_vector.numel()
235
+ for name, shape in shapes.items():
236
+
237
+ unpartitioned_numel = shape.numel()
238
+ total_numel += unpartitioned_numel
239
+ total_params += 1
240
+
241
+ if debug:
242
+ print(
243
+ f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} "
244
+ )
245
+ state_dict[name] = full_single_fp32_vector.narrow(
246
+ 0,
247
+ offset,
248
+ unpartitioned_numel).view(shape)
249
+ offset += unpartitioned_numel
250
+
251
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
252
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
253
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
254
+ # live optimizer object, so we are checking that the numbers are within the right range
255
+ align_to = 2 * world_size
256
+
257
+ def zero2_align(x):
258
+ return align_to * math.ceil(x / align_to)
259
+
260
+ if debug:
261
+ print(f"original offset={offset}, avail_numel={avail_numel}")
262
+
263
+ offset = zero2_align(offset)
264
+ avail_numel = zero2_align(avail_numel)
265
+
266
+ if debug:
267
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
268
+
269
+ # Sanity check
270
+ if offset != avail_numel:
271
+ raise ValueError(
272
+ f"consumed {offset} numels out of {avail_numel} - something is wrong")
273
+
274
+ print(
275
+ f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
276
+ )
277
+
278
+ return state_dict
279
+
280
+
281
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
282
+ remainder = unpartitioned_numel % world_size
283
+ padding_numel = (world_size - remainder) if remainder else 0
284
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
285
+ return partitioned_numel, padding_numel
286
+
287
+
288
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size,
289
+ param_shapes,
290
+ fp32_flat_groups,
291
+ buffers):
292
+
293
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
294
+ # param, re-consolidating each param, while dealing with padding if any
295
+
296
+ avail_numel = fp32_flat_groups[0].numel() * world_size
297
+ # merge list of dicts, preserving order
298
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
299
+
300
+ if debug:
301
+ for i in range(world_size):
302
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
303
+
304
+ wanted_params = len(param_shapes)
305
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
306
+ # not asserting if there is a mismatch due to possible padding
307
+ print(f"Have {avail_numel} numels to process.")
308
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
309
+
310
+ state_dict = OrderedDict()
311
+
312
+ # buffers
313
+ state_dict.update(buffers)
314
+ if debug:
315
+ print(f"added {len(buffers)} buffers")
316
+
317
+ # params
318
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
319
+ # out-of-core computing solution
320
+ offset = 0
321
+ total_numel = 0
322
+ total_params = 0
323
+ for name, shape in param_shapes.items():
324
+
325
+ unpartitioned_numel = shape.numel()
326
+ total_numel += unpartitioned_numel
327
+ total_params += 1
328
+
329
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
330
+
331
+ if debug:
332
+ print(
333
+ f"{total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
334
+ )
335
+
336
+ # XXX: memory usage doubles here
337
+ state_dict[name] = torch.cat(
338
+ tuple(fp32_flat_groups[i].narrow(0,
339
+ offset,
340
+ partitioned_numel)
341
+ for i in range(world_size)),
342
+ 0).narrow(0,
343
+ 0,
344
+ unpartitioned_numel).view(shape)
345
+ offset += partitioned_numel
346
+
347
+ offset *= world_size
348
+
349
+ # Sanity check
350
+ if offset != avail_numel:
351
+ raise ValueError(
352
+ f"consumed {offset} numels out of {avail_numel} - something is wrong")
353
+
354
+ print(
355
+ f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
356
+ )
357
+
358
+ return state_dict
359
+
360
+
361
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
362
+ """
363
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
364
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
365
+ via a model hub.
366
+
367
+ Args:
368
+ - ``checkpoint_dir``: path to the desired checkpoint folder
369
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
370
+
371
+ Returns:
372
+ - pytorch ``state_dict``
373
+
374
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
375
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
376
+ the checkpoint.
377
+
378
+ A typical usage might be ::
379
+
380
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
381
+ # do the training and checkpoint saving
382
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
383
+ model = model.cpu() # move to cpu
384
+ model.load_state_dict(state_dict)
385
+ # submit to model hub or save the model to share with others
386
+
387
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
388
+ application. i.e. you will need to re-initialize the deepspeed engine, since
389
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
390
+
391
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
392
+
393
+ """
394
+ if tag is None:
395
+ latest_path = os.path.join(checkpoint_dir, 'latest')
396
+ if os.path.isfile(latest_path):
397
+ with open(latest_path, 'r') as fd:
398
+ tag = fd.read().strip()
399
+ else:
400
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
401
+
402
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
403
+
404
+ if not os.path.isdir(ds_checkpoint_dir):
405
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
406
+
407
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
408
+
409
+
410
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
411
+ """
412
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
413
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
414
+
415
+ Args:
416
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
417
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
418
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
419
+ """
420
+
421
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
422
+ print(f"Saving fp32 state dict to {output_file}")
423
+ torch.save(state_dict, output_file)
424
+
425
+
426
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
427
+ """
428
+ 1. Put the provided model to cpu
429
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
430
+ 3. Load it into the provided model
431
+
432
+ Args:
433
+ - ``model``: the model object to update
434
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
435
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
436
+
437
+ Returns:
438
+ - ``model``: modified model
439
+
440
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
441
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
442
+ conveniently placed for you in the checkpoint folder.
443
+
444
+ A typical usage might be ::
445
+
446
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
447
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
448
+ # submit to model hub or save the model to share with others
449
+
450
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
451
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
452
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
453
+
454
+ """
455
+ logger.info(f"Extracting fp32 weights")
456
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
457
+
458
+ logger.info(f"Overwriting model with fp32 weights")
459
+ model = model.cpu()
460
+ model.load_state_dict(state_dict, strict=False)
461
+
462
+ return model
463
+
464
+
465
+ if __name__ == "__main__":
466
+
467
+ parser = argparse.ArgumentParser()
468
+ parser.add_argument(
469
+ "checkpoint_dir",
470
+ type=str,
471
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
472
+ parser.add_argument(
473
+ "output_file",
474
+ type=str,
475
+ help=
476
+ "path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)"
477
+ )
478
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
479
+ args = parser.parse_args()
480
+
481
+ debug = args.debug
482
+
483
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
checkpoint-13000/latest ADDED
@@ -0,0 +1 @@
 
 
1
+ pytorch_model
checkpoint-13000/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:015ca74de7641a77ded8ddf35317e4f502cdc2087bc76e326598f26a25c3bf8a
3
+ size 1658715
checkpoint-13000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31fae3f3e6d78b4e2185fd6bc0c927f462d96f1304309400dde02a2595aaee72
3
+ size 9587033
checkpoint-13000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cde737934b7deb6f6e085ab2bcbb01ddb296963af88d76dd5af4560d13e6410f
3
+ size 15755
checkpoint-13000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:817cb34c93c86642aabbda68f7e5dbaf01780201ee7b7680089a89ede5fb2a83
3
+ size 563