NIRVANALAN committed
Commit 44bb30d
Parent: 00d21f3
guided_diffusion/__pycache__/dist_util.cpython-310.pyc CHANGED
Binary files a/guided_diffusion/__pycache__/dist_util.cpython-310.pyc and b/guided_diffusion/__pycache__/dist_util.cpython-310.pyc differ
 
guided_diffusion/__pycache__/train_util.cpython-310.pyc CHANGED
Binary files a/guided_diffusion/__pycache__/train_util.cpython-310.pyc and b/guided_diffusion/__pycache__/train_util.cpython-310.pyc differ
 
guided_diffusion/train_util.py CHANGED
@@ -131,16 +131,18 @@ class TrainLoop:
 
         # print('creating DDP')
         if th.cuda.is_available():
-            self.use_ddp = True
-            self.ddpm_model = self.model
-            self.ddp_model = DDP(
-                self.model.to(dist_util.dev()),
-                device_ids=[dist_util.dev()],
-                output_device=dist_util.dev(),
-                broadcast_buffers=False,
-                bucket_cap_mb=128,
-                find_unused_parameters=False,
-            )
+            # self.use_ddp = True
+            # self.ddpm_model = self.model
+            # self.ddp_model = DDP(
+            #     # self.model.to(dist_util.dev()),
+            #     self.model.to('cuda:0'),
+            #     device_ids=[dist_util.dev()],
+            #     output_device=dist_util.dev(),
+            #     broadcast_buffers=False,
+            #     bucket_cap_mb=128,
+            #     find_unused_parameters=False,
+            # )
+            self.ddp_model = self.model.to('cuda:0')  # demo does not require ddp
         else:
             if dist.get_world_size() > 1:
                 logger.warn("Distributed training requires CUDA. "
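
The commit hard-codes `'cuda:0'` and drops the `DistributedDataParallel` wrapper, which is fine for a single-GPU demo but loses the multi-GPU path. Below is a minimal sketch of how both paths could sit behind one guard, keyed on whether a process group with more than one rank is active. This is not the repo's code: `wrap_model` is a hypothetical helper, and `dev()` is a stand-in for `dist_util.dev()`, assumed to return the local CUDA device when available.

```python
import torch as th
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP


def dev():
    # Stand-in for guided_diffusion's dist_util.dev(); assumed to
    # return the local CUDA device when one is available.
    return th.device("cuda:0" if th.cuda.is_available() else "cpu")


def wrap_model(model):
    """Hypothetical helper: wrap in DDP only when a process group with
    more than one rank is initialized; otherwise just move the model to
    the local device (the single-GPU demo path this commit switches to)."""
    if th.cuda.is_available() and dist.is_initialized() and dist.get_world_size() > 1:
        return DDP(
            model.to(dev()),
            device_ids=[dev()],
            output_device=dev(),
            broadcast_buffers=False,
            bucket_cap_mb=128,
            find_unused_parameters=False,
        )
    return model.to(dev())
```

A guard like this avoids initializing DDP when only one GPU is in use, which is the point of the change, while keeping the commented-out multi-GPU configuration recoverable without editing the constructor again.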