QinOwen committed
Commit: 1759457
1 Parent(s): ff3cdde

debug-noise

VADER-VideoCrafter/lvdm/models/samplers/ddim.py CHANGED
@@ -5,18 +5,9 @@ import torch
 from lvdm.models.utils_diffusion import make_ddim_sampling_parameters, make_ddim_timesteps
 from lvdm.common import noise_like
 import random
-import os
 # import ipdb
 # st = ipdb.set_trace
 
-def seed_everything_self(TORCH_SEED):
-    random.seed(TORCH_SEED)
-    os.environ['PYTHONHASHSEED'] = str(TORCH_SEED)
-    np.random.seed(TORCH_SEED)
-    torch.manual_seed(TORCH_SEED)
-    torch.cuda.manual_seed_all(TORCH_SEED)
-    torch.backends.cudnn.deterministic = True
-    torch.backends.cudnn.benchmark = False
 
 class DDIMSampler(object):
     def __init__(self, model, schedule="linear", **kwargs):
@@ -97,7 +88,6 @@ class DDIMSampler(object):
                log_every_t=100,
                unconditional_guidance_scale=1.,
                unconditional_conditioning=None,
-               seed=0,
                # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
                **kwargs
                ):
@@ -143,7 +133,6 @@ class DDIMSampler(object):
                                                     unconditional_guidance_scale=unconditional_guidance_scale,
                                                     unconditional_conditioning=unconditional_conditioning,
                                                     verbose=verbose,
-                                                    seed=seed,
                                                     **kwargs)
         return samples, intermediates
 
@@ -154,11 +143,10 @@ class DDIMSampler(object):
                       mask=None, x0=None, img_callback=None, log_every_t=100,
                       temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                       unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,
-                      cond_tau=1., target_size=None, start_timesteps=None, seed=0,
+                      cond_tau=1., target_size=None, start_timesteps=None,
                       **kwargs):
         device = self.model.betas.device
         # print('ddim device', device)
-        seed_everything_self(seed)
         b = shape[0]
         if x_T is None:
             img = torch.randn(shape, device=device)
@@ -168,8 +156,8 @@ class DDIMSampler(object):
             print("x_T: ", x_T)
             print("shape: ", shape)
             print('random seed debug: ', torch.randn(100, device=device).sum())
-            print("Debug initial noise1: ", img.sum().item())
-            print("Debug initial noise2: ", torch.randn(shape, device=device).sum().item())
+            print("Debug initial noise: ", torch.randn(shape, device=device).sum().item())
+            print("Debug initial noise: ", torch.randn(shape, device=device).sum().item())
             print("noise device: ", img.device)
 
         if timesteps is None:
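Context: the removed seed_everything_self helper reseeded the Python, NumPy, and PyTorch global RNGs on every sampler call, so repeated calls to ddim_sampling with the same seed drew identical initial noise, which is presumably the "debug-noise" behavior under investigation. A minimal standalone sketch of that effect (illustrative, not this repo's code), and of what the two repeated "Debug initial noise" prints in the new version should show:

import torch

def sample_with_reseed(seed, shape):
    # Mimics the removed behavior: reseeding the global RNG inside the
    # sampler pins the generator state before every draw.
    torch.manual_seed(seed)
    return torch.randn(shape)

a = sample_with_reseed(0, (2, 3))
b = sample_with_reseed(0, (2, 3))
print(torch.equal(a, b))   # True: every call produces identical noise

# Without the per-call reseed, consecutive draws advance the RNG state,
# so the two "Debug initial noise" prints should now differ.
c = torch.randn((2, 3))
d = torch.randn((2, 3))
print(torch.equal(c, d))   # False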
VADER-VideoCrafter/scripts/main/funcs.py CHANGED
@@ -14,7 +14,7 @@ from lvdm.models.samplers.ddim import DDIMSampler
 # st = ipdb.set_trace
 
 def batch_ddim_sampling(model, cond, noise_shape, n_samples=1, ddim_steps=50, ddim_eta=1.0,\
-                        cfg_scale=1.0, temporal_cfg_scale=None, backprop_mode=None, decode_frame='-1', seed=0, **kwargs):
+                        cfg_scale=1.0, temporal_cfg_scale=None, backprop_mode=None, decode_frame='-1', **kwargs):
     ddim_sampler = DDIMSampler(model)
     if backprop_mode is not None: # it is for training now, backprop_mode != None also means vader training mode
         ddim_sampler.backprop_mode = backprop_mode
@@ -64,7 +64,6 @@ def batch_ddim_sampling(model, cond, noise_shape, n_samples=1, ddim_steps=50, dd
                                         temporal_length=noise_shape[2],
                                         conditional_guidance_scale_temporal=temporal_cfg_scale,
                                         x_T=x_T,
-                                        seed=seed,
                                         **kwargs
                                         )
 
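With seed dropped from the signature, determinism becomes the caller's responsibility: train_t2v_lora.py seeds once via seed_everything_self(args.seed) before invoking batch_ddim_sampling. A hedged sketch of the updated calling convention, using a hypothetical stub in place of the real model and conditioning:

import torch

def batch_ddim_sampling_stub(noise_shape, n_samples=1, ddim_steps=50, ddim_eta=1.0,
                             cfg_scale=1.0, temporal_cfg_scale=None,
                             backprop_mode=None, decode_frame='-1', **kwargs):
    # Stand-in for the real function (model and cond omitted); it simply
    # consumes whatever global RNG state the caller has already set up.
    return torch.randn(noise_shape)

torch.manual_seed(42)  # caller-side seeding replaces the removed seed kwarg
samples = batch_ddim_sampling_stub([1, 4, 16, 32, 32], ddim_steps=25)
print(samples.shape)   # torch.Size([1, 4, 16, 32, 32])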
 
VADER-VideoCrafter/scripts/main/train_t2v_lora.py CHANGED
@@ -655,10 +655,10 @@ def run_training(args, model, **kwargs):
             seed_everything_self(args.seed)
             if isinstance(peft_model, torch.nn.parallel.DistributedDataParallel):
                 batch_samples = batch_ddim_sampling(peft_model.module, cond, noise_shape, args.n_samples, \
-                                args.ddim_steps, args.ddim_eta, args.unconditional_guidance_scale, None, decode_frame=args.decode_frame, seed=args.seed, **kwargs)
+                                args.ddim_steps, args.ddim_eta, args.unconditional_guidance_scale, None, decode_frame=args.decode_frame, **kwargs)
             else:
                 batch_samples = batch_ddim_sampling(peft_model, cond, noise_shape, args.n_samples, \
-                                args.ddim_steps, args.ddim_eta, args.unconditional_guidance_scale, None, decode_frame=args.decode_frame, seed=args.seed, **kwargs)
+                                args.ddim_steps, args.ddim_eta, args.unconditional_guidance_scale, None, decode_frame=args.decode_frame, **kwargs)
 
             print("batch_samples dtype: ", batch_samples.dtype)
             print("batch_samples device: ", batch_samples.device)