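# Open-Sora training config: STDiT-XL/2 on 16-frame 256x256 clips with the
# rectified-flow scheduler. A minimal launch sketch, assuming the repo's
# standard torchrun entry point and an 8-GPU node (adjust the config path to
# wherever this file lives):
#
#   torchrun --standalone --nproc_per_node 8 scripts/train.py \
#       /path/to/this_config.py --data-path /path/to/train.csv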
# Define dataset
dataset = dict(
    type="VideoTextDataset",
    data_path=None,  # path to the training CSV; must be set before launching
    num_frames=16,  # frames sampled per clip
    frame_interval=3,  # stride between sampled frames
    image_size=(256, 256),  # target resolution after resize/crop
)
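# data_path should point to a CSV listing the training clips. A hypothetical
# two-column layout (video path + caption), matching what VideoTextDataset
# style loaders typically consume:
#
#   path,text
#   /data/videos/clip_0001.mp4,"A dog runs across a snowy field."
#   /data/videos/clip_0002.mp4,"Timelapse of clouds over a city skyline."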

# Define acceleration
num_workers = 4  # dataloader workers per process
dtype = "bf16"  # mixed-precision training in bfloat16
grad_checkpoint = True  # trade recompute for activation memory
plugin = "zero2"  # ColossalAI ZeRO-2 optimizer-state sharding
sp_size = 1  # sequence-parallel group size (1 = disabled)

# Define model
model = dict(
    type="STDiT-XL/2",
    space_scale=0.5,  # spatial positional-embedding scale for 256x256
    time_scale=1.0,  # temporal positional-embedding scale
    # Alternative starting checkpoints:
    # from_pretrained="PixArt-XL-2-512x512.pth",
    # from_pretrained="OpenSora-v1-HQ-16x512x512.pth",
    from_pretrained="PRETRAINED_MODEL",  # set to the checkpoint to train from
    enable_flash_attn=True,  # requires flash-attn
    enable_layernorm_kernel=True,  # requires apex
)
# Optional masked-training mix (disabled here); ratios must sum to 1.
# mask_ratios = {
#     "identity": 0.9,
#     "random": 0.06,
#     "mask_head": 0.01,
#     "mask_tail": 0.01,
#     "mask_head_tail": 0.02,
# }
vae = dict(
    type="VideoAutoencoderKL",
    from_pretrained="stabilityai/sd-vae-ft-ema",  # 2D image VAE applied frame-wise
)
text_encoder = dict(
    type="t5",
    from_pretrained="DeepFloyd/t5-v1_1-xxl",
    model_max_length=120,  # max caption length in tokens
    shardformer=True,  # shard the frozen T5 encoder across GPUs to save memory
)
scheduler = dict(
    type="rflow",  # rectified-flow training objective
)
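# Rectified flow learns a straight-line velocity field between data and noise.
# Under one common convention (an assumption, not lifted from this repo):
#   x_t = (1 - t) * x_0 + t * eps,   training target v = eps - x_0.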

# Others
seed = 42
outputs = "outputs"  # directory for checkpoints and logs
wandb = True  # log metrics to Weights & Biases

epochs = 1
log_every = 10  # steps between log lines
ckpt_every = 1000  # steps between checkpoints
load = None  # path to a saved training state to resume from

batch_size = 16  # per-process batch size
lr = 2e-5
grad_clip = 1.0  # max gradient norm
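# batch_size is per process; with ZeRO-2 data parallelism the effective global
# batch is batch_size * num_gpus (e.g. 128 on an 8-GPU node).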