Update app.py
app.py CHANGED
@@ -279,10 +279,10 @@ def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 10
     half = dim // 2
 
     # Does not block the CUDA stream, but has about 1e-4 differences from the official Flux code:
-    freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half)
+    # freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half)
 
     # Blocks the CUDA stream, but is consistent with the official Flux code:
-
+    freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(t.device)
 
     args = t[:, None].float() * freqs[None]
     embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
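For context, a minimal self-contained sketch of how timestep_embedding reads after this change. Everything outside the hunk (the time_factor scaling, the odd-dim padding, and the final dtype cast) is an assumption modeled on the Flux-style signature in the hunk header, not shown by this diff; the time_factor default is truncated to "10" in the header above, and 1000.0 below is a guess.

import math

import torch
from torch import Tensor


def timestep_embedding(t: Tensor, dim: int, max_period: int = 10000,
                       time_factor: float = 1000.0) -> Tensor:
    # Sinusoidal timestep embeddings, Flux-style. Only the `freqs` lines below
    # come from this diff; the rest is an assumed reconstruction, and the
    # time_factor default is a guess (truncated in the hunk header).
    t = time_factor * t
    half = dim // 2

    # Does not block the CUDA stream, but has about 1e-4 differences from the
    # official Flux code:
    # freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half)

    # Blocks the CUDA stream (host-to-device copy), but is consistent with the
    # official Flux code:
    freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(t.device)

    args = t[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        # Pad with a zero column when dim is odd (assumed, as in Flux).
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    if torch.is_floating_point(t):
        embedding = embedding.to(t)
    return embedding


if __name__ == "__main__":
    emb = timestep_embedding(torch.tensor([0.0, 0.5, 1.0]), dim=256)
    print(emb.shape)  # torch.Size([3, 256])

A plausible reading of the ~1e-4 figure: CPU and CUDA can disagree on exp at the float32 ULP level (around 1e-7) when computing freqs, and once timesteps are scaled by time_factor into the hundreds, that drift inside cos/sin grows to roughly 1e-4.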