epoch 1/20
Traceback (most recent call last):
  File "/workspace/kohya-trainer/sdxl_train.py", line 649, in <module>
    train(args)
  File "/workspace/kohya-trainer/sdxl_train.py", line 475, in train
    accelerator.backward(loss)
  File "/workspace/venv/lib/python3.10/site-packages/accelerate/accelerator.py", line 1743, in backward
    self.scaler.scale(loss).backward(**kwargs)
  File "/workspace/venv/lib/python3.10/site-packages/torch/_tensor.py", line 487, in backward
    torch.autograd.backward(
  File "/workspace/venv/lib/python3.10/site-packages/torch/autograd/__init__.py", line 200, in backward
    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
  File "/workspace/venv/lib/python3.10/site-packages/torch/autograd/function.py", line 274, in apply
    return user_fn(self, *args)
  File "/workspace/venv/lib/python3.10/site-packages/torch/utils/checkpoint.py", line 157, in backward
    torch.autograd.backward(outputs_with_grad, args_with_grad)
  File "/workspace/venv/lib/python3.10/site-packages/torch/autograd/__init__.py", line 200, in backward
    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 26.00 MiB (GPU 0; 23.65 GiB total capacity; 18.73 GiB already allocated; 25.69 MiB free; 19.47 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
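The hint at the end of the message refers to PyTorch's PYTORCH_CUDA_ALLOC_CONF allocator setting, which is normally exported in the shell before launching the trainer. Below is a minimal, hedged Python sketch of the in-process equivalent; the 128 MiB split size is an illustrative value I chose, not something taken from this log, and the variable only has an effect if it is set before torch's CUDA allocator is initialized.

# Sketch: ask the CUDA caching allocator to limit block splitting, which the
# OOM message suggests trying when reserved memory is much larger than
# allocated memory. The split-size value is an assumption, not a recommendation.
import os

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

import torch  # imported after the variable is set, before any CUDA allocation

if torch.cuda.is_available():
    # Confirm the GPU from the traceback is visible under the new allocator config.
    print(torch.cuda.get_device_name(0))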