Fix(docs): Update flash attn requirements (#409)
README.md CHANGED
@@ -483,8 +483,8 @@ max_grad_norm:
 flash_optimum:
 # whether to use xformers attention patch https://github.com/facebookresearch/xformers:
 xformers_attention:
-# whether to use flash attention patch https://github.com/
-flash_attention:
+# whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:
+flash_attention:
 # whether to use scaled-dot-product attention
 # https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
 sdp_attention:
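For context, the lines above sit in the attention section of the YAML config documented by this README. A minimal sketch of how the updated options might look in a config file follows; it assumes each option is a boolean flag and that only one attention patch is enabled at a time, both of which are inferred from the option names rather than stated in this diff.

# minimal sketch, not part of this diff: assumes boolean flags and that
# only one attention backend is enabled at a time
flash_optimum: false
xformers_attention: false
# flash attention patch from https://github.com/Dao-AILab/flash-attention
flash_attention: true
# scaled-dot-product attention, see
# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
sdp_attention: false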