Nanobit committed on
Commit 72fe3f8
1 Parent(s): 47961fd

Fix(docs): Update flash attn requirements (#409)

Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -483,8 +483,8 @@ max_grad_norm:
  flash_optimum:
  # whether to use xformers attention patch https://github.com/facebookresearch/xformers:
  xformers_attention:
- # whether to use flash attention patch https://github.com/HazyResearch/flash-attention:
- flash_attention: # require a100 for llama
+ # whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:
+ flash_attention:
  # whether to use scaled-dot-product attention
  # https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
  sdp_attention:
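
For reference, these options are toggles in the axolotl YAML config. A minimal sketch of enabling the Dao-AILab flash attention patch is shown below; the `true` value and the choice to leave the alternative attention options unset are illustrative, not taken from this commit:

  # whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:
  flash_attention: true
  # leave the other attention patches unset so only one is applied (illustrative)
  xformers_attention:
  sdp_attention: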