winglian committed
Commit: 58b0d4b
Parent: ed70a08

update flash attention for gemma support (#1368)

Files changed (2)
  1. requirements.txt +1 -1
  2. setup.py +1 -1
requirements.txt CHANGED
@@ -12,7 +12,7 @@ fire
 PyYAML>=6.0
 requests
 datasets>=2.15.0
-flash-attn==2.3.3
+flash-attn==2.5.5
 sentencepiece
 wandb
 einops
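
The commit message ties this bump to Gemma: the previously pinned flash-attn 2.3.3 predates the kernels Gemma's attention path needs. Below is a minimal sketch of a runtime version guard built on the new floor; the guard itself is illustrative and not part of this commit, and it assumes the standard importlib.metadata plus the packaging library are available:

from importlib.metadata import PackageNotFoundError, version
from packaging.version import Version

# Floor taken from this commit's pin; anything older is assumed
# (per the commit message) to lack Gemma support.
MIN_FLASH_ATTN = Version("2.5.5")

try:
    installed = Version(version("flash-attn"))
except PackageNotFoundError as exc:
    raise RuntimeError("flash-attn is not installed") from exc

if installed < MIN_FLASH_ATTN:
    raise RuntimeError(
        f"flash-attn {installed} is older than {MIN_FLASH_ATTN}; "
        "upgrade before enabling flash attention with Gemma"
    )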
setup.py CHANGED
@@ -68,7 +68,7 @@ setup(
     dependency_links=dependency_links,
     extras_require={
         "flash-attn": [
-            "flash-attn==2.5.0",
+            "flash-attn==2.5.5",
         ],
         "fused-dense-lib": [
             "fused-dense-lib @ git+https://github.com/Dao-AILab/flash-attention@v2.3.3#subdirectory=csrc/fused_dense_lib",