Commit 841b7c4 · committed by haixuantao · 1 parent: 810a70f

Fix autoawq kernels requirements

Files changed:
- README.md (+20, -1)
- requirements.txt (+11, -0)
- tests/test_idefics2.py (+1, -2)
README.md CHANGED

````diff
@@ -11,11 +11,12 @@ Create a new conda environment for idefics2. This requirements file suppose that
 
 ```bash
 conda create -n idefics2 python=3.10
+conda activate idefics2
 pip install -r requirements.txt
 pip install -e <PATH TO LATEST TRANSFOMERS VERSION>
 ```
 
-##
+## Robomaster Connection
 
 Make sure to be connected using the wifi hotspot of the robomaster which is the most stable one.
 
@@ -23,6 +24,24 @@ The default password for the hotpsot is: 12341234
 
 You might need to have a second wifi card if you want to be able to run the demo with internet on.
 
+## Post-Installation test
+
+Please try running idefics2 with:
+
+```bash
+conda activate idefics2
+python tests/test_idefics2.py
+```
+
+Please try running robomaster with:
+
+```bash
+conda activate robomaster
+python tests/test_robomaster.py
+```
+
+## Running the demo
+
 ```bash
 export HF_TOKEN=<TOKEN>
 conda activate idefics2
````
requirements.txt ADDED

```diff
@@ -0,0 +1,11 @@
+dora-rs
+torch==2.2.0
+flash_attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
+autoawq
+autoawq-kernels
+sounddevice
+openai-whisper
+pynput
+opencv-python
+Pillow
+transformers
```
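The `flash_attn` entry pins a prebuilt wheel tagged `cp310`, `torch2.2`, and `cu122`, which only imports cleanly when the environment matches those tags. A small sanity check, as a sketch under that assumption, could be:

```python
# Sanity-check sketch (not part of the commit): confirm the interpreter and
# torch build match the tags in the pinned flash_attn wheel filename.
import sys

import torch

assert sys.version_info[:2] == (3, 10), "wheel is tagged cp310 (CPython 3.10)"
assert torch.__version__.startswith("2.2"), "wheel is built against torch 2.2"
print("torch CUDA build:", torch.version.cuda)  # the wheel expects a 12.x build
```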
tests/test_idefics2.py CHANGED

```diff
@@ -4,7 +4,7 @@ from PIL import Image
 from io import BytesIO
 
 from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
-
+import awq_ext
 
 MODE = "quantized"
 DEVICE = "cuda"
@@ -98,7 +98,6 @@ def ask_vlm(image, instruction):
 
 import time
 
-model.eval()
 now = time.time()
 print(ask_vlm(image1, "What is this?")[0].split("\nAssistant: ")[1])
 
```
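The new bare `import awq_ext` makes the test fail at startup when the `autoawq-kernels` extension is missing, which matches the commit title. A variant of the same idea with an explicit error message, as a hedged sketch (the guard below is not in the commit), might read:

```python
# Fail-fast sketch (the commit itself uses a bare import): surface a missing
# or mismatched autoawq-kernels build before any model loading begins.
try:
    import awq_ext  # compiled CUDA kernels installed by autoawq-kernels
except ImportError as exc:
    raise SystemExit(
        "awq_ext not importable; reinstall autoawq-kernels against the "
        "pinned torch==2.2.0 build"
    ) from exc
```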