Update README.md
README.md (changed)
```diff
@@ -32,7 +32,7 @@ I strongly advice to run inference in INT8 or INT4 mode, with the help of Bitsan
 import torch
 from transformers import AutoTokenizer, pipeline, AutoModel, AutoModelForCausalLM, BitsAndBytesConfig
 
-MODEL = "ecastera/eva-
+MODEL = "ecastera/ecastera-eva-westlake-7b-spanish"
 
 quantization_config = BitsAndBytesConfig(
     load_in_4bit=True,
```
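
For context, a minimal sketch of how the updated snippet plausibly continues past where the diff cuts off: loading the renamed model in 4-bit via bitsandbytes and running a short generation. The `nf4` quant type, double quantization, `float16` compute dtype, and the Spanish prompt are illustrative assumptions, not taken from the README.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

MODEL = "ecastera/ecastera-eva-westlake-7b-spanish"

# 4-bit quantization config; nf4 + double quantization + fp16 compute
# are common choices, used here as assumptions for illustration.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(
    MODEL,
    quantization_config=quantization_config,
    device_map="auto",  # place layers on available GPU(s)/CPU automatically
)

# Hypothetical prompt, chosen only to exercise the Spanish-tuned model.
prompt = "Dime seis lugares para visitar en España"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```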