Fix padding token

#1
by jbochi - opened
Files changed (4) hide show
  1. .gitattributes +0 -1
  2. README.md +2 -6
  3. model-q4k.gguf +0 -3
  4. model-q6k.gguf +0 -3
.gitattributes CHANGED
@@ -34,4 +34,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  tokenizer.json filter=lfs diff=lfs merge=lfs -text
37
- *.gguf filter=lfs diff=lfs merge=lfs -text
 
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  tokenizer.json filter=lfs diff=lfs merge=lfs -text
 
README.md CHANGED
@@ -489,15 +489,11 @@ Find below some example scripts on how to use the model:
489
  <details>
490
  <summary> Click to expand </summary>
491
 
492
- First, install the Python packages that are required:
493
-
494
- `pip install transformers accelerate sentencepiece`
495
-
496
  ```python
497
- from transformers import T5ForConditionalGeneration, T5Tokenizer
498
 
499
  model_name = 'jbochi/madlad400-7b-mt-bt'
500
- model = T5ForConditionalGeneration.from_pretrained(model_name, device_map="auto")
501
  tokenizer = T5Tokenizer.from_pretrained(model_name)
502
 
503
  text = "<2pt> I love pizza!"
 
489
  <details>
490
  <summary> Click to expand </summary>
491
 
 
 
 
 
492
  ```python
493
+ from transformers import T5ForConditionalGeneration, T5Tokenizer, GenerationConfig
494
 
495
  model_name = 'jbochi/madlad400-7b-mt-bt'
496
+ model = T5ForConditionalGeneration.from_pretrained(model_name, device_map="auto")
497
  tokenizer = T5Tokenizer.from_pretrained(model_name)
498
 
499
  text = "<2pt> I love pizza!"
model-q4k.gguf DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a20ef082258e423b78a71852cb59b0fa3396654d8b0bd365915343a8771cf58a
3
- size 4668768576
 
 
 
 
model-q6k.gguf DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9fa27654a20c1fac5ee21743f29fdd3ca80ef80db918c4f3d624deb35dc95998
3
- size 6807667008