egon-nlpulse committed on
Commit 74852cc
1 Parent(s): 440fc16
Files changed (1)
  1. README.md +57 -1
README.md CHANGED
@@ -5,4 +5,60 @@ datasets:
  language:
  - en
  library_name: transformers
- ---
+ ---
+
+ # 4-bit quantization - 4.92 GB GPU memory usage for inference
+
+ ```
+ $ nvidia-smi
+ +-----------------------------------------------------------------------------+
+ | NVIDIA-SMI 515.105.01   Driver Version: 515.105.01   CUDA Version: 11.7     |
+ |-------------------------------+----------------------+----------------------+
+ | GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+ | Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+ |                               |                      |               MIG M. |
+ |===============================+======================+======================|
+ |   1  NVIDIA GeForce ...  Off  | 00000000:04:00.0 Off |                  N/A |
+ | 37%   70C    P2   163W / 170W |   4923MiB / 12288MiB |     91%      Default |
+ |                               |                      |                  N/A |
+ +-------------------------------+----------------------+----------------------+
+ ```
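+
+ Loading in 4-bit needs `bitsandbytes` and `accelerate` alongside `transformers` (this package list is an assumption inferred from the imports below; any reasonably recent versions should work):
+
+ ```
+ pip install transformers accelerate bitsandbytes
+ ```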
+
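+ The 4.92 GB figure can be cross-checked from inside Python (a minimal sketch, not part of the original script; `torch.cuda` reports the allocator's peak, which sits slightly below the nvidia-smi total because of CUDA context overhead):
+
+ ```python
+ import torch
+
+ # Peak GPU memory held by PyTorch's allocator on device 0, in GiB.
+ print(f"peak: {torch.cuda.max_memory_allocated(0) / 1024**3:.2f} GiB")
+ ```
+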
+ ```python
+ import os
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+
+ # model repository; can be overridden via the `model_path` environment variable
+ model_path = os.environ.get("model_path", "nlpulse/gpt-j-6b-english_quotes")
+
+ # tokenizer
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+ tokenizer.pad_token = tokenizer.eos_token
+
+ # quantization config: NF4 4-bit weights, double quantization, bfloat16 compute
+ quant_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16
+ )
+
+ # model, loaded in 4-bit and placed entirely on GPU 0
+ model = AutoModelForCausalLM.from_pretrained(model_path, quantization_config=quant_config, device_map={"": 0})
+
+ # inference
+ device = "cuda"
+ text_list = ["Ask not what your country", "Be the change that", "You only live once, but", "I'm selfish, impatient and"]
+ for text in text_list:
+     inputs = tokenizer(text, return_tensors="pt").to(device)
+     outputs = model.generate(**inputs, max_new_tokens=20)
+     print('>> ', text, " => ", tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```
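+
+ Because `pad_token` is set to `eos_token` above, the four prompts can also run as one batched `generate` call (a sketch reusing the `tokenizer`/`model` objects from the script above; `pad_token_id` is passed explicitly to avoid the runtime warning):
+
+ ```python
+ # Left padding keeps each prompt flush against its generated continuation.
+ tokenizer.padding_side = "left"
+ inputs = tokenizer(text_list, return_tensors="pt", padding=True).to(device)
+ outputs = model.generate(**inputs, max_new_tokens=20, pad_token_id=tokenizer.eos_token_id)
+ for text, out in zip(text_list, outputs):
+     print('>> ', text, " => ", tokenizer.decode(out, skip_special_tokens=True))
+ ```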