Update README.md #2
by sixsixcoder - opened

README.md CHANGED
@@ -67,7 +67,7 @@ Use the transformers backend for inference:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

-MODEL_PATH = '
+MODEL_PATH = 'THUDM/glm-4-9b-chat-1m-hf'

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

@@ -101,6 +101,39 @@ out = model.generate(**generate_kwargs)
print(tokenizer.decode(out[0][input_len:], skip_special_tokens=True))
```

+### Use the vLLM backend (0.6.4 and later) for inference:
+
+```python
+from transformers import AutoTokenizer
+from vllm import LLM, SamplingParams
+
+# THUDM/glm-4-9b-chat-1m-hf
+# max_model_len, tp_size = 1048576, 4
+# If you run into out-of-memory (OOM) errors, reduce max_model_len or increase tp_size
+max_model_len, tp_size = 131072, 1
+model_name = "THUDM/glm-4-9b-chat-1m-hf"
+prompt = [{"role": "user", "content": "what is your name?"}]
+
+tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+llm = LLM(
+    model=model_name,
+    tensor_parallel_size=tp_size,
+    max_model_len=max_model_len,
+    trust_remote_code=True,
+    enforce_eager=True,
+    # GLM-4-9B-Chat-1M-HF: if you run into OOM errors, enable the following parameters
+    # enable_chunked_prefill=True,
+    # max_num_batched_tokens=8192
+)
+stop_token_ids = [151329, 151336, 151338]
+sampling_params = SamplingParams(temperature=0.95, max_tokens=1024, stop_token_ids=stop_token_ids)
+
+inputs = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
+outputs = llm.generate(prompts=inputs, sampling_params=sampling_params)
+
+print(outputs[0].outputs[0].text)
+```
+
## LICENSE

The weights of the GLM-4 model are available under the terms of [LICENSE](LICENSE)
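
Note (not part of the diff): the two hunks above skip the unchanged middle of the transformers example (README lines 74-100), which is where `generate_kwargs` and `input_len` are built. The sketch below shows one typical way that part is written; the prompt text and generation settings are illustrative assumptions, not the actual unchanged lines.

```python
# Illustrative sketch only, not the README's unchanged lines elided by the diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_PATH = 'THUDM/glm-4-9b-chat-1m-hf'

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Format a chat turn with the tokenizer's chat template (assumed example prompt).
messages = [{"role": "user", "content": "what is your name?"}]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_tensors="pt",
    return_dict=True,
).to(model.device)

# The diff's context lines reference generate_kwargs and input_len; assumed definitions:
input_len = inputs["input_ids"].shape[1]
generate_kwargs = {
    "input_ids": inputs["input_ids"],
    "attention_mask": inputs["attention_mask"],
    "max_new_tokens": 128,
    "do_sample": False,
}

out = model.generate(**generate_kwargs)
print(tokenizer.decode(out[0][input_len:], skip_special_tokens=True))
```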
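
Note (not part of the diff): the added snippet runs vLLM offline through the `LLM` class. For a served, OpenAI-compatible endpoint with the same model, a hedged sketch follows; the `vllm serve` flags, localhost URL, and port are assumptions based on vLLM's standard server workflow, not anything stated in this PR.

```python
# Assumes the server was started separately, e.g.:
#   vllm serve THUDM/glm-4-9b-chat-1m-hf --max-model-len 131072 --trust-remote-code --enforce-eager
from openai import OpenAI

# Default local endpoint for `vllm serve`; adjust the host/port if you changed them.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="THUDM/glm-4-9b-chat-1m-hf",
    messages=[{"role": "user", "content": "what is your name?"}],
    temperature=0.95,
    max_tokens=1024,
)
print(response.choices[0].message.content)
```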