InternLM-Math committed
Commit 8fd7d8a
Parent(s): c314c82
Update README.md
README.md CHANGED
@@ -87,18 +87,8 @@ We suggest using [LMDeploy](https://github.com/InternLM/LMDeploy)(>=0.2.1) for i
 from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
 
 backend_config = TurbomindEngineConfig(model_name='internlm2-chat-7b', tp=1, cache_max_entry_count=0.3)
-chat_template = ChatTemplateConfig(model_name='internlm2-chat-7b',
-
-                                   eosys='',
-                                   meta_instruction='',
-                                   user='<|im_start|>user\n',
-                                   assistant='<|im_start|>assistant\n',
-                                   eoh='<|im_end|>\n',
-                                   eoa='<|im_end|>\n',
-                                   stop_words=['<|im_end|>', '<|action_end|>'])
-pipe = pipeline(model_path='internlm/internlm2-math-7b',
-                chat_template_config=chat_template,
-                backend_config=backend_config)
+chat_template = ChatTemplateConfig(model_name='internlm2-chat-7b', system='', eosys='', meta_instruction='')
+pipe = pipeline(model_path='internlm/internlm2-math-7b', chat_template_config=chat_template, backend_config=backend_config)
 
 problem = '1+1='
 result = pipe([problem], request_output_len=1024, top_k=1)
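For clarity, here is the full LMDeploy snippet as it reads after this change, assembled as a sketch from the `+` side of the hunk above. It assumes LMDeploy >= 0.2.1 as the README recommends; the final `print(result)` is an illustrative addition, not part of the commit.

```python
from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig

# Single-GPU TurboMind engine (tp=1); cap the KV cache at ~30% of GPU memory.
backend_config = TurbomindEngineConfig(model_name='internlm2-chat-7b', tp=1, cache_max_entry_count=0.3)
# The simplified template only clears the system prompt / meta instruction;
# the role tokens now come from the built-in 'internlm2-chat-7b' template.
chat_template = ChatTemplateConfig(model_name='internlm2-chat-7b', system='', eosys='', meta_instruction='')
pipe = pipeline(model_path='internlm/internlm2-math-7b', chat_template_config=chat_template, backend_config=backend_config)

problem = '1+1='
# Greedy decoding (top_k=1), generating at most 1024 tokens.
result = pipe([problem], request_output_len=1024, top_k=1)
print(result)  # illustrative: inspect the pipeline's responses
```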
@@ -112,7 +102,7 @@ tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-math-7b", trust_re
 # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
 model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-math-7b", trust_remote_code=True, torch_dtype=torch.float16).cuda()
 model = model.eval()
-response, history = model.chat(tokenizer, "1+1=", history=[])
+response, history = model.chat(tokenizer, "1+1=", history=[], meta_instruction="")
 print(response)
 ```
 
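Likewise, the updated Transformers snippet after this change, assembled as a sketch from the hunk above. The imports and the full `tokenizer` line (truncated in the hunk header) are filled in from the surrounding README context.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-math-7b", trust_remote_code=True)
# Load in float16; loading in float32 may cause an OOM error.
model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-math-7b", trust_remote_code=True, torch_dtype=torch.float16).cuda()
model = model.eval()
# The commit adds meta_instruction="" so no default system prompt is prepended.
response, history = model.chat(tokenizer, "1+1=", history=[], meta_instruction="")
print(response)
```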