macadeliccc
committed on
Commit • 795a5c2
1 Parent(s): 80674f5
Update README.md
README.md CHANGED
@@ -16,18 +16,25 @@ license: apache-2.0
 ## Code Example
 
 ```python
-from transformers import
-
-
-
-
-
-
-
-
-
-outputs = model.generate(**inputs, max_new_tokens=4096)
-print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+from transformers import AutoTokenizer
+import transformers
+import torch
+
+model = "macadeliccc/WestLake-7B-v2-laser-truthy-dpo"
+messages = [{"role": "user", "content": "What is a large language model?"}]
+
+tokenizer = AutoTokenizer.from_pretrained(model)
+prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+pipeline = transformers.pipeline(
+    "text-generation",
+    model=model,
+    torch_dtype=torch.float16,
+    device_map="auto",
+)
+
+outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+print(outputs[0]["generated_text"])
+
 ```
 
 ## Evaluations
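As a quick sanity check on the updated example, here is a minimal sketch of how to inspect the prompt string that `tokenizer.apply_chat_template` builds before it is handed to the pipeline. Only the model ID and message from the diff above are assumed; the exact prompt format depends on the chat template bundled with the tokenizer.

```python
from transformers import AutoTokenizer

# Sketch: inspect the prompt string built by the chat template used in the
# updated README example. The model ID comes from the diff above; the exact
# token layout depends on the tokenizer's bundled chat template.
tokenizer = AutoTokenizer.from_pretrained("macadeliccc/WestLake-7B-v2-laser-truthy-dpo")
messages = [{"role": "user", "content": "What is a large language model?"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # e.g. a ChatML- or [INST]-style string, depending on the template
```

Note that `device_map="auto"` in the pipeline call requires the `accelerate` package to be installed alongside `transformers` and `torch`.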