Update README.md
README.md
User message<|im_end|>
<|im_start|>assistant
```

## Usage

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig


model_id = "rxavier/Taurus-7B-1.0"

# Load the model in bfloat16 and move it to the GPU
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
).to("cuda")
tokenizer = AutoTokenizer.from_pretrained(model_id)

generation_config = GenerationConfig(
    bos_token_id=tokenizer.bos_token_id,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.pad_token_id,
    max_new_tokens=512,  # the default max_length of 20 would truncate the reply
)

prompt = "Give me latex formulas for extended euler equations"
system_message = "You are an expert in economics with PhD level knowledge. You are helpful, give thorough and clear explanations, and use equations and formulas where needed."

messages = [
    {"role": "system", "content": system_message},
    {"role": "user", "content": prompt},
]

# Build the prompt with the model's chat template and generate
tokens = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to("cuda")

with torch.no_grad():
    outputs = model.generate(inputs=tokens, generation_config=generation_config)
print(tokenizer.decode(outputs[0].cpu().tolist()))
```
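Note that `tokenizer.apply_chat_template` builds the `<|im_start|>`/`<|im_end|>` prompt format shown above, so the messages don't need to be formatted by hand.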

## GGUF quants

You can find GGUF quants for llama.cpp [here](https://huggingface.co/rxavier/Taurus-7B-1.0-GGUF).
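
If you prefer to run a quantized file directly, something like the following should work with the `llama-cpp-python` bindings. This is a minimal sketch, not part of the original card; the quant filename is a placeholder, so substitute one of the files from the GGUF repo.

```python
# Minimal sketch using llama-cpp-python (pip install llama-cpp-python).
# The filename below is a placeholder; pick an actual quant file from the GGUF repo.
from llama_cpp import Llama

llm = Llama(model_path="taurus-7b-1.0.Q4_K_M.gguf", n_ctx=4096)

response = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are an expert in economics with PhD level knowledge."},
        {"role": "user", "content": "Give me latex formulas for extended euler equations"},
    ],
)
print(response["choices"][0]["message"]["content"])
```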