umisetokikaze committed
Commit 3f69f5a
1 Parent(s): 504ffe6
Update README.md
README.md CHANGED
@@ -60,17 +60,23 @@ We would like to take this opportunity to thank
 
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
 
-
-
+model_id = "Local-Novel-LLM-project/Ninja-v1"
+new_tokens = 1024
 
-
-
+model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, torch_dtype=torch.float16, attn_implementation="flash_attention_2", device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained(model_id)
 
-
-generated_text = tokenizer.decode(output)
+system_prompt = "あなたはプロの小説家です。\n小説を書いてください\n-------- "
 
-
+prompt = input("Enter a prompt: ")
+system_prompt += prompt + "\n-------- "
+model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
+
+
+generated_ids = model.generate(**model_inputs, max_new_tokens=new_tokens, do_sample=True)
+print(tokenizer.batch_decode(generated_ids)[0])
 ````
 
 ## Merge recipe
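In the added snippet, `system_prompt` (Japanese for "You are a professional novelist.\nPlease write a novel.\n-------- ") is concatenated with the user input but never passed to the tokenizer; only the bare `prompt` is encoded, so the system instruction never reaches the model. A minimal sketch of the presumably intended call, reusing the names defined in the snippet above (an assumption about intent, not the author's confirmed fix):

```python
# Assumption: the combined system_prompt, not the bare prompt, was meant to be
# encoded. The variables (tokenizer, model, system_prompt, new_tokens) come
# from the committed snippet above.
model_inputs = tokenizer([system_prompt], return_tensors="pt").to("cuda")
generated_ids = model.generate(**model_inputs, max_new_tokens=new_tokens, do_sample=True)
# skip_special_tokens drops markers such as <s> from the decoded output
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])
```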
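Loading with `attn_implementation="flash_attention_2"` assumes the separate `flash-attn` package and a supported CUDA GPU. A fallback sketch (not part of the committed README) that keeps the rest of the snippet unchanged is PyTorch's built-in scaled-dot-product attention:

```python
# Fallback sketch: SDPA attention needs no flash-attn installation.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    attn_implementation="sdpa",  # built-in PyTorch attention, no extra dependency
    device_map="auto",
)
```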