Siddharth63
committed
Update README.md
README.md CHANGED
@@ -7,7 +7,8 @@ datasets:
 
 BitNet 250M trained on 7B tokens on PubMed + Clinical dataset
 
-Inference code:
+Inference code:
+```
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.models.llama.modeling_llama import *
 
@@ -65,4 +66,6 @@ model.to(device="cuda:0")
 prompt = "Atherosclerosis is"
 inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
 generate_ids = model.generate(inputs.input_ids, max_length=50)
-tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+
+```
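The fenced block this commit adds covers only the lines visible above; the diff elides README lines 14-65, and the `from transformers.models.llama.modeling_llama import *` wildcard import suggests the elided lines patch the Llama modules for BitNet's quantized layers. Below is a minimal sketch of the visible inference path as a runnable script, assuming a hypothetical checkpoint id (`Siddharth63/bitnet-250m-pubmed` is a placeholder, not confirmed by the diff) and omitting the elided BitNet-specific setup:

```
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repository id -- substitute the model's actual checkpoint.
checkpoint = "Siddharth63/bitnet-250m-pubmed"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)
model.to(device="cuda:0")  # matches the context line of the second hunk

# Generation snippet as shown in the diff, wrapped in print() so the
# script emits text when run outside a REPL.
prompt = "Atherosclerosis is"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
generate_ids = model.generate(inputs.input_ids, max_length=50)
print(tokenizer.batch_decode(generate_ids, skip_special_tokens=True,
                             clean_up_tokenization_spaces=False)[0])
```

The only change from the README's snippet is the `print()` wrapper on the final `batch_decode` call; in the README the bare expression relies on a REPL or notebook echoing its value.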