Update README.md
README.md (changed):
@@ -56,7 +56,7 @@ from transformers import AutoTokenizer
 import transformers
 import torch
 
-model = "Technoculture/
+model = "Technoculture/Medorca-11B"
 
 tokenizer = AutoTokenizer.from_pretrained(model)
 pipeline = transformers.pipeline(
@@ -65,7 +65,7 @@ pipeline = transformers.pipeline(
 model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
 )
 
-messages = [{"role": "user", "content": "
+messages = [{"role": "user", "content": "Why am i feeling so tired this month?"}]
 prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
 outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
 print(outputs[0]["generated_text"])
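For reference, here is the updated snippet assembled into a single runnable script. The diff hides a few context lines (README lines 53–55 and 63–64), so the `AutoTokenizer` import, the `"text-generation"` task, and the `device_map="auto"` argument below are assumptions based on the usual `transformers` pipeline pattern, not part of the commit itself.

```python
# Hedged sketch of the full README example after this commit.
# Assumptions (not visible in the diff): the pipeline task is
# "text-generation" and weights are placed with device_map="auto".
# load_in_4bit requires the bitsandbytes package.
import torch
import transformers
from transformers import AutoTokenizer

model = "Technoculture/Medorca-11B"

tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",   # assumed task
    model=model,
    tokenizer=tokenizer,
    device_map="auto",   # assumed device placement
    model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
)

# apply_chat_template renders the message list into the model's chat
# prompt format; add_generation_prompt=True appends the marker that
# cues the model to answer as the assistant.
messages = [{"role": "user", "content": "Why am i feeling so tired this month?"}]
prompt = pipeline.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
outputs = pipeline(
    prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95
)
print(outputs[0]["generated_text"])
```

Note that the `text-generation` pipeline returns the prompt as part of `generated_text` by default; slice it off with `outputs[0]["generated_text"][len(prompt):]` if only the model's reply is wanted.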