satyamt commited on
Commit
b4e9dfa
1 Parent(s): dab1f1a

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -56,7 +56,7 @@ from transformers import AutoTokenizer
56
  import transformers
57
  import torch
58
 
59
- model = "Technoculture/Medstral-7B"
60
 
61
  tokenizer = AutoTokenizer.from_pretrained(model)
62
  pipeline = transformers.pipeline(
@@ -65,7 +65,7 @@ pipeline = transformers.pipeline(
65
  model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
66
  )
67
 
68
- messages = [{"role": "user", "content": "Explain what a Mixture of Experts is in less than 100 words."}]
69
  prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
70
  outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
71
  print(outputs[0]["generated_text"])
 
56
  import transformers
57
  import torch
58
 
59
+ model = "Technoculture/Medorca-11B"
60
 
61
  tokenizer = AutoTokenizer.from_pretrained(model)
62
  pipeline = transformers.pipeline(
 
65
  model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
66
  )
67
 
68
+ messages = [{"role": "user", "content": "Why am I feeling so tired this month?"}]
69
  prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
70
  outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
71
  print(outputs[0]["generated_text"])