migtissera
committed on
Commit
•
18cf5eb
1
Parent(s):
63b8f0e
Update README.md
Browse files
README.md
CHANGED
@@ -35,9 +35,7 @@ You didn't have any oranges on Saturday because you bought 3 oranges yesterday.
|
|
35 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
36 |
import torch
|
37 |
|
38 |
-
model_path = "/
|
39 |
-
# model_path = "/home/migel/gemma-2-27b"
|
40 |
-
# model_path = "google/gemma-2-27b-it"
|
41 |
|
42 |
tokenizer = AutoTokenizer.from_pretrained(model_path)
|
43 |
model = AutoModelForCausalLM.from_pretrained(
|
@@ -80,19 +78,12 @@ def generate_text(llm_prompt):
|
|
80 |
string = tokenizer.decode(output, skip_special_tokens=True)
|
81 |
return f"{string}"
|
82 |
|
83 |
-
|
84 |
-
# conversation = f"""<|im_start|>system\nYou are Tesoro, a helful AI assitant. You always provide detailed answers without hesitation. No follow-up questions necessary.<|im_end|>\n<|im_start|>user\n"""
|
85 |
-
|
86 |
conversation = f"""<bos><start_of_turn>user\n"""
|
87 |
-
# conversation = f"""Write a long poem about statins."""
|
88 |
|
89 |
while True:
|
90 |
user_input = input("You: ")
|
91 |
llm_prompt = f"{conversation}{user_input}<end_of_turn>\n<start_of_turn>model\n"
|
92 |
-
# llm_prompt = f"{conversation}{user_input}"
|
93 |
answer = generate_text(llm_prompt)
|
94 |
print(answer)
|
95 |
conversation = f"{llm_prompt}{answer}\n<start_of_turn>user\n"
|
96 |
-
json_data = {"prompt": user_input, "answer": answer}
|
97 |
-
|
98 |
```
|
|
|
35 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
36 |
import torch
|
37 |
|
38 |
+
model_path = "migtissera/Tess-v2.5-Gemma-2-27B-alpha"
|
|
|
|
|
39 |
|
40 |
tokenizer = AutoTokenizer.from_pretrained(model_path)
|
41 |
model = AutoModelForCausalLM.from_pretrained(
|
|
|
78 |
string = tokenizer.decode(output, skip_special_tokens=True)
|
79 |
return f"{string}"
|
80 |
|
|
|
|
|
|
|
81 |
conversation = f"""<bos><start_of_turn>user\n"""
|
|
|
82 |
|
83 |
while True:
|
84 |
user_input = input("You: ")
|
85 |
llm_prompt = f"{conversation}{user_input}<end_of_turn>\n<start_of_turn>model\n"
|
|
|
86 |
answer = generate_text(llm_prompt)
|
87 |
print(answer)
|
88 |
conversation = f"{llm_prompt}{answer}\n<start_of_turn>user\n"
|
|
|
|
|
89 |
```
|