Mikhil-jivus committed 72c7c74 (parent: 55bc99c)

Update app.py
app.py CHANGED
@@ -22,7 +22,7 @@ For more details, please check [our post](https://huggingface.co/blog/llama32).
 access_token = os.getenv('HF_TOKEN')
 # Download the Base model
 #model_id = "./models/Llama-32-3B-Instruct"
-model_id = "
+model_id = "princeton-nlp/gemma-2-9b-it-SimPO"
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
@@ -31,8 +31,9 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 #model_id = "nltpt/Llama-3.2-3B-Instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_id,token=access_token)
-tokenizer.padding_side = 'right'
-tokenizer.
+#tokenizer.padding_side = 'right'
+tokenizer.eos_token_id = 107
+tokenizer.eos_token = "<end_of_turn>"
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
     device_map=device,
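Taken together, the added lines point the Space at the SimPO checkpoint and override the tokenizer's EOS so generation stops at Gemma's turn delimiter. A minimal sanity check of those overrides might look like the sketch below; it is not part of the commit, and it assumes the checkpoint is reachable and that HF_TOKEN, if required, grants access.

import os
from transformers import AutoTokenizer

# Load the tokenizer for the checkpoint the commit switches to.
tokenizer = AutoTokenizer.from_pretrained(
    "princeton-nlp/gemma-2-9b-it-SimPO",
    token=os.getenv("HF_TOKEN"),
)

# Gemma chat checkpoints close each turn with "<end_of_turn>"; check that
# the hard-coded id 107 from the commit really maps to that token.
assert tokenizer.convert_tokens_to_ids("<end_of_turn>") == 107

# Apply the same overrides as the commit so generate() stops at the end
# of an assistant turn rather than at the default "<eos>".
tokenizer.eos_token = "<end_of_turn>"
tokenizer.eos_token_id = 107
print(tokenizer.eos_token, tokenizer.eos_token_id)  # -> <end_of_turn> 107

Deriving the id via convert_tokens_to_ids, as above, would also avoid relying on the magic number 107 if the tokenizer's vocabulary ever changes.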