Spaces:
Running
on
Zero
Running
on
Zero
Martín Santillán Cooper
committed on
Commit
•
93e42f8
1
Parent(s):
6617373
fix: send to device
Browse files
model.py
CHANGED
@@ -1,8 +1,6 @@
|
|
1 |
-
import logging.handlers
|
2 |
import torch
|
3 |
from torch.nn.functional import softmax
|
4 |
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel
|
5 |
-
import jinja2
|
6 |
import os
|
7 |
from time import time
|
8 |
from logger import logger
|
@@ -27,7 +25,7 @@ def generate_text(prompt):
|
|
27 |
add_generation_prompt=True,
|
28 |
return_tensors="pt")#.to(device)
|
29 |
if use_conda:
|
30 |
-
tokenized_chat.to(device)
|
31 |
with torch.no_grad():
|
32 |
logits = model(tokenized_chat).logits
|
33 |
gen_outputs = model.generate(tokenized_chat, max_new_tokens=128)
|
|
|
|
|
1 |
import torch
|
2 |
from torch.nn.functional import softmax
|
3 |
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel
|
|
|
4 |
import os
|
5 |
from time import time
|
6 |
from logger import logger
|
|
|
25 |
add_generation_prompt=True,
|
26 |
return_tensors="pt")#.to(device)
|
27 |
if use_conda:
|
28 |
+
tokenized_chat = tokenized_chat.to(device)
|
29 |
with torch.no_grad():
|
30 |
logits = model(tokenized_chat).logits
|
31 |
gen_outputs = model.generate(tokenized_chat, max_new_tokens=128)
|