Create app.py
app.py ADDED
@@ -0,0 +1,24 @@
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+# Load the KoGPT tokenizer with its special tokens (float16 revision).
+tokenizer = AutoTokenizer.from_pretrained(
+    'kakaobrain/kogpt', revision='KoGPT6B-ryan1.5b-float16',  # or float32 version: revision='KoGPT6B-ryan1.5b'
+    bos_token='[BOS]', eos_token='[EOS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]'
+)
+# Load the 6B-parameter KoGPT model and move it to the GPU.
+model = AutoModelForCausalLM.from_pretrained(
+    'kakaobrain/kogpt', revision='KoGPT6B-ryan1.5b-float16',  # or float32 version: revision='KoGPT6B-ryan1.5b'
+    pad_token_id=tokenizer.eos_token_id,
+    torch_dtype='auto', low_cpu_mem_usage=True
+).to(device='cuda', non_blocking=True)
+_ = model.eval()
+
+# Korean prompt: "AI, can you talk?"
+prompt = '인공지능아, 너는 말을 할 수 있니?'
+with torch.no_grad():
+    tokens = tokenizer.encode(prompt, return_tensors='pt').to(device='cuda', non_blocking=True)
+    gen_tokens = model.generate(tokens, do_sample=True, temperature=0.8, max_length=64)
+    generated = tokenizer.batch_decode(gen_tokens)[0]
+
+print(generated)
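
As committed, the script loads KoGPT and runs a single generation at import time. If the prompt needs to vary (for example, when driven from a Space UI), the generation step could be factored into a small helper. The sketch below is illustrative only and not part of this commit; the name generate_text and its parameters are assumptions, and it relies on the tokenizer and model objects defined in app.py above.

    def generate_text(prompt, max_length=64, temperature=0.8):
        # Encode the prompt, sample a continuation from KoGPT, and decode it back to text.
        with torch.no_grad():
            tokens = tokenizer.encode(prompt, return_tensors='pt').to(device='cuda', non_blocking=True)
            gen_tokens = model.generate(tokens, do_sample=True, temperature=temperature, max_length=max_length)
            return tokenizer.batch_decode(gen_tokens)[0]

    # Example call with the same prompt used in app.py.
    print(generate_text('인공지능아, 너는 말을 할 수 있니?'))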