Refactor to use upstream automodel.

app.py
CHANGED
@@ -1,28 +1,9 @@
-# -*- coding: utf-8 -*-
-"""Fujisaki_CPU.ipynb
-
-Automatically generated by Colaboratory.
-
-Original file is located at
-    https://colab.research.google.com/drive/1Damnr0Ha4zZAlKFvne9cu76uuElLNYus
-
-李萌萌's digital urn
-----
-
-This is a digital avatar of 李萌萌 trained with the ChatGLM model. You can type something into the question field, or leave it empty, to see what 李萌萌 will actually say.
-A T4-class GPU is already well up to this task.
-
-### Install dependencies
-"""
-
-#from modeling_chatglm import ChatGLMForConditionalGeneration
 import torch
-import sys
 
 from transformers import AutoTokenizer, GenerationConfig, AutoModel
 
-model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
-tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, revision="fdb7a60").float()
+tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, revision="fdb7a60")
 
 from peft import PeftModel
 peft_path = 'ljsabc/Fujisaki_GLM' # change it to your own
@@ -36,6 +17,7 @@ model = PeftModel.from_pretrained(
 print(model.peft_config)
 # We have to use full precision, as some tokens are >65535
 model.eval()
+print(model)
 
 torch.set_default_tensor_type(torch.FloatTensor)
 def evaluate(context, temperature, top_p, top_k):
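
The `model = PeftModel.from_pretrained(` call that heads the second hunk lies outside the diff context, so its arguments are not visible here. A minimal sketch of how a LoRA adapter is typically attached with the standard peft API, assuming the `peft_path` defined above (illustrative only, not necessarily the Space's exact call):

# Hypothetical reconstruction of the elided call; the real arguments may differ.
model = PeftModel.from_pretrained(
    model,      # base ChatGLM-6B model loaded above
    peft_path,  # 'ljsabc/Fujisaki_GLM', the LoRA adapter repository
)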
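
The body of `evaluate` also falls outside both hunks. For orientation, here is a minimal sketch of how `context`, `temperature`, `top_p`, and `top_k` would typically drive sampling through the transformers generation API (a hypothetical body; `max_new_tokens=256` is an assumed cap, and the Space's real implementation may differ):

# Hypothetical sketch of the elided body, not the Space's actual code.
def evaluate(context, temperature, top_p, top_k):
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        do_sample=True,
        max_new_tokens=256,  # assumed limit, not taken from the diff
    )
    inputs = tokenizer(context, return_tensors="pt")
    with torch.no_grad():
        output = model.generate(**inputs, generation_config=generation_config)
    # Decode only the newly generated tokens, skipping the prompt echo.
    return tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)

On CPU the model runs entirely in float32 (`.float()` plus `torch.set_default_tensor_type(torch.FloatTensor)`), which lines up with the in-code comment above: token ids larger than 65535 are not safe in half precision.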