Update app.py
app.py CHANGED
@@ -9,7 +9,11 @@ from PIL import Image
 @st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None, re.Pattern: lambda _: None}, allow_output_mutation=True, suppress_st_warning=True)
 def get_model(model_name, model_path):
     tokenizer = transformers.GPT2Tokenizer.from_pretrained(model_name)
+    tokenizer.add_special_tokens({
+        'eos_token': '[EOS]'
+    })
     model = transformers.GPT2LMHeadModel.from_pretrained(model_name)
+    model.resize_token_embeddings(len(tokenizer))
     model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
     model.eval()
     return model, tokenizer
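
The added lines register '[EOS]' as a new special end-of-sequence token and resize the model's input embeddings before the fine-tuned weights are loaded, so the embedding matrix matches a checkpoint that was presumably saved with the same enlarged vocabulary. Below is a minimal usage sketch, not part of this commit: it assumes the get_model() defined above, its imports (streamlit, torch, transformers), a "gpt2" base model, and a hypothetical checkpoint path and prompt.

# Usage sketch (assumed model name, checkpoint path, prompt, and sampling settings).
model, tokenizer = get_model("gpt2", "model/checkpoint.pt")

# '[EOS]' is not in GPT-2's base vocabulary, so the tokenizer grows by one
# token (50257 -> 50258); resize_token_embeddings() keeps the model's
# embedding matrix in step with it before load_state_dict() runs.
assert len(tokenizer) == model.get_input_embeddings().num_embeddings

input_ids = tokenizer.encode("Example prompt", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_length=60,
        do_sample=True,
        top_p=0.95,
        eos_token_id=tokenizer.eos_token_id,  # id of the new '[EOS]' token
    )
st.write(tokenizer.decode(output_ids[0], skip_special_tokens=True))

Resizing before load_state_dict() matters: with the default strict loading, a checkpoint whose embedding table already includes the extra token would otherwise fail with a size-mismatch error, which is a plausible cause of the runtime error this commit addresses.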