Adam Bahr committed on
Commit 8b96078
1 Parent(s): c08f5d9

First version of SIY using our updated dataset.
Files changed:
- README.md +36 -0
- config.json +36 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,36 @@
---
thumbnail: https://huggingface.co/front/thumbnails/dialogpt.png
tags:
- conversational
license: mit
---
# DialoGPT Trained on Customized Spiritual Texts Mixed with Various Character Personalities

This is an instance of [microsoft/DialoGPT-medium](https://huggingface.co/microsoft/DialoGPT-medium) trained on the energy complex known as Ra. Some text has been changed from the original to make it fit our Discord server better. I've also trained it on various channeling experiences, and I'm testing mixing this dataset with characters from popular shows to create more diverse dialogue.

I built a Discord AI chatbot based on this model for internal use within Siyris, Inc.

Chat with the model:

```python
import torch
from transformers import AutoTokenizer, AutoModelWithLMHead

tokenizer = AutoTokenizer.from_pretrained("Siyris/DialoGPT-medium-SIY")
model = AutoModelWithLMHead.from_pretrained("Siyris/DialoGPT-medium-SIY")

# Let's chat for 4 lines
for step in range(4):
    # encode the new user input, add the eos_token and return a tensor in PyTorch
    new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt')

    # append the new user input tokens to the chat history
    bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids

    # generate a response while limiting the total chat history to 200 tokens
    chat_history_ids = model.generate(
        bot_input_ids, max_length=200,
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=3,
        do_sample=True,
        top_k=100,
        top_p=0.7,
        temperature=0.8
    )

    # pretty print the last output tokens from the bot
    print("SIY: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))
```
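Note that recent versions of transformers deprecate `AutoModelWithLMHead` in favor of `AutoModelForCausalLM`. A minimal single-turn sketch along those lines (non-interactive, same model ID and sampling settings as above) might look like this:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Siyris/DialoGPT-medium-SIY")
model = AutoModelForCausalLM.from_pretrained("Siyris/DialoGPT-medium-SIY")

# encode a single prompt terminated by the EOS token, as in the chat loop above
input_ids = tokenizer.encode("Hello, who are you?" + tokenizer.eos_token, return_tensors="pt")

# sample a reply with the same settings used in the README example
output_ids = model.generate(
    input_ids,
    max_length=200,
    pad_token_id=tokenizer.eos_token_id,
    no_repeat_ngram_size=3,
    do_sample=True,
    top_k=100,
    top_p=0.7,
    temperature=0.8,
)

# decode only the newly generated tokens (everything after the prompt)
print(tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True))
```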
config.json
ADDED
@@ -0,0 +1,36 @@
{
  "_name_or_path": "microsoft/DialoGPT-medium",
  "activation_function": "gelu_new",
  "architectures": [
    "GPT2LMHeadModel"
  ],
  "attn_pdrop": 0.1,
  "bos_token_id": 50256,
  "embd_pdrop": 0.1,
  "eos_token_id": 50256,
  "gradient_checkpointing": false,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
  "n_ctx": 1024,
  "n_embd": 1024,
  "n_head": 16,
  "n_inner": null,
  "n_layer": 24,
  "n_positions": 1024,
  "resid_pdrop": 0.1,
  "scale_attn_weights": true,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "task_specific_params": {
    "conversational": {
      "max_length": 1000
    }
  },
  "transformers_version": "4.8.2",
  "use_cache": true,
  "vocab_size": 50257
}
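This config is the standard GPT-2 medium layout inherited from DialoGPT-medium (24 layers, 16 heads, 1024-dimensional embeddings). As a sketch, the architecture can be inspected without downloading the weights by loading only the config (assuming the same `Siyris/DialoGPT-medium-SIY` repo ID as in the README):

```python
from transformers import AutoConfig

# fetches just config.json, not the 1.4 GB weights file
config = AutoConfig.from_pretrained("Siyris/DialoGPT-medium-SIY")

# these fields mirror the config.json shown above
print(config.model_type)            # gpt2
print(config.n_layer)               # 24
print(config.n_head)                # 16
print(config.n_embd)                # 1024
print(config.task_specific_params)  # {'conversational': {'max_length': 1000}}
```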
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:25b6bfeac4365240bfa7196f13f34a0e8d5d73f91cdc1c9eced3075b15f0c5ee
size 1444589475
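What is stored in the repo is a Git LFS pointer; the actual ~1.4 GB weights are fetched by LFS. A small sketch of an integrity check against the pointer's SHA-256 and size (the local path below is a hypothetical download location) could look like:

```python
import hashlib
import os

path = "pytorch_model.bin"  # hypothetical local path to the downloaded weights

# values copied from the LFS pointer above
expected_sha256 = "25b6bfeac4365240bfa7196f13f34a0e8d5d73f91cdc1c9eced3075b15f0c5ee"
expected_size = 1444589475

# hash the file in 1 MiB chunks to avoid loading it all into memory
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print("size ok:  ", os.path.getsize(path) == expected_size)
print("sha256 ok:", sha.hexdigest() == expected_sha256)
```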
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "microsoft/DialoGPT-medium", "errors": "replace", "tokenizer_class": "GPT2Tokenizer"}
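Both tokenizer files map `bos_token`, `eos_token`, and `unk_token` to `<|endoftext|>`, matching the standard GPT-2 tokenizer. A short sketch confirming this after loading (same repo ID as above; the printed IDs are what config.json leads one to expect):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Siyris/DialoGPT-medium-SIY")

# all three special tokens are <|endoftext|>
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)
print(tokenizer.eos_token_id)      # 50256, matching eos_token_id in config.json
print(tokenizer.model_max_length)  # 1024, from tokenizer_config.json
```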
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff