Hanlard committed on
Commit afbba8e
1 Parent(s): 59e4fdf

Upload 6 files

TokenConvert.py ADDED
@@ -0,0 +1,30 @@
+ # from tokenization_gptpangu import GPTPanguTokenizer
+ # import json
+ #
+ # tokenizer = GPTPanguTokenizer.from_pretrained(".")
+ # with open("tokenizer.json", encoding="utf-8") as f:
+ #     config = json.load(f)
+ #
+ # # Rebuild tokenizer.json's vocab from vocab.vocab: token = text before the first tab, id = line index.
+ # vocab_file = "vocab.vocab"
+ # f = open(vocab_file, "r", encoding="utf-8")
+ # lines = f.readlines()
+ # vocab = []
+ # for i, line in enumerate(lines):
+ #     key = line.split("\t")[0]
+ #     pair = [key, i]
+ #     vocab.append(pair)
+ # f.close()
+ # config["model"]["vocab"] = vocab
+ #
+ # with open("new_tokenizer.json", "w", encoding="utf-8") as w:
+ #     d = json.dumps(config)
+ #     w.write(d)
+ #
+ # print("ok")
+
+ # Load the converted tokenizer from the current directory.
+ from transformers import AutoTokenizer
+ tokenizer = AutoTokenizer.from_pretrained(".")
+
+
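
Not part of the commit: a minimal round-trip sketch of the tokenizer that TokenConvert.py loads at the end, assuming the repo files sit in the current directory and that transformers plus the CpmTokenizer dependencies (sentencepiece, jieba) are installed.

# Sketch only: round-trip a sample string through the rebuilt tokenizer.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")
sample = "这是一个测试"               # any test string works
ids = tokenizer.encode(sample)        # ids come from the rebuilt vocab
print(ids)
print(tokenizer.decode(ids))          # should closely recover `sample`
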
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "<sep>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}, "additional_special_tokens": [ "<eod>"]}
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18857e86783e50cfcaa0bc3c043fb4e9b5f240b885d2870ea593ee69b44f7a3a
+ size 879697
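
spiece.model is the underlying SentencePiece model, stored here as a Git LFS pointer. A hedged sketch of inspecting it directly, assuming `git lfs pull` has fetched the real ~880 KB file and the sentencepiece package is installed.

# Sketch: inspect the SentencePiece model behind the tokenizer.
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="spiece.model")
print(sp.get_piece_size())                      # number of pieces in the model
print(sp.encode("一个测试句子", out_type=str))  # pieces for a sample string
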
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "remove_space": true, "keep_accents": false, "bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "<sep>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "additional_special_tokens": [ "<eod>"], "special_tokens_map_file": null, "name_or_path": "TsinghuaAI/CPM-Generate", "tokenizer_class": "CpmTokenizer"}
vocab.vocab ADDED
The diff for this file is too large to render. See raw diff
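
The diff is not rendered, but TokenConvert.py above parses vocab.vocab as one token per line, keeping the text before the first tab and using the line index as the id (the second tab-separated column is presumably a score or frequency; that is an assumption). A minimal sketch of reading it the same way:

# Sketch: read vocab.vocab exactly as TokenConvert.py does.
with open("vocab.vocab", encoding="utf-8") as f:
    tokens = [line.split("\t")[0] for line in f]

print(len(tokens))    # vocabulary size
print(tokens[:5])     # first few entries
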