Update boomer_code_tokenizer.py
#1 opened by mike-ravkine
boomer_code_tokenizer.py +1 -1

boomer_code_tokenizer.py CHANGED
@@ -63,10 +63,10 @@ class BoomerCodeTokenizer(PreTrainedTokenizer):
 
     def __init__(self, vocab_file, bos_token=None, eos_token='<|endoftext|>', unk_token='<|unk|>', pad_token='<|pad|>', sep_token=None, sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None:
         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
         self.vocab_file = vocab_file
         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
         self.sp_model.Load(vocab_file)
+        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
 
         print(self.sp_model)
 
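For context, here is a minimal sketch of how the constructor reads after this change. The surrounding class skeleton, imports, and the vocab_size property are assumptions added for illustration (the real file defines more of the tokenizer API); the likely motivation for the reorder is that PreTrainedTokenizer.__init__ can consult the subclass vocabulary while registering special tokens, so self.sp_model should already be loaded when it runs.

from typing import Any, Dict, Optional

import sentencepiece as spm
from transformers import PreTrainedTokenizer


class BoomerCodeTokenizer(PreTrainedTokenizer):
    """Sketch of the constructor after this PR; the rest of the class is omitted."""

    def __init__(self, vocab_file, bos_token=None, eos_token='<|endoftext|>',
                 unk_token='<|unk|>', pad_token='<|pad|>', sep_token=None,
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.vocab_file = vocab_file

        # Load the SentencePiece model first, so the vocabulary is available ...
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # ... before the base class initializer runs, since it may look up the
        # vocab (e.g. while handling the special tokens passed above).
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
                         pad_token=pad_token, sep_token=sep_token,
                         sp_model_kwargs=self.sp_model_kwargs, **kwargs)

    @property
    def vocab_size(self) -> int:
        # Assumed accessor for illustration; the real class defines its own.
        return self.sp_model.get_piece_size()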