OwenElliott committed
Commit 6353c49 • Parent(s): 6a17657

Upload 8 files

Files changed:
- config.json +12 -0
- configuration_marqo_arctic_bge_chimera_m.py +10 -0
- model.safetensors +3 -0
- modeling_marqo_arctic_bge_chimera_m.py +68 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +62 -0
- vocab.txt +0 -0
config.json
ADDED
@@ -0,0 +1,12 @@
+{
+  "architectures": [
+    "Chimera"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_marqo_arctic_bge_chimera_m.ChimeraConfig",
+    "AutoModel": "modeling_marqo_arctic_bge_chimera_m.Chimera"
+  },
+  "model_type": "marqo-chimera-arctic-bge-m",
+  "torch_dtype": "float32",
+  "transformers_version": "4.44.2"
+}
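
The auto_map entries above are what make this checkpoint loadable with stock Transformers: when trust_remote_code=True is passed, AutoConfig and AutoModel import ChimeraConfig and Chimera from the two Python files uploaded alongside this config. A minimal sketch, assuming the repo has been cloned to the current directory (any local path or hub id works the same way):

    from transformers import AutoConfig, AutoModel

    # trust_remote_code=True lets Transformers execute the repo's own
    # configuration/modeling files named in auto_map.
    config = AutoConfig.from_pretrained(".", trust_remote_code=True)  # -> ChimeraConfig
    model = AutoModel.from_pretrained(".", trust_remote_code=True)    # -> Chimera
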
configuration_marqo_arctic_bge_chimera_m.py
ADDED
@@ -0,0 +1,10 @@
+from transformers import PretrainedConfig
+import torch.nn as nn
+from typing import List, Union
+
+
+class ChimeraConfig(PretrainedConfig):
+    model_type = "marqo-chimera-arctic-bge-m"
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
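
ChimeraConfig adds nothing beyond registering its model_type; every other field in config.json (architectures, torch_dtype, transformers_version) is handled by the PretrainedConfig base class, and the torch.nn / typing imports are unused. A small sketch of the round trip, with an illustrative output directory:

    from configuration_marqo_arctic_bge_chimera_m import ChimeraConfig

    config = ChimeraConfig(torch_dtype="float32")
    print(config.model_type)           # "marqo-chimera-arctic-bge-m"
    config.save_pretrained("chimera")  # writes chimera/config.json for this model_type
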
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9185237e2a937d3a7421464a6876b4b6e44aaf849d3eb7439f1a051b436751a2
+size 871183080
modeling_marqo_arctic_bge_chimera_m.py
ADDED
@@ -0,0 +1,68 @@
+import torch
+import torch.nn as nn
+from transformers import BertModel, PreTrainedModel, BertConfig, AutoModel
+from typing import List
+from .configuration_marqo_arctic_bge_chimera_m import ChimeraConfig
+
+
+class Chimera(PreTrainedModel):
+    config_class = ChimeraConfig
+
+    def __init__(self, config: ChimeraConfig):
+        super().__init__(config)
+        bert_config = BertConfig(
+            vocab_size=30522,
+            hidden_size=768,
+            num_hidden_layers=12,
+            num_attention_heads=12,
+            intermediate_size=3072,
+            hidden_act="gelu",
+            hidden_dropout_prob=0.1,
+            attention_probs_dropout_prob=0.1,
+            max_position_embeddings=512,
+            type_vocab_size=2,
+            initializer_range=0.02,
+            layer_norm_eps=1e-12,
+        )
+
+        self.model = nn.ModuleDict(
+            {
+                "model_0": BertModel(bert_config),
+                "model_1": BertModel(bert_config),
+            }
+        )
+
+    def forward(
+        self,
+        input_ids: torch.Tensor,
+        attention_mask: torch.Tensor,
+        token_type_ids: torch.Tensor = None,
+    ) -> torch.Tensor:
+        embeddings = []
+        for _, model in self.model.items():
+            model_output = model(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                token_type_ids=token_type_ids,
+            )
+            pooled_output = model_output[0][:, 0]
+            embeddings.append(pooled_output)
+
+        return torch.cat(embeddings, dim=-1)
+
+    def load_weights_from_automodels(
+        self, in_models: List[str], has_pooling_layer: List[bool]
+    ):
+        model_list = []
+        for i, model_name in enumerate(in_models):
+            model = AutoModel.from_pretrained(
+                model_name,
+                add_pooling_layer=has_pooling_layer[i],
+                trust_remote_code=True,
+            )
+            model.eval()
+            model_list.append(model)
+
+        self.model = nn.ModuleDict(
+            {f"model_{i}": model for i, model in enumerate(model_list)}
+        )
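
Taken together: forward runs one tokenized batch through both BERT-base sub-models, CLS-pools each (model_output[0][:, 0] is the hidden state of the first token), and concatenates the two 768-dim vectors into a single 1536-dim embedding. load_weights_from_automodels is presumably how the sub-models were populated from two source checkpoints before upload; the "arctic" and "bge" in the repo name hint at the sources, but the diff does not name them. The 871,183,080-byte safetensors file is consistent with two float32 BERT-base encoders. A minimal usage sketch — the hub id is inferred from the file names and is an assumption, as is the final L2 normalization commonly applied to retrieval embeddings:

    import torch
    import torch.nn.functional as F
    from transformers import AutoModel, AutoTokenizer

    repo = "Marqo/marqo-chimera-arctic-bge-m"  # assumed hub id, not stated in the diff
    tokenizer = AutoTokenizer.from_pretrained(repo)
    model = AutoModel.from_pretrained(repo, trust_remote_code=True).eval()

    batch = tokenizer(
        ["What is vector search?"],
        padding=True, truncation=True, max_length=512, return_tensors="pt",
    )
    with torch.no_grad():
        emb = model(**batch)        # shape [1, 1536]: two CLS vectors concatenated
    emb = F.normalize(emb, dim=-1)  # typical for retrieval; an assumption here
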
special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,62 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "max_length": 512,
+  "model_max_length": 512,
+  "pad_to_multiple_of": null,
+  "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sep_token": "[SEP]",
+  "stride": 0,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "[UNK]"
+}
vocab.txt
ADDED
The diff for this file is too large to render.