vincentromanet committed
Commit c8f86b9
1 Parent(s): b29c25e

Add new SentenceTransformer model.

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "word_embedding_dimension": 1024,
+   "pooling_mode_cls_token": false,
+   "pooling_mode_mean_tokens": true,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false,
+   "pooling_mode_weightedmean_tokens": false,
+   "pooling_mode_lasttoken": false,
+   "include_prompt": true
+ }
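The pooling config selects masked mean pooling over the 1024-dimensional token embeddings (CLS, max, and last-token modes are disabled). A minimal sketch of what that step computes, in plain PyTorch and independent of this repository:

import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # token_embeddings: (batch, seq_len, 1024); attention_mask: (batch, seq_len) of 0/1
    mask = attention_mask.unsqueeze(-1).to(token_embeddings.dtype)  # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)                   # sum over non-padding tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)                        # non-padding token counts
    return summed / counts                                          # (batch, 1024) sentence embeddings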
README.md ADDED
The diff for this file is too large to render.
config.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "_name_or_path": "jinaai/jina-embeddings-v3",
+   "architectures": [
+     "XLMRobertaLoRA"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "auto_map": {
+     "AutoConfig": "jinaai/xlm-roberta-flash-implementation--configuration_xlm_roberta.XLMRobertaFlashConfig",
+     "AutoModel": "jinaai/xlm-roberta-flash-implementation--modeling_lora.XLMRobertaLoRA",
+     "AutoModelForMaskedLM": "jinaai/xlm-roberta-flash-implementation--modeling_xlm_roberta.XLMRobertaForMaskedLM",
+     "AutoModelForPreTraining": "jinaai/xlm-roberta-flash-implementation--modeling_xlm_roberta.XLMRobertaForPreTraining"
+   },
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "emb_pooler": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "load_trained_adapters": true,
+   "lora_adaptations": [
+     "retrieval.query",
+     "retrieval.passage",
+     "separation",
+     "classification",
+     "text-matching"
+   ],
+   "lora_alpha": 1,
+   "lora_dropout_p": 0.0,
+   "lora_main_params_trainable": false,
+   "lora_rank": 4,
+   "matryoshka_dimensions": [
+     32,
+     64,
+     128,
+     256,
+     512,
+     768,
+     1024
+   ],
+   "max_position_embeddings": 8194,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "rotary",
+   "rotary_emb_base": 20000.0,
+   "task_instructions": {
+     "classification": "",
+     "retrieval.passage": "Represent the document for retrieval: ",
+     "retrieval.query": "Represent the query for retrieving evidence documents: ",
+     "separation": "",
+     "text-matching": ""
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "truncate_dim": null,
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "use_flash_attn": true,
+   "use_reentrant": false,
+   "vocab_size": 250002
+ }
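"matryoshka_dimensions" lists the sizes the 1024-dimensional embeddings are trained to remain usable at, and "truncate_dim" (null here) selects one at load time. A hedged sketch of the usual truncate-then-renormalize step; the choice of 128 below is illustrative, not taken from this config:

import torch
import torch.nn.functional as F

def truncate_embeddings(embeddings: torch.Tensor, dim: int = 128) -> torch.Tensor:
    # Keep the leading `dim` components of each embedding, then restore unit norm
    return F.normalize(embeddings[:, :dim], p=2, dim=1)

full = torch.randn(4, 1024)             # stand-in for model output
short = truncate_embeddings(full, 128)  # (4, 128), rows are unit-norm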
config_sentence_transformers.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "__version__": {
+     "sentence_transformers": "3.1.1",
+     "transformers": "4.44.2",
+     "pytorch": "2.3.1"
+   },
+   "prompts": {
+     "retrieval.query": "Represent the query for retrieving evidence documents: ",
+     "retrieval.passage": "Represent the document for retrieval: ",
+     "separation": "",
+     "classification": "",
+     "text-matching": ""
+   },
+   "default_prompt_name": null,
+   "similarity_fn_name": "cosine"
+ }
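The "prompts" map mirrors "task_instructions" from config.json and lets Sentence Transformers prepend the matching instruction at encode time via prompt_name. A minimal usage sketch; the model id is a placeholder for wherever this checkpoint is hosted, and trust_remote_code=True is assumed to be needed because of the custom architecture:

from sentence_transformers import SentenceTransformer

# Placeholder id: replace with the actual repository name
model = SentenceTransformer("your-namespace/this-model", trust_remote_code=True)

queries = model.encode(["How long does delivery take?"], prompt_name="retrieval.query")
passages = model.encode(["Delivery takes 3 to 5 business days."], prompt_name="retrieval.passage")

# similarity_fn_name is "cosine", so this returns cosine similarities
print(model.similarity(queries, passages))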
custom_st.py ADDED
@@ -0,0 +1,199 @@
+ import json
+ import os
+ from io import BytesIO
+ from typing import Any, Dict, List, Optional, Tuple, Union
+
+ import torch
+ from torch import nn
+ from transformers import AutoConfig, AutoModel, AutoTokenizer
+
+
+ class Transformer(nn.Module):
+     """Huggingface AutoModel to generate token embeddings.
+     Loads the correct class, e.g. BERT / RoBERTa etc.
+
+     Args:
+         model_name_or_path: Huggingface models name
+             (https://huggingface.co/models)
+         max_seq_length: Truncate any inputs longer than max_seq_length
+         model_args: Keyword arguments passed to the Huggingface
+             Transformers model
+         tokenizer_args: Keyword arguments passed to the Huggingface
+             Transformers tokenizer
+         config_args: Keyword arguments passed to the Huggingface
+             Transformers config
+         cache_dir: Cache dir for Huggingface Transformers to store/load
+             models
+         do_lower_case: If true, lowercases the input (independent if the
+             model is cased or not)
+         tokenizer_name_or_path: Name or path of the tokenizer. When
+             None, then model_name_or_path is used
+     """
+
+     def __init__(
+         self,
+         model_name_or_path: str,
+         max_seq_length: int = None,
+         model_args: Dict[str, Any] = None,
+         tokenizer_args: Dict[str, Any] = None,
+         config_args: Dict[str, Any] = None,
+         cache_dir: str = None,
+         do_lower_case: bool = False,
+         tokenizer_name_or_path: str = None,
+     ) -> None:
+         super().__init__()
+         self.config_keys = ["max_seq_length", "do_lower_case"]
+         self.do_lower_case = do_lower_case
+         if model_args is None:
+             model_args = {}
+         if tokenizer_args is None:
+             tokenizer_args = {}
+         if config_args is None:
+             config_args = {}
+
+         self.config = AutoConfig.from_pretrained(model_name_or_path, **config_args, cache_dir=cache_dir)
+         self.auto_model = AutoModel.from_pretrained(model_name_or_path, config=self.config, cache_dir=cache_dir, **model_args)
+
+         self._lora_adaptations = self.config.lora_adaptations
+         if (
+             not isinstance(self._lora_adaptations, list)
+             or len(self._lora_adaptations) < 1
+         ):
+             raise ValueError(
+                 "`lora_adaptations` must be a list and contain at least one element"
+             )
+         self._adaptation_map = {
+             name: idx for idx, name in enumerate(self._lora_adaptations)
+         }
+
+         if max_seq_length is not None and "model_max_length" not in tokenizer_args:
+             tokenizer_args["model_max_length"] = max_seq_length
+         self.tokenizer = AutoTokenizer.from_pretrained(
+             tokenizer_name_or_path if tokenizer_name_or_path is not None else model_name_or_path,
+             cache_dir=cache_dir,
+             **tokenizer_args,
+         )
+
+         # No max_seq_length set. Try to infer from model
+         if max_seq_length is None:
+             if (
+                 hasattr(self.auto_model, "config")
+                 and hasattr(self.auto_model.config, "max_position_embeddings")
+                 and hasattr(self.tokenizer, "model_max_length")
+             ):
+                 max_seq_length = min(self.auto_model.config.max_position_embeddings, self.tokenizer.model_max_length)
+
+         self.max_seq_length = max_seq_length
+
+         if tokenizer_name_or_path is not None:
+             self.auto_model.config.tokenizer_class = self.tokenizer.__class__.__name__
+
+     def forward(
+         self, features: Dict[str, torch.Tensor], task: Optional[str] = None
+     ) -> Dict[str, torch.Tensor]:
+         """Returns token_embeddings, cls_token"""
+         if task and task not in self._lora_adaptations:
+             raise ValueError(
+                 f"Unsupported task '{task}'. "
+                 f"Supported tasks are: {', '.join(self.config.lora_adaptations)}. "
+                 "Alternatively, don't pass the `task` argument to disable LoRA."
+             )
+
+         adapter_mask = None
+         if task:
+             task_id = self._adaptation_map[task]
+             num_examples = features['input_ids'].size(0)
+             adapter_mask = torch.full(
+                 (num_examples,), task_id, dtype=torch.int32, device=features['input_ids'].device
+             )
+
+         lora_arguments = (
+             {"adapter_mask": adapter_mask} if adapter_mask is not None else {}
+         )
+         output_states = self.auto_model.forward(**features, **lora_arguments, return_dict=False)
+         output_tokens = output_states[0]
+         features.update({"token_embeddings": output_tokens, "attention_mask": features["attention_mask"]})
+         return features
+
+     def get_word_embedding_dimension(self) -> int:
+         return self.auto_model.config.hidden_size
+
+     def tokenize(
+         self,
+         texts: Union[List[str], List[dict], List[Tuple[str, str]]],
+         padding: Union[str, bool] = True
+     ) -> Dict[str, torch.Tensor]:
+         """Tokenizes a text and maps tokens to token-ids"""
+         output = {}
+         if isinstance(texts[0], str):
+             to_tokenize = [texts]
+         elif isinstance(texts[0], dict):
+             to_tokenize = []
+             output["text_keys"] = []
+             for lookup in texts:
+                 text_key, text = next(iter(lookup.items()))
+                 to_tokenize.append(text)
+                 output["text_keys"].append(text_key)
+             to_tokenize = [to_tokenize]
+         else:
+             batch1, batch2 = [], []
+             for text_tuple in texts:
+                 batch1.append(text_tuple[0])
+                 batch2.append(text_tuple[1])
+             to_tokenize = [batch1, batch2]
+
+         # strip
+         to_tokenize = [[str(s).strip() for s in col] for col in to_tokenize]
+
+         # Lowercase
+         if self.do_lower_case:
+             to_tokenize = [[s.lower() for s in col] for col in to_tokenize]
+
+         output.update(
+             self.tokenizer(
+                 *to_tokenize,
+                 padding=padding,
+                 truncation="longest_first",
+                 return_tensors="pt",
+                 max_length=self.max_seq_length,
+             )
+         )
+         return output
+
+     def get_config_dict(self) -> Dict[str, Any]:
+         return {key: self.__dict__[key] for key in self.config_keys}
+
+     def save(self, output_path: str, safe_serialization: bool = True) -> None:
+         self.auto_model.save_pretrained(output_path, safe_serialization=safe_serialization)
+         self.tokenizer.save_pretrained(output_path)
+
+         with open(os.path.join(output_path, "sentence_bert_config.json"), "w") as fOut:
+             json.dump(self.get_config_dict(), fOut, indent=2)
+
+
+     @classmethod
+     def load(cls, input_path: str) -> "Transformer":
+         # Old classes used other config names than 'sentence_bert_config.json'
+         for config_name in [
+             "sentence_bert_config.json",
+             "sentence_roberta_config.json",
+             "sentence_distilbert_config.json",
+             "sentence_camembert_config.json",
+             "sentence_albert_config.json",
+             "sentence_xlm-roberta_config.json",
+             "sentence_xlnet_config.json",
+         ]:
+             sbert_config_path = os.path.join(input_path, config_name)
+             if os.path.exists(sbert_config_path):
+                 break
+
+         with open(sbert_config_path) as fIn:
+             config = json.load(fIn)
+         # Don't allow configs to set trust_remote_code
+         if "model_args" in config and "trust_remote_code" in config["model_args"]:
+             config["model_args"].pop("trust_remote_code")
+         if "tokenizer_args" in config and "trust_remote_code" in config["tokenizer_args"]:
+             config["tokenizer_args"].pop("trust_remote_code")
+         if "config_args" in config and "trust_remote_code" in config["config_args"]:
+             config["config_args"].pop("trust_remote_code")
+         return cls(model_name_or_path=input_path, **config)
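A hedged sketch of exercising this module directly, outside of SentenceTransformer, to show how the `task` argument becomes a per-example adapter mask. The local path is a placeholder, and trust_remote_code=True is assumed to be required because the backbone is loaded from jinaai/xlm-roberta-flash-implementation:

import torch
from custom_st import Transformer

# Placeholder: a local clone of this repository
module = Transformer(
    "path/to/this/repo",
    config_args={"trust_remote_code": True},
    model_args={"trust_remote_code": True},
)

features = module.tokenize(["Represent the query for retrieving evidence documents: what is LoRA?"])
with torch.no_grad():
    out = module.forward(features, task="retrieval.query")  # selects the retrieval.query LoRA adapter

print(out["token_embeddings"].shape)  # (1, seq_len, 1024)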
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6658a34d2f9999b96850614b259122930fde49b864beafbc6966cad0e9b21f5b
+ size 2289306368
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "custom_st.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   },
+   {
+     "idx": 2,
+     "name": "2",
+     "path": "2_Normalize",
+     "type": "sentence_transformers.models.Normalize"
+   }
+ ]
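modules.json chains three modules: the custom Transformer above, mean Pooling, then L2 Normalize. A small sketch of that composition using the stock Sentence Transformers building blocks, with dummy tensors standing in for the transformer output:

import torch
from sentence_transformers.models import Normalize, Pooling

pooling = Pooling(word_embedding_dimension=1024, pooling_mode="mean")
normalize = Normalize()

# Dummy features shaped like what the Transformer module emits
features = {
    "token_embeddings": torch.randn(2, 16, 1024),
    "attention_mask": torch.ones(2, 16, dtype=torch.long),
}
features = normalize(pooling(features))
print(features["sentence_embedding"].shape)  # torch.Size([2, 1024]), rows have unit norm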
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 8194,
+   "do_lower_case": false
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a56def25aa40facc030ea8b0b87f3688e4b3c39eb8b45d5702b3a1300fe2a20
+ size 17082734
tokenizer_config.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "250001": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "model_max_length": 8194,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }