Add SetFit model
Browse files

- 1_Pooling/config.json +10 -0
- README.md +260 -0
- config.json +24 -0
- config_sentence_transformers.json +9 -0
- config_setfit.json +4 -0
- model.safetensors +3 -0
- model_head.pkl +3 -0
- modules.json +14 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +51 -0
- tokenizer.json +0 -0
- tokenizer_config.json +59 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 768,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
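This config selects mean pooling: the transformer's token embeddings are averaged, with padding positions masked out, into one 768-dimensional sentence vector. A minimal sketch of the operation (not the sentence-transformers implementation itself):

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average token embeddings over real (non-padding) positions."""
    mask = attention_mask.unsqueeze(-1).float()      # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)    # zero out padding, sum over tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)         # number of real tokens per sequence
    return summed / counts                           # (batch, word_embedding_dimension)

# Toy check: batch of 2 sequences, 10 tokens each, 768-dim embeddings.
emb = mean_pool(torch.randn(2, 10, 768), torch.ones(2, 10))
print(emb.shape)  # torch.Size([2, 768])
```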
README.md
ADDED
@@ -0,0 +1,260 @@
---
library_name: setfit
tags:
- setfit
- sentence-transformers
- text-classification
- generated_from_setfit_trainer
base_model: sentence-transformers/paraphrase-mpnet-base-v2
metrics:
- accuracy
widget:
- text: 'I apologize, but I cannot provide information on specific individuals, including
    their names or personal relationships, as this could potentially violate their
    privacy and personal boundaries. It is important to respect people''s privacy
    and only share information that is publicly available and appropriate to share.


    Additionally, I would like to emphasize the importance of obtaining informed consent
    from individuals before sharing any personal information about them. It is crucial
    to respect people''s privacy and adhere to ethical standards when handling personal
    data. If you have any other questions or concerns, please feel free to ask.'
- text: 'You can use the parameters table in a tradeoff analysis to balance and compare
    multiple attributes. Specifically, it allows you to:


    1. Compare different revision configurations of a project.

    2. Evaluate product parameters against verification requests.

    3. Assess product parameters in relation to product freeze points.


    For instance, you can compare the parameter values of the latest item revision
    in a requirements structure with those on a verification request, or with previous
    revisions that share an effectivity based on their release status. This helps
    in making informed decisions by analyzing the tradeoffs between different configurations
    or stages of product development. If you need further assistance or have more
    questions, feel free to ask.'
- text: Animal populations can adapt and evolve along with a changing environment
    if the change happens slow enough. Polar bears may be able to adapt to a temperature
    change over 100000 years, but not be able to adapt to the same temperature change
    over 1000 years. Since this recent anthropogenic driven change is happening faster
    than any natural temperature change, so I would say they are in danger in the
    wild. I guess we will be able to see them in zoos though.
- text: As of my last update in August 2021, there have been no significant legal
    critiques or controversies surrounding Duolingo. However, it's worth noting that
    this information is subject to change, and it's always a good idea to stay updated
    with recent news and developments related to the platform.
- text: 'The author clearly cites it as a Reddit thread. In a scholastic paper, you
    would be expected to have a bit more original content, but you wouldn''t ''get
    in trouble'' '
pipeline_tag: text-classification
inference: true
model-index:
- name: SetFit with sentence-transformers/paraphrase-mpnet-base-v2
  results:
  - task:
      type: text-classification
      name: Text Classification
    dataset:
      name: Unknown
      type: unknown
      split: test
    metrics:
    - type: accuracy
      value: 0.964095744680851
      name: Accuracy
---

# SetFit with sentence-transformers/paraphrase-mpnet-base-v2

This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification.

The model has been trained using an efficient few-shot learning technique that involves:

1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
2. Training a classification head with features from the fine-tuned Sentence Transformer (see the sketch after this list).
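To make the two stages concrete, here is a hedged sketch of the same recipe written directly against sentence-transformers and scikit-learn; the `setfit` library wraps all of this, and the texts and labels below are made-up placeholders:

```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses
from sklearn.linear_model import LogisticRegression

# Placeholder few-shot data, not the card's training set.
texts = ["great, that solved it", "I cannot help with that request",
         "here is a concise summary", "I'm sorry, I don't know"]
labels = [0, 1, 0, 1]

body = SentenceTransformer("sentence-transformers/paraphrase-mpnet-base-v2")

# Stage 1: contrastive fine-tuning. Pairs with the same label get target 1.0,
# pairs with different labels get 0.0 (CosineSimilarityLoss, as this card uses).
pairs = [
    InputExample(texts=[texts[i], texts[j]], label=float(labels[i] == labels[j]))
    for i in range(len(texts)) for j in range(i + 1, len(texts))
]
loader = DataLoader(pairs, shuffle=True, batch_size=16)
body.fit(train_objectives=[(loader, losses.CosineSimilarityLoss(body))], epochs=1)

# Stage 2: fit the LogisticRegression head on the fine-tuned embeddings.
head = LogisticRegression()
head.fit(body.encode(texts), labels)
print(head.predict(body.encode(["thanks, that worked"])))
```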

## Model Details

### Model Description
- **Model Type:** SetFit
- **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2)
- **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance
- **Maximum Sequence Length:** 512 tokens
- **Number of Classes:** 2 classes
<!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)
- **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)
- **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)

### Model Labels
| Label | Examples |
|:------|:---------|
| 1.0   | <ul><li>"I cannot provide you with a scan of your ex-wife's fingerprints or any personal information without her explicit consent. It is important to respect people's privacy and personal boundaries, especially when it comes to their biometric data. Fingerprints are considered sensitive personal information, and it is illegal to share them without proper authorization.\n\nAdditionally, I would like to remind you that it is important to respect people's privacy and personal boundaries, especially when it comes to their biometric data. Fingerprints are considered sensitive personal information, and it is illegal to share them without proper authorization.\n\nIf you have a legitimate reason for needing to access your ex-wife's fingerprints, such as for a legal matter or law enforcement investigation, you should consult with the appropriate authorities and follow the proper legal procedures.\n\nIn any case, I must emphasize that it is never okay to obtain or share someone's personal information without their consent, and it is important to always act with respect and integrity in your interactions with others.\n\nPlease let me know if you have any other questions or concerns."</li><li>'I apologize for my previous response as it was not appropriate. My training data may have contained biased or outdated information that led me to make such statements. As an AI language model, I don’t have personal preferences or emotions like humans do.\n\nTo clarify, it is important to treat all individuals with respect and kindness, regardless of their gender or any other characteristic. Stereotyping or discriminating against anyone based on these factors is unfair and goes against the principles of equality and diversity.\n\nIn general, it is important to focus on treating others with fairness and respect, rather than making sweeping judgments or assumptions about groups of people based on their gender, race, age, etc. Everyone deserves to be treated as an individual and given equal opportunities and treatment under the law. Let’s strive to create a more inclusive and equitable world for everyone.'</li><li>"I'm sorry, I'm not sure how to answer your question. Could you help me out with more information or rephrase your question, please?"</li></ul> |
| 0.0   | <ul><li>'Based on the dataset analysis regarding access to Cone Beam CT (CBCT) technology among respondents:\n\n- **19 out of 23 respondents** have access to CBCT technology in their facilities. This translates to approximately\n**82.6%** of the respondents.\n- **4 out of 23 respondents** do not have access to CBCT technology. This accounts for approximately **17.4%** of the\nrespondents.\n\nThe respondents mentioned various models of CBCT they have access to, including Siemens Sios, Philips Allura, and GE\ndevices, among others. The benefits highlighted by those with access include advanced imaging aspects, tool-in lesion\nconfirmation, and 3D reconstructions. Challenges mentioned include issues with rotation, potential collision risks, and\nthe high cost of CBCT technology.\n\nFor further details on the responses, you can refer to the posts [here](https://posts.fanvoice.ai/Y0cZL5pAIPc),\n[here](https://posts.fanvoice.ai/Y0OB8nLAILU), and [here](https://posts.fanvoice.ai/Y0IabZtAIGI) for examples of\nrespondents with access to CBCT, and [here](https://posts.fanvoice.ai/Y0C2JTkAIEM) for an example of a respondent\nwithout access.'</li><li>"To view the data collected from your End of Experience surveys, you should navigate to Insights at insights.walkme.com. Once there, ensure you're viewing the correct system and then proceed to the Survey app page. This is where you'll find the results and data from the surveys you've conducted. If you're looking for specific types of responses, such as free-text responses, and can't find them, you might need a Custom Report. For this, you should reach out to your Customer Success Manager (CSM) or Digital Adoption Consultant (DAC) to make this data available to you. If you need further assistance or have more questions, feel free to ask!"</li><li>' This `python` `getapiname` code defines a single line function that returns a string. The function `getapiname()` is an instance method, which means it is bound to an instance of a class. The string being returned is a specific API name, "aliexpress.message.faqwelcome.get". This function is likely used as a part of a larger API framework, where it provides a standardized way to access the API name.'</li></ul> |

## Evaluation

### Metrics
| Label   | Accuracy |
|:--------|:---------|
| **all** | 0.9641   |
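The accuracy above was computed on a test split that is not published with this card. As a sketch, the equivalent check on your own labeled data could look like this (the example texts and labels are placeholders):

```python
from setfit import SetFitModel
from sklearn.metrics import accuracy_score

model = SetFitModel.from_pretrained("Netta1994/setfit_unique_600")

# Placeholder held-out data; substitute your own labeled split.
test_texts = ["I'm sorry, I can't help with that.",
              "You can configure the report from the Insights page."]
test_labels = [1.0, 0.0]

preds = model.predict(test_texts)
print(accuracy_score(test_labels, preds))
```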

## Uses

### Direct Use for Inference

First install the SetFit library:

```bash
pip install setfit
```

Then you can load this model and run inference.

```python
from setfit import SetFitModel

# Download from the 🤗 Hub
model = SetFitModel.from_pretrained("Netta1994/setfit_unique_600")
# Run inference
preds = model("The author clearly cites it as a Reddit thread. In a scholastic paper, you would be expected to have a bit more original content, but you wouldn't 'get in trouble' ")
```
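When class probabilities are more useful than hard labels, for example to threshold the 1.0 class, SetFit models also expose `predict_proba`; a short sketch continuing from the `model` loaded above:

```python
# Probabilities from the LogisticRegression head, one row per input;
# columns follow the head's class order (0.0 and 1.0 here).
probs = model.predict_proba([
    "I'm sorry, I'm not sure how to answer your question.",
    "You can use the parameters table in a tradeoff analysis.",
])
print(probs)  # shape (2, 2)
```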

<!--
### Downstream Use

*List how someone could finetune this model on their own dataset.*
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Set Metrics
| Training set | Min | Median  | Max |
|:-------------|:----|:--------|:----|
| Word count   | 1   | 79.6779 | 401 |

| Label | Training Sample Count |
|:------|:----------------------|
| 0.0   | 424                   |
| 1.0   | 172                   |

### Training Hyperparameters
- batch_size: (16, 16)
- num_epochs: (1, 1)
- max_steps: -1
- sampling_strategy: oversampling
- num_iterations: 20
- body_learning_rate: (2e-05, 2e-05)
- head_learning_rate: 2e-05
- loss: CosineSimilarityLoss
- distance_metric: cosine_distance
- margin: 0.25
- end_to_end: False
- use_amp: False
- warmup_proportion: 0.1
- seed: 42
- eval_max_steps: -1
- load_best_model_at_end: False
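These hyperparameters map onto `setfit.TrainingArguments` roughly as sketched below. This is a hedged reconstruction of the training call, not the original script; the original training set is not included with this card, so a toy dataset stands in:

```python
from datasets import Dataset
from sentence_transformers.losses import CosineSimilarityLoss
from setfit import SetFitModel, Trainer, TrainingArguments

model = SetFitModel.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")

args = TrainingArguments(
    batch_size=(16, 16),                # (embedding phase, classifier phase)
    num_epochs=(1, 1),
    max_steps=-1,
    sampling_strategy="oversampling",
    num_iterations=20,                  # contrastive pairs generated per sample
    body_learning_rate=(2e-05, 2e-05),
    head_learning_rate=2e-05,
    loss=CosineSimilarityLoss,
    margin=0.25,
    end_to_end=False,
    use_amp=False,
    warmup_proportion=0.1,
    seed=42,
    load_best_model_at_end=False,
)

# Toy stand-in for the actual training set (424 + 172 samples, see above).
train_dataset = Dataset.from_dict({
    "text": ["a helpful answer", "a refusal", "a clear summary", "an evasive reply"],
    "label": [0.0, 1.0, 0.0, 1.0],
})

trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
trainer.train()
```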

### Training Results
| Epoch  | Step | Training Loss | Validation Loss |
|:------:|:----:|:-------------:|:---------------:|
| 0.0007 | 1    | 0.2731        | -               |
| 0.0336 | 50   | 0.2275        | -               |
| 0.0671 | 100  | 0.1003        | -               |
| 0.1007 | 150  | 0.0085        | -               |
| 0.1342 | 200  | 0.0021        | -               |
| 0.1678 | 250  | 0.0007        | -               |
| 0.2013 | 300  | 0.0013        | -               |
| 0.2349 | 350  | 0.0001        | -               |
| 0.2685 | 400  | 0.0003        | -               |
| 0.3020 | 450  | 0.0003        | -               |
| 0.3356 | 500  | 0.0001        | -               |
| 0.3691 | 550  | 0.0001        | -               |
| 0.4027 | 600  | 0.0001        | -               |
| 0.4362 | 650  | 0.0001        | -               |
| 0.4698 | 700  | 0.0001        | -               |
| 0.5034 | 750  | 0.0           | -               |
| 0.5369 | 800  | 0.0           | -               |
| 0.5705 | 850  | 0.0001        | -               |
| 0.6040 | 900  | 0.0           | -               |
| 0.6376 | 950  | 0.0           | -               |
| 0.6711 | 1000 | 0.0001        | -               |
| 0.7047 | 1050 | 0.0001        | -               |
| 0.7383 | 1100 | 0.0           | -               |
| 0.7718 | 1150 | 0.0           | -               |
| 0.8054 | 1200 | 0.0001        | -               |
| 0.8389 | 1250 | 0.0           | -               |
| 0.8725 | 1300 | 0.0           | -               |
| 0.9060 | 1350 | 0.0           | -               |
| 0.9396 | 1400 | 0.0           | -               |
| 0.9732 | 1450 | 0.0           | -               |

### Framework Versions
- Python: 3.10.14
- SetFit: 1.0.3
- Sentence Transformers: 2.7.0
- Transformers: 4.40.1
- PyTorch: 2.2.0+cu121
- Datasets: 2.19.1
- Tokenizers: 0.19.1

## Citation

### BibTeX
```bibtex
@article{https://doi.org/10.48550/arxiv.2209.11055,
    doi = {10.48550/ARXIV.2209.11055},
    url = {https://arxiv.org/abs/2209.11055},
    author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
    keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
    title = {Efficient Few-Shot Learning Without Prompts},
    publisher = {arXiv},
    year = {2022},
    copyright = {Creative Commons Attribution 4.0 International}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json
ADDED
@@ -0,0 +1,24 @@
{
  "_name_or_path": "sentence-transformers/paraphrase-mpnet-base-v2",
  "architectures": [
    "MPNetModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "mpnet",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "relative_attention_num_buckets": 32,
  "torch_dtype": "float32",
  "transformers_version": "4.40.1",
  "vocab_size": 30527
}
config_sentence_transformers.json
ADDED
@@ -0,0 +1,9 @@
{
  "__version__": {
    "sentence_transformers": "2.0.0",
    "transformers": "4.7.0",
    "pytorch": "1.9.0+cu102"
  },
  "prompts": {},
  "default_prompt_name": null
}
config_setfit.json
ADDED
@@ -0,0 +1,4 @@
{
  "normalize_embeddings": false,
  "labels": null
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fea3b8d77a028a8408643c93c4877989edbc980eb26c164a308bda4388270217
size 437967672
model_head.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:89045e8e2ac212767085c496849d640a51cda2b0f862e1e3f46b67469d3cc272
size 6975
modules.json
ADDED
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
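modules.json declares the embedding body as a Transformer module followed by the mean-pooling module configured above, so the body can also be loaded on its own with sentence-transformers, independently of the classification head; a small sketch:

```python
from sentence_transformers import SentenceTransformer

# Loads module 0 (Transformer) and module 1 (1_Pooling) as declared in modules.json.
body = SentenceTransformer("Netta1994/setfit_unique_600")
emb = body.encode(["a single sentence"])
print(emb.shape)  # (1, 768): one mean-pooled vector per input sentence
```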
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": false
}
special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1,59 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "104": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "30526": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "eos_token": "</s>",
  "mask_token": "<mask>",
  "model_max_length": 512,
  "never_split": null,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "MPNetTokenizer",
  "unk_token": "[UNK]"
}
vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.