Add new SentenceTransformer model.
- README.md +95 -96
- model.safetensors +1 -1

README.md
CHANGED
@@ -46,7 +46,7 @@ tags:
 - feature-extraction
 - generated_from_trainer
 - dataset_size:560
-- loss:
+- loss:CoSENTLoss
 widget:
 - source_sentence: Let's search inside
   sentences:
@@ -84,109 +84,109 @@ model-index:
       type: custom-arc-semantics-data
     metrics:
     - type: cosine_accuracy
-      value: 0.
+      value: 0.9285714285714286
       name: Cosine Accuracy
     - type: cosine_accuracy_threshold
-      value: 0.
+      value: 0.42927420139312744
       name: Cosine Accuracy Threshold
     - type: cosine_f1
-      value: 0.
+      value: 0.9425287356321839
       name: Cosine F1
     - type: cosine_f1_threshold
-      value: 0.
+      value: 0.2269928753376007
       name: Cosine F1 Threshold
     - type: cosine_precision
-      value: 0.
+      value: 0.9111111111111111
       name: Cosine Precision
     - type: cosine_recall
-      value: 0.
+      value: 0.9761904761904762
       name: Cosine Recall
     - type: cosine_ap
-      value: 0.
+      value: 0.9720863676601571
       name: Cosine Ap
     - type: dot_accuracy
-      value: 0.
+      value: 0.9285714285714286
       name: Dot Accuracy
     - type: dot_accuracy_threshold
-      value: 0.
+      value: 0.42927438020706177
       name: Dot Accuracy Threshold
     - type: dot_f1
-      value: 0.
+      value: 0.9425287356321839
       name: Dot F1
     - type: dot_f1_threshold
-      value: 0.
+      value: 0.22699296474456787
       name: Dot F1 Threshold
     - type: dot_precision
-      value: 0.
+      value: 0.9111111111111111
       name: Dot Precision
     - type: dot_recall
-      value: 0.
+      value: 0.9761904761904762
       name: Dot Recall
     - type: dot_ap
-      value: 0.
+      value: 0.9720863676601571
       name: Dot Ap
     - type: manhattan_accuracy
-      value: 0.
+      value: 0.9285714285714286
       name: Manhattan Accuracy
     - type: manhattan_accuracy_threshold
-      value:
+      value: 16.630834579467773
       name: Manhattan Accuracy Threshold
     - type: manhattan_f1
-      value: 0.
+      value: 0.9431818181818182
       name: Manhattan F1
     - type: manhattan_f1_threshold
-      value:
+      value: 19.740108489990234
       name: Manhattan F1 Threshold
     - type: manhattan_precision
-      value: 0.
+      value: 0.9021739130434783
       name: Manhattan Precision
     - type: manhattan_recall
-      value: 0.
+      value: 0.9880952380952381
       name: Manhattan Recall
     - type: manhattan_ap
-      value: 0.
+      value: 0.9728353486982702
       name: Manhattan Ap
     - type: euclidean_accuracy
-      value: 0.
+      value: 0.9285714285714286
       name: Euclidean Accuracy
     - type: euclidean_accuracy_threshold
-      value: 1.
+      value: 1.068155288696289
       name: Euclidean Accuracy Threshold
     - type: euclidean_f1
-      value: 0.
+      value: 0.9425287356321839
       name: Euclidean F1
     - type: euclidean_f1_threshold
-      value: 1.
+      value: 1.2433418035507202
       name: Euclidean F1 Threshold
     - type: euclidean_precision
-      value: 0.
+      value: 0.9111111111111111
       name: Euclidean Precision
     - type: euclidean_recall
-      value: 0.
+      value: 0.9761904761904762
       name: Euclidean Recall
     - type: euclidean_ap
-      value: 0.
+      value: 0.9720863676601571
       name: Euclidean Ap
     - type: max_accuracy
-      value: 0.
+      value: 0.9285714285714286
       name: Max Accuracy
     - type: max_accuracy_threshold
-      value:
+      value: 16.630834579467773
       name: Max Accuracy Threshold
     - type: max_f1
-      value: 0.
+      value: 0.9431818181818182
       name: Max F1
     - type: max_f1_threshold
-      value:
+      value: 19.740108489990234
       name: Max F1 Threshold
     - type: max_precision
-      value: 0.
+      value: 0.9111111111111111
       name: Max Precision
     - type: max_recall
-      value: 0.
+      value: 0.9880952380952381
       name: Max Recall
     - type: max_ap
-      value: 0.
+      value: 0.9728353486982702
       name: Max Ap
 ---
 
@@ -288,41 +288,41 @@ You can finetune this model on your own dataset.
 
 | Metric | Value |
 |:-----------------------------|:-----------|
-| cosine_accuracy | 0.
-| cosine_accuracy_threshold | 0.
-| cosine_f1 | 0.
-| cosine_f1_threshold | 0.
-| cosine_precision | 0.
-| cosine_recall | 0.
-| cosine_ap | 0.
-| dot_accuracy | 0.
-| dot_accuracy_threshold | 0.
-| dot_f1 | 0.
-| dot_f1_threshold | 0.
-| dot_precision | 0.
-| dot_recall | 0.
-| dot_ap | 0.
-| manhattan_accuracy | 0.
-| manhattan_accuracy_threshold |
-| manhattan_f1 | 0.
-| manhattan_f1_threshold |
-| manhattan_precision | 0.
-| manhattan_recall | 0.
-| manhattan_ap | 0.
-| euclidean_accuracy | 0.
-| euclidean_accuracy_threshold | 1.
-| euclidean_f1 | 0.
-| euclidean_f1_threshold | 1.
-| euclidean_precision | 0.
-| euclidean_recall | 0.
-| euclidean_ap | 0.
-| max_accuracy | 0.
-| max_accuracy_threshold |
-| max_f1 | 0.
-| max_f1_threshold |
-| max_precision | 0.
-| max_recall | 0.
-| **max_ap** | **0.
+| cosine_accuracy | 0.9286 |
+| cosine_accuracy_threshold | 0.4293 |
+| cosine_f1 | 0.9425 |
+| cosine_f1_threshold | 0.227 |
+| cosine_precision | 0.9111 |
+| cosine_recall | 0.9762 |
+| cosine_ap | 0.9721 |
+| dot_accuracy | 0.9286 |
+| dot_accuracy_threshold | 0.4293 |
+| dot_f1 | 0.9425 |
+| dot_f1_threshold | 0.227 |
+| dot_precision | 0.9111 |
+| dot_recall | 0.9762 |
+| dot_ap | 0.9721 |
+| manhattan_accuracy | 0.9286 |
+| manhattan_accuracy_threshold | 16.6308 |
+| manhattan_f1 | 0.9432 |
+| manhattan_f1_threshold | 19.7401 |
+| manhattan_precision | 0.9022 |
+| manhattan_recall | 0.9881 |
+| manhattan_ap | 0.9728 |
+| euclidean_accuracy | 0.9286 |
+| euclidean_accuracy_threshold | 1.0682 |
+| euclidean_f1 | 0.9425 |
+| euclidean_f1_threshold | 1.2433 |
+| euclidean_precision | 0.9111 |
+| euclidean_recall | 0.9762 |
+| euclidean_ap | 0.9721 |
+| max_accuracy | 0.9286 |
+| max_accuracy_threshold | 16.6308 |
+| max_f1 | 0.9432 |
+| max_f1_threshold | 19.7401 |
+| max_precision | 0.9111 |
+| max_recall | 0.9881 |
+| **max_ap** | **0.9728** |
 
 <!--
 ## Bias, Risks and Limitations
@@ -356,11 +356,11 @@ You can finetune this model on your own dataset.
 | <code>When it was dinner</code> | <code>Dinner time</code> | <code>1</code> |
 | <code>Did you cook chicken noodle last night?</code> | <code>Did you make chicken noodle for dinner?</code> | <code>1</code> |
 | <code>Someone who can change item</code> | <code>Someone who uses magic that turns something into something. </code> | <code>1</code> |
-* Loss: [<code>
+* Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:
   ```json
   {
       "scale": 20.0,
-      "similarity_fct": "
+      "similarity_fct": "pairwise_cos_sim"
   }
   ```
 
@@ -382,11 +382,11 @@ You can finetune this model on your own dataset.
 | <code>Let's check inside</code> | <code>Let's search inside</code> | <code>1</code> |
 | <code>Sohpie, are you okay?</code> | <code>Sophie Are you pressured?</code> | <code>0</code> |
 | <code>This wine glass is related.</code> | <code>This sword looks important.</code> | <code>0</code> |
-* Loss: [<code>
+* Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:
  ```json
   {
       "scale": 20.0,
-      "similarity_fct": "
+      "similarity_fct": "pairwise_cos_sim"
   }
   ```
 
@@ -521,19 +521,19 @@ You can finetune this model on your own dataset.
 | Epoch | Step | Training Loss | loss | custom-arc-semantics-data_max_ap |
 |:-----:|:----:|:-------------:|:------:|:--------------------------------:|
 | None | 0 | - | - | 0.9254 |
-| 1.0 | 70 |
-| 2.0 | 140 |
-| 3.0 | 210 | 0.
-| 4.0 | 280 | 0.
-| 5.0 | 350 | 0.
-| 6.0 | 420 | 0.
-| 7.0 | 490 | 0.
-| 8.0 | 560 | 0.
-| 9.0 | 630 | 0.
-| 10.0 | 700 | 0.
-| 11.0 | 770 | 0.
-| 12.0 | 840 | 0.
-| 13.0 | 910 | 0.
+| 1.0 | 70 | 2.9684 | 1.4087 | 0.9425 |
+| 2.0 | 140 | 1.4461 | 1.0942 | 0.9629 |
+| 3.0 | 210 | 0.6005 | 0.8398 | 0.9680 |
+| 4.0 | 280 | 0.3021 | 0.7577 | 0.9703 |
+| 5.0 | 350 | 0.2412 | 0.7216 | 0.9715 |
+| 6.0 | 420 | 0.1816 | 0.7538 | 0.9722 |
+| 7.0 | 490 | 0.1512 | 0.8049 | 0.9726 |
+| 8.0 | 560 | 0.1208 | 0.7602 | 0.9726 |
+| 9.0 | 630 | 0.0915 | 0.7286 | 0.9729 |
+| 10.0 | 700 | 0.0553 | 0.7072 | 0.9729 |
+| 11.0 | 770 | 0.0716 | 0.6984 | 0.9730 |
+| 12.0 | 840 | 0.0297 | 0.7063 | 0.9725 |
+| 13.0 | 910 | 0.0462 | 0.6997 | 0.9728 |
 
 
 ### Framework Versions
@@ -562,15 +562,14 @@ You can finetune this model on your own dataset.
 }
 ```
 
-#### 
+#### CoSENTLoss
 ```bibtex
-@
-    title={
-    author={
-    year={
-
-
-    primaryClass={cs.CL}
+@online{kexuefm-8847,
+    title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
+    author={Su Jianlin},
+    year={2022},
+    month={Jan},
+    url={https://kexue.fm/archives/8847},
 }
 ```
 
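A minimal usage sketch for the card added above (not part of the commit): load the model with the `sentence-transformers` library and score one of the sentence pairs shown in the diff. The repo id below is a placeholder, since the diff does not show the model id.

```python
from sentence_transformers import SentenceTransformer, util

# Placeholder repo id -- substitute the actual id of this repository.
model = SentenceTransformer("your-username/your-model-id")

# A sentence pair taken from the training examples shown in the diff.
embeddings = model.encode(["Let's check inside", "Let's search inside"])

# Cosine similarity between the two embeddings; pairs scoring above the
# reported cosine_accuracy_threshold (~0.43) would be predicted as a match.
score = util.cos_sim(embeddings[0], embeddings[1])
print(float(score))
```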
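The `CoSENTLoss` parameters recorded in the card (`scale: 20.0`, `similarity_fct: pairwise_cos_sim`) match the library defaults. A rough training sketch under assumptions not stated in the diff (the column names and base checkpoint are illustrative, and the two rows stand in for the 560-pair dataset):

```python
from datasets import Dataset
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.losses import CoSENTLoss

# Assumed base checkpoint; the diff does not name the underlying model.
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

# Tiny stand-in for the dataset described in the card: two sentence columns
# plus a 0/1 label, using example rows shown in the diff.
train_dataset = Dataset.from_dict({
    "sentence1": ["Let's check inside", "Sohpie, are you okay?"],
    "sentence2": ["Let's search inside", "Sophie Are you pressured?"],
    "label": [1.0, 0.0],
})

# scale=20.0 with the default pairwise cosine similarity reproduces the
# {"scale": 20.0, "similarity_fct": "pairwise_cos_sim"} configuration.
loss = CoSENTLoss(model, scale=20.0)

trainer = SentenceTransformerTrainer(
    model=model,
    train_dataset=train_dataset,
    loss=loss,
)
trainer.train()
```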
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2d9ab6b7472780e4b9271e02f535d125c33cef1b145ab2f8d3135ed97c72aea5
 size 90864192
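The model.safetensors change only swaps the sha256 oid in the Git LFS pointer (the file size is unchanged). A generic way to check a downloaded copy against the new hash, not something the commit itself provides:

```python
import hashlib

# Expected digest from the updated LFS pointer above.
expected = "2d9ab6b7472780e4b9271e02f535d125c33cef1b145ab2f8d3135ed97c72aea5"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print(h.hexdigest() == expected)
```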