Add new SentenceTransformer model

Files changed:
- 1_Pooling/config.json (+10, -0)
- README.md (+1243, -0)
- config.json (+24, -0)
- config_sentence_transformers.json (+10, -0)
- model.safetensors (+3, -0)
- modules.json (+14, -0)
- sentence_bert_config.json (+4, -0)
- special_tokens_map.json (+51, -0)
- tokenizer.json (+0, -0)
- tokenizer_config.json (+65, -0)
- vocab.txt (+0, -0)
1_Pooling/config.json (ADDED, +10 lines)

{
    "word_embedding_dimension": 768,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": true,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false,
    "pooling_mode_weightedmean_tokens": false,
    "pooling_mode_lasttoken": false,
    "include_prompt": true
}
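
This configuration enables mean pooling only: the sentence embedding is the average of the final-layer token embeddings. As a minimal illustrative sketch (not the library's internal code), assuming MPNet's 768-dimensional token embeddings:

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average token embeddings over the sequence, ignoring padding positions."""
    mask = attention_mask.unsqueeze(-1).float()    # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)  # (batch, dim)
    counts = mask.sum(dim=1).clamp(min=1e-9)       # avoid division by zero
    return summed / counts

# Dummy tensors shaped like MPNet output:
tokens = torch.randn(2, 16, 768)
mask = torch.ones(2, 16, dtype=torch.long)
print(mean_pool(tokens, mask).shape)  # torch.Size([2, 768])
```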
README.md (ADDED, +1243 lines)

---
language:
- en
license: apache-2.0
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:50000
- loss:CachedGISTEmbedLoss
base_model: microsoft/mpnet-base
widget:
- source_sentence: what does the accounts receivable turnover measure?
  sentences:
  - The accounts receivable turnover ratio is an accounting measure used to quantify
    a company's effectiveness in collecting its receivables or money owed by clients.
    The ratio shows how well a company uses and manages the credit it extends to customers
    and how quickly that short-term debt is collected or is paid.
  - Capital budgeting, and investment appraisal, is the planning process used to determine
    whether an organization's long term investments such as new machinery, replacement
    of machinery, new plants, new products, and research development projects are
    worth the funding of cash through the firm's capitalization structure ( ...
  - The accounts receivable turnover ratio is an accounting measure used to quantify
    a company's effectiveness in collecting its receivables or money owed by clients.
    The ratio shows how well a company uses and manages the credit it extends to customers
    and how quickly that short-term debt is collected or is paid.
- source_sentence: does gabapentin cause liver problems?
  sentences:
  - Gabapentin has no appreciable liver metabolism, yet, suspected cases of gabapentin-induced
    hepatotoxicity have been reported. Per literature review, two cases of possible
    gabapentin-induced liver injury have been reported.
  - Strongholds are a type of story mission which only unlocks after enough progression
    through the game. There are three Stronghold's during the first section of progression
    through The Division 2. You'll need to complete the first two and have reached
    level 30 before being able to unlock the final Stronghold.
  - The most-common side effects attributed to Gabapentin include mild sedation, ataxia,
    and occasional diarrhea. Sedation can be minimized by tapering from a smaller
    starting dose to the desired dose. When treating seizures, it is ideal to wean
    off the drug to reduce the risk of withdrawal seizures.
- source_sentence: how long should you wait to give blood after eating?
  sentences:
  - Until the bleeding has stopped it is natural to taste blood or to see traces of
    blood in your saliva. You may stop using gauze after the flow stops – usually
    around 8 hours after surgery.
  - Before donation The first and most important rule—never donate blood on an empty
    stomach. “Eat a wholesome meal about 2-3 hours before donating to keep your blood
    sugar stable," says Dr Chaturvedi. The timing of the meal is important too. You
    need to allow the food to be digested properly before the blood is drawn.
  - While grid computing involves virtualizing computing resources to store massive
    amounts of data, whereas cloud computing is where an application doesn't access
    resources directly, rather it accesses them through a service over the internet.
    ...
- source_sentence: what is the difference between chicken francese and chicken marsala?
  sentences:
  - Chicken is the species name, equivalent to our “human.” Rooster is an adult male,
    equivalent to “man.” Hen is an adult female, equivalent to “woman.” Cockerel is
    a juvenile male, equivalent to “boy/young man.”
  - What is 99 kg in pounds? - 99 kg is equal to 218.26 pounds.
  - The difference between the two is for Francese, the chicken breast is first dipped
    in flour, then into a beaten egg mixture, before being cooked. For piccata, the
    chicken is first dipped in egg and then in flour. Both are then simmered in a
    lemony butter sauce, but the piccata sauce includes capers.”
- source_sentence: what energy is released when coal is burned?
  sentences:
  - When coal is burned, it reacts with the oxygen in the air. This chemical reaction
    converts the stored solar energy into thermal energy, which is released as heat.
    But it also produces carbon dioxide and methane.
  - When coal is burned it releases a number of airborne toxins and pollutants. They
    include mercury, lead, sulfur dioxide, nitrogen oxides, particulates, and various
    other heavy metals.
  - Squad Building Challenges allow you to exchange sets of players for coins, packs,
    and special items in FUT 20. Each of these challenges come with specific requirements,
    such as including players from certain teams. ... Live SBCs are time-limited challenges
    which often give out unique, high-rated versions of players.
datasets:
- tomaarsen/gooaq-hard-negatives
pipeline_tag: sentence-similarity
library_name: sentence-transformers
metrics:
- cosine_accuracy@1
- cosine_accuracy@3
- cosine_accuracy@5
- cosine_accuracy@10
- cosine_precision@1
- cosine_precision@3
- cosine_precision@5
- cosine_precision@10
- cosine_recall@1
- cosine_recall@3
- cosine_recall@5
- cosine_recall@10
- cosine_ndcg@10
- cosine_mrr@10
- cosine_map@100
co2_eq_emissions:
  emissions: 40.54325678627484
  energy_consumed: 0.10430421450436282
  source: codecarbon
  training_type: fine-tuning
  on_cloud: false
  cpu_model: 13th Gen Intel(R) Core(TM) i7-13700K
  ram_total_size: 31.777088165283203
  hours_used: 0.301
  hardware_used: 1 x NVIDIA GeForce RTX 3090
model-index:
- name: MPNet base trained on Natural Questions pairs
  results:
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoClimateFEVER
      type: NanoClimateFEVER
    metrics:
    - type: cosine_accuracy@1
      value: 0.22
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.44
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.52
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.72
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.22
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.16666666666666663
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.12
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.09399999999999999
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.09333333333333332
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.195
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.2333333333333333
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.37233333333333335
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.2744024872493329
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.3594365079365079
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.20181676147957636
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoDBPedia
      type: NanoDBPedia
    metrics:
    - type: cosine_accuracy@1
      value: 0.46
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.62
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.76
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.82
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.46
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.38666666666666666
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.38799999999999996
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.344
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.03065300183409328
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.07730098142643593
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.14588470319900892
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.22159653924772912
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.3920743245484332
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.567
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.28153419189397744
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoFEVER
      type: NanoFEVER
    metrics:
    - type: cosine_accuracy@1
      value: 0.38
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.54
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.58
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.68
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.38
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.18
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.12
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.07
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.37
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.52
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.57
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.66
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.5156585003907987
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.4756666666666666
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.47620972127897226
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoFiQA2018
      type: NanoFiQA2018
    metrics:
    - type: cosine_accuracy@1
      value: 0.28
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.5
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.52
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.58
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.28
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.22
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.16399999999999998
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.09799999999999999
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.1371904761904762
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.3226904761904762
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.3682142857142857
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.43073809523809525
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.3420135901424927
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.38405555555555554
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.2826394452885763
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoHotpotQA
      type: NanoHotpotQA
    metrics:
    - type: cosine_accuracy@1
      value: 0.34
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.52
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.62
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.72
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.34
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.19333333333333333
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.14400000000000002
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.09200000000000001
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.17
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.29
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.36
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.46
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.3723049657456267
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.4570793650793651
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.2995175868330484
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoMSMARCO
      type: NanoMSMARCO
    metrics:
    - type: cosine_accuracy@1
      value: 0.1
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.28
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.52
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.68
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.1
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.09333333333333332
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.10400000000000001
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.068
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.1
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.28
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.52
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.68
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.36083481845261806
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.26157142857142857
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.27215692684924997
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoNFCorpus
      type: NanoNFCorpus
    metrics:
    - type: cosine_accuracy@1
      value: 0.26
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.38
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.44
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.5
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.26
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.21333333333333332
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.19599999999999998
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.13799999999999998
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.01122167476431692
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.02047531859468654
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.03079316493603994
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.0422192068561938
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.1654539374427929
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.3367460317460317
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.04901233559063261
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoNQ
      type: NanoNQ
    metrics:
    - type: cosine_accuracy@1
      value: 0.14
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.36
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.44
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.58
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.14
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.11999999999999998
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.08800000000000002
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.06000000000000001
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.13
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.34
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.41
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.55
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.33223439819785083
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.2734365079365079
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.2764557370904448
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoQuoraRetrieval
      type: NanoQuoraRetrieval
    metrics:
    - type: cosine_accuracy@1
      value: 0.82
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.9
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.92
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.96
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.82
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.3666666666666666
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.244
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.13399999999999998
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.7206666666666667
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.8553333333333333
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.8993333333333333
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.9566666666666666
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.8807317086981499
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.8616666666666666
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.8525831566094724
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoSCIDOCS
      type: NanoSCIDOCS
    metrics:
    - type: cosine_accuracy@1
      value: 0.34
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.48
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.54
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.66
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.34
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.24666666666666667
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.212
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.14800000000000002
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.07066666666666668
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.15366666666666667
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.21866666666666668
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.30466666666666664
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.28968259227673265
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.4286349206349206
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.22985309744949503
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoArguAna
      type: NanoArguAna
    metrics:
    - type: cosine_accuracy@1
      value: 0.18
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.56
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.62
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.84
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.18
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.18666666666666668
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.124
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.08399999999999999
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.18
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.56
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.62
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.84
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.49726259302609505
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.389079365079365
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.3967117258845785
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoSciFact
      type: NanoSciFact
    metrics:
    - type: cosine_accuracy@1
      value: 0.38
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.46
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.48
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.62
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.38
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.16666666666666663
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.10400000000000001
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.068
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.345
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.44
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.46
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.605
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.47012843706683605
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.4409285714285714
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.43840522432574647
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: NanoTouche2020
      type: NanoTouche2020
    metrics:
    - type: cosine_accuracy@1
      value: 0.5306122448979592
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.7551020408163265
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.8571428571428571
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.9387755102040817
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.5306122448979592
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.45578231292517
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.4040816326530612
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.336734693877551
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.03881638827876476
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.10008002766114979
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.13975964122053652
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.22966349775526734
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.39339080810676896
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.6553206997084549
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.31344772891929434
      name: Cosine Map@100
  - task:
      type: nano-beir
      name: Nano BEIR
    dataset:
      name: NanoBEIR mean
      type: NanoBEIR_mean
    metrics:
    - type: cosine_accuracy@1
      value: 0.3408163265306122
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.5227001569858712
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.6013186813186814
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.7152904238618524
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.3408163265306122
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.23044479330193612
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.1855447409733124
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.13344113029827318
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.18442678521033212
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.31958052337482684
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.3827680868002465
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.4886833850587655
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.4066287047188099
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.4531247913084647
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.33618027996100497
      name: Cosine Map@100
---

# MPNet base trained on Natural Questions pairs

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base) on the [gooaq-hard-negatives](https://huggingface.co/datasets/tomaarsen/gooaq-hard-negatives) dataset. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base) <!-- at revision 6996ce1e91bd2a9c7d7f61daec37463394f73f09 -->
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity
- **Training Dataset:**
    - [gooaq-hard-negatives](https://huggingface.co/datasets/tomaarsen/gooaq-hard-negatives)
- **Language:** en
- **License:** apache-2.0

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
)
```

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.

```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("tomaarsen/mpnet-base-nq-cgist-triplet-neg-gte")
# Run inference
sentences = [
    'what energy is released when coal is burned?',
    'When coal is burned, it reacts with the oxygen in the air. This chemical reaction converts the stored solar energy into thermal energy, which is released as heat. But it also produces carbon dioxide and methane.',
    'When coal is burned it releases a number of airborne toxins and pollutants. They include mercury, lead, sulfur dioxide, nitrogen oxides, particulates, and various other heavy metals.',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```

<!--
### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!--
### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

## Evaluation

### Metrics

#### Information Retrieval

* Datasets: `NanoClimateFEVER`, `NanoDBPedia`, `NanoFEVER`, `NanoFiQA2018`, `NanoHotpotQA`, `NanoMSMARCO`, `NanoNFCorpus`, `NanoNQ`, `NanoQuoraRetrieval`, `NanoSCIDOCS`, `NanoArguAna`, `NanoSciFact` and `NanoTouche2020`
* Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)

| Metric              | NanoClimateFEVER | NanoDBPedia | NanoFEVER | NanoFiQA2018 | NanoHotpotQA | NanoMSMARCO | NanoNFCorpus | NanoNQ | NanoQuoraRetrieval | NanoSCIDOCS | NanoArguAna | NanoSciFact | NanoTouche2020 |
|:--------------------|:-----------------|:------------|:----------|:-------------|:-------------|:------------|:-------------|:-------|:-------------------|:------------|:------------|:------------|:---------------|
| cosine_accuracy@1   | 0.22 | 0.46 | 0.38 | 0.28 | 0.34 | 0.1 | 0.26 | 0.14 | 0.82 | 0.34 | 0.18 | 0.38 | 0.5306 |
| cosine_accuracy@3   | 0.44 | 0.62 | 0.54 | 0.5 | 0.52 | 0.28 | 0.38 | 0.36 | 0.9 | 0.48 | 0.56 | 0.46 | 0.7551 |
| cosine_accuracy@5   | 0.52 | 0.76 | 0.58 | 0.52 | 0.62 | 0.52 | 0.44 | 0.44 | 0.92 | 0.54 | 0.62 | 0.48 | 0.8571 |
| cosine_accuracy@10  | 0.72 | 0.82 | 0.68 | 0.58 | 0.72 | 0.68 | 0.5 | 0.58 | 0.96 | 0.66 | 0.84 | 0.62 | 0.9388 |
| cosine_precision@1  | 0.22 | 0.46 | 0.38 | 0.28 | 0.34 | 0.1 | 0.26 | 0.14 | 0.82 | 0.34 | 0.18 | 0.38 | 0.5306 |
| cosine_precision@3  | 0.1667 | 0.3867 | 0.18 | 0.22 | 0.1933 | 0.0933 | 0.2133 | 0.12 | 0.3667 | 0.2467 | 0.1867 | 0.1667 | 0.4558 |
| cosine_precision@5  | 0.12 | 0.388 | 0.12 | 0.164 | 0.144 | 0.104 | 0.196 | 0.088 | 0.244 | 0.212 | 0.124 | 0.104 | 0.4041 |
| cosine_precision@10 | 0.094 | 0.344 | 0.07 | 0.098 | 0.092 | 0.068 | 0.138 | 0.06 | 0.134 | 0.148 | 0.084 | 0.068 | 0.3367 |
| cosine_recall@1     | 0.0933 | 0.0307 | 0.37 | 0.1372 | 0.17 | 0.1 | 0.0112 | 0.13 | 0.7207 | 0.0707 | 0.18 | 0.345 | 0.0388 |
| cosine_recall@3     | 0.195 | 0.0773 | 0.52 | 0.3227 | 0.29 | 0.28 | 0.0205 | 0.34 | 0.8553 | 0.1537 | 0.56 | 0.44 | 0.1001 |
| cosine_recall@5     | 0.2333 | 0.1459 | 0.57 | 0.3682 | 0.36 | 0.52 | 0.0308 | 0.41 | 0.8993 | 0.2187 | 0.62 | 0.46 | 0.1398 |
| cosine_recall@10    | 0.3723 | 0.2216 | 0.66 | 0.4307 | 0.46 | 0.68 | 0.0422 | 0.55 | 0.9567 | 0.3047 | 0.84 | 0.605 | 0.2297 |
| **cosine_ndcg@10**  | **0.2744** | **0.3921** | **0.5157** | **0.342** | **0.3723** | **0.3608** | **0.1655** | **0.3322** | **0.8807** | **0.2897** | **0.4973** | **0.4701** | **0.3934** |
| cosine_mrr@10       | 0.3594 | 0.567 | 0.4757 | 0.3841 | 0.4571 | 0.2616 | 0.3367 | 0.2734 | 0.8617 | 0.4286 | 0.3891 | 0.4409 | 0.6553 |
| cosine_map@100      | 0.2018 | 0.2815 | 0.4762 | 0.2826 | 0.2995 | 0.2722 | 0.049 | 0.2765 | 0.8526 | 0.2299 | 0.3967 | 0.4384 | 0.3134 |

#### Nano BEIR

* Dataset: `NanoBEIR_mean`
* Evaluated with [<code>NanoBEIREvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.NanoBEIREvaluator)

| Metric              | Value      |
|:--------------------|:-----------|
| cosine_accuracy@1   | 0.3408     |
| cosine_accuracy@3   | 0.5227     |
| cosine_accuracy@5   | 0.6013     |
| cosine_accuracy@10  | 0.7153     |
| cosine_precision@1  | 0.3408     |
| cosine_precision@3  | 0.2304     |
| cosine_precision@5  | 0.1855     |
| cosine_precision@10 | 0.1334     |
| cosine_recall@1     | 0.1844     |
| cosine_recall@3     | 0.3196     |
| cosine_recall@5     | 0.3828     |
| cosine_recall@10    | 0.4887     |
| **cosine_ndcg@10**  | **0.4066** |
| cosine_mrr@10       | 0.4531     |
| cosine_map@100      | 0.3362     |

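The table above can be regenerated with the `NanoBEIREvaluator` linked earlier. A minimal sketch, assuming the evaluator's lowercase subset names and default cosine scoring (omit `dataset_names` to run all thirteen subsets):

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator

model = SentenceTransformer("tomaarsen/mpnet-base-nq-cgist-triplet-neg-gte")
# Evaluate on a few of the thirteen Nano BEIR subsets.
evaluator = NanoBEIREvaluator(dataset_names=["msmarco", "nq", "hotpotqa"])
results = evaluator(model)
print(results[evaluator.primary_metric])  # mean cosine NDCG@10 over the chosen subsets
```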
<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### gooaq-hard-negatives

* Dataset: [gooaq-hard-negatives](https://huggingface.co/datasets/tomaarsen/gooaq-hard-negatives) at [87594a1](https://huggingface.co/datasets/tomaarsen/gooaq-hard-negatives/tree/87594a1e6c58e88b5843afa9da3a97ffd75d01c2)
* Size: 50,000 training samples
* Columns: <code>question</code>, <code>answer</code>, and <code>negative</code>
* Approximate statistics based on the first 1000 samples:
  |         | question | answer | negative |
  |:--------|:---------|:-------|:---------|
  | type    | string | string | string |
  | details | <ul><li>min: 8 tokens</li><li>mean: 11.53 tokens</li><li>max: 28 tokens</li></ul> | <ul><li>min: 14 tokens</li><li>mean: 59.79 tokens</li><li>max: 150 tokens</li></ul> | <ul><li>min: 15 tokens</li><li>mean: 58.76 tokens</li><li>max: 143 tokens</li></ul> |
* Samples:
  | question | answer | negative |
  |:---------|:-------|:---------|
  | <code>what is the difference between calories from fat and total fat?</code> | <code>Fat has more than twice as many calories per gram as carbohydrates and proteins. A gram of fat has about 9 calories, while a gram of carbohydrate or protein has about 4 calories. In other words, you could eat twice as much carbohydrates or proteins as fat for the same amount of calories.</code> | <code>Fat has more than twice as many calories per gram as carbohydrates and proteins. A gram of fat has about 9 calories, while a gram of carbohydrate or protein has about 4 calories. In other words, you could eat twice as much carbohydrates or proteins as fat for the same amount of calories.</code> |
  | <code>what is the difference between return transcript and account transcript?</code> | <code>A tax return transcript usually meets the needs of lending institutions offering mortgages and student loans. ... Tax Account Transcript - shows basic data such as return type, marital status, adjusted gross income, taxable income and all payment types. It also shows changes made after you filed your original return.</code> | <code>Trial balance is not a financial statement whereas a balance sheet is a financial statement. Trial balance is solely used for internal purposes whereas a balance sheet is used for purposes other than internal i.e. external. In a trial balance, each and every account is divided into debit (dr.) and credit (cr.)</code> |
  | <code>how long does my dog need to fast before sedation?</code> | <code>Now, guidelines are aimed towards 6-8 hours before surgery. This pre-op fasting time is much more beneficial for your pets because you have enough food in there to neutralize the stomach acid, preventing it from coming up the esophagus that causes regurgitation under anesthetic.</code> | <code>Try not to let your pooch rapidly wolf down his/her food! Do not let the dog play or exercise (e.g. go for a walk) for at least two hours after having a meal. Ensure continuous fresh water is available to avoid your pet gulping down a large amount after eating.</code> |
* Loss: [<code>CachedGISTEmbedLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cachedgistembedloss) with these parameters:
  ```json
  {'guide': SentenceTransformer(
    (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel
    (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
    (2): Normalize()
  ), 'temperature': 0.01}
  ```

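For context, `CachedGISTEmbedLoss` uses a small guide model to drop in-batch negatives that the guide scores as likely positives, and caches gradients so that the large 2048-sample batches fit on one GPU. A hedged construction sketch: the guide checkpoint and `mini_batch_size` below are assumptions, since the card only prints the guide's architecture (a 384-dimensional BERT with mean pooling and normalization):

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.losses import CachedGISTEmbedLoss

model = SentenceTransformer("microsoft/mpnet-base")  # base model being finetuned
# Assumed guide checkpoint matching the printed 384-dim architecture.
guide = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
loss = CachedGISTEmbedLoss(
    model,
    guide=guide,
    temperature=0.01,    # matches the parameters shown above
    mini_batch_size=32,  # assumed; chunks each 2048-sample batch for gradient caching
)
```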
### Evaluation Dataset

#### gooaq-hard-negatives

* Dataset: [gooaq-hard-negatives](https://huggingface.co/datasets/tomaarsen/gooaq-hard-negatives) at [87594a1](https://huggingface.co/datasets/tomaarsen/gooaq-hard-negatives/tree/87594a1e6c58e88b5843afa9da3a97ffd75d01c2)
* Size: 10,048,700 evaluation samples
* Columns: <code>question</code>, <code>answer</code>, and <code>negative</code>
* Approximate statistics based on the first 1000 samples:
  |         | question | answer | negative |
  |:--------|:---------|:-------|:---------|
  | type    | string | string | string |
  | details | <ul><li>min: 8 tokens</li><li>mean: 11.61 tokens</li><li>max: 21 tokens</li></ul> | <ul><li>min: 16 tokens</li><li>mean: 58.16 tokens</li><li>max: 131 tokens</li></ul> | <ul><li>min: 14 tokens</li><li>mean: 57.98 tokens</li><li>max: 157 tokens</li></ul> |
* Samples:
  | question | answer | negative |
  |:---------|:-------|:---------|
  | <code>how is height width and length written?</code> | <code>The Graphics' industry standard is width by height (width x height). Meaning that when you write your measurements, you write them from your point of view, beginning with the width.</code> | <code>The Graphics' industry standard is width by height (width x height). Meaning that when you write your measurements, you write them from your point of view, beginning with the width. That's important.</code> |
  | <code>what is the difference between pork shoulder and loin?</code> | <code>All the recipes I've found for pulled pork recommends a shoulder/butt. Shoulders take longer to cook than a loin, because they're tougher. Loins are lean, while shoulders have marbled fat inside.</code> | <code>They are extracted from the loin, which runs from the hip to the shoulder, and it has a small strip of meat called the tenderloin. Unlike other pork, this pork chop is cut from four major sections, which are the shoulder, also known as the blade chops, ribs chops, loin chops, and the last, which is the sirloin chops.</code> |
  | <code>is the yin yang symbol religious?</code> | <code>The ubiquitous yin-yang symbol holds its roots in Taoism/Daoism, a Chinese religion and philosophy. The yin, the dark swirl, is associated with shadows, femininity, and the trough of a wave; the yang, the light swirl, represents brightness, passion and growth.</code> | <code>Yin energy is in the calm colors around you, in the soft music, in the soothing sound of a water fountain, or the relaxing images of water. Yang (active energy) is the feng shui energy expressed in strong, vibrant sounds and colors, bright lights, upward moving energy, tall plants, etc.</code> |
* Loss: [<code>CachedGISTEmbedLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cachedgistembedloss) with these parameters:
  ```json
  {'guide': SentenceTransformer(
    (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel
    (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
    (2): Normalize()
  ), 'temperature': 0.01}
  ```

### Training Hyperparameters
#### Non-Default Hyperparameters

- `eval_strategy`: steps
- `per_device_train_batch_size`: 2048
- `per_device_eval_batch_size`: 2048
- `learning_rate`: 2e-05
- `num_train_epochs`: 1
- `warmup_ratio`: 0.1
- `seed`: 12
- `bf16`: True

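These non-default values map directly onto `SentenceTransformerTrainingArguments`; a minimal sketch (the output directory is hypothetical, everything else mirrors the list above):

```python
from sentence_transformers import SentenceTransformerTrainingArguments

args = SentenceTransformerTrainingArguments(
    output_dir="mpnet-base-gooaq-cgist",  # hypothetical path
    eval_strategy="steps",
    per_device_train_batch_size=2048,
    per_device_eval_batch_size=2048,
    learning_rate=2e-5,
    num_train_epochs=1,
    warmup_ratio=0.1,
    seed=12,
    bf16=True,
)
```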
#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: steps
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 2048
- `per_device_eval_batch_size`: 2048
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 2e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 1
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.1
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 12
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: True
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `include_for_metrics`: []
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`:
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `eval_use_gather_object`: False
- `average_tokens_across_devices`: False
- `prompts`: None
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: proportional

</details>

### Training Logs

| Epoch | Step | Training Loss | Validation Loss | NanoClimateFEVER_cosine_ndcg@10 | NanoDBPedia_cosine_ndcg@10 | NanoFEVER_cosine_ndcg@10 | NanoFiQA2018_cosine_ndcg@10 | NanoHotpotQA_cosine_ndcg@10 | NanoMSMARCO_cosine_ndcg@10 | NanoNFCorpus_cosine_ndcg@10 | NanoNQ_cosine_ndcg@10 | NanoQuoraRetrieval_cosine_ndcg@10 | NanoSCIDOCS_cosine_ndcg@10 | NanoArguAna_cosine_ndcg@10 | NanoSciFact_cosine_ndcg@10 | NanoTouche2020_cosine_ndcg@10 | NanoBEIR_mean_cosine_ndcg@10 |
|:-----:|:----:|:-------------:|:---------------:|:-------------------------------:|:--------------------------:|:------------------------:|:---------------------------:|:---------------------------:|:--------------------------:|:---------------------------:|:---------------------:|:---------------------------------:|:--------------------------:|:--------------------------:|:--------------------------:|:-----------------------------:|:----------------------------:|
| 0.04 | 1 | 11.5141 | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - |
| 0.2 | 5 | 9.4407 | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - |
| 0.4 | 10 | 5.6005 | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - |
| 0.6 | 15 | 3.7323 | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - |
| 0.8 | 20 | 2.7976 | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - |
| 1.0 | 25 | 2.1899 | 1.3429 | 0.2744 | 0.3921 | 0.5157 | 0.3420 | 0.3723 | 0.3608 | 0.1655 | 0.3322 | 0.8807 | 0.2897 | 0.4973 | 0.4701 | 0.3934 | 0.4066 |

### Environmental Impact
Carbon emissions were measured using [CodeCarbon](https://github.com/mlco2/codecarbon).
- **Energy Consumed**: 0.104 kWh
- **Carbon Emitted**: 0.041 kg of CO2
- **Hours Used**: 0.301 hours

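These figures come from a CodeCarbon tracker running alongside training. A minimal sketch of how such tracking is typically wired in; the project name is an illustrative assumption:

```python
from codecarbon import EmissionsTracker

# Sketch: wrap the training run in a CodeCarbon tracker.
# "mpnet-base-training" is a hypothetical project name, not taken from this card.
tracker = EmissionsTracker(project_name="mpnet-base-training")
tracker.start()
try:
    ...  # trainer.train() would run here
finally:
    emissions_kg = tracker.stop()  # returns emissions in kg of CO2-equivalent
    print(f"Emitted about {emissions_kg:.3f} kg CO2eq")
```
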
### Training Hardware
- **On Cloud**: No
- **GPU Model**: 1 x NVIDIA GeForce RTX 3090
- **CPU Model**: 13th Gen Intel(R) Core(TM) i7-13700K
- **RAM Size**: 31.78 GB

### Framework Versions
- Python: 3.11.6
- Sentence Transformers: 3.4.0.dev0
- Transformers: 4.46.2
- PyTorch: 2.5.0+cu121
- Accelerate: 0.35.0.dev0
- Datasets: 2.20.0
- Tokenizers: 0.20.3

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who created the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json
ADDED
@@ -0,0 +1,24 @@
{
  "_name_or_path": "microsoft/mpnet-base",
  "architectures": [
    "MPNetModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "mpnet",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "relative_attention_num_buckets": 32,
  "torch_dtype": "float32",
  "transformers_version": "4.46.2",
  "vocab_size": 30527
}
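This is the standard `microsoft/mpnet-base` configuration: 12 layers, 12 attention heads, 768-dimensional hidden states. A short sketch of inspecting it once the repo is available locally; the local path is an assumption:

```python
from transformers import AutoConfig

# Sketch: "./my-mpnet-model" is a hypothetical local checkout of this repo.
config = AutoConfig.from_pretrained("./my-mpnet-model")
print(config.model_type)         # "mpnet"
print(config.hidden_size)        # 768 -> matches the pooling output dimension
print(config.num_hidden_layers)  # 12
```
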
config_sentence_transformers.json
ADDED
@@ -0,0 +1,10 @@
{
  "__version__": {
    "sentence_transformers": "3.4.0.dev0",
    "transformers": "4.46.2",
    "pytorch": "2.5.0+cu121"
  },
  "prompts": {},
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
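`similarity_fn_name: "cosine"` means `model.similarity(...)` computes cosine similarity by default. A brief sketch; the model path and sentences are illustrative:

```python
from sentence_transformers import SentenceTransformer

# Sketch: "./my-mpnet-model" is a hypothetical local path to this repo.
model = SentenceTransformer("./my-mpnet-model")
embeddings = model.encode(["A small example sentence.", "Another sentence."])
# Uses the configured similarity_fn_name, i.e. cosine similarity here.
print(model.similarity(embeddings, embeddings))  # 2x2 similarity matrix
```
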
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:88e220228e703bd50a9d05e113fceab3cb6fcdc6084b80b6b04e228c15001418
size 437967672
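The diff shows the Git LFS pointer rather than the ~438 MB weights themselves. Once the actual file is fetched, a hedged sketch of inspecting it with the `safetensors` library:

```python
from safetensors import safe_open

# Sketch: assumes the real weights (not the LFS pointer) are present locally.
with safe_open("model.safetensors", framework="pt") as f:
    for name in list(f.keys())[:3]:
        print(name, f.get_tensor(name).shape)
```
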
modules.json
ADDED
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
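`modules.json` defines the two-stage pipeline: a `Transformer` module at the repo root feeding a `Pooling` module configured for mean pooling in `1_Pooling/`. A sketch of assembling the equivalent pipeline by hand:

```python
from sentence_transformers import SentenceTransformer, models

# Sketch: rebuild the same Transformer -> mean-pooling pipeline from scratch.
word_embedding = models.Transformer("microsoft/mpnet-base", max_seq_length=512)
pooling = models.Pooling(
    word_embedding.get_word_embedding_dimension(),  # 768
    pooling_mode="mean",  # matches pooling_mode_mean_tokens: true
)
model = SentenceTransformer(modules=[word_embedding, pooling])
```
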
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": false
}
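Inputs longer than 512 tokens are truncated by the Transformer module. The limit is exposed at runtime and can be lowered if shorter inputs suffice; a small sketch with an assumed local path:

```python
from sentence_transformers import SentenceTransformer

# Sketch: "./my-mpnet-model" is a hypothetical local path to this repo.
model = SentenceTransformer("./my-mpnet-model")
print(model.max_seq_length)  # 512, from sentence_bert_config.json
model.max_seq_length = 256   # optional: truncate earlier to speed up encoding
```
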
special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render. See the raw file in the repository.
tokenizer_config.json
ADDED
@@ -0,0 +1,65 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "104": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "30526": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "cls_token": "<s>",
  "do_lower_case": true,
  "eos_token": "</s>",
  "mask_token": "<mask>",
  "model_max_length": 512,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "MPNetTokenizer",
  "unk_token": "[UNK]"
}
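The MPNet tokenizer lower-cases input (`do_lower_case: true`) and caps sequences at 512 tokens (`model_max_length`). A quick sketch of loading it and checking this behavior; the local path is an assumption:

```python
from transformers import AutoTokenizer

# Sketch: "./my-mpnet-model" is a hypothetical local checkout of this repo.
tokenizer = AutoTokenizer.from_pretrained("./my-mpnet-model")
print(type(tokenizer).__name__)    # MPNetTokenizerFast (or MPNetTokenizer)
print(tokenizer.model_max_length)  # 512
encoded = tokenizer("An Example Sentence")
# Lower-cased word pieces wrapped in <s> ... </s>
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
```
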
vocab.txt
ADDED
The diff for this file is too large to render. See the raw file in the repository.