Add new SentenceTransformer model.
- 1_Pooling/config.json +10 -0
- README.md +941 -0
- config.json +58 -0
- config_sentence_transformers.json +10 -0
- model.safetensors +3 -0
- modules.json +14 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +55 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
{
    "word_embedding_dimension": 768,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": true,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false,
    "pooling_mode_weightedmean_tokens": false,
    "pooling_mode_lasttoken": false,
    "include_prompt": true
}
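
This configuration enables mean pooling only: the sentence embedding is the average of the token embeddings, weighted by the attention mask. A minimal sketch of the computation this Pooling module performs under this config (PyTorch; tensor names are illustrative):

```python
import torch

def mean_pooling(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average token embeddings, ignoring padding positions.

    token_embeddings: (batch, seq_len, 768) transformer outputs
    attention_mask:   (batch, seq_len) with 1 for real tokens, 0 for padding
    """
    # Expand the mask to the embedding dimension so padded tokens contribute zeros.
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = (token_embeddings * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1e-9)  # avoid division by zero on empty rows
    return summed / counts
```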
README.md
ADDED
@@ -0,0 +1,941 @@
---
language:
- en
license: apache-2.0
library_name: sentence-transformers
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:1453
- loss:MatryoshkaLoss
- loss:MultipleNegativesRankingLoss
base_model: nomic-ai/nomic-embed-text-v1.5
datasets: []
metrics:
- cosine_accuracy@1
- cosine_accuracy@3
- cosine_accuracy@5
- cosine_accuracy@10
- cosine_precision@1
- cosine_precision@3
- cosine_precision@5
- cosine_precision@10
- cosine_recall@1
- cosine_recall@3
- cosine_recall@5
- cosine_recall@10
- cosine_ndcg@10
- cosine_mrr@10
- cosine_map@100
widget:
- source_sentence: 'We therefore conducted a hospital based cross sectional study
    involving 101 HCWs from two facilities in Kumasi, Ghana to assess the level of
    preparedness of HCWs to respond to any possible EVD. METHODS: We administered
    a face-to-face questionnaire using an adapted WHO (2015) and CDC (2014) Checklist
    for Ebola Preparedness and assessed overall knowledge gaps, and preparedness of
    the Ghanaian HCWs in selected health facilities of the Ashanti Region of Ghana
    from October to December 2015. RESULTS: A total 92 (91.09%) HCWs indicated they
    were not adequately trained to handle an EVD suspected case. Only 25.74% (n =
    26) considered their facilities sufficiently equipped to handle and manage EVD
    patients. When asked which disinfectant to use after attending to and caring for
    a suspected patient with EVD, only 8.91% (n = 9) could correctly identify the
    right disinfectant (χ(2) = 28.52, p = 0.001). CONCLUSION: Our study demonstrates
    poor knowledge and ill preparedness and unwillingness of many HCWs to attend to
    EVD. Beyond knowledge acquisition, there is the need for more training from time
    to time to fully prepare HCWs to handle any possible EVD case. Text: During the
    last outbreak of Ebola Virus Disease (EVD) and its consequential massive epidemic
    with very high mortality [1] , many health systems and services in West Africa
    were overwhelmed and disrupted.'
  sentences:
  - How many facilities believed they were adequately equipped to handle Ebla virus
    disease?
  - What developments have been made possible by the study of B-cell repertoire?
  - Where does the NLRP3 inflammasome activate after a SARS-CoV infection?
- source_sentence: All influenza A pandemics since that time, and indeed almost all
    cases of influenza A worldwide (except- ing human infections from avian Viruses
    such as H5N1 and H7N7), have been caused by descendants of the 1918 Virus, including
    “drifted” H1N1 Viruses and reassorted H2N2 and H3N2 Viruses. The latter are composed
    of key genes from the 1918 Virus, updated by subsequently-incor— porated avian
    influenza genes that code for novel surface *Armed Forces Institute of Pathology,
    Rockville, Maryland, USA; and TNational Institutes of Health, Bethesda, Maryland,
    USA proteins, making the 1918 Virus indeed the “mother” of all pandemics. In 1918,
    the cause of human influenza and its links to avian and swine influenza were unknown.
    Despite clinical and epidemiologic similarities to influenza pandemics of 1889,
    1847, and even earlier, many questioned whether such an explosively fatal disease
    could be influenza at all. That question did not begin to be resolved until the
    1930s, when closely related influenza Viruses (now known to be H1N1 Viruses) were
    isolated, first from pigs and shortly thereafter from humans. Seroepidemiologic
    studies soon linked both of these viruses to the 1918 pandemic (8). Subsequent
    research indicates that descendants of the 1918 Virus still persists enzootically
    in pigs. They probably also circulated continuously in humans, undergoing gradual
    antigenic drift and causing annual epidemics, until the 1950s.
  sentences:
  - What causes Q fever?
  - What was the mean length of the sequenced read?
  - When was it determined that the 1918 pandemic was caused by the H1N1 Influenza
    virus?
- source_sentence: These results showed that CD3 + CD4 + T cells have obviously (P<0.01)
    increased ( Figure 5B ), nevertheless the CD3 + CD8 + T cells remarkably (P<0.05)
    declined ( Figure 5C ). After calculation, the ratio of CD4 + /CD8 + T cells increased
    ( Figure 5D ). This ratio could also further measure the immunity levels of piglets.
    Cytokine IL-1β and IL-10 levels were determined to evaluate cellular immune responses
    induced by B. subtilis-RC as shown in Figure 6A ,B. As we can see from the diagram,
    significantly (P<0.01) higher IL-1β and IL-10 were produced after oral administration
    with B. subtilis-RC than the other two groups. These all revealed that B. subtilis-RC
    could stimulate cytokines release to mediate communication with and between cells
    of the immune system, improving the mucosal immune response to PEDV infection.
    The PEDV neutralizing antibodies were detected by PRNT assay. Oral administration
    with B. subtilis-RC could effectively reduce the plaque-forming ability of PEDV
    (P<0.01) compared with other two groups in Figure 7 .
  sentences:
  - Why are antibody epitope based peptide vaccines are no longer an active research
    area?
  - What is a conclusion of this study?
  - What is an effective indicator of a vaccine's ability to generate an immune response?
- source_sentence: Many types of bacteriophage and engineered phage variants, including
    filamentous phage, have been proposed for prophylactic use ex vivo in food safety,
    either in the production pipeline (reviewed in Dalmasso et al., 2014) or for detection
    of foodborne pathogens post-production (reviewed in Schmelcher and Loessner, 2014)
    . Filamentous phage displaying a tetracysteine tag on pIII were used to detect
    E. coli cells through staining with biarsenical dye . M13 phage functionalized
    with metallic silver were highly bactericidal against E. coli and Staphylococcus
    epidermidis . Biosensors based on surface plasmon resonance (Nanduri et al., 2007)
    , piezoelectric transducers (Olsen et al., 2006) , linear dichroism (Pacheco-Gomez
    et al., 2012) , and magnetoelastic sensor technology (Lakshmanan et al., 2007;
    Huang et al., 2009) were devised using filamentous phage displaying scFv or conjugated
    to whole IgG against E. coli, Listeria monocytogenes, Salmonella typhimurium,
    and Bacillus anthracis with limits of detection on the order of 10 2 -10 6 bacterial
    cells/mL. Proof of concept has been demonstrated for use of such phage-based biosensors
    to detect bacterial contamination of live produce (Li et al., 2010b) and eggs
    (Chai et al., 2012) . The filamentous phage particle is enclosed by a rod-like
    protein capsid, ∼1000 nm long and 5 nm wide, made up almost entirely of overlapping
    pVIII monomers, each of which lies ∼27 angstroms from its nearest neighbor and
    exposes two amine groups as well as at least three carboxyl groups (Henry et al.,
    2011) . The regularity of the phage pVIII lattice and its diversity of chemically
    addressable groups make it an ideal scaffold for bioconjugation (Figure 3) . The
    most commonly used approach is functionalization of amine groups with NHS esters
    (van Houten et al., 2006 (van Houten et al., , 2010 Yacoby et al., 2006) , although
    this can result in unwanted acylation of pIII and any displayed biomolecules.
  sentences:
  - What is the contrast with SARS-COV and MERS=COV?
  - What is the structure of a filamentous phage particle?
  - Why do treatment and management vary in efficacy?
- source_sentence: The monolayers were removed from their plastic surfaces and serially
    passaged whenever they became confluent. Cells were plated out onto 96-well culture
    plates for cytotoxicity and anti-influenza assays, and propagated at 37 °C in
    an atmosphere of 5% CO 2 . The influenza strain A/Leningrad/134/17/1957 H2N2)
    was purchased from National Control Institute of Veterinary Bioproducts and Pharmaceuticals
    (Beijing, China). Virus was routinely grown on MDCK cells. The stock cultures
    were prepared from supernatants of infected cells and stored at −80 °C. The cellular
    toxicity of patchouli alcohol on MDCK cells was assessed by the MTT method. Briefly,
    cells were seeded on a microtiter plate in the absence or presence of various
    concentrations (20 µM -0.0098 µM) of patchouli alcohol (eight replicates) and
    incubated at 37 °C in a humidified atmosphere of 5% CO 2 for 72 h. The supernatants
    were discarded, washed with PBS twice and MTT reagent (5 mg/mL in PBS) was added
    to each well. After incubation at 37 °C for 4 h, the supernatants were removed,
    then 200 μL DMSO was added and incubated at 37 °C for another 30 min.
  sentences:
  - What can be a factor in using common vectors for the delivery of vaccines?
  - ' What can some of the other activities of N have, be linked to?'
  - What method was used to measure the inhibition of viral replication?
pipeline_tag: sentence-similarity
model-index:
- name: nomic-text-embed COVID QA Matryoshka test
  results:
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: dim 768
      type: dim_768
    metrics:
    - type: cosine_accuracy@1
      value: 0.32098765432098764
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.6049382716049383
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.7222222222222222
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.8580246913580247
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.32098765432098764
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.20164609053497942
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.14444444444444443
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.08580246913580246
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.32098765432098764
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.6049382716049383
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.7222222222222222
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.8580246913580247
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.5726476297998092
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.4831545169508133
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.4876624839192167
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: dim 512
      type: dim_512
    metrics:
    - type: cosine_accuracy@1
      value: 0.3395061728395062
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.6172839506172839
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.691358024691358
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.8395061728395061
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.3395061728395062
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.20576131687242796
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.1382716049382716
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.0839506172839506
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.3395061728395062
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.6172839506172839
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.691358024691358
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.8395061728395061
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.5769674187028887
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.4942803252988438
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.49996505521200235
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: dim 256
      type: dim_256
    metrics:
    - type: cosine_accuracy@1
      value: 0.3148148148148148
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.5864197530864198
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.6604938271604939
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.7901234567901234
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.3148148148148148
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.19547325102880658
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.13209876543209875
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.07901234567901234
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.3148148148148148
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.5864197530864198
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.6604938271604939
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.7901234567901234
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.5454859667021819
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.46796492259455236
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.4775435566293839
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: dim 128
      type: dim_128
    metrics:
    - type: cosine_accuracy@1
      value: 0.2716049382716049
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.5370370370370371
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.654320987654321
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.7283950617283951
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.2716049382716049
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.17901234567901234
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.1308641975308642
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.0728395061728395
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.2716049382716049
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.5370370370370371
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.654320987654321
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.7283950617283951
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.4965852195530764
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.4220825984714875
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.43352458189921866
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: dim 64
      type: dim_64
    metrics:
    - type: cosine_accuracy@1
      value: 0.24074074074074073
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.47530864197530864
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.5864197530864198
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.6728395061728395
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.24074074074074073
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.15843621399176952
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.11728395061728394
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.06728395061728394
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.24074074074074073
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.47530864197530864
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.5864197530864198
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.6728395061728395
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.4508577703429953
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.3797864001567706
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.39108804574508443
      name: Cosine Map@100
---

# nomic-text-embed COVID QA Matryoshka test

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [nomic-ai/nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [nomic-ai/nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) <!-- at revision b0753ae76394dd36bcfb912a46018088bca48be0 -->
- **Maximum Sequence Length:** 8192 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity
<!-- - **Training Dataset:** Unknown -->
- **Language:** en
- **License:** apache-2.0

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 8192, 'do_lower_case': False}) with Transformer model: NomicBertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
)
```

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub (the Nomic BERT backbone ships custom modeling
# code, so loading requires trust_remote_code=True)
model = SentenceTransformer("JerryO3/test", trust_remote_code=True)
# Run inference
sentences = [
    'The monolayers were removed from their plastic surfaces and serially passaged whenever they became confluent. Cells were plated out onto 96-well culture plates for cytotoxicity and anti-influenza assays, and propagated at 37 °C in an atmosphere of 5% CO 2 . The influenza strain A/Leningrad/134/17/1957 H2N2) was purchased from National Control Institute of Veterinary Bioproducts and Pharmaceuticals (Beijing, China). Virus was routinely grown on MDCK cells. The stock cultures were prepared from supernatants of infected cells and stored at −80 °C. The cellular toxicity of patchouli alcohol on MDCK cells was assessed by the MTT method. Briefly, cells were seeded on a microtiter plate in the absence or presence of various concentrations (20 µM -0.0098 µM) of patchouli alcohol (eight replicates) and incubated at 37 °C in a humidified atmosphere of 5% CO 2 for 72 h. The supernatants were discarded, washed with PBS twice and MTT reagent (5 mg/mL in PBS) was added to each well. After incubation at 37 °C for 4 h, the supernatants were removed, then 200 μL DMSO was added and incubated at 37 °C for another 30 min.',
    'What method was used to measure the inhibition of viral replication?',
    'What can be a factor in using common vectors for the delivery of vaccines?',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
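
Because the model was trained with MatryoshkaLoss, its embeddings can be truncated to 512, 256, 128, or 64 dimensions with only the accuracy trade-offs shown in the Evaluation section below. A minimal sketch using the `truncate_dim` argument of recent sentence-transformers releases:

```python
from sentence_transformers import SentenceTransformer

# Load the same model but keep only the leading 256 embedding dimensions.
model_256 = SentenceTransformer("JerryO3/test", truncate_dim=256, trust_remote_code=True)

embeddings = model_256.encode([
    "What method was used to measure the inhibition of viral replication?",
])
print(embeddings.shape)
# (1, 256)
```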

<!--
### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!--
### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

## Evaluation

### Metrics

#### Information Retrieval
* Dataset: `dim_768`
* Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)

| Metric              | Value      |
|:--------------------|:-----------|
| cosine_accuracy@1   | 0.321      |
| cosine_accuracy@3   | 0.6049     |
| cosine_accuracy@5   | 0.7222     |
| cosine_accuracy@10  | 0.858      |
| cosine_precision@1  | 0.321      |
| cosine_precision@3  | 0.2016     |
| cosine_precision@5  | 0.1444     |
| cosine_precision@10 | 0.0858     |
| cosine_recall@1     | 0.321      |
| cosine_recall@3     | 0.6049     |
| cosine_recall@5     | 0.7222     |
| cosine_recall@10    | 0.858      |
| cosine_ndcg@10      | 0.5726     |
| cosine_mrr@10       | 0.4832     |
| **cosine_map@100**  | **0.4877** |

#### Information Retrieval
* Dataset: `dim_512`
* Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)

| Metric              | Value   |
|:--------------------|:--------|
| cosine_accuracy@1   | 0.3395  |
| cosine_accuracy@3   | 0.6173  |
| cosine_accuracy@5   | 0.6914  |
| cosine_accuracy@10  | 0.8395  |
| cosine_precision@1  | 0.3395  |
| cosine_precision@3  | 0.2058  |
| cosine_precision@5  | 0.1383  |
| cosine_precision@10 | 0.084   |
| cosine_recall@1     | 0.3395  |
| cosine_recall@3     | 0.6173  |
| cosine_recall@5     | 0.6914  |
| cosine_recall@10    | 0.8395  |
| cosine_ndcg@10      | 0.577   |
| cosine_mrr@10       | 0.4943  |
| **cosine_map@100**  | **0.5** |

#### Information Retrieval
* Dataset: `dim_256`
* Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)

| Metric              | Value      |
|:--------------------|:-----------|
| cosine_accuracy@1   | 0.3148     |
| cosine_accuracy@3   | 0.5864     |
| cosine_accuracy@5   | 0.6605     |
| cosine_accuracy@10  | 0.7901     |
| cosine_precision@1  | 0.3148     |
| cosine_precision@3  | 0.1955     |
| cosine_precision@5  | 0.1321     |
| cosine_precision@10 | 0.079      |
| cosine_recall@1     | 0.3148     |
| cosine_recall@3     | 0.5864     |
| cosine_recall@5     | 0.6605     |
| cosine_recall@10    | 0.7901     |
| cosine_ndcg@10      | 0.5455     |
| cosine_mrr@10       | 0.468      |
| **cosine_map@100**  | **0.4775** |

#### Information Retrieval
* Dataset: `dim_128`
* Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)

| Metric              | Value      |
|:--------------------|:-----------|
| cosine_accuracy@1   | 0.2716     |
| cosine_accuracy@3   | 0.537      |
| cosine_accuracy@5   | 0.6543     |
| cosine_accuracy@10  | 0.7284     |
| cosine_precision@1  | 0.2716     |
| cosine_precision@3  | 0.179      |
| cosine_precision@5  | 0.1309     |
| cosine_precision@10 | 0.0728     |
| cosine_recall@1     | 0.2716     |
| cosine_recall@3     | 0.537      |
| cosine_recall@5     | 0.6543     |
| cosine_recall@10    | 0.7284     |
| cosine_ndcg@10      | 0.4966     |
| cosine_mrr@10       | 0.4221     |
| **cosine_map@100**  | **0.4335** |

#### Information Retrieval
* Dataset: `dim_64`
* Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)

| Metric              | Value      |
|:--------------------|:-----------|
| cosine_accuracy@1   | 0.2407     |
| cosine_accuracy@3   | 0.4753     |
| cosine_accuracy@5   | 0.5864     |
| cosine_accuracy@10  | 0.6728     |
| cosine_precision@1  | 0.2407     |
| cosine_precision@3  | 0.1584     |
| cosine_precision@5  | 0.1173     |
| cosine_precision@10 | 0.0673     |
| cosine_recall@1     | 0.2407     |
| cosine_recall@3     | 0.4753     |
| cosine_recall@5     | 0.5864     |
| cosine_recall@10    | 0.6728     |
| cosine_ndcg@10      | 0.4509     |
| cosine_mrr@10       | 0.3798     |
| **cosine_map@100**  | **0.3911** |

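The per-dimension numbers above come from scoring the same embeddings after truncation. A hedged sketch of how such an evaluation can be reproduced with sentence-transformers (the query/corpus dicts here are placeholders you would build from your own held-out split):

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator

# Hypothetical evaluation split: ids mapped to texts, plus relevance judgments.
queries = {"q1": "What method was used to measure the inhibition of viral replication?"}
corpus = {"d1": "The cellular toxicity of patchouli alcohol on MDCK cells was assessed by the MTT method. ..."}
relevant_docs = {"q1": {"d1"}}

model = SentenceTransformer("JerryO3/test", trust_remote_code=True)

# One evaluator per Matryoshka dimension; truncate_dim controls the slice scored.
for dim in (768, 512, 256, 128, 64):
    evaluator = InformationRetrievalEvaluator(
        queries=queries,
        corpus=corpus,
        relevant_docs=relevant_docs,
        name=f"dim_{dim}",
        truncate_dim=dim,
    )
    print(dim, evaluator(model))
```
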
<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 1,453 training samples
* Columns: <code>positive</code> and <code>anchor</code>
* Approximate statistics based on the first 1000 samples:
  |         | positive                                                                               | anchor                                                                            |
  |:--------|:---------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|
  | type    | string                                                                                 | string                                                                            |
  | details | <ul><li>min: 112 tokens</li><li>mean: 319.17 tokens</li><li>max: 778 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 14.84 tokens</li><li>max: 65 tokens</li></ul> |
* Samples:
  | positive | anchor |
  |:---------|:-------|
  | <code>We find that the slowing growth in daily reported deaths in Italy is consistent with a significant impact of interventions implemented several weeks earlier. In Italy, we estimate that the effective reproduction number, Rt, dropped to close to 1 around the time of Iockdown (11th March), although with a high level of uncertainty. Overall, we estimate that countries have managed to reduce their reproduction number. Our estimates have wide credible intervals and contain 1 for countries that have implemented a|| interventions considered in our analysis. This means that the reproduction number may be above or below this value. With current interventions remaining in place to at least the end of March, we estimate that interventions across all 11 countries will have averted 59,000 deaths up to 31 March [95% credible interval 21,000-120,000]. Many more deaths will be averted through ensuring that interventions remain in place until transmission drops to low levels. We estimate that, across all 11 countries between 7 and 43 million individuals have been infected with SARS-CoV-Z up to 28th March, representing between 1.88% and 11.43% ofthe population.</code> | <code>Approximately how many deaths have been averted in Western Europe with current non-pharmaceutical interventions remaining in place until the end of March?</code> |
  | <code>[46] Where the biological samples are taken from also play a role in the sensitivity of these tests. For SARS-CoV and MERS-CoV, specimens collected from the lower respiratory tract such as sputum and tracheal aspirates have higher and more prolonged levels of viral RNA because of the tropism of the virus. MERS-CoV viral loads are also higher for severe cases and have longer viral shedding compared to mild cases. Although upper respiratory tract specimens such as nasopharyngeal or oropharyngeal swabs can be used, they have potentially lower viral loads and may have higher risk of false-negatives among the mild MERS and SARS cases [102, 103] , and likely among the 2019-nCoV cases. The existing practices in detecting genetic material of coronaviruses such as SARS-CoV and MERS-CoV include (a) reverse transcription-polymerase chain reaction (RT-PCR), (b) real-time RT-PCR (rRT-PCR), (c) reverse transcription loop-mediated isothermal amplification (RT-LAMP) and (d) real-time RT-LAMP [104] . Nucleic amplification tests (NAAT) are usually preferred as in the case of MERS-CoV diagnosis as it has the highest sensitivity at the earliest time point in the acute phase of infection [102] . Chinese health authorities have recently posted the full genome of 2019-nCoV in the GenBank and in GISAID portal to facilitate in the detection of the virus [11] . Several laboratory assays have been developed to detect the novel coronavirus in Wuhan, as highlighted in WHO's interim guidance on nCoV laboratory testing of suspected cases.</code> | <code>Why are Nucleic amplification tests (NAAT) usually preferred as in the case of MERS-CoV diagnosis?</code> |
  | <code>By the time symptoms appear in HCPS, both strong antiviral responses, and, for the more virulent viral genotypes, viral RNA can be detected in blood plasma or nucleated blood cells respectively [63, 64] . At least three studies have correlated plasma viral RNA with disease severity for HCPS and HFRS, suggesting that the replication of the virus plays an ongoing and real-time role in viral pathogenesis [65] [66] [67] . Several hallmark pathologic changes have been identified that occur in both HFRS and HCPS. A critical feature of both is a transient (~ 1-5 days) capillary leak involving the kidney and retroperitoneal space in HFRS and the lungs in HCPS. The resulting leakage is exudative in character, with chemical composition high in protein and resembling plasma. The continued experience indicating the strong tissue tropism for endothelial cells, specifically, is among the several factors that make β3 integrin an especially attractive candidate as an important in vivo receptor for hantaviruses. It is likely that hantaviruses arrive at their target tissues through uptake by regional lymph nodes, perhaps with or within an escorting lung histiocyte. The virus seeds local endothelium, where the first few infected cells give rise, ultimately, to a primary viremia, a process that appears to take a long time for hantavirus infections [62, 63] .</code> | <code>Which is an especially attractive candidate as an important in vivo receptor for hantaviruses?</code> |
* Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters:
  ```json
  {
      "loss": "MultipleNegativesRankingLoss",
      "matryoshka_dims": [
          768,
          512,
          256,
          128,
          64
      ],
      "matryoshka_weights": [
          1,
          1,
          1,
          1,
          1
      ],
      "n_dims_per_step": -1
  }
  ```
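
For reference, a minimal sketch of how this loss combination is constructed in sentence-transformers (names match the parameters above):

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.losses import MatryoshkaLoss, MultipleNegativesRankingLoss

model = SentenceTransformer("nomic-ai/nomic-embed-text-v1.5", trust_remote_code=True)

# In-batch negatives ranking loss over (anchor, positive) pairs...
base_loss = MultipleNegativesRankingLoss(model)

# ...applied at every Matryoshka dimension with equal weight, so the
# leading slices of each embedding remain useful on their own.
loss = MatryoshkaLoss(
    model,
    loss=base_loss,
    matryoshka_dims=[768, 512, 256, 128, 64],
    matryoshka_weights=[1, 1, 1, 1, 1],
)
```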

### Training Hyperparameters
#### Non-Default Hyperparameters

- `eval_strategy`: epoch
- `learning_rate`: 2e-05
- `num_train_epochs`: 4
- `lr_scheduler_type`: cosine
- `warmup_ratio`: 0.1
- `bf16`: True
- `tf32`: True
- `load_best_model_at_end`: True
- `optim`: adamw_torch_fused
- `auto_find_batch_size`: True
- `batch_sampler`: no_duplicates

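These map onto `SentenceTransformerTrainingArguments`; a hedged sketch of the corresponding setup (the output path and dataset variable are placeholders, and `save_strategy="epoch"` is assumed since `load_best_model_at_end` requires matching save and eval strategies):

```python
from sentence_transformers import SentenceTransformerTrainer, SentenceTransformerTrainingArguments
from sentence_transformers.training_args import BatchSamplers

args = SentenceTransformerTrainingArguments(
    output_dir="models/nomic-covid-qa-matryoshka",  # placeholder path
    eval_strategy="epoch",
    save_strategy="epoch",  # assumed, to pair with load_best_model_at_end
    learning_rate=2e-5,
    num_train_epochs=4,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    bf16=True,
    tf32=True,
    load_best_model_at_end=True,
    optim="adamw_torch_fused",
    auto_find_batch_size=True,
    batch_sampler=BatchSamplers.NO_DUPLICATES,  # avoid duplicate positives in a batch
)

trainer = SentenceTransformerTrainer(
    model=model,                  # model and loss from the sketch above
    args=args,
    train_dataset=train_dataset,  # placeholder: (anchor, positive) pairs
    loss=loss,
)
trainer.train()
```
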
#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: epoch
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 8
- `per_device_eval_batch_size`: 8
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `learning_rate`: 2e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 4
- `max_steps`: -1
- `lr_scheduler_type`: cosine
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.1
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: True
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: True
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: True
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch_fused
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: True
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `batch_sampler`: no_duplicates
- `multi_dataset_batch_sampler`: proportional

</details>

### Training Logs
| Epoch   | Step    | Training Loss | dim_128_cosine_map@100 | dim_256_cosine_map@100 | dim_512_cosine_map@100 | dim_64_cosine_map@100 | dim_768_cosine_map@100 |
|:-------:|:-------:|:-------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------:|:----------------------:|
| 0.0549  | 10      | 5.6725        | -                      | -                      | -                      | -                     | -                      |
| 0.1099  | 20      | 4.6781        | -                      | -                      | -                      | -                     | -                      |
| 0.1648  | 30      | 3.9597        | -                      | -                      | -                      | -                     | -                      |
| 0.2198  | 40      | 3.2221        | -                      | -                      | -                      | -                     | -                      |
| 0.2747  | 50      | 2.2144        | -                      | -                      | -                      | -                     | -                      |
| 0.3297  | 60      | 2.8916        | -                      | -                      | -                      | -                     | -                      |
| 0.3846  | 70      | 1.7038        | -                      | -                      | -                      | -                     | -                      |
| 0.4396  | 80      | 2.4738        | -                      | -                      | -                      | -                     | -                      |
| 0.4945  | 90      | 1.8951        | -                      | -                      | -                      | -                     | -                      |
| 0.5495  | 100     | 1.515         | -                      | -                      | -                      | -                     | -                      |
| 0.6044  | 110     | 1.5431        | -                      | -                      | -                      | -                     | -                      |
| 0.6593  | 120     | 2.4492        | -                      | -                      | -                      | -                     | -                      |
| 0.7143  | 130     | 1.656         | -                      | -                      | -                      | -                     | -                      |
| 0.7692  | 140     | 1.7953        | -                      | -                      | -                      | -                     | -                      |
| 0.8242  | 150     | 1.8679        | -                      | -                      | -                      | -                     | -                      |
| 0.8791  | 160     | 2.1551        | -                      | -                      | -                      | -                     | -                      |
| 0.9341  | 170     | 1.5363        | -                      | -                      | -                      | -                     | -                      |
| 0.9890  | 180     | 1.2529        | -                      | -                      | -                      | -                     | -                      |
| 1.0     | 182     | -             | 0.3894                 | 0.4585                 | 0.4805                 | 0.3287                | 0.4926                 |
| 1.0440  | 190     | 1.319         | -                      | -                      | -                      | -                     | -                      |
| 1.0989  | 200     | 1.0985        | -                      | -                      | -                      | -                     | -                      |
| 1.1538  | 210     | 1.0403        | -                      | -                      | -                      | -                     | -                      |
| 1.2088  | 220     | 0.4363        | -                      | -                      | -                      | -                     | -                      |
| 1.2637  | 230     | 0.2102        | -                      | -                      | -                      | -                     | -                      |
| 1.3187  | 240     | 0.3584        | -                      | -                      | -                      | -                     | -                      |
| 1.3736  | 250     | 0.2683        | -                      | -                      | -                      | -                     | -                      |
| 1.4286  | 260     | 0.4438        | -                      | -                      | -                      | -                     | -                      |
| 1.4835  | 270     | 0.34          | -                      | -                      | -                      | -                     | -                      |
| 1.5385  | 280     | 0.4296        | -                      | -                      | -                      | -                     | -                      |
| 1.5934  | 290     | 0.2323        | -                      | -                      | -                      | -                     | -                      |
| 1.6484  | 300     | 0.3259        | -                      | -                      | -                      | -                     | -                      |
| 1.7033  | 310     | 0.4339        | -                      | -                      | -                      | -                     | -                      |
| 1.7582  | 320     | 0.1524        | -                      | -                      | -                      | -                     | -                      |
| 1.8132  | 330     | 0.0782        | -                      | -                      | -                      | -                     | -                      |
| 1.8681  | 340     | 0.4306        | -                      | -                      | -                      | -                     | -                      |
| 1.9231  | 350     | 0.312         | -                      | -                      | -                      | -                     | -                      |
| 1.9780  | 360     | 0.2112        | -                      | -                      | -                      | -                     | -                      |
| 2.0     | 364     | -             | 0.4139                 | 0.4526                 | 0.4762                 | 0.3761                | 0.4672                 |
| 2.0330  | 370     | 0.2341        | -                      | -                      | -                      | -                     | -                      |
| 2.0879  | 380     | 0.1965        | -                      | -                      | -                      | -                     | -                      |
| 2.1429  | 390     | 0.3019        | -                      | -                      | -                      | -                     | -                      |
| 2.1978  | 400     | 0.1518        | -                      | -                      | -                      | -                     | -                      |
| 2.2527  | 410     | 0.0203        | -                      | -                      | -                      | -                     | -                      |
| 2.3077  | 420     | 0.0687        | -                      | -                      | -                      | -                     | -                      |
| 2.3626  | 430     | 0.0206        | -                      | -                      | -                      | -                     | -                      |
| 2.4176  | 440     | 0.3615        | -                      | -                      | -                      | -                     | -                      |
| 2.4725  | 450     | 0.4674        | -                      | -                      | -                      | -                     | -                      |
| 2.5275  | 460     | 0.0623        | -                      | -                      | -                      | -                     | -                      |
| 2.5824  | 470     | 0.0222        | -                      | -                      | -                      | -                     | -                      |
| 2.6374  | 480     | 0.1049        | -                      | -                      | -                      | -                     | -                      |
| 2.6923  | 490     | 0.4955        | -                      | -                      | -                      | -                     | -                      |
| 2.7473  | 500     | 0.439         | -                      | -                      | -                      | -                     | -                      |
| 2.8022  | 510     | 0.0052        | -                      | -                      | -                      | -                     | -                      |
| 2.8571  | 520     | 0.16          | -                      | -                      | -                      | -                     | -                      |
| 2.9121  | 530     | 0.0583        | -                      | -                      | -                      | -                     | -                      |
| 2.9670  | 540     | 0.0127        | -                      | -                      | -                      | -                     | -                      |
| **3.0** | **546** | **-**         | **0.4427**             | **0.4765**             | **0.508**              | **0.397**             | **0.5021**             |
| 3.0220  | 550     | 0.0143        | -                      | -                      | -                      | -                     | -                      |
| 3.0769  | 560     | 0.0228        | -                      | -                      | -                      | -                     | -                      |
| 3.1319  | 570     | 0.0704        | -                      | -                      | -                      | -                     | -                      |
| 3.1868  | 580     | 0.0086        | -                      | -                      | -                      | -                     | -                      |
| 3.2418  | 590     | 0.001         | -                      | -                      | -                      | -                     | -                      |
| 3.2967  | 600     | 0.002         | -                      | -                      | -                      | -                     | -                      |
| 3.3516  | 610     | 0.0016        | -                      | -                      | -                      | -                     | -                      |
| 3.4066  | 620     | 0.021         | -                      | -                      | -                      | -                     | -                      |
| 3.4615  | 630     | 0.0013        | -                      | -                      | -                      | -                     | -                      |
| 3.5165  | 640     | 0.0723        | -                      | -                      | -                      | -                     | -                      |
| 3.5714  | 650     | 0.0045        | -                      | -                      | -                      | -                     | -                      |
| 3.6264  | 660     | 0.0048        | -                      | -                      | -                      | -                     | -                      |
| 3.6813  | 670     | 0.1005        | -                      | -                      | -                      | -                     | -                      |
| 3.7363  | 680     | 0.0018        | -                      | -                      | -                      | -                     | -                      |
| 3.7912  | 690     | 0.0101        | -                      | -                      | -                      | -                     | -                      |
| 3.8462  | 700     | 0.0104        | -                      | -                      | -                      | -                     | -                      |
| 3.9011  | 710     | 0.0025        | -                      | -                      | -                      | -                     | -                      |
| 3.9560  | 720     | 0.014         | -                      | -                      | -                      | -                     | -                      |
| 4.0     | 728     | -             | 0.4335                 | 0.4775                 | 0.5000                 | 0.3911                | 0.4877                 |

* The bold row denotes the saved checkpoint.

### Framework Versions
- Python: 3.11.9
- Sentence Transformers: 3.0.1
- Transformers: 4.41.2
- PyTorch: 2.1.2+cu121
- Accelerate: 0.31.0
- Datasets: 2.19.1
- Tokenizers: 0.19.1

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### MatryoshkaLoss
```bibtex
@misc{kusupati2024matryoshka,
    title={Matryoshka Representation Learning},
    author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi},
    year={2024},
    eprint={2205.13147},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```

#### MultipleNegativesRankingLoss
```bibtex
@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json
ADDED
@@ -0,0 +1,58 @@
{
    "_name_or_path": "nomic-ai/nomic-embed-text-v1.5",
    "activation_function": "swiglu",
    "architectures": [
        "NomicBertModel"
    ],
    "attn_pdrop": 0.0,
    "auto_map": {
        "AutoConfig": "nomic-ai/nomic-bert-2048--configuration_hf_nomic_bert.NomicBertConfig",
        "AutoModel": "nomic-ai/nomic-bert-2048--modeling_hf_nomic_bert.NomicBertModel",
        "AutoModelForMaskedLM": "nomic-ai/nomic-bert-2048--modeling_hf_nomic_bert.NomicBertForPreTraining"
    },
    "bos_token_id": null,
    "causal": false,
    "dense_seq_output": true,
    "embd_pdrop": 0.0,
    "eos_token_id": null,
    "fused_bias_fc": true,
    "fused_dropout_add_ln": true,
    "initializer_range": 0.02,
    "layer_norm_epsilon": 1e-12,
    "max_trained_positions": 2048,
    "mlp_fc1_bias": false,
    "mlp_fc2_bias": false,
    "model_type": "nomic_bert",
    "n_embd": 768,
    "n_head": 12,
    "n_inner": 3072,
    "n_layer": 12,
    "n_positions": 8192,
    "pad_vocab_size_multiple": 64,
    "parallel_block": false,
    "parallel_block_tied_norm": false,
    "prenorm": false,
    "qkv_proj_bias": false,
    "reorder_and_upcast_attn": false,
    "resid_pdrop": 0.0,
    "rotary_emb_base": 1000,
    "rotary_emb_fraction": 1.0,
    "rotary_emb_interleaved": false,
    "rotary_emb_scale_base": null,
    "rotary_scaling_factor": null,
    "scale_attn_by_inverse_layer_idx": false,
    "scale_attn_weights": true,
    "summary_activation": null,
    "summary_first_dropout": 0.0,
    "summary_proj_to_labels": true,
    "summary_type": "cls_index",
    "summary_use_proj": true,
    "torch_dtype": "float32",
    "transformers_version": "4.41.2",
    "type_vocab_size": 2,
    "use_cache": true,
    "use_flash_attn": true,
    "use_rms_norm": false,
    "use_xentropy": true,
    "vocab_size": 30528
}
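
The `auto_map` entries above mean the weights load through custom modeling code fetched from `nomic-ai/nomic-bert-2048`. A minimal sketch of loading the backbone directly with 🤗 Transformers (assuming the repo id used in the model card):

```python
from transformers import AutoModel, AutoTokenizer

repo_id = "JerryO3/test"  # repo id as used in the model card

tokenizer = AutoTokenizer.from_pretrained(repo_id)
# trust_remote_code is required because auto_map points at code
# hosted in the nomic-ai/nomic-bert-2048 repository.
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
```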
config_sentence_transformers.json
ADDED
@@ -0,0 +1,10 @@
{
    "__version__": {
        "sentence_transformers": "3.0.1",
        "transformers": "4.41.2",
        "pytorch": "2.1.2+cu121"
    },
    "prompts": {},
    "default_prompt_name": null,
    "similarity_fn_name": null
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:69b401c0bd1af2787fde09a704dff413492db2ef957e42bf62faf6bd36ae6636
size 546938168
modules.json
ADDED
@@ -0,0 +1,14 @@
[
    {
        "idx": 0,
        "name": "0",
        "path": "",
        "type": "sentence_transformers.models.Transformer"
    },
    {
        "idx": 1,
        "name": "1",
        "path": "1_Pooling",
        "type": "sentence_transformers.models.Pooling"
    }
]
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
    "max_seq_length": 8192,
    "do_lower_case": false
}
special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
{
    "cls_token": {
        "content": "[CLS]",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "mask_token": {
        "content": "[MASK]",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "pad_token": {
        "content": "[PAD]",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "sep_token": {
        "content": "[SEP]",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "unk_token": {
        "content": "[UNK]",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
{
    "added_tokens_decoder": {
        "0": {
            "content": "[PAD]",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "100": {
            "content": "[UNK]",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "101": {
            "content": "[CLS]",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "102": {
            "content": "[SEP]",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "103": {
            "content": "[MASK]",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        }
    },
    "clean_up_tokenization_spaces": true,
    "cls_token": "[CLS]",
    "do_lower_case": true,
    "mask_token": "[MASK]",
    "model_max_length": 8192,
    "pad_token": "[PAD]",
    "sep_token": "[SEP]",
    "strip_accents": null,
    "tokenize_chinese_chars": true,
    "tokenizer_class": "BertTokenizer",
    "unk_token": "[UNK]"
}
vocab.txt
ADDED
The diff for this file is too large to render.