Commit 9cbb1a6 (verified) · Parent(s): none

Initial commit.

Files changed:
- .gitattributes +36 -0
- 1_Dense/config.json +1 -0
- 1_Dense/model.safetensors +3 -0
- README.md +440 -0
- added_tokens.json +4 -0
- config.json +24 -0
- config_sentence_transformers.json +49 -0
- model.safetensors +3 -0
- modules.json +14 -0
- scatter_plot_performance_size_15m.png +3 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +31 -0
- tokenizer.json +0 -0
- tokenizer_config.json +81 -0
- vocab.txt +0 -0
.gitattributes
ADDED
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
scatter_plot_performance_size_15m.png filter=lfs diff=lfs merge=lfs -text
1_Dense/config.json
ADDED
@@ -0,0 +1 @@
{"in_features": 288, "out_features": 128, "bias": false, "activation_function": "torch.nn.modules.linear.Identity"}
1_Dense/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:09e734ef2f37bb5be93d33d4f037aa31089ded5eccc0666228b743ba58789729
size 147544
README.md
ADDED
@@ -0,0 +1,440 @@
---
tags:
- ColBERT
- PyLate
- sentence-transformers
- sentence-similarity
- feature-extraction
- multilingual
- late-interaction
- retrieval
- bright
- loss:Distillation
pipeline_tag: sentence-similarity
library_name: PyLate
license: apache-2.0
base_model:
- DavidGF/SauerkrautLM-Multi-ColBERT-15m
---
<img src="https://vago-solutions.ai/wp-content/uploads/2025/08/SauerkrautLM-Reason-Multi-ColBERT-15m.png" width="500" height="auto">

## SauerkrautLM-Reason-Multi-ColBERT

This model is an ultra-compact Late Interaction retriever that leverages:

**Knowledge Distillation** from strong synthetic data (200k samples generated with Qwen/Qwen3-32B-AWQ and scored by a high-performing reranker).
**Extreme compression** with just 15M parameters – optimized for edge deployment and resource-constrained environments.

### 🎯 Core Features and Innovations:

- **Next-Generation Knowledge Distillation**: Trained on 200,000 synthetically generated, high-quality examples (created with `Qwen/Qwen3-32B-AWQ` and scored by a state-of-the-art reranker).

- **Ultra-Efficient Architecture**: Extreme parameter compression to just 15M enables deployment on edge devices and mobile platforms.

### 💪 The Ant Among Giants: Tiny but Effective

With **15 million parameters** – that's **less than 1/500th the size** of some competing models – SauerkrautLM-Reason-Multi-ColBERT-15m represents the extreme frontier of model compression:
- **500× smaller** than 7B+ parameter models
- **10× smaller** than typical BERT models
- Comparable to **SBERT-scale** encoders in size

What makes SauerkrautLM-Multi-ColBERT-15m truly remarkable is not just its tiny size, but its **exceptional performance in scientific and technical domains**:

#### Domain-Specific Strengths:
- **Scientific Retrieval Champion**: In Biology it scores 23.33, **surpassing E5 (1B parameters)** and several proprietary APIs, and its Earth Science score of 23.78 stays close to far larger models
- **Technical Q&A Powerhouse**: Mean StackExchange score of 14.64 puts it in the same league as models 10-100× larger
- **🐴 PONY TASK SUPERSTAR**: Ranks **#2 globally** with 15.86, only behind GritLM-7B (22.00), crushing ReasonIR-8B (10.50) and Qwen-7B (9.90) - a 15M model beating 8-billion-parameter models!
- **STEM-Optimized**: The knowledge distillation process has successfully preserved critical scientific reasoning capabilities

#### BRIGHT Performance Comparisons:
| Domain | SauerkrautLM-15M | BGE | E5 (1B) | ReasonIR-8B | Qwen-7B | Performance |
|--------|------------------|-----|---------|-------------|---------|-------------|
| Biology | **23.33** | 11.70 | 18.60 | 26.20 | 30.60 | **2× BGE, 125% of E5** |
| Earth Science | **23.78** | 24.60 | 26.00 | 31.40 | 36.40 | **97% of BGE, 91% of E5** |
| Mean StackEx | **14.64** | 15.17 | 17.36 | 24.76 | 22.80 | **96% of BGE, 84% of E5** |
| **🐴 Pony** | **15.86** | 5.70 | 4.90 | 10.50 | 9.90 | **#2 overall! 278% of BGE, 151% of ReasonIR-8B, 160% of Qwen-7B!** |


## Model Overview

**Model:** `VAGOsolutions/SauerkrautLM-Reason-Multi-ColBERT-15m`\
**Base:** Fine-tuned from `VAGOsolutions/SauerkrautLM-Multi-ColBERT-15m` using knowledge distillation\
**Architecture:** PyLate / ColBERT (Late Interaction)\
**Languages:** Multilingual (optimized for 7 European languages: German, English, Spanish, French, Italian, Dutch, Portuguese)\
**License:** Apache 2.0\
**Model Size:** 15M parameters\
**Efficiency Ratio:** Up to **500× smaller** than comparably performing models

### Model Description
- **Model Type:** PyLate model with innovative Late Interaction architecture
- **Document Length:** 2048 tokens (4× the 512-token limit of traditional BERT models)
- **Query Length:** 256 tokens (optimized for complex, multi-part queries)
- **Output Dimensionality:** 128 dimensions per token (efficient vector representation)
- **Similarity Function:** MaxSim (enables precise token-level matching)
- **Training Loss:** Knowledge Distillation (PyLate) - learns from models 2000× larger

### Architecture

```
ColBERT(
  (0): Transformer(CompressedModernBertModel)
  (1): Dense(288 -> 128 dim, no bias)
)
```
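
To make MaxSim concrete, here is a minimal late-interaction scoring sketch in plain PyTorch (illustrative only; PyLate implements this internally, and the tensor shapes below are made-up examples):

```python
import torch

def maxsim_score(query_emb: torch.Tensor, doc_emb: torch.Tensor) -> torch.Tensor:
    """Late interaction: every query token finds its best-matching document
    token, and these per-token maxima are summed over the query."""
    # query_emb: (num_query_tokens, 128), doc_emb: (num_doc_tokens, 128)
    similarity = query_emb @ doc_emb.T          # all token-pair similarities
    return similarity.max(dim=1).values.sum()   # MaxSim

# Unit-normalized random embeddings stand in for real model output here.
q = torch.nn.functional.normalize(torch.randn(16, 128), dim=-1)
d = torch.nn.functional.normalize(torch.randn(180, 128), dim=-1)
print(maxsim_score(q, d))
```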

## 🔬 Technical Innovations in Detail

### Knowledge Distillation: David Learning from Goliath

Our 15M parameter model leverages state-of-the-art knowledge distillation (a sketch of the objective follows the steps below):

1. **Synthetic Data Generation**: 200,000 high-quality query-document pairs generated using the `Qwen/Qwen3-32B-AWQ` model (32 billion parameters) based on the [ReasonIR approach](https://huggingface.co/datasets/reasonir/reasonir-data)
2. **Quality Assurance**: Each pair evaluated and filtered by a state-of-the-art reranker

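A rough illustration of the distillation objective (a generic sketch of the idea, not PyLate's exact `Distillation` loss): the student's MaxSim scores over a query's candidate documents are pulled toward the teacher reranker's score distribution.

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_scores: torch.Tensor,
                      teacher_scores: torch.Tensor) -> torch.Tensor:
    # student_scores / teacher_scores: (batch, num_candidate_docs).
    # KL divergence from the teacher's score distribution to the student's.
    return F.kl_div(
        F.log_softmax(student_scores, dim=-1),
        F.softmax(teacher_scores, dim=-1),
        reduction="batchmean",
    )

student = torch.randn(8, 32)  # stand-in MaxSim scores for 32 candidates
teacher = torch.randn(8, 32)  # stand-in reranker scores for the same candidates
print(distillation_loss(student, teacher))
```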

### Ultra-Compact Design

SauerkrautLM-Reason-Multi-ColBERT-15m achieves extreme efficiency through:

- Ultra-compact architecture (~15M params)
- Deep-yet-slim BERT: 10 layers, hidden_size = 288
- Many heads: 12 attention heads (24-dim each) for fine-grained reasoning in a narrow model
- Edge-ready: small footprint

This architecture enables Late Interaction retrieval on mobile devices (or even on calculators :D).
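
A back-of-the-envelope check of that parameter budget, using the values from `config.json` in this commit (a sketch; it ignores biases and LayerNorm terms, which add little):

```python
vocab, hidden, ffn, layers, max_pos, out_dim = 30524, 288, 432, 10, 2048, 128

embeddings = vocab * hidden + max_pos * hidden + 2 * hidden  # word + position + type
per_layer = 4 * hidden * hidden + 2 * hidden * ffn           # attention + FFN weights
dense_head = hidden * out_dim                                # the 1_Dense projection

total = embeddings + layers * per_layer + dense_head
print(f"~{total / 1e6:.1f}M parameters")  # ≈ 15.2M, matching the model card
```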

---

## 🔬 Benchmarks: The Ant's Performance

Despite its microscopic size, SauerkrautLM-Reason-Multi-ColBERT-15m delivers functional retrieval performance, particularly impressive given its **500× size disadvantage** compared to state-of-the-art models.

### BRIGHT Benchmark (English, reasoning‑focused retrieval)

The [BRIGHT benchmark](https://huggingface.co/datasets/xlangai/BRIGHT) is designed to evaluate **reasoning‑intensive retrieval**. All scores are nDCG@10. SauerkrautLM-Reason-Multi-ColBERT (15M parameters) demonstrates that extreme compression is possible while maintaining basic retrieval functionality.

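For reference, nDCG@10 rewards placing relevant documents near the top of the first 10 results; a simplified sketch of the metric (here the ideal ranking is computed from the retrieved list only, which real evaluations refine):

```python
import math

def ndcg_at_10(relevances: list[float]) -> float:
    """relevances: graded relevance of retrieved documents, in rank order."""
    dcg = sum(rel / math.log2(rank + 2) for rank, rel in enumerate(relevances[:10]))
    ideal = sorted(relevances, reverse=True)[:10]
    idcg = sum(rel / math.log2(rank + 2) for rank, rel in enumerate(ideal))
    return dcg / idcg if idcg > 0 else 0.0

# Two relevant documents, retrieved at ranks 1 and 3.
print(ndcg_at_10([1, 0, 1, 0, 0, 0, 0, 0, 0, 0]))  # ≈ 0.92
```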
| Model / Metric | Biology | Earth | Economics | Psychology | Robotics | Stackoverflow | Sustainable | Leetcode | Pony | AoPS | Theorem‑Q | Theorem‑T | Mean StackEx | Mean coding | Mean theorem | Full Mean |
| ---------------------------------------- | --------- | --------- | --------- | ---------- | -------- | ------------- | ----------- | --------- | --------- | --------- | --------- | --------- | ------------ | ----------- | ------------ | --------- |
| **BM25** | 18.90 | 27.20 | 14.90 | 12.50 | 13.60 | 18.40 | 15.00 | 24.40 | 7.90 | 6.20 | 10.40 | 4.90 | 17.21 | 16.15 | 7.17 | 14.53 |
| **< 1 B OS** | | | | | | | | | | | | | | | | |
| BGE | 11.70 | 24.60 | 16.60 | 17.50 | 11.70 | 10.80 | 13.30 | 26.70 | 5.70 | 6.00 | 13.00 | 6.90 | 15.17 | 16.20 | 8.63 | 13.71 |
| Inst‑L | 15.20 | 21.20 | 14.70 | 22.30 | 11.40 | 13.30 | 13.50 | 19.50 | 1.30 | 8.10 | 20.90 | 9.10 | 15.94 | 10.40 | 12.70 | 14.21 |
| SBERT | 15.10 | 20.40 | 16.60 | 22.70 | 8.20 | 11.00 | 15.30 | 26.40 | 7.00 | 5.30 | 20.00 | 10.80 | 15.61 | 16.70 | 12.03 | 14.90 |
| **> 1 B OS** | | | | | | | | | | | | | | | | |
| E5 | 18.60 | 26.00 | 15.50 | 15.80 | 16.30 | 11.20 | 18.10 | 28.70 | 4.90 | 7.10 | 26.10 | 26.80 | 17.36 | 16.80 | 20.00 | 17.93 |
| SFR | 19.10 | 26.70 | 17.80 | 19.00 | 16.30 | 14.40 | 19.20 | 27.40 | 2.00 | 7.40 | 24.30 | 26.00 | 18.93 | 14.70 | 19.23 | 18.30 |
| Inst‑XL | 21.60 | 34.30 | 22.40 | 27.40 | 18.20 | 21.20 | 19.10 | 27.50 | 5.00 | 8.50 | 15.60 | 5.90 | 23.46 | 16.25 | 10.00 | 18.89 |
| GritLM | 24.80 | 32.30 | 18.90 | 19.80 | 17.10 | 13.60 | 17.80 | 29.90 | 22.00 | 8.80 | 25.20 | 21.20 | 20.61 | 25.95 | 18.40 | 20.95 |
| Qwen | 30.60 | 36.40 | 17.80 | 24.60 | 13.20 | 22.20 | 14.80 | 25.50 | 9.90 | 14.40 | 27.80 | 32.90 | 22.80 | 17.70 | 25.03 | **22.51** |
| **Proprietary** | | | | | | | | | | | | | | | | |
| Cohere | 18.70 | 28.40 | 20.40 | 21.60 | 16.30 | 18.30 | 17.60 | 26.80 | 1.90 | 6.30 | 15.70 | 7.20 | 20.19 | 14.35 | 9.73 | 16.60 |
| OpenAI | 23.30 | 26.70 | 19.50 | 27.60 | 12.80 | 14.30 | 20.50 | 23.60 | 2.40 | 8.50 | 23.50 | 11.70 | 20.67 | 13.00 | 14.57 | 17.87 |
| Voyage | 23.10 | 25.40 | 19.90 | 24.90 | 10.80 | 16.80 | 15.40 | 30.60 | 1.50 | 7.50 | 27.40 | 11.60 | 19.47 | 16.05 | 15.50 | 17.91 |
| Google | 22.70 | 34.80 | 19.60 | 27.80 | 15.70 | 20.10 | 17.10 | 29.60 | 3.60 | 9.30 | 23.80 | 15.90 | 22.54 | 16.60 | 16.33 | 20.00 |
| **ReasonIR data** | | | | | | | | | | | | | | | | |
| ReasonIR‑8B | 26.20 | 31.40 | 23.30 | 30.00 | 18.00 | 23.90 | 20.50 | 35.00 | 10.50 | 14.70 | 31.90 | 27.20 | 24.76 | 22.75 | 24.60 | **24.38** |
| Reason‑ModernColBERT (149 M), reported | 33.25 | 41.02 | 24.93 | 30.73 | 21.12 | 20.62 | 20.31 | 31.07 | 8.51 | 9.17 | 19.51 | 11.24 | 27.43 | 19.79 | 15.38 | **22.62** |
| Reason‑ModernColBERT (149 M), our eval | 34.28 | 41.53 | 19.96 | 27.02 | 21.15 | 23.62 | 17.21 | 26.61 | 1.32 | 7.30 | 19.79 | 9.70 | 27.93 | 13.97 | 12.26 | 20.79 |
| **SauerkrautLM Reasoning data** | | | | | | | | | | | | | | | | |
| SauerkrautLM-Multi-Reason-ModernColBERT (149 M) | 36.92 | **45.53** | **19.47** | **27.04** | **19.35** | **25.31** | **20.78** | **29.74** | 12.54 | 10.52 | **14.62** | **7.65** | **28.94** | 21.14 | **10.93** | **22.45** |
| SauerkrautLM‑Reason‑EuroColBERT (210 M) | **38.16** | 39.43 | 16.99 | 24.49 | 17.50 | 17.60 | 20.72 | 29.10 | **13.57** | **12.04** | 10.43 | 4.95 | 25.70 | **21.33** | 9.14 | 20.42 |
| **SauerkrautLM‑Reason‑Multi‑ColBERT-15m (15 M)** | 23.33 | 23.78 | 10.53 | 9.03 | 10.28 | 10.88 | 13.13 | 18.10 | **15.86** | 1.75 | 4.29 | 0.81 | 14.64 | 16.98 | 2.28 | 11.81 |

**Remarkable Performance Highlights:**
- **Exceptional in Scientific Domains**: Achieves **23.33** (Biology) and **23.78** (Earth Science) - outperforming many models 10-100× its size
- **Strong StackExchange Performance**: With **14.64** Mean StackEx, it matches or exceeds BGE and approaches SBERT-level performance
- **🐴 PONY CHAMPION**: With **15.86**, it is the **2nd-best model overall** on Pony, surpassing ReasonIR-8B (10.50) and Qwen-7B (9.90) and trailing only GritLM-7B (22.00) - a 15M model outperforming 8-billion-parameter models!
- **Efficiency Champion**: At just 15M parameters, it delivers **81% of SBERT's performance** in scientific domains while being deployable on smartwatches


#### ⚖️ Relative Efficiency

With just **15M parameters**, SauerkrautLM-Reason-Multi-ColBERT not only proves that functional retrieval is possible at extreme compression ratios but excels in scientific domains:
- **Beats E5 (1B)** in Biology (23.33 vs 18.60) despite being **67× smaller**
- **Matches proprietary APIs** like Cohere in scientific retrieval while being **1000× more efficient**
- **Outperforms BGE** across multiple scientific categories despite a similar parameter count
- **Keeps pace with much larger baselines** while being deployable on edge devices

### BRIGHT Benchmark (German, reasoning‑focused retrieval)

All scores are nDCG@10.

| Model / Metric | Biology | Earth | Economics | Psychology | Robotics | Stackoverflow | Sustainable | Leetcode | Pony | AoPS | Theorem‑Q | Theorem‑T | Mean StackEx | Mean coding | Mean theorem | Full Mean |
| --------------------------------------------------- | --------- | --------- | --------- | ---------- | --------- | ------------- | ----------- | --------- | --------- | -------- | --------- | --------- | ------------ | ----------- | ------------ | --------- |
| SauerkrautLM‑Multi‑Reason‑ModernColBERT (149 M) | 28.00 | **34.71** | **12.90** | 17.98 | **13.67** | **19.64** | 17.70 | 11.66 | **15.49** | 7.27 | 6.76 | 1.32 | **21.15** | 13.57 | 5.11 | 15.59 |
| SauerkrautLM‑Reason‑EuroColBERT (210 M) | **31.09** | 31.48 | 11.95 | **18.39** | 11.25 | 14.43 | **20.26** | **25.67** | 12.15 | **9.58** | **8.15** | **2.76** | 19.76 | **18.91** | **6.83** | **16.43** |
| **SauerkrautLM‑Reason‑Multi‑ColBERT-15m (15 M)** | 15.37 | 20.11 | 7.36 | 7.07 | 4.24 | 4.71 | 7.67 | 0.77 | 6.31 | 3.81 | 0.76 | 0.00 | 9.81 | 3.54 | 1.52 | 6.51 |

> **Observation:** The 15M model, while significantly smaller, maintains functional performance, particularly on StackExchange tasks (9.81), demonstrating effective knowledge distillation for specific domains.

---

### NanoBEIR Europe (multilingual retrieval)

Average nDCG@10 across the seven languages we evaluated:

| Language | nDCG@10 |
| -------- | -------- |
| de | 32.98 |
| en | 50.94 |
| es | 35.30 |
| fr | 33.85 |
| it | 34.25 |
| nl | 32.59 |
| pt | 33.76 |


---

### Why SauerkrautLM-Multi-ColBERT-15m Matters for Edge Deployment

- **Revolutionary Size**: At just **15M parameters**, it enables late interaction retrieval on smartphones, IoT devices, and embedded systems
- **Battery-Friendly**: Ultra-low power consumption makes it ideal for always-on applications
- **Privacy-First**: Enables on-device search without sending data to the cloud
- **Rapid Inference**: Fast query times on modern mobile processors
- **Multilingual Coverage**: Maintains support for 7 European languages despite extreme compression

This model opens entirely new deployment scenarios that were previously impossible with traditional retrieval models.

Below is a **scatter plot** that visualises model size (millions of parameters) against BRIGHT Full‑Mean nDCG@10. SauerkrautLM-Multi-ColBERT-15m occupies a unique position as the smallest functional neural retriever, on par with SBERT (110M), Inst‑L (335M), and BGE (bge-large-en-v1.5).
<img src="https://vago-solutions.ai/wp-content/uploads/2025/08/Image-graph-1.jpeg">


### Real-World Impact

The extreme efficiency enables revolutionary applications:

1. **Mobile Search**: late interaction retrieval directly on smartphones, without cloud connectivity
2. **IoT Integration**: smart home devices with semantic search capabilities
3. **Offline First**: full retrieval functionality without internet access
4. **Privacy Preservation**: sensitive document search stays on-device
5. **Massive Deployment**: deploy on millions of edge devices at negligible cost

## 📈 Summary: Pushing the Boundaries of Compression

SauerkrautLM-Reason-Multi-ColBERT represents the extreme frontier of model compression in late interaction retrieval. By compressing a ColBERT model to just 15M parameters, we've created a model that:

- **Achieves functional retrieval performance** despite being 500× smaller than SOTA models
- **Enables entirely new deployment scenarios** on edge devices and mobile platforms
- **Maintains multilingual support** across 7 European languages
- **Delivers fast inference** on resource-constrained hardware
- **Opens late interaction retrieval to billions of devices** previously unable to run such models

This model proves that extreme compression is possible while maintaining the core benefits of late interaction retrieval, democratizing access to semantic search technology.

---

# PyLate

This is a [PyLate](https://github.com/lightonai/pylate) model. It maps sentences and paragraphs to sequences of 128-dimensional dense vectors and can be used for semantic textual similarity using the MaxSim operator.


## Usage
First install the PyLate library:

```bash
pip install -U pylate
```

### Retrieval

PyLate provides a streamlined interface to index and retrieve documents using ColBERT models. The index leverages the Voyager HNSW index to efficiently handle document embeddings and enable fast retrieval.

#### Indexing documents

First, load the ColBERT model and initialize the Voyager index, then encode and index your documents:

```python
from pylate import indexes, models, retrieve

# Step 1: Load the ColBERT model
model = models.ColBERT(
    model_name_or_path="VAGOsolutions/SauerkrautLM-Reason-Multi-ColBERT-15m",
)

# Step 2: Initialize the Voyager index
index = indexes.Voyager(
    index_folder="pylate-index",
    index_name="index",
    override=True,  # This overwrites the existing index if any
)

# Step 3: Encode the documents
documents_ids = ["1", "2", "3"]
documents = ["document 1 text", "document 2 text", "document 3 text"]

documents_embeddings = model.encode(
    documents,
    batch_size=32,
    is_query=False,  # Ensure this is set to False to indicate that these are documents, not queries
    show_progress_bar=True,
)

# Step 4: Add document embeddings to the index by providing embeddings and corresponding ids
index.add_documents(
    documents_ids=documents_ids,
    documents_embeddings=documents_embeddings,
)
```

Note that you do not have to recreate the index and encode the documents every time. Once you have created an index and added the documents, you can reuse the index later by loading it:

```python
# To load an index, simply instantiate it with the correct folder/name and without overriding it
index = indexes.Voyager(
    index_folder="pylate-index",
    index_name="index",
)
```

#### Retrieving top-k documents for queries

Once the documents are indexed, you can retrieve the top-k most relevant documents for a given set of queries.
To do so, initialize the ColBERT retriever with the index you want to search in, encode the queries, and then retrieve the top-k documents to get the top matching ids and relevance scores:

```python
# Step 1: Initialize the ColBERT retriever
retriever = retrieve.ColBERT(index=index)

# Step 2: Encode the queries
queries_embeddings = model.encode(
    ["query for document 3", "query for document 1"],
    batch_size=32,
    is_query=True,  # Ensure this is set to True to indicate that these are queries
    show_progress_bar=True,
)

# Step 3: Retrieve top-k documents
scores = retriever.retrieve(
    queries_embeddings=queries_embeddings,
    k=10,  # Retrieve the top 10 matches for each query
)
```
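
A quick way to inspect what comes back (a sketch; the exact structure of each result entry, e.g. its id/score fields, may vary by PyLate version, so verify against your installation):

```python
# Each query is expected to yield up to k results, ordered by
# descending MaxSim relevance.
for query_results in scores:
    print(query_results)
```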

### Reranking
If you only want to use the ColBERT model to perform reranking on top of your first-stage retrieval pipeline without building an index, you can simply use the `rank.rerank` function and pass the queries and documents to rerank:

```python
from pylate import rank, models

queries = [
    "query A",
    "query B",
]

documents = [
    ["document A", "document B"],
    ["document 1", "document C", "document B"],
]

documents_ids = [
    [1, 2],
    [1, 3, 2],
]

model = models.ColBERT(
    model_name_or_path="VAGOsolutions/SauerkrautLM-Reason-Multi-ColBERT-15m",
)

queries_embeddings = model.encode(
    queries,
    is_query=True,
)

documents_embeddings = model.encode(
    documents,
    is_query=False,
)

reranked_documents = rank.rerank(
    documents_ids=documents_ids,
    queries_embeddings=queries_embeddings,
    documents_embeddings=documents_embeddings,
)
```
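
As with retrieval, the returned ranking is easiest to understand by printing it (a sketch; the per-query result format is an assumption to verify against your PyLate version):

```python
# One ranked candidate list per query, best match first.
for query, ranking in zip(queries, reranked_documents):
    print(query, "->", ranking)
```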
## Citation

### BibTeX

#### SauerkrautLM‑Reason‑Multi‑ColBERT-15m

```bibtex
@misc{SauerkrautLM-Reason-Multi-ColBERT-15m,
  title={SauerkrautLM-Reason-Multi-ColBERT-15m},
  author={David Golchinfar},
  url={https://huggingface.co/VAGOsolutions/SauerkrautLM-Reason-Multi-ColBERT-15m},
  year={2025}
}
```

#### Sentence Transformers

```bibtex
@inproceedings{reimers-2019-sentence-bert,
  title = {Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks},
  author = {Reimers, Nils and Gurevych, Iryna},
  booktitle = {Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing},
  month = {11},
  year = {2019},
  publisher = {Association for Computational Linguistics},
  url = {https://arxiv.org/abs/1908.10084}
}
```

#### PyLate

```bibtex
@misc{PyLate,
  title={PyLate: Flexible Training and Retrieval for Late Interaction Models},
  author={Chaffin, Antoine and Sourty, Raphaël},
  url={https://github.com/lightonai/pylate},
  year={2024}
}
```

## Acknowledgements
We thank Antoine Chaffin (LightOn AI) for helpful discussions and for clarifying evaluation settings for Reason‑ModernColBERT, and the PyLate team for providing the training framework that made this work possible.

added_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "[D] ": 30523,
  "[Q] ": 30522
}
config.json
ADDED
@@ -0,0 +1,24 @@
{
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 288,
  "initializer_range": 0.02,
  "intermediate_size": 432,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 2048,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 10,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.51.1",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30524
}
config_sentence_transformers.json
ADDED
@@ -0,0 +1,49 @@
{
  "__version__": {
    "sentence_transformers": "4.1.0",
    "transformers": "4.51.1",
    "pytorch": "2.8.0.dev20250319+cu128"
  },
  "prompts": {},
  "default_prompt_name": null,
  "similarity_fn_name": "MaxSim",
  "query_prefix": "[Q] ",
  "document_prefix": "[D] ",
  "query_length": 256,
  "document_length": 2048,
  "attend_to_expansion_tokens": false,
  "skiplist_words": [
    "!",
    "\"",
    "#",
    "$",
    "%",
    "&",
    "'",
    "(",
    ")",
    "*",
    "+",
    ",",
    "-",
    ".",
    "/",
    ":",
    ";",
    "<",
    "=",
    ">",
    "?",
    "@",
    "[",
    "\\",
    "]",
    "^",
    "_",
    "`",
    "{",
    "|",
    "}",
    "~"
  ]
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7ab23082b4e76a66ad920522a76fb9d916ac535c631011aab2e47e1103970676
size 61224144
modules.json
ADDED
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Dense",
    "type": "pylate.models.Dense.Dense"
  }
]
scatter_plot_performance_size_15m.png
ADDED
(binary image, stored via Git LFS)
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 255,
  "do_lower_case": false
}
special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "[MASK]",
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
(diff too large to render; see the raw file)
tokenizer_config.json
ADDED
@@ -0,0 +1,81 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "30522": {
      "content": "[Q] ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "30523": {
      "content": "[D] ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "max_length": 299,
  "model_max_length": 299,
  "never_split": null,
  "pad_to_multiple_of": null,
  "pad_token": "[MASK]",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "sep_token": "[SEP]",
  "stride": 0,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": "[UNK]"
}
vocab.txt
ADDED
(diff too large to render; see the raw file)