Muennighoff committed
Commit: ce464d6
Parent(s): 5c0da46
Scheduled Commit
data/clustering_individual-6da8b2cf-9395-4671-ad9c-18b0374353dc.jsonl
ADDED
@@ -0,0 +1,10 @@
{"tstamp": 1722349107.6275, "task_type": "clustering", "type": "chat", "model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "gen_params": {}, "start": 1722349094.4021, "finish": 1722349107.6275, "ip": "", "conv_id": "71fb84eb6d184d7696d7580d67aca12e", "model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "prompt": ["espresso", "macchiato", "latte", "cappuccino", "mocha", "Beagle", "Golden Retriever", "Incan", "Mayan", "Chinese", "Egyptian", "Mesopotamian", "Roman", "Greek"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722349107.6275, "task_type": "clustering", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1722349094.4021, "finish": 1722349107.6275, "ip": "", "conv_id": "b004b194eec141559d16af96f8bf5b4c", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": ["espresso", "macchiato", "latte", "cappuccino", "mocha", "Beagle", "Golden Retriever", "Incan", "Mayan", "Chinese", "Egyptian", "Mesopotamian", "Roman", "Greek"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722349123.0737, "task_type": "clustering", "type": "chat", "model": "text-embedding-004", "gen_params": {}, "start": 1722349122.1219, "finish": 1722349123.0737, "ip": "", "conv_id": "f35ac634a0a64ee992c7c4f5f7529f35", "model_name": "text-embedding-004", "prompt": ["claustrophobia", "nyctophobia", "Egyptian", "Norse", "Greek", "Roman", "Chinese", "Celtic", "JavaScript", "Java", "Ruby", "C++", "Swift", "Go"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722349123.0737, "task_type": "clustering", "type": "chat", "model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "gen_params": {}, "start": 1722349122.1219, "finish": 1722349123.0737, "ip": "", "conv_id": "36ac0867440f40bcbe842096ad5524e8", "model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "prompt": ["claustrophobia", "nyctophobia", "Egyptian", "Norse", "Greek", "Roman", "Chinese", "Celtic", "JavaScript", "Java", "Ruby", "C++", "Swift", "Go"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722349129.359, "task_type": "clustering", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1722349128.1893, "finish": 1722349129.359, "ip": "", "conv_id": "38fa4543d8af44f8b2ab25266a87b17f", "model_name": "BAAI/bge-large-en-v1.5", "prompt": ["acrylic", "polypropylene", "PVC", "polyethylene", "polystyrene", "nylon", "boots", "high heels", "flats", "Bulldog", "Golden Retriever", "Beagle", "German Shepherd", "Labrador", "motor", "Purkinje", "romance", "mystery", "fantasy", "historical fiction", "science fiction", "thriller", "horror"], "ncluster": 5, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722349129.359, "task_type": "clustering", "type": "chat", "model": "text-embedding-3-large", "gen_params": {}, "start": 1722349128.1893, "finish": 1722349129.359, "ip": "", "conv_id": "42ab109889cc4445bdb08e6395c8566d", "model_name": "text-embedding-3-large", "prompt": ["acrylic", "polypropylene", "PVC", "polyethylene", "polystyrene", "nylon", "boots", "high heels", "flats", "Bulldog", "Golden Retriever", "Beagle", "German Shepherd", "Labrador", "motor", "Purkinje", "romance", "mystery", "fantasy", "historical fiction", "science fiction", "thriller", "horror"], "ncluster": 5, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722349169.5485, "task_type": "clustering", "type": "chat", "model": "sentence-transformers/all-MiniLM-L6-v2", "gen_params": {}, "start": 1722349169.4426, "finish": 1722349169.5485, "ip": "", "conv_id": "8046b20875784ed0bf981492978f17c2", "model_name": "sentence-transformers/all-MiniLM-L6-v2", "prompt": ["muay thai", "judo", "taekwondo", "Samsung", "Apple", "LG", "Google", "Huawei", "purple", "red", "yellow", "green", "Firefox", "Edge", "Chrome", "interneuron", "sensory", "pyramidal", "Purkinje"], "ncluster": 5, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722349169.5485, "task_type": "clustering", "type": "chat", "model": "Salesforce/SFR-Embedding-2_R", "gen_params": {}, "start": 1722349169.4426, "finish": 1722349169.5485, "ip": "", "conv_id": "99e9400aaaac4a2baa4bb0d24c60bb1a", "model_name": "Salesforce/SFR-Embedding-2_R", "prompt": ["muay thai", "judo", "taekwondo", "Samsung", "Apple", "LG", "Google", "Huawei", "purple", "red", "yellow", "green", "Firefox", "Edge", "Chrome", "interneuron", "sensory", "pyramidal", "Purkinje"], "ncluster": 5, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722349240.854, "task_type": "clustering", "type": "chat", "model": "embed-english-v3.0", "gen_params": {}, "start": 1722349238.8303, "finish": 1722349240.854, "ip": "", "conv_id": "cb406473f5e94c34b3e2d96012cb6192", "model_name": "embed-english-v3.0", "prompt": ["mangrove", "cloud", "temperate", "boreal", "tropical", "water filter", "camping stove", "tent", "Statue of Zeus", "Lighthouse of Alexandria", "Colossus of Rhodes", "Temple of Artemis", "Mausoleum at Halicarnassus"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722349240.854, "task_type": "clustering", "type": "chat", "model": "Salesforce/SFR-Embedding-2_R", "gen_params": {}, "start": 1722349238.8303, "finish": 1722349240.854, "ip": "", "conv_id": "22250beed1294272b8106fb48630e1ec", "model_name": "Salesforce/SFR-Embedding-2_R", "prompt": ["mangrove", "cloud", "temperate", "boreal", "tropical", "water filter", "camping stove", "tent", "Statue of Zeus", "Lighthouse of Alexandria", "Colossus of Rhodes", "Temple of Artemis", "Mausoleum at Halicarnassus"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
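Each record above is one half of a side-by-side clustering comparison: the input strings (`prompt`), the requested number of clusters (`ncluster`), and the projection and clustering settings (`dim_method`, `clustering_method`, `ndim`). A minimal sketch of how one record could be replayed offline, assuming sentence-transformers and scikit-learn; the pipeline is inferred from the record fields, not taken from the arena's serving code:

```python
# Replay one clustering record: embed the prompts, project with PCA,
# cluster with KMeans. Whether the arena clusters the raw embeddings or
# the projected points is an assumption here.
import json

from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA

path = "data/clustering_individual-6da8b2cf-9395-4671-ad9c-18b0374353dc.jsonl"
with open(path) as f:
    rec = json.loads(f.readline())  # first record: coffee / dogs / civilizations

# One of the models named in this file; any embedding model would do.
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
embeddings = model.encode(rec["prompt"])                  # shape (n_texts, dim)

points = PCA(n_components=3).fit_transform(embeddings)   # "3D (press for 2D)"
labels = KMeans(n_clusters=rec["ncluster"], n_init=10).fit_predict(points)

for text, label in sorted(zip(rec["prompt"], labels), key=lambda x: x[1]):
    print(label, text)
```

With a reasonable embedding model, the three clusters of the first record should roughly separate the coffee drinks, the dog breeds, and the ancient civilizations.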
data/retrieval_battle-6da8b2cf-9395-4671-ad9c-18b0374353dc.jsonl
ADDED
@@ -0,0 +1,3 @@
{"tstamp": 1722349081.3692, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "d625d13736af4b328337a116f70ad360", "0_model_name": "BAAI/bge-large-en-v1.5", "0_prompt": "a great benchmark for text embeddings", "0_output": [["a great benchmark for text embeddings", "Title: MTEB: Massive Text Embedding Benchmark\n\nAbstract: Text embeddings are commonly evaluated on a small set of datasets from a single task not covering their possible applications to other tasks. It is unclear whether state-of-the-art embeddings on semantic textual similarity (STS) can be equally well applied to other tasks like clustering or reranking. This makes progress in the field difficult to track, as various models are constantly being proposed without proper evaluation. To solve this problem, we introduce the Massive Text Embedding Benchmark (MTEB). MTEB spans 8 embedding tasks covering a total of 58 datasets and 112 languages. Through the benchmarking of 33 models on MTEB, we establish the most comprehensive benchmark of text embeddings to date. We find that no particular text embedding method dominates across all tasks. This suggests that the field has yet to converge on a universal text embedding method and scale it up sufficiently to provide state-of-the-art results on all embedding tasks. MTEB comes with open-source code and a public leaderboard at https://github.com/embeddings-benchmark/mteb."]], "0_corpus": "arxiv", "1_conv_id": "94a7f8f55a3b4c4e9b41f32724665995", "1_model_name": "intfloat/e5-mistral-7b-instruct", "1_prompt": "a great benchmark for text embeddings", "1_output": [["a great benchmark for text embeddings", "Title: Improving Text Embeddings with Large Language Models\n\nAbstract: In this paper, we introduce a novel and simple method for obtaining high-quality text embeddings using only synthetic data and less than 1k training steps. Unlike existing methods that often depend on multi-stage intermediate pre-training with billions of weakly-supervised text pairs, followed by fine-tuning with a few labeled datasets, our method does not require building complex training pipelines or relying on manually collected datasets that are often constrained by task diversity and language coverage. We leverage proprietary LLMs to generate diverse synthetic data for hundreds of thousands of text embedding tasks across 93 languages. We then fine-tune open-source decoder-only LLMs on the synthetic data using standard contrastive loss. Experiments demonstrate that our method achieves strong performance on highly competitive text embedding benchmarks without using any labeled data. Furthermore, when fine-tuned with a mixture of synthetic and labeled data, our model sets new state-of-the-art results on the BEIR and MTEB benchmarks."]], "1_corpus": "arxiv"}
{"tstamp": 1722349157.5755, "task_type": "retrieval", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "9a382599a6d6464195ad3be824d8f9a9", "0_model_name": "text-embedding-3-large", "0_prompt": "a great benchmark for text embeddings", "0_output": [["a great benchmark for text embeddings", "Title: MTEB: Massive Text Embedding Benchmark\n\nAbstract: Text embeddings are commonly evaluated on a small set of datasets from a single task not covering their possible applications to other tasks. It is unclear whether state-of-the-art embeddings on semantic textual similarity (STS) can be equally well applied to other tasks like clustering or reranking. This makes progress in the field difficult to track, as various models are constantly being proposed without proper evaluation. To solve this problem, we introduce the Massive Text Embedding Benchmark (MTEB). MTEB spans 8 embedding tasks covering a total of 58 datasets and 112 languages. Through the benchmarking of 33 models on MTEB, we establish the most comprehensive benchmark of text embeddings to date. We find that no particular text embedding method dominates across all tasks. This suggests that the field has yet to converge on a universal text embedding method and scale it up sufficiently to provide state-of-the-art results on all embedding tasks. MTEB comes with open-source code and a public leaderboard at https://github.com/embeddings-benchmark/mteb."]], "0_corpus": "arxiv", "1_conv_id": "5d9c5311eab24ce3b4e8a86a4a95c56d", "1_model_name": "Salesforce/SFR-Embedding-2_R", "1_prompt": "a great benchmark for text embeddings", "1_output": [["a great benchmark for text embeddings", "Title: MTEB: Massive Text Embedding Benchmark\n\nAbstract: Text embeddings are commonly evaluated on a small set of datasets from a single task not covering their possible applications to other tasks. It is unclear whether state-of-the-art embeddings on semantic textual similarity (STS) can be equally well applied to other tasks like clustering or reranking. This makes progress in the field difficult to track, as various models are constantly being proposed without proper evaluation. To solve this problem, we introduce the Massive Text Embedding Benchmark (MTEB). MTEB spans 8 embedding tasks covering a total of 58 datasets and 112 languages. Through the benchmarking of 33 models on MTEB, we establish the most comprehensive benchmark of text embeddings to date. We find that no particular text embedding method dominates across all tasks. This suggests that the field has yet to converge on a universal text embedding method and scale it up sufficiently to provide state-of-the-art results on all embedding tasks. MTEB comes with open-source code and a public leaderboard at https://github.com/embeddings-benchmark/mteb."]], "1_corpus": "arxiv"}
{"tstamp": 1722349224.8007, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "6d655f5306ab4af996d219e94482c453", "0_model_name": "Salesforce/SFR-Embedding-2_R", "0_prompt": "a great benchmark for text embeddings", "0_output": [["a great benchmark for text embeddings", "Title: MTEB: Massive Text Embedding Benchmark\n\nAbstract: Text embeddings are commonly evaluated on a small set of datasets from a single task not covering their possible applications to other tasks. It is unclear whether state-of-the-art embeddings on semantic textual similarity (STS) can be equally well applied to other tasks like clustering or reranking. This makes progress in the field difficult to track, as various models are constantly being proposed without proper evaluation. To solve this problem, we introduce the Massive Text Embedding Benchmark (MTEB). MTEB spans 8 embedding tasks covering a total of 58 datasets and 112 languages. Through the benchmarking of 33 models on MTEB, we establish the most comprehensive benchmark of text embeddings to date. We find that no particular text embedding method dominates across all tasks. This suggests that the field has yet to converge on a universal text embedding method and scale it up sufficiently to provide state-of-the-art results on all embedding tasks. MTEB comes with open-source code and a public leaderboard at https://github.com/embeddings-benchmark/mteb."]], "0_corpus": "arxiv", "1_conv_id": "7775a304716743ed9ffdd51a63c5f726", "1_model_name": "voyage-multilingual-2", "1_prompt": "a great benchmark for text embeddings", "1_output": [["a great benchmark for text embeddings", "Title: Word Emdeddings through Hellinger PCA\n\nAbstract: Word embeddings resulting from neural language models have been shown to be successful for a large variety of NLP tasks. However, such architecture might be difficult to train and time-consuming. Instead, we propose to drastically simplify the word embeddings computation through a Hellinger PCA of the word co-occurence matrix. We compare those new word embeddings with some well-known embeddings on NER and movie review tasks and show that we can reach similar or even better performance. Although deep learning is not really necessary for generating good word embeddings, we show that it can provide an easy way to adapt embeddings to specific tasks."]], "1_corpus": "arxiv"}
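These battle records log only the vote and the two sides' conversations; the top-level `models` field is blanked, with the per-side names kept in `0_model_name`/`1_model_name`. A small tallying sketch, not part of the arena codebase; `leftvote` and `tievote` are the types observed in this file, and `rightvote` is assumed by symmetry:

```python
# Count battle wins per model from the vote records.
import json
from collections import Counter

wins = Counter()
path = "data/retrieval_battle-6da8b2cf-9395-4671-ad9c-18b0374353dc.jsonl"
with open(path) as f:
    for line in f:
        rec = json.loads(line)
        if rec["type"] == "leftvote":
            wins[rec["0_model_name"]] += 1
        elif rec["type"] == "rightvote":   # assumed counterpart of leftvote
            wins[rec["1_model_name"]] += 1
        # "tievote" credits neither side

print(wins.most_common())
```

On these three records, the two left-votes credit BAAI/bge-large-en-v1.5 and Salesforce/SFR-Embedding-2_R with one win each, and the tie credits neither side.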
data/retrieval_individual-6da8b2cf-9395-4671-ad9c-18b0374353dc.jsonl
ADDED
@@ -0,0 +1,8 @@
{"tstamp": 1722349050.1628, "task_type": "retrieval", "type": "chat", "model": "intfloat/e5-mistral-7b-instruct", "gen_params": {}, "start": 1722349048.5774, "finish": 1722349050.1628, "ip": "", "conv_id": "29dcf1c6c581401ab3925c4342f4ce00", "model_name": "intfloat/e5-mistral-7b-instruct", "prompt": "a great benchmark for text embeddings", "output": [["a great benchmark for text embeddings", "Title: Improving Text Embeddings with Large Language Models\n\nAbstract: In this paper, we introduce a novel and simple method for obtaining high-quality text embeddings using only synthetic data and less than 1k training steps. Unlike existing methods that often depend on multi-stage intermediate pre-training with billions of weakly-supervised text pairs, followed by fine-tuning with a few labeled datasets, our method does not require building complex training pipelines or relying on manually collected datasets that are often constrained by task diversity and language coverage. We leverage proprietary LLMs to generate diverse synthetic data for hundreds of thousands of text embedding tasks across 93 languages. We then fine-tune open-source decoder-only LLMs on the synthetic data using standard contrastive loss. Experiments demonstrate that our method achieves strong performance on highly competitive text embedding benchmarks without using any labeled data. Furthermore, when fine-tuned with a mixture of synthetic and labeled data, our model sets new state-of-the-art results on the BEIR and MTEB benchmarks."]], "corpus": "arxiv"}
{"tstamp": 1722349050.1628, "task_type": "retrieval", "type": "chat", "model": "voyage-multilingual-2", "gen_params": {}, "start": 1722349048.5774, "finish": 1722349050.1628, "ip": "", "conv_id": "7d2758394c77497f99f38b88561d970b", "model_name": "voyage-multilingual-2", "prompt": "a great benchmark for text embeddings", "output": [["a great benchmark for text embeddings", "Title: Word Emdeddings through Hellinger PCA\n\nAbstract: Word embeddings resulting from neural language models have been shown to be successful for a large variety of NLP tasks. However, such architecture might be difficult to train and time-consuming. Instead, we propose to drastically simplify the word embeddings computation through a Hellinger PCA of the word co-occurence matrix. We compare those new word embeddings with some well-known embeddings on NER and movie review tasks and show that we can reach similar or even better performance. Although deep learning is not really necessary for generating good word embeddings, we show that it can provide an easy way to adapt embeddings to specific tasks."]], "corpus": "arxiv"}
{"tstamp": 1722349076.2764, "task_type": "retrieval", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1722349075.982, "finish": 1722349076.2764, "ip": "", "conv_id": "d625d13736af4b328337a116f70ad360", "model_name": "BAAI/bge-large-en-v1.5", "prompt": "a great benchmark for text embeddings", "output": [["a great benchmark for text embeddings", "Title: MTEB: Massive Text Embedding Benchmark\n\nAbstract: Text embeddings are commonly evaluated on a small set of datasets from a single task not covering their possible applications to other tasks. It is unclear whether state-of-the-art embeddings on semantic textual similarity (STS) can be equally well applied to other tasks like clustering or reranking. This makes progress in the field difficult to track, as various models are constantly being proposed without proper evaluation. To solve this problem, we introduce the Massive Text Embedding Benchmark (MTEB). MTEB spans 8 embedding tasks covering a total of 58 datasets and 112 languages. Through the benchmarking of 33 models on MTEB, we establish the most comprehensive benchmark of text embeddings to date. We find that no particular text embedding method dominates across all tasks. This suggests that the field has yet to converge on a universal text embedding method and scale it up sufficiently to provide state-of-the-art results on all embedding tasks. MTEB comes with open-source code and a public leaderboard at https://github.com/embeddings-benchmark/mteb."]], "corpus": "arxiv"}
{"tstamp": 1722349076.2764, "task_type": "retrieval", "type": "chat", "model": "intfloat/e5-mistral-7b-instruct", "gen_params": {}, "start": 1722349075.982, "finish": 1722349076.2764, "ip": "", "conv_id": "94a7f8f55a3b4c4e9b41f32724665995", "model_name": "intfloat/e5-mistral-7b-instruct", "prompt": "a great benchmark for text embeddings", "output": [["a great benchmark for text embeddings", "Title: Improving Text Embeddings with Large Language Models\n\nAbstract: In this paper, we introduce a novel and simple method for obtaining high-quality text embeddings using only synthetic data and less than 1k training steps. Unlike existing methods that often depend on multi-stage intermediate pre-training with billions of weakly-supervised text pairs, followed by fine-tuning with a few labeled datasets, our method does not require building complex training pipelines or relying on manually collected datasets that are often constrained by task diversity and language coverage. We leverage proprietary LLMs to generate diverse synthetic data for hundreds of thousands of text embedding tasks across 93 languages. We then fine-tune open-source decoder-only LLMs on the synthetic data using standard contrastive loss. Experiments demonstrate that our method achieves strong performance on highly competitive text embedding benchmarks without using any labeled data. Furthermore, when fine-tuned with a mixture of synthetic and labeled data, our model sets new state-of-the-art results on the BEIR and MTEB benchmarks."]], "corpus": "arxiv"}
{"tstamp": 1722349153.9565, "task_type": "retrieval", "type": "chat", "model": "text-embedding-3-large", "gen_params": {}, "start": 1722349153.4182, "finish": 1722349153.9565, "ip": "", "conv_id": "9a382599a6d6464195ad3be824d8f9a9", "model_name": "text-embedding-3-large", "prompt": "a great benchmark for text embeddings", "output": [["a great benchmark for text embeddings", "Title: MTEB: Massive Text Embedding Benchmark\n\nAbstract: Text embeddings are commonly evaluated on a small set of datasets from a single task not covering their possible applications to other tasks. It is unclear whether state-of-the-art embeddings on semantic textual similarity (STS) can be equally well applied to other tasks like clustering or reranking. This makes progress in the field difficult to track, as various models are constantly being proposed without proper evaluation. To solve this problem, we introduce the Massive Text Embedding Benchmark (MTEB). MTEB spans 8 embedding tasks covering a total of 58 datasets and 112 languages. Through the benchmarking of 33 models on MTEB, we establish the most comprehensive benchmark of text embeddings to date. We find that no particular text embedding method dominates across all tasks. This suggests that the field has yet to converge on a universal text embedding method and scale it up sufficiently to provide state-of-the-art results on all embedding tasks. MTEB comes with open-source code and a public leaderboard at https://github.com/embeddings-benchmark/mteb."]], "corpus": "arxiv"}
{"tstamp": 1722349153.9565, "task_type": "retrieval", "type": "chat", "model": "Salesforce/SFR-Embedding-2_R", "gen_params": {}, "start": 1722349153.4182, "finish": 1722349153.9565, "ip": "", "conv_id": "5d9c5311eab24ce3b4e8a86a4a95c56d", "model_name": "Salesforce/SFR-Embedding-2_R", "prompt": "a great benchmark for text embeddings", "output": [["a great benchmark for text embeddings", "Title: MTEB: Massive Text Embedding Benchmark\n\nAbstract: Text embeddings are commonly evaluated on a small set of datasets from a single task not covering their possible applications to other tasks. It is unclear whether state-of-the-art embeddings on semantic textual similarity (STS) can be equally well applied to other tasks like clustering or reranking. This makes progress in the field difficult to track, as various models are constantly being proposed without proper evaluation. To solve this problem, we introduce the Massive Text Embedding Benchmark (MTEB). MTEB spans 8 embedding tasks covering a total of 58 datasets and 112 languages. Through the benchmarking of 33 models on MTEB, we establish the most comprehensive benchmark of text embeddings to date. We find that no particular text embedding method dominates across all tasks. This suggests that the field has yet to converge on a universal text embedding method and scale it up sufficiently to provide state-of-the-art results on all embedding tasks. MTEB comes with open-source code and a public leaderboard at https://github.com/embeddings-benchmark/mteb."]], "corpus": "arxiv"}
{"tstamp": 1722349221.6561, "task_type": "retrieval", "type": "chat", "model": "Salesforce/SFR-Embedding-2_R", "gen_params": {}, "start": 1722349221.2733, "finish": 1722349221.6561, "ip": "", "conv_id": "6d655f5306ab4af996d219e94482c453", "model_name": "Salesforce/SFR-Embedding-2_R", "prompt": "a great benchmark for text embeddings", "output": [["a great benchmark for text embeddings", "Title: MTEB: Massive Text Embedding Benchmark\n\nAbstract: Text embeddings are commonly evaluated on a small set of datasets from a single task not covering their possible applications to other tasks. It is unclear whether state-of-the-art embeddings on semantic textual similarity (STS) can be equally well applied to other tasks like clustering or reranking. This makes progress in the field difficult to track, as various models are constantly being proposed without proper evaluation. To solve this problem, we introduce the Massive Text Embedding Benchmark (MTEB). MTEB spans 8 embedding tasks covering a total of 58 datasets and 112 languages. Through the benchmarking of 33 models on MTEB, we establish the most comprehensive benchmark of text embeddings to date. We find that no particular text embedding method dominates across all tasks. This suggests that the field has yet to converge on a universal text embedding method and scale it up sufficiently to provide state-of-the-art results on all embedding tasks. MTEB comes with open-source code and a public leaderboard at https://github.com/embeddings-benchmark/mteb."]], "corpus": "arxiv"}
{"tstamp": 1722349221.6561, "task_type": "retrieval", "type": "chat", "model": "voyage-multilingual-2", "gen_params": {}, "start": 1722349221.2733, "finish": 1722349221.6561, "ip": "", "conv_id": "7775a304716743ed9ffdd51a63c5f726", "model_name": "voyage-multilingual-2", "prompt": "a great benchmark for text embeddings", "output": [["a great benchmark for text embeddings", "Title: Word Emdeddings through Hellinger PCA\n\nAbstract: Word embeddings resulting from neural language models have been shown to be successful for a large variety of NLP tasks. However, such architecture might be difficult to train and time-consuming. Instead, we propose to drastically simplify the word embeddings computation through a Hellinger PCA of the word co-occurence matrix. We compare those new word embeddings with some well-known embeddings on NER and movie review tasks and show that we can reach similar or even better performance. Although deep learning is not really necessary for generating good word embeddings, we show that it can provide an easy way to adapt embeddings to specific tasks."]], "corpus": "arxiv"}
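The `conv_id` in each of these individual records matches the `0_conv_id`/`1_conv_id` fields of the battle file above (for example, `d625d13736af4b328337a116f70ad360` appears in both), which suggests a join key for reconstructing which models faced each other. A sketch of that join; the field names come from the data, but treating `conv_id` as the key is an inference, not documented behavior:

```python
# Join the anonymous battle votes back to the per-model retrieval records.
import json

def load_jsonl(path):
    with open(path) as f:
        return [json.loads(line) for line in f]

suffix = "6da8b2cf-9395-4671-ad9c-18b0374353dc"
by_id = {r["conv_id"]: r
         for r in load_jsonl(f"data/retrieval_individual-{suffix}.jsonl")}

for battle in load_jsonl(f"data/retrieval_battle-{suffix}.jsonl"):
    left = by_id.get(battle["0_conv_id"])
    right = by_id.get(battle["1_conv_id"])
    if left and right:
        print(battle["type"], "|", left["model_name"], "vs", right["model_name"])
```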
data/sts_individual-6da8b2cf-9395-4671-ad9c-18b0374353dc.jsonl
ADDED
@@ -0,0 +1,2 @@
{"tstamp": 1722349107.4436, "task_type": "sts", "type": "chat", "model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "gen_params": {}, "start": 1722349107.0493, "finish": 1722349107.4436, "ip": "", "conv_id": "16b419689a394142becf978eb601da69", "model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "txt0": "People are surfing.", "txt1": "Three people on bikes are coming over a ridge, while a man in orange watches.", "txt2": "People are biking outdoors.", "output": ""}
{"tstamp": 1722349107.4436, "task_type": "sts", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1722349107.0493, "finish": 1722349107.4436, "ip": "", "conv_id": "3e82bbab1907447d8df397dbfb175f5d", "model_name": "BAAI/bge-large-en-v1.5", "txt0": "People are surfing.", "txt1": "Three people on bikes are coming over a ridge, while a man in orange watches.", "txt2": "People are biking outdoors.", "output": ""}
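An STS record carries three texts: an anchor (`txt0`) and two candidates (`txt1`, `txt2`); the empty `output` field suggests the scores themselves are not logged here. A sketch of the comparison such a record implies, using one of the models named in this file; computing plain cosine similarity is an assumption about the task, not something the record confirms:

```python
# Which of txt1/txt2 sits closer to the anchor txt0 under a given model?
import json

from sentence_transformers import SentenceTransformer, util

path = "data/sts_individual-6da8b2cf-9395-4671-ad9c-18b0374353dc.jsonl"
with open(path) as f:
    rec = json.loads(f.readline())

model = SentenceTransformer("BAAI/bge-large-en-v1.5")
v0, v1, v2 = model.encode([rec["txt0"], rec["txt1"], rec["txt2"]])

print("sim(txt0, txt1):", float(util.cos_sim(v0, v1)))  # surfing vs bikes over a ridge
print("sim(txt0, txt2):", float(util.cos_sim(v0, v2)))  # surfing vs biking outdoors
```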