---
license: mit
---
# Marqo Chimera Arctic bge M
This is a chimera model that concatenates embeddings from [Snowflake/snowflake-arctic-embed-m-v1.5](https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5) and [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5). It produces 1536-dimensional embeddings (768 + 768) and has a total of 218M parameters (109M + 109M).
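Conceptually, the output is equivalent to embedding the input with both base models and concatenating the results. Below is a minimal sketch of that idea; CLS-token pooling and the absence of per-model normalization are assumptions here, not a description of the released model's internals:

```python
import torch
from transformers import AutoModel, AutoTokenizer

# Load both base models, each with its own tokenizer.
arctic_tok = AutoTokenizer.from_pretrained("Snowflake/snowflake-arctic-embed-m-v1.5")
arctic = AutoModel.from_pretrained("Snowflake/snowflake-arctic-embed-m-v1.5").eval()
bge_tok = AutoTokenizer.from_pretrained("BAAI/bge-base-en-v1.5")
bge = AutoModel.from_pretrained("BAAI/bge-base-en-v1.5").eval()

text = ["What is vector search?"]

with torch.inference_mode():
    # CLS-token pooling (both base models are BERT-style encoders).
    arctic_emb = arctic(**arctic_tok(text, return_tensors="pt")).last_hidden_state[:, 0]  # (1, 768)
    bge_emb = bge(**bge_tok(text, return_tensors="pt")).last_hidden_state[:, 0]           # (1, 768)

# Concatenating the two halves gives the 768 + 768 = 1536-dimensional chimera embedding.
combined = torch.cat([arctic_emb, bge_emb], dim=-1)
print(combined.shape)  # torch.Size([1, 1536])
```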
## Usage
```python
import torch
from torch.nn.functional import normalize
from transformers import AutoModel, AutoTokenizer
# Load the model and tokenizer.
tokenizer = AutoTokenizer.from_pretrained("Marqo/marqo-chimera-arctic-bge-m")
model = AutoModel.from_pretrained("Marqo/marqo-chimera-arctic-bge-m", trust_remote_code=True)
model.eval()
# Model constants.
query_prefix = 'Represent this sentence for searching relevant passages: '
# Your queries and docs.
queries = [
    "What is vector search?",
    "Where can I get the best pizza?"
]
documents = [
    "Marqo is an end-to-end platform for embedding training and retrieval.",
    "Definitely Naples! The birthplace of pizza, and it’s as authentic as it gets."
]
# Add query prefix and tokenize queries and docs.
queries_with_prefix = [f"{query_prefix}{q}" for q in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Use the model to generate text embeddings.
with torch.inference_mode():
    query_embeddings = model(**query_tokens)
    document_embeddings = model(**document_tokens)
# Remember to normalize embeddings.
query_embeddings = normalize(query_embeddings)
document_embeddings = normalize(document_embeddings)
# Compute scores via dot product.
scores = query_embeddings @ document_embeddings.T
# Pretty-print the results.
for query, query_scores in zip(queries, scores):
    doc_score_pairs = list(zip(documents, query_scores))
    doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
    print(f'Query: "{query}"')
    for document, score in doc_score_pairs:
        print(f'Score: {score:.4f} | Document: "{document}"')
    print()
# Query: "What is vector search?"
# Score: 0.4194 | Document: "Marqo is an end-to-end platform for embedding training and retrieval."
# Score: 0.1853 | Document: "Definitely Naples! The birthplace of pizza, and it’s as authentic as it gets."
# Query: "Where can I get the best pizza?"
# Score: 0.6144 | Document: "Definitely Naples! The birthplace of pizza, and it’s as authentic as it gets."
# Score: 0.2787 | Document: "Marqo is an end-to-end platform for embedding training and retrieval."
```
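As a quick sanity check, both embedding matrices should have the 1536 dimensions described above:

```python
print(query_embeddings.shape)     # torch.Size([2, 1536])
print(document_embeddings.shape)  # torch.Size([2, 1536])
```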