import spaces
import gradio as gr
from numpy.linalg import norm
from transformers import AutoModel, AutoTokenizer
from sentence_transformers import SentenceTransformer
import torch
cos_sim = lambda a, b: (a @ b.T) / (norm(a) * norm(b))
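# cos_sim computes the cosine similarity of two 1-D embedding vectors: 1.0 for
# parallel vectors, 0.0 for orthogonal ones. generate() below uses it to score
# how close each model places a query embedding to the code embedding.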
model1 = AutoModel.from_pretrained("jinaai/jina-embeddings-v2-base-code", trust_remote_code=True)
model2 = AutoModel.from_pretrained("jinaai/jina-embeddings-v2-base-en", trust_remote_code=True)
model3 = AutoModel.from_pretrained("jinaai/jina-embeddings-v2-base-zh", trust_remote_code=True)
model4 = SentenceTransformer("aspire/acge_text_embedding")
model5 = SentenceTransformer("intfloat/multilingual-e5-large")
# Salesforce/codet5p-110m-embedding is not a SentenceTransformer, so it gets
# its own tokenizer and is handled separately in generate() below.
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codet5p-110m-embedding", trust_remote_code=True)
model6 = AutoModel.from_pretrained("Salesforce/codet5p-110m-embedding", trust_remote_code=True)
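# Per the model card, this checkpoint's custom forward pass (loaded via
# trust_remote_code) returns a pooled, L2-normalized 256-dim embedding tensor
# directly, so no extra pooling step is needed after calling model6(...).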
@spaces.GPU
def generate(query1, query2, source_code):
    # Fall back to the example inputs when a field is left empty.
    if len(query1) < 1:
        query1 = "How do I access the index while iterating over a sequence with a for loop?"
    if len(query2) < 1:
        query2 = "get a list of all the keys in a dictionary"
    if len(source_code) < 1:
        source_code = "# Use the built-in enumerator\nfor idx, x in enumerate(xs):\n print(idx, x)"

    results = []
    model_names = [
        "jinaai/jina-embeddings-v2-base-code",
        "jinaai/jina-embeddings-v2-base-en",
        "jinaai/jina-embeddings-v2-base-zh",
        "aspire/acge_text_embedding",
        "intfloat/multilingual-e5-large",
        "Salesforce/codet5p-110m-embedding",
    ]
    # All models except the last expose a SentenceTransformer-style encode().
    for model, name in zip([model1, model2, model3, model4, model5], model_names[:-1]):
        embeddings = model.encode([query1, query2, source_code])
        score1 = cos_sim(embeddings[0], embeddings[2])
        score2 = cos_sim(embeddings[1], embeddings[2])
        results.append([name, float(score1), float(score2)])

    # Special handling for Salesforce/codet5p-110m-embedding.
    inputs = tokenizer([query1, query2, source_code], padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        embeddings = model6(**inputs)  # the forward pass returns the embedding vectors directly
    embeddings = embeddings.cpu().numpy()  # convert to a NumPy array for cos_sim
    score1 = cos_sim(embeddings[0], embeddings[2])
    score2 = cos_sim(embeddings[1], embeddings[2])
    results.append([model_names[-1], float(score1), float(score2)])
    return results
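# Example of exercising generate() directly before launching the UI (hypothetical
# local check; empty strings fall back to the built-in example inputs):
#
#   scores = generate("", "", "")
#   for name, s1, s2 in scores:
#       print(f"{name}: query1={s1:.3f}, query2={s2:.3f}")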
gr.Interface(
    fn=generate,
    inputs=[
        gr.Text(label="query1", placeholder="How do I access the index while iterating over a sequence with a for loop?"),
        gr.Text(label="query2", placeholder="get a list of all the keys in a dictionary"),
        gr.Text(label="code", placeholder="# Use the built-in enumerator\nfor idx, x in enumerate(xs):\n print(idx, x)"),
    ],
    outputs=[
        gr.Dataframe(
            headers=["Model", "Query1 Score", "Query2 Score"],
            label="Similarity Scores",
        )
    ],
).launch()
# gr.load("models/jinaai/jina-embeddings-v2-base-code").launch()