Tonic committed on
Commit
d519921
1 Parent(s): b99f5e4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -31
app.py CHANGED
@@ -9,7 +9,7 @@ import os
9
  title = """
10
  # 👋🏻Welcome to 🙋🏻‍♂️Tonic's 🐣e5-mistral🛌🏻Embeddings """
11
  description = """
12
- You can use this Space to test out the current model [intfloat/e5-mistral-7b-instruct](https://huggingface.co/intfloat/e5-mistral-7b-instruct). e5mistral has a larger context window, a different prompting/return mechanism and generally better results than other embedding models.
13
  You can also use 🐣e5-mistral🛌🏻 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/e5?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
14
  Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [Poly](https://github.com/tonic-ai/poly) 🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
15
  """
@@ -45,36 +45,75 @@ def last_token_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tenso
45
  batch_size = last_hidden_states.shape[0]
46
  return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]
47
 
48
- @spaces.GPU
49
- def compute_embeddings(selected_task, input_text, system_prompt):
50
- max_length = 2042
51
- task_description = tasks[selected_task]
52
- processed_texts = [f'Instruct: {task_description}\nQuery: {input_text}']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
- batch_dict = tokenizer(processed_texts, max_length=max_length - 1, return_attention_mask=False, padding=False, truncation=True)
55
- batch_dict['input_ids'] = [input_ids + [tokenizer.eos_token_id] for input_ids in batch_dict['input_ids']]
56
- batch_dict = tokenizer.pad(batch_dict, padding=True, return_attention_mask=True, return_tensors='pt')
57
- batch_dict = {k: v.to(device) for k, v in batch_dict.items()}
58
- outputs = model(**batch_dict)
59
- embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
60
- embeddings = F.normalize(embeddings, p=2, dim=1)
61
- embeddings_list = embeddings.detach().cpu().numpy().tolist()
62
- return embeddings_list
63
 
64
  def app_interface():
65
  with gr.Blocks() as demo:
66
  gr.Markdown(title)
67
  gr.Markdown(description)
68
 
69
- task_dropdown = gr.Dropdown(list(tasks.keys()), label="Select a Task", value=list(tasks.keys())[0])
70
-
71
- input_text_box = gr.Textbox(label="📖Input Text")
72
- system_prompt_box = gr.Textbox(label="🤖System Prompt (Optional)")
73
-
74
- compute_button = gr.Button("Try🐣🛌🏻e5")
75
-
76
- output_display = gr.Textbox(label="🐣e5-mistral🛌🏻")
77
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  with gr.Row():
79
  with gr.Column():
80
  system_prompt_box
@@ -83,13 +122,6 @@ def app_interface():
83
  compute_button
84
  output_display
85
 
86
- compute_button.click(
87
- fn=compute_embeddings,
88
- inputs=[task_dropdown, input_text_box],
89
- outputs=output_display
90
- )
91
-
92
-
93
  return demo
94
 
95
  # Run the Gradio app
 
9
  title = """
10
  # 👋🏻Welcome to 🙋🏻‍♂️Tonic's 🐣e5-mistral🛌🏻Embeddings """
11
  description = """
12
+ You can use this ZeroGPU Space to test out the current model [intfloat/e5-mistral-7b-instruct](https://huggingface.co/intfloat/e5-mistral-7b-instruct). 🐣e5-mistral🛌🏻 has a larger context🪟window, a different prompting/return🛠️mechanism and generally better results than other embedding models. use it via API to create embeddings or try out the sentence similarity to see how various optimization parameters affect performance.
13
  You can also use 🐣e5-mistral🛌🏻 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/e5?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
14
  Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [Poly](https://github.com/tonic-ai/poly) 🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
15
  """
 
45
  batch_size = last_hidden_states.shape[0]
46
  return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]
47
 
48
class EmbeddingModel:
    """Holds the e5-mistral-7b-instruct tokenizer/model and exposes GPU-backed
    embedding and sentence-similarity computations for the Gradio app.
    """

    def __init__(self):
        # Load once at startup; fp16 halves memory for the 7B model and
        # device_map places the weights on `device`.
        self.tokenizer = AutoTokenizer.from_pretrained('intfloat/e5-mistral-7b-instruct')
        self.model = AutoModel.from_pretrained('intfloat/e5-mistral-7b-instruct', torch_dtype=torch.float16, device_map=device)

    @spaces.GPU
    def compute_embeddings(self, selected_task, input_text, system_prompt=None):
        """Embed `input_text` for the task named `selected_task`.

        Returns the L2-normalized embedding as a plain nested list (JSON-safe
        for the Gradio Textbox output). `system_prompt` is accepted for
        interface compatibility with the UI wiring but is currently unused.
        """
        # FIX: was defined without `self` and referenced module-level
        # `tokenizer`/`model`, which the class-based revision no longer
        # defines — the bound Gradio call raised TypeError. Now an instance
        # method using self.tokenizer / self.model.
        # NOTE(review): 2042 looks like a typo for 2048 — kept to preserve behavior.
        max_length = 2042
        task_description = tasks[selected_task]
        processed_texts = [f'Instruct: {task_description}\nQuery: {input_text}']

        # Tokenize without padding, append EOS manually (the model pools on the
        # last token), then pad the batch to tensors.
        batch_dict = self.tokenizer(processed_texts, max_length=max_length - 1, return_attention_mask=False, padding=False, truncation=True)
        batch_dict['input_ids'] = [input_ids + [self.tokenizer.eos_token_id] for input_ids in batch_dict['input_ids']]
        batch_dict = self.tokenizer.pad(batch_dict, padding=True, return_attention_mask=True, return_tensors='pt')
        batch_dict = {k: v.to(device) for k, v in batch_dict.items()}
        outputs = self.model(**batch_dict)
        embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
        embeddings = F.normalize(embeddings, p=2, dim=1)
        return embeddings.detach().cpu().numpy().tolist()

    @spaces.GPU
    def compute_similarity(self, sentence1, sentence2, extra_sentence1, extra_sentence2):
        """Return cosine similarities for the pairs (sentence1, sentence2)
        and (extra_sentence1, extra_sentence2) as two floats.
        """
        # Encode all four sentences as one batch.
        sentences = [sentence1, sentence2, extra_sentence1, extra_sentence2]
        encoded_input = self.tokenizer(sentences, padding=True, truncation=True, return_tensors='pt').to(device)
        with torch.no_grad():
            model_output = self.model(**encoded_input)

        # Pool on the last non-padding token, then L2-normalize so the dot
        # product equals cosine similarity.
        embeddings = last_token_pool(model_output.last_hidden_state, encoded_input['attention_mask'])
        embeddings = F.normalize(embeddings, p=2, dim=1)

        similarity1 = F.cosine_similarity(embeddings[0].unsqueeze(0), embeddings[1].unsqueeze(0)).item()
        similarity2 = F.cosine_similarity(embeddings[2].unsqueeze(0), embeddings[3].unsqueeze(0)).item()
        return similarity1, similarity2
85
 
 
 
 
 
 
 
 
 
 
86
 
87
def app_interface():
    """Build and return the Gradio Blocks demo.

    Two tabs: "Embedding Generation" (task-conditioned embedding of a single
    text) and "Sentence Similarity" (cosine similarity for two sentence
    pairs). Relies on the module-level `title`, `description`, `tasks`, and
    `embedding_model` objects.
    """
    with gr.Blocks() as demo:
        gr.Markdown(title)
        gr.Markdown(description)

        with gr.Tab("Embedding Generation"):
            task_dropdown = gr.Dropdown(list(tasks.keys()), label="Select a Task", value=list(tasks.keys())[0])
            input_text_box = gr.Textbox(label="📖Input Text")
            system_prompt_box = gr.Textbox(label="🤖System Prompt (Optional)")
            compute_button = gr.Button("Try🐣🛌🏻e5")
            output_display = gr.Textbox(label="🐣e5-mistral🛌🏻 Embeddings")
            compute_button.click(
                fn=embedding_model.compute_embeddings,
                inputs=[task_dropdown, input_text_box, system_prompt_box],
                outputs=output_display,
            )

        with gr.Tab("Sentence Similarity"):
            sentence1_box = gr.Textbox(label="Sentence 1")
            sentence2_box = gr.Textbox(label="Sentence 2")
            extra_sentence1_box = gr.Textbox(label="Extra Sentence 1")
            extra_sentence2_box = gr.Textbox(label="Extra Sentence 2")
            similarity_button = gr.Button("Compute Similarity")
            similarity_output = gr.Label(label="🐣e5-mistral🛌🏻 Similarity Scores")
            similarity_button.click(
                fn=embedding_model.compute_similarity,
                inputs=[sentence1_box, sentence2_box, extra_sentence1_box, extra_sentence2_box],
                outputs=similarity_output,
            )

        # FIX: removed the leftover `with gr.Row(): with gr.Column():` block
        # that held bare component-name expressions (system_prompt_box,
        # compute_button, output_display) — a dead remnant of the pre-tab
        # layout; evaluating a component name is a no-op in Gradio Blocks.

    return demo
126
 
127
  # Run the Gradio app