Tonic committed
Commit
820ce9a
1 Parent(s): 06c7ac0

Update README.md

Files changed (1)
  1. README.md +109 -2
README.md CHANGED
@@ -36,7 +36,7 @@ specifically to improve our understanding of outreach and communication.
  ### Model Sources [optional]
 
  - **Repository:** [Tonic/stablemed](https://huggingface.co/Tonic/stablemed)
- - **Demo [optional]:** [More Information Needed]
+ - **Demo:** [Tonic/StableMed_Chat](https://huggingface.co/Tonic/StableMed_Chat)
 
  ## Uses
 
@@ -83,7 +83,114 @@ DO NOT USE THIS MODEL WITHOUT FURTHER FINETUNING
 
  Use the code below to get started with the model.
 
- [More Information Needed]
+ ```Python
+ import os
+ import textwrap
+
+ import torch
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from peft import PeftModel, PeftConfig
+
+ hf_token = os.environ.get('HUGGINGFACE_TOKEN')
+
+ # Function to wrap model output for readable display
+ def wrap_text(text, width=90):
+     lines = text.split('\n')
+     wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
+     wrapped_text = '\n'.join(wrapped_lines)
+     return wrapped_text
+
+ def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
+     # Combine user input and system prompt
+     formatted_input = f"[INSTRUCTION]{system_prompt}[QUESTION]{user_input}"
+
+     # Encode the input text
+     encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
+     model_inputs = encodeds.to(device)
+
+     # Generate a response using the model
+     output = peft_model.generate(
+         **model_inputs,
+         max_length=400,
+         use_cache=True,
+         early_stopping=True,
+         bos_token_id=peft_model.config.bos_token_id,
+         eos_token_id=peft_model.config.eos_token_id,
+         pad_token_id=peft_model.config.eos_token_id,
+         temperature=0.1,
+         do_sample=True
+     )
+
+     # Decode the response
+     response_text = tokenizer.decode(output[0], skip_special_tokens=True)
+     return response_text
+
+ # Define the device
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # Use the base model's ID and the fine-tuned adapter's ID
+ base_model_id = "stabilityai/stablelm-3b-4e1t"
+ model_directory = "Tonic/stablemed"
+
+ # Instantiate the tokenizer; left padding keeps prompts aligned with generated tokens
+ tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, padding_side="left")
+ # tokenizer = AutoTokenizer.from_pretrained("Tonic/stablemed", trust_remote_code=True, padding_side="left")
+ tokenizer.pad_token = tokenizer.eos_token
+ tokenizer.padding_side = 'left'
+
+ # Load the base model, then apply the PEFT adapter on top of it
+ peft_config = PeftConfig.from_pretrained(model_directory, token=hf_token)
+ peft_model = AutoModelForCausalLM.from_pretrained(base_model_id, trust_remote_code=True)
+ peft_model = PeftModel.from_pretrained(peft_model, model_directory, token=hf_token)
+
+ class ChatBot:
+     def __init__(self):
+         self.history = []
+
+     def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
+         # Combine user input and system prompt
+         formatted_input = f"[INSTRUCTION:]{system_prompt}[QUESTION:] {user_input}"
+
+         # Encode user input
+         user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
+
+         # Concatenate the user input with the chat history
+         if len(self.history) > 0:
+             chat_history_ids = torch.cat([self.history, user_input_ids], dim=-1)
+         else:
+             chat_history_ids = user_input_ids
+
+         # Generate a response using the PEFT model
+         response = peft_model.generate(input_ids=chat_history_ids, max_length=400, pad_token_id=tokenizer.eos_token_id)
+
+         # Update the chat history
+         self.history = chat_history_ids
+
+         # Decode and return the response
+         response_text = tokenizer.decode(response[0], skip_special_tokens=True)
+         return response_text
+
+ bot = ChatBot()
+
+ title = "👋🏻Welcome to Tonic's StableMed Chat🚀"
+ description = "You can use this Space to test out the current model [StableMed](https://huggingface.co/Tonic/stablemed), or use 😷StableMed⚕️ on your own data & in your own way by cloning this space. 🧬🔬🔍 Join us: 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community on 👻Discord: [Discord](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)"
+ examples = [["What is the proper treatment for buccal herpes?", "Please provide information on the most effective antiviral medications and home remedies for treating buccal herpes."]]
+
+ iface = gr.Interface(
+     fn=bot.predict,
+     title=title,
+     description=description,
+     examples=examples,
+     inputs=["text", "text"],  # Take user input and system prompt separately
+     outputs="text",
+     theme="ParityError/Anime"
+ )
+ iface.launch()
+ ```
 
  ## Training Details
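
For quick local testing without the Gradio UI, a minimal sketch along these lines could be used. It assumes the `tokenizer`, `peft_model`, `wrap_text`, and `ChatBot` definitions from the snippet above have already been run; the question is taken from the README's `examples` list.

```Python
# Minimal usage sketch: query the fine-tuned adapter directly, without Gradio.
# Assumes tokenizer, peft_model, wrap_text, and ChatBot from the README snippet above.
bot = ChatBot()
answer = bot.predict(
    "What is the proper treatment for buccal herpes?",
    system_prompt="You are an expert medical analyst:",
)
print(wrap_text(answer))
```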