ink-pad committed on
Commit
1877f41
1 Parent(s): d3ad5e1

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +61 -2
README.md CHANGED
@@ -156,7 +156,7 @@ with torch.no_grad():
156
  label, prob_of_risk = parse_output(output, input_len)
157
 
158
  print(f"# risk detected? : {label}") # Yes
159
- print(f"# probability of risk: {prob_of_risk:.3f}") # 0.990
160
 
161
  # Usage 2: Example for Hallucination risks in RAG (risk_name=groundedness passed through guardian_config)
162
 
@@ -184,7 +184,66 @@ with torch.no_grad():
184
 
185
  label, prob_of_risk = parse_output(output, input_len)
186
  print(f"# risk detected? : {label}") # Yes
187
- print(f"# probability of risk: {prob_of_risk:.3f}") # 0.998
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188
  ```
189
 
190
  ### Prompt Template
 
156
  label, prob_of_risk = parse_output(output, input_len)
157
 
158
  print(f"# risk detected? : {label}") # Yes
159
+ print(f"# probability of risk: {prob_of_risk:.3f}") # 0.905
160
 
161
  # Usage 2: Example for Hallucination risks in RAG (risk_name=groundedness passed through guardian_config)
162
 
 
184
 
185
  label, prob_of_risk = parse_output(output, input_len)
186
  print(f"# risk detected? : {label}") # Yes
187
+ print(f"# probability of risk: {prob_of_risk:.3f}") # 0.997
188
+
189
+ # Usage 3: Example for hallucination risks in function call (risk_name=function_call passed through guardian_config)
190
+
191
+ tools = [
192
+ {
193
+ "name": "comment_list",
194
+ "description": "Fetches a list of comments for a specified IBM video using the given API.",
195
+ "parameters": {
196
+ "aweme_id": {
197
+ "description": "The ID of the IBM video.",
198
+ "type": "int",
199
+ "default": "7178094165614464282"
200
+ },
201
+ "cursor": {
202
+ "description": "The cursor for pagination to get the next page of comments. Defaults to 0.",
203
+ "type": "int, optional",
204
+ "default": "0"
205
+ },
206
+ "count": {
207
+ "description": "The number of comments to fetch. Maximum is 30. Defaults to 20.",
208
+ "type": "int, optional",
209
+ "default": "20"
210
+ }
211
+ }
212
+ }
213
+ ]
214
+ user_text = "Fetch the first 15 comments for the IBM video with ID 456789123."
215
+ response_text = [
216
+ {
217
+ "name": "comment_list",
218
+ "arguments": {
219
+ "video_id": 456789123,
220
+ "count": 15
221
+ }
222
+ }
223
+ ]
224
+
225
+ messages = [{"role": "tools", "content": tools}, {"role": "user", "content": user_text}, {"role": "assistant", "content": response_text}]
226
+ guardian_config = {"risk_name": "function_call"}
227
+ input_ids = tokenizer.apply_chat_template(
228
+ messages, guardian_config = guardian_config, add_generation_prompt=True, return_tensors="pt"
229
+ ).to(model.device)
230
+ input_len = input_ids.shape[1]
231
+
232
+ model.eval()
233
+
234
+ with torch.no_grad():
235
+ output = model.generate(
236
+ input_ids,
237
+ do_sample=False,
238
+ max_new_tokens=20,
239
+ return_dict_in_generate=True,
240
+ output_scores=True,
241
+ )
242
+
243
+ label, prob_of_risk = parse_output(output, input_len)
244
+ print(f"# risk detected? : {label}") # Yes
245
+ print(f"# probability of risk: {prob_of_risk:.3f}") # 0.651
246
+
247
  ```
248
 
249
  ### Prompt Template