* move models
app.py CHANGED
@@ -59,16 +59,16 @@ def convert_elapsed_time(diff_time) -> float:
 
 
 deepset_classifier = init_prompt_injection_model(
-    "
+    "ProtectAI/deberta-v3-base-injection-onnx"
 ) # ONNX version of deepset/deberta-v3-base-injection
-
+protectai_classifier = init_prompt_injection_model("ProtectAI/deberta-v3-base-prompt-injection", "onnx")
 fmops_classifier = init_prompt_injection_model(
-    "
+    "ProtectAI/fmops-distilbert-prompt-injection-onnx"
 ) # ONNX version of fmops/distilbert-prompt-injection
 
 
 def detect_hf(
-    prompt: str, threshold: float = 0.5, classifier=
+    prompt: str, threshold: float = 0.5, classifier=protectai_classifier, label: str = "INJECTION"
 ) -> (bool, bool):
     try:
         pi_result = classifier(prompt)
@@ -85,8 +85,8 @@ def detect_hf(
         return False, False
 
 
-def
-    return detect_hf(prompt, classifier=
+def detect_hf_protectai(prompt: str) -> (bool, bool):
+    return detect_hf(prompt, classifier=protectai_classifier)
 
 
 def detect_hf_deepset(prompt: str) -> (bool, bool):
@@ -156,7 +156,7 @@ def detect_azure(prompt: str) -> (bool, bool):
 
 
 detection_providers = {
-    "
+    "ProtectAI (HF model)": detect_hf_protectai,
     "Deepset (HF model)": detect_hf_deepset,
     "FMOps (HF model)": detect_hf_fmops,
     "Lakera Guard": detect_lakera,
@@ -196,7 +196,7 @@ def execute(prompt: str) -> List[Union[str, bool, float]]:
     hf_api.upload_file(
         path_or_fileobj=fileobj,
         path_in_repo=result_path,
-        repo_id="
+        repo_id="ProtectAI/prompt-injection-benchmark",
         repo_type="dataset",
     )
     logger.info(f"Stored prompt: {prompt}")
@@ -236,7 +236,7 @@ if __name__ == "__main__":
         "<br /><br />"
         "HuggingFace (HF) models are hosted on Spaces while other providers are called as APIs.<br /><br />"
         "<a href=\"https://join.slack.com/t/laiyerai/shared_invite/zt-28jv3ci39-sVxXrLs3rQdaN3mIl9IT~w\">Join our Slack community to discuss LLM Security</a><br />"
-        "<a href=\"https://github.com/
+        "<a href=\"https://github.com/protectai/llm-guard\">Secure your LLM interactions with LLM Guard</a>",
         examples=[
             [
                 example,
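This commit points the existing classifiers and the new default protectai_classifier at ONNX exports hosted under the ProtectAI organization, but init_prompt_injection_model itself is not part of the change. A minimal sketch of what such a helper might look like, assuming it wraps the ONNX export in a standard transformers text-classification pipeline via optimum.onnxruntime; the helper body, the subfolder handling, and the truncation settings are assumptions, not code from this commit.

# Sketch only: the real init_prompt_injection_model in app.py is not shown in this diff.
from optimum.onnxruntime import ORTModelForSequenceClassification
from transformers import AutoTokenizer, pipeline


def init_prompt_injection_model(model_path: str, subfolder: str = ""):
    # Load the ONNX export (e.g. "ProtectAI/deberta-v3-base-prompt-injection" with
    # subfolder "onnx") and its tokenizer, then wrap both in a text-classification
    # pipeline so callers receive [{"label": ..., "score": ...}].
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = ORTModelForSequenceClassification.from_pretrained(model_path, subfolder=subfolder)
    return pipeline(
        "text-classification",
        model=model,
        tokenizer=tokenizer,
        truncation=True,
        max_length=512,
    )


# Mirrors the call added in this commit.
protectai_classifier = init_prompt_injection_model("ProtectAI/deberta-v3-base-prompt-injection", "onnx")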
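detect_hf gains a label parameter and now defaults to protectai_classifier; only its signature, the initial pi_result = classifier(prompt) call, and the return False, False fallback appear in these hunks. The sketch below fills in the rest under stated assumptions: the exact scoring logic and the meaning of the two booleans (detection verdict plus whether the call succeeded) are guesses, and logger and protectai_classifier are reused from app.py.

def detect_hf(
    prompt: str, threshold: float = 0.5, classifier=protectai_classifier, label: str = "INJECTION"
) -> (bool, bool):
    try:
        pi_result = classifier(prompt)
        # Treat the classifier's score as the injection probability when it emits
        # the injection label, otherwise invert it.
        injection_score = (
            pi_result[0]["score"] if pi_result[0]["label"] == label else 1 - pi_result[0]["score"]
        )
        return injection_score > threshold, True
    except Exception as err:
        logger.error(f"Failed to call the model: {err}")
        return False, False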
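The remaining hunks retarget two pieces of plumbing: the detection_providers mapping the UI uses to pick a detector, and the hf_api.upload_file call in execute that archives each run to the ProtectAI/prompt-injection-benchmark dataset. A rough sketch of how that glue could fit together, reusing detection_providers and logger defined earlier in app.py; the record format, the result_path layout, and the need for a write token are assumptions, while the upload_file arguments are taken from the diff.

import io
import json
import time

from huggingface_hub import HfApi

hf_api = HfApi()  # needs a token with write access to the dataset repo


def run_and_store(provider_name: str, prompt: str):
    # Look up the selected detector, e.g. "ProtectAI (HF model)" -> detect_hf_protectai.
    detect = detection_providers[provider_name]
    is_injection, checked = detect(prompt)

    # Serialize the outcome and append it to the benchmark dataset repo.
    record = {"provider": provider_name, "prompt": prompt, "is_injection": is_injection, "checked": checked}
    fileobj = io.BytesIO(json.dumps(record).encode("utf-8"))
    result_path = f"results/{int(time.time())}.json"  # hypothetical layout
    hf_api.upload_file(
        path_or_fileobj=fileobj,
        path_in_repo=result_path,
        repo_id="ProtectAI/prompt-injection-benchmark",
        repo_type="dataset",
    )
    logger.info(f"Stored prompt: {prompt}")
    return is_injection, checked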