Miro Goettler
Add utils module
3cec7b2
raw
history blame
1.46 kB
from llm_guard.input_scanners import PromptInjection
from llm_guard.input_scanners.prompt_injection import MatchType
def is_subsequence(main_string, sub_string):
"""
Checks if sub_string is a subsequence of main_string.
A subsequence allows arbitrary characters in between the characters of sub_string in main_string.
Parameters:
main_string (str): The string in which to search.
sub_string (str): The string to search for.
Returns:
bool: True if sub_string is a subsequence of main_string, False otherwise.
"""
main_string = main_string.lower()
sub_string = sub_string.lower()
main_len = len(main_string)
sub_len = len(sub_string)
if sub_len == 0:
return True
if main_len == 0:
return False
main_index = 0
sub_index = 0
while main_index < main_len and sub_index < sub_len:
if main_string[main_index] == sub_string[sub_index]:
sub_index += 1
main_index += 1
return sub_index == sub_len
def is_malicious(user_input: str):
"""Function to check if the user input is malicious. Uses the LLM Guard prompt injection scanner.
Uses a special model `ProtectAI/deberta-v3-base-prompt-injection-v2` to scan the user input for malicious content.
"""
scanner = PromptInjection(threshold=0.5, match_type=MatchType.FULL)
sanitized_prompt, is_valid, risk_score = scanner.scan(user_input)
return is_valid, risk_score