Miro Goettler committed on
Commit 1829929 • 1 Parent(s): b9307a8

Add max input limit

Files changed (2)
  1. app.py +1 -14
  2. config.py +2 -1
app.py CHANGED
@@ -5,7 +5,6 @@ import re
 import pandas as pd
 from llm_guard.input_scanners import PromptInjection
 import streamlit as st
-from streamlit_extras.stylable_container import stylable_container
 
 import config
 import utils
@@ -69,6 +68,7 @@ for idx, level in enumerate(config.LEVELS):
         label_visibility="visible",
         height=200,
         placeholder="Your prompt",
+        max_chars=config.MAX_INPUT_CHARS,
     )
     btn_submit_prompt = st.button(
         "Send prompt", key=f"submit_prompt_{level}"
@@ -453,7 +453,6 @@ for idx, level in enumerate(config.LEVELS):
     info_cont.write(config.LEVEL_DESCRIPTIONS[level]["explanation"])
     info_cont.write("##### Real-life usage")
     info_cont.write(config.LEVEL_DESCRIPTIONS[level]["real_life"])
-    # info_cont.write("##### Benefits and drawbacks")
     df = pd.DataFrame(
         {
             "Benefits": [config.LEVEL_DESCRIPTIONS[level]["benefits"]],
@@ -528,7 +527,6 @@ with st.expander("🏆 Record", expanded=True):
     )
 
     # show as pandas dataframe
-    # st.table(
     st.markdown(
         pd.DataFrame(
             table_data,
@@ -538,10 +536,6 @@ with st.expander("🏆 Record", expanded=True):
                 "Prompt tries",
                 "Secret guesses",
                 "Hint used",
-                # "Used hint 1",
-                # "Used hint 2",
-                # "Used hint 3",
-                # "Used info",
                 "Solved",
                 "Secret",
                 "Mitigation",
@@ -557,12 +551,5 @@ with st.expander("🏆 Record", expanded=True):
     )
 
 # TODOS:
-# - use Gemini-Pro-Flash for supervisor LLM
-# - story telling --> new field of study hard to be 100 percentage save
-# - switch to azure deployment --> currently not working under "GPT-4o"
 # - mark the user input with color in prompt
-# benefits and drawbacks, real world example
 # TODO: https://docs.streamlit.io/develop/api-reference/caching-and-state/st.cache_resource
-# Upgrade to bigger CPU
-
-
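The limit itself is enforced through Streamlit's built-in `max_chars` argument on `st.text_area`, which stops the widget from accepting more characters than configured. Below is a minimal sketch of the pattern, simplified from the app: the per-level loop, layout containers, and submission handling are omitted, the label text is made up for illustration, and `config` is assumed to be this repo's `config.py` with the `MAX_INPUT_CHARS = 600` added in this commit.

```python
import streamlit as st

import config  # assumed: this repo's config.py, now defining MAX_INPUT_CHARS = 600

# The text area refuses input beyond max_chars, so a prompt forwarded to the
# LLM can never exceed the configured limit.
user_prompt = st.text_area(
    "Your prompt",
    height=200,
    placeholder="Your prompt",
    max_chars=config.MAX_INPUT_CHARS,
)

if st.button("Send prompt"):
    st.write(f"Prompt length: {len(user_prompt)} characters")
```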
 
 
 
config.py CHANGED
@@ -1,12 +1,13 @@
 ORQ_DEPLOYMENT_NAME = "llm-security-challenge-demo"
+MAX_INPUT_CHARS = 600
 
 # define which levels are shown and in which order
 LEVELS = [
     "no_mitigation",
     "basic_guardrails",
     "special_characters",
-    "llm_judge_input",
     "chain_of_thought",
+    "llm_judge_input",
     "basic_check_secret_in_output",
     "advanced_check_secret_in_output",
     "llm_judge_output",
]