Commit 37449d4 (parent: 711dd89)
Martín Santillán Cooper committed
Update labels

Files changed:
- app.py +4 -4
- catalog.json +2 -2
app.py
CHANGED

@@ -93,7 +93,7 @@ with gr.Blocks(
     with gr.Blocks(elem_classes="roott"):

         state = gr.State(value={
-            'selected_sub_catalog': '…
+            'selected_sub_catalog': 'harmful_content_in_user_prompt',
             'selected_criteria_name': 'general_harm'
         })

@@ -102,7 +102,7 @@ with gr.Blocks(
         with gr.Row(elem_classes='title-row'):
             with gr.Column(scale=4):
                 gr.HTML('<h2>IBM Granite Guardian 3.0</h2>', elem_classes='title')
-                gr.HTML(elem_classes='system-description', value='<p>Granite Guardian models are specialized language models in the Granite family that allow you to detect harms and risks in generative AI systems. The Granite Guardian models can be used with any other large language models to make interactions with generative AI systems safe. Select an example in the left panel to see how the model evaluates harms and risks in user…
+                gr.HTML(elem_classes='system-description', value='<p>Granite Guardian models are specialized language models in the Granite family that allow you to detect harms and risks in generative AI systems. The Granite Guardian models can be used with any other large language models to make interactions with generative AI systems safe. Select an example in the left panel to see how the model evaluates harms and risks in user prompts, assistant messages, and for hallucinations in retrieval-augmented generation. In this demo, we use granite-guardian-3.0-8B.</p>')
         with gr.Row(elem_classes='column-gap'):
             with gr.Column(scale=0, elem_classes='no-gap'):
                 title_display_left = gr.HTML("<h2>Harms & Risks</h2>", elem_classes=['subtitle', 'subtitle-harms'])
@@ -132,8 +132,8 @@ with gr.Blocks(
                 criteria = gr.Textbox(label="Evaluation Criteria", lines=3, interactive=False, value=starting_test_case['criteria'], elem_classes=['read-only', 'input-box', 'margin-bottom'])
                 gr.HTML(elem_classes=['block', 'content-gap'])
                 context = gr.Textbox(label="Context", lines=3, interactive=True, value=starting_test_case['context'], visible=False, elem_classes=['input-box'])
-                user_message = gr.Textbox(label="User …
-                assistant_message = gr.Textbox(label="Assistant …
+                user_message = gr.Textbox(label="User Prompt", lines=3, interactive=True, value=starting_test_case['user_message'], elem_classes=['input-box'])
+                assistant_message = gr.Textbox(label="Assistant Response", lines=3, interactive=True, visible=False, value=starting_test_case['assistant_message'], elem_classes=['input-box'])

                 submit_button = gr.Button("Evaluate", variant='primary', icon=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'send-white.png'), elem_classes='submit-button')
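The renamed labels track which sub-catalog is active: the assistant response box starts hidden (visible=False above) and presumably only appears for criteria that target assistant messages. Below is a minimal, hypothetical sketch of how a selection handler could toggle that visibility with Gradio's update mechanism; the handler name and the wiring comment are assumptions, not code from app.py, and only the key names come from this commit.

import gradio as gr

def on_sub_catalog_selected(state):
    # Assumption: the assistant response Textbox is shown only when the
    # selected sub-catalog targets assistant messages (key from catalog.json).
    show_assistant = state['selected_sub_catalog'] == 'harmful_content_in_assistant_response'
    return gr.update(visible=show_assistant)

# Hypothetical wiring: whatever selector updates the state would also drive
# the Textbox visibility, e.g.
# selector.change(on_sub_catalog_selected, inputs=[state], outputs=[assistant_message])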
catalog.json
CHANGED

@@ -1,5 +1,5 @@
 {
-    "…
+    "harmful_content_in_user_prompt": [
         {
             "name": "general_harm",
             "criteria": "The 'User' message is harmful by common-sense.",
@@ -43,7 +43,7 @@
             "context": null
         }
     ],
-    "…
+    "harmful_content_in_assistant_response": [
         {
             "name": "general_harm",
             "criteria": "The 'Assistant' message is harmful by common-sense.",
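For reference, a minimal sketch of how the renamed catalog keys and the initial gr.State value from this commit could resolve to the starting_test_case that prefills the Textboxes in app.py. The loading and lookup code is an assumption; only the key and field names ('harmful_content_in_user_prompt', 'general_harm', 'criteria', 'user_message') come from the diffs above.

import json

# Assumption: catalog.json sits next to app.py and maps each sub-catalog name
# to a list of test cases with the fields shown in the diff.
with open('catalog.json') as f:
    catalog = json.load(f)

# Mirrors the initial gr.State value set in this commit.
selected_sub_catalog = 'harmful_content_in_user_prompt'
selected_criteria_name = 'general_harm'

# Pick the first test case whose name matches the selected criteria.
starting_test_case = next(
    case for case in catalog[selected_sub_catalog]
    if case['name'] == selected_criteria_name
)

# These fields feed the gr.Textbox components defined in app.py.
print(starting_test_case['criteria'])
print(starting_test_case['user_message'])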