import json

from jinja2 import Template

with open("prompt_templates.json", mode="r", encoding="utf-8") as f:
    prompt_templates = json.load(f)


def assessment_prompt(content):
    """Wrap rendered prompt text as a user message for assessment."""
    return {"role": "user", "content": content}


def get_prompt_template(test_case, sub_catalog_name):
    """Look up the prompt template for a test case within a sub-catalog."""
    test_case_name = test_case["name"]
    if sub_catalog_name == "harmful_content_in_user_message":
        template_type = "prompt"
    elif sub_catalog_name == "harmful_content_in_assistant_message":
        template_type = "prompt_response"
    elif sub_catalog_name == "rag_hallucination_risks":
        # RAG risks use a dedicated template per test case.
        template_type = test_case_name
    else:
        raise ValueError(f"Unknown sub-catalog: {sub_catalog_name}")
    return prompt_templates[f"{test_case_name}>{template_type}"]


def get_prompt_from_test_case(test_case, sub_catalog_name):
    """Render the template with the test case fields and wrap it as a prompt."""
    template = Template(get_prompt_template(test_case, sub_catalog_name))
    return assessment_prompt(template.render(**test_case))


def get_evaluated_component(sub_catalog_name, criteria_name):
    """Return the display name of the message component being evaluated."""
    if sub_catalog_name == "harmful_content_in_user_message":
        component = "user"
    elif sub_catalog_name == "harmful_content_in_assistant_message":
        component = "assistant"
    elif sub_catalog_name == "rag_hallucination_risks":
        if criteria_name == "context_relevance":
            component = "context"
        elif criteria_name in ("groundedness", "answer_relevance"):
            component = "assistant"
        else:
            raise ValueError(f"Unknown criteria: {criteria_name}")
    else:
        raise ValueError(f"Unknown sub-catalog: {sub_catalog_name}")
    return f"'{to_title_case(component)} message'"


def get_evaluated_component_adjective(sub_catalog_name, criteria_name):
    """Describe how the evaluated component can fail for the given criteria.

    sub_catalog_name is accepted for interface symmetry with the other
    helpers but is not needed to pick the adjective.
    """
    if criteria_name in ("context_relevance", "answer_relevance"):
        return "irrelevant based on the definition"
    return "harmful based on the risk definition"


def to_title_case(input_string):
    """Convert a snake_case identifier to Title Case, special-casing RAG."""
    if input_string == "rag_hallucination_risks":
        return "RAG Hallucination Risks"
    return " ".join(word.capitalize() for word in input_string.split("_"))


def to_snake_case(text):
    """Convert a human-readable string to snake_case."""
    return text.lower().replace(" ", "_")
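
# --- Usage sketch (illustrative only) ---
# A minimal example of wiring these helpers together. The test case dict
# below and the implied template key "harm>prompt" are assumptions for
# illustration; real keys depend on the contents of prompt_templates.json.
if __name__ == "__main__":
    example_test_case = {"name": "harm", "user_message": "example text"}  # hypothetical
    prompt = get_prompt_from_test_case(
        example_test_case, "harmful_content_in_user_message"
    )
    print(prompt)  # {"role": "user", "content": <rendered template text>}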