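"""Gradio demo: send an assertion and a citation to Vectara's hallucination
evaluation model via the Hugging Face Inference API and show a 🔴/🟢 verdict
based on the returned score."""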
import os

import gradio as gr
import requests

# Hugging Face Inference API endpoint for Vectara's hallucination evaluation model.
API_URL = "https://api-inference.huggingface.co/models/vectara/hallucination_evaluation_model"

# Read the access token from the environment so it never ends up in source control.
API_TOKEN = os.getenv("HF_AUTH_TOKEN")
if not API_TOKEN:
    raise ValueError("Please set the HF_AUTH_TOKEN environment variable.")

headers = {"Authorization": f"Bearer {API_TOKEN}"}


def query(payload):
    """POST a JSON payload to the Inference API and return the decoded JSON response."""
    response = requests.post(API_URL, headers=headers, json=payload)
    response.raise_for_status()  # surface HTTP errors (e.g. the model still loading) early
    return response.json()
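
# Example call (illustrative): query({"inputs": "The sky is green. The sky is blue on a clear day."})
# The scoring code below assumes the response is a nested list of {label, score}
# dicts, e.g. [[{"label": "...", "score": 0.12}, ...]]; adjust the indexing if the
# hosted model returns a different shape.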


def evaluate_hallucination(input1, input2):
    """Score the assertion against the citation and map the result to a traffic light."""
    # The model scores a single string, so join the assertion and the citation.
    combined_input = f"{input1}. {input2}"

    output = query({"inputs": combined_input})

    # Take the top score from the first prediction returned by the API.
    score = output[0][0]['score']

    # Scores below 0.5 suggest the assertion is not supported by the citation.
    if score < 0.5:
        return "🔴", "The score is less than 0.5"
    else:
        return "🟢", "The score is 0.5 or higher"


iface = gr.Interface(
    fn=evaluate_hallucination,
    inputs=[gr.Textbox(label="Assertion"), gr.Textbox(label="Citation")],
    outputs=[gr.Label(), gr.Textbox(label="Explanation")],
    live=False,
)

iface.launch()
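
# Usage (assuming this file is saved as app.py; the filename is an assumption):
#   export HF_AUTH_TOKEN=hf_xxx   # a Hugging Face access token with inference access
#   python app.py
# Gradio prints a local URL where the demo is served.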