import json

import gradio as gr
from transformers import pipeline
# Load the question-answering pipeline with the fine-tuned model
qa_pipeline = pipeline("question-answering", model="william4416/bewtesttwo")
# Parse a JSONL file into a list of records
def process_jsonl(file_path):
    with open(file_path, "r", encoding="utf-8") as f:
        # json.loads instead of eval: safe, strict JSON parsing per line
        return [json.loads(line) for line in f if line.strip()]
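
# Each line of the JSONL file is assumed to be a JSON object with a
# "context" field, matching how answer_question reads the records below, e.g.:
# {"context": "UTS is a public university located in Sydney, Australia."}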
# Answer a question against every context in the uploaded JSONL file
def answer_question(context_file, question):
    # Depending on the Gradio version, the File input passes either a
    # filepath string or a tempfile wrapper with a .name attribute
    file_path = context_file if isinstance(context_file, str) else context_file.name
    records = process_jsonl(file_path)
    contexts = [item["context"] for item in records]
    # Run the QA pipeline once per context and collect the answers
    answers = []
    for ctxt in contexts:
        result = qa_pipeline(question=question, context=ctxt)
        answers.append(result["answer"])
    # Join the answers so the output Textbox shows one per line
    return "\n".join(answers)
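
# Quick local check, assuming a file with the schema sketched above
# sits next to this script (hedged example, not part of the app flow):
#   print(answer_question("utsdata.jsonl", "What courses does UTS offer?"))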
# Create the interface
context_input = gr.inputs.File(label="utsdata.jsonl")
question_input = gr.inputs.Textbox(label="Enter your question", lines=3)
output_text = gr.outputs.Textbox(label="Answer")
# Create and launch the interface
gr.Interface(
    fn=answer_question,
    inputs=[context_input, question_input],
    outputs=output_text,
    title="Question Answering with Hugging Face Transformers",
    description="Upload a JSONL file containing contexts and ask a question to get answers.",
).launch()