import json  # stdlib: each JSONL line is a plain JSON object (there is no stdlib "jsonl" module)

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the extractive question-answering model once at import time so every
# request reuses the same pipeline instance.
qa_pipeline = pipeline("question-answering", model="william4416/bewtesttwo")

# Parse a JSONL file into a list of records.
def process_jsonl(file_path):
    """Parse a JSONL file (one JSON object per line) into a list.

    Args:
        file_path: path to a UTF-8 encoded JSONL file.

    Returns:
        List of parsed objects, in file order; blank lines are skipped.

    Raises:
        json.JSONDecodeError: if a non-blank line is not valid JSON.
    """
    import json  # local import keeps this fix self-contained

    records = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:  # stream the file; no need to materialize readlines()
            line = line.strip()
            if line:  # tolerate trailing newline / blank lines
                # json.loads, never eval(): eval executes arbitrary code
                # from the uploaded file.
                records.append(json.loads(line))
    return records

# Answer a question against every context in the uploaded JSONL file.
def answer_question(context, question):
    """Run extractive QA over each context record in an uploaded JSONL file.

    Args:
        context: the uploaded file. Gradio's File component passes an object
            exposing a ``.name`` path (or a plain path string) — NOT a parsed
            list of dicts, which is what the original code assumed.
        question: the user's question as a string.

    Returns:
        str: one answer per context record, newline-joined so it renders
        in the single Textbox output.
    """
    import json  # local import keeps this fix self-contained

    # Gradio File inputs arrive as a tempfile-like object; fall back to
    # treating `context` as a raw path string.
    path = getattr(context, "name", context)
    answers = []
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            record = json.loads(line)  # safe JSONL parse (no eval)
            result = qa_pipeline(question=question, context=record["context"])
            answers.append(result["answer"])
    return "\n".join(answers)

# Build the Gradio UI. The gr.inputs / gr.outputs namespaces were removed in
# Gradio 3.x; component classes now live at the top level (gr.File, gr.Textbox).
context_input = gr.File(label="utsdata.jsonl")
question_input = gr.Textbox(label="Enter your question", lines=3)
output_text = gr.Textbox(label="Answer")

demo = gr.Interface(
    fn=answer_question,
    inputs=[context_input, question_input],
    outputs=output_text,
    title="Question Answering with Hugging Face Transformers",
    description="Upload a JSONL file containing contexts and ask a question to get answers.",
)

if __name__ == "__main__":  # only launch the server when run as a script
    demo.launch()