william4416 committed on
Commit
7ca4681
·
verified ·
1 Parent(s): 9dff9cc

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -0
app.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import json

import gradio as gr
import jsonl
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline

# Load the question-answering model once at module import time (first run
# downloads the checkpoint from the Hugging Face Hub).
qa_pipeline = pipeline(task="question-answering", model="william4416/bewtesttwo")

# Parse a JSONL file (one JSON object per line) into a list of records.
def process_jsonl(file_path):
    """Read *file_path* as UTF-8 JSONL and return a list of parsed objects.

    Blank lines are skipped. Uses ``json.loads`` rather than ``eval``:
    evaluating lines of a user-uploaded file executes arbitrary code.

    Raises ``json.JSONDecodeError`` on a malformed line and ``OSError`` if
    the file cannot be opened.
    """
    with open(file_path, "r", encoding="utf-8") as f:
        # Iterate the handle directly instead of readlines(): no need to
        # materialize the whole file in memory first.
        return [json.loads(line) for line in f if line.strip()]

# Answer a question against every context record supplied by the UI.
def answer_question(context, question):
    """Run the QA pipeline over each record's "context" field.

    Parameters:
        context: either an already-parsed list of dicts (each with a
            "context" key), or the value produced by the Gradio ``File``
            input — a file object (with a ``.name`` temp path) or a path
            string pointing at a JSONL upload.
        question: the question string typed by the user.

    Returns a list with one answer string per context record.
    """
    # Bug fix: the original iterated the raw File value as if it were the
    # parsed list, so item["context"] failed on uploads. Parse the JSONL
    # here when we were not handed a list.
    # NOTE(review): Gradio file components expose the temp path via .name —
    # confirm against the pinned gradio version.
    if not isinstance(context, list):
        file_path = getattr(context, "name", context)
        context = process_jsonl(file_path)

    answers = []
    for ctxt in (item["context"] for item in context):
        result = qa_pipeline(question=question, context=ctxt)
        answers.append(result["answer"])
    return answers

# Create the interface components: a JSONL upload, a question box, and an
# answer box.
# NOTE(review): gr.inputs / gr.outputs is the pre-3.0 Gradio namespace and
# was removed in Gradio 3.x (which uses gr.File / gr.Textbox directly) —
# confirm the installed gradio version still provides it.
context_input = gr.inputs.File(label="utsdata.jsonl")
question_input = gr.inputs.Textbox(label="Enter your question", lines=3)
output_text = gr.outputs.Textbox(label="Answer")

# Assemble the web UI and start the Gradio server (blocks until stopped).
demo = gr.Interface(
    fn=answer_question,
    inputs=[context_input, question_input],
    outputs=output_text,
    title="Question Answering with Hugging Face Transformers",
    description="Upload a JSONL file containing contexts and ask a question to get answers.",
)
demo.launch()