profoz committed on
Commit 137066c · 1 Parent(s): c6e5e12

initial commit

Files changed (4)
  1. .gitignore +2 -0
  2. README.md +4 -4
  3. app.py +106 -0
  4. requirements.txt +8 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ .DS_Store
+ .streamlit/
README.md CHANGED
@@ -1,8 +1,8 @@
  ---
- title: Index Demo
- emoji: πŸƒ
- colorFrom: pink
- colorTo: green
+ title: Indexing Demo
+ emoji: 📈
+ colorFrom: green
+ colorTo: pink
  sdk: streamlit
  sdk_version: 1.10.0
  app_file: app.py
app.py ADDED
@@ -0,0 +1,106 @@
+ import streamlit as st
+ from transformers import pipeline
+ from sentence_transformers import CrossEncoder
+ import requests
+ from bs4 import BeautifulSoup
+ from functools import reduce
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelWithLMHead, pipeline
+ import openai
+
+ all_documents = {}
+
+ def qa_gpt3(question, context):
+     openai.api_key = st.secrets["openai_key"]
+
+     response = openai.Completion.create(
+         model="text-davinci-002",
+         prompt=f"Given this context, answer a question. If you cannot find an answer say \"Unknown\".\n\nContext: {context}\n\nQuestion: {question}",
+         temperature=0.7,
+         max_tokens=256,
+         top_p=1,
+         frequency_penalty=0,
+         presence_penalty=0
+     )
+     print(response)
+     return {'answer': response['choices'][0]['text'].strip()}
+
+ st.title('Document Question Answering System')
+
+ qa_model = None
+
+ crawl_urls = st.checkbox('Crawl?', value=False)
+
+ document_text = st.text_area(
+     label="Links (Comma separated)", height=100,
+     value='https://www.databricks.com/blog/2022/11/15/values-define-databricks-culture.html, https://databricks.com/product/databricks-runtime-for-machine-learning/faq'
+ )
+ query = st.text_input("Query")
+
+ qa_option = st.selectbox('Q/A Answerer', ('gpt3', 'a-ware/bart-squadv2'))
+
+ if qa_option == 'gpt3':
+     qa_model = qa_gpt3
+ else:
+     qa_model = pipeline("question-answering", qa_option)
+ st.write(f'Using {qa_option} as the Q/A model')
+
+ encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
+
+ def get_relevant_passage(question, documents):
+     query_paragraph_list = [(question, para) for para in list(documents.keys()) if len(para.strip()) > 0]
+
+     scores = encoder.predict(query_paragraph_list)
+     top_5_indices = scores.argsort()[-5:]
+     top_5_query_paragraph_list = [query_paragraph_list[i] for i in top_5_indices]
+     top_5_query_paragraph_list.reverse()
+     return top_5_query_paragraph_list[0][1]
+
+
+ def answer_question(query, context):
+     answer = qa_model(question=query, context=context)['answer']
+     return answer
+
+
+ def get_documents(document_text, crawl=crawl_urls):
+     urls = document_text.split(',')
+     for url in urls:
+         st.write(f'Crawling {url}')
+         if url in set(all_documents.values()):
+             continue
+         html = requests.get(url).text
+         soup = BeautifulSoup(html, 'html.parser')
+
+         if crawl:
+             st.write('Give me a sec, crawling..')
+             import re
+
+             more_urls = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', html)
+             more_urls = list(set([m for m in more_urls if m[-4] != '.' and m[-3] != '.' and m.split('/')[:3] == url.split('/')[:3]]))
+             for more_url in more_urls:
+                 all_documents.update(get_documents(more_url, crawl=False))
+
+         body = soup.get_text()
+
+         document_paragraphs = [body]  # TODO change this to tokenize docs differently
+         for document_paragraph in document_paragraphs:
+             all_documents[document_paragraph] = url
+
+     return all_documents
+
+
+ if len(document_text.strip()) > 0 and len(query.strip()) > 0 and qa_model and encoder:
+     st.write('Hmmm let me think about that..')
+     document_text = document_text.strip()
+     documents = get_documents(document_text)
+     st.write(f'I am looking through {len(set(documents.values()))} sites')
+
+     query = query.strip()
+     context = get_relevant_passage(query, documents)
+     answer = answer_question(query, context)
+
+     relevant_url = documents[context]
+
+     st.write('Check the answer below...with reference text')
+     st.header("ANSWER: " + answer)
+     st.subheader("REFERENCE: " + context)
+     st.subheader("REFERENCE URL: " + relevant_url)
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ numpy
+ torch
+ transformers
+ sentence-transformers
+ streamlit
+ openai
+ bs4
+ requests
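
For reference, the passage-selection step in app.py reduces to scoring (query, paragraph) pairs with the cross-encoder and keeping the highest-scoring paragraph. A minimal standalone sketch of that step, assuming the same model the app loads; the query and paragraphs below are made-up placeholders, not part of the commit:

    from sentence_transformers import CrossEncoder

    # Same reranking model that app.py loads.
    encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')

    # Placeholder query and candidate paragraphs (illustrative only).
    query = "What does Databricks Runtime for Machine Learning include?"
    paragraphs = [
        "Databricks Runtime for Machine Learning bundles popular ML libraries.",
        "Our culture is defined by a shared set of values.",
    ]

    # Score every (query, paragraph) pair; higher score means more relevant,
    # mirroring get_relevant_passage in app.py.
    scores = encoder.predict([(query, p) for p in paragraphs])
    best_paragraph = paragraphs[scores.argmax()]
    print(best_paragraph)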