import justext
import streamlit as st
from lxml import etree
# import streamlit.components.v1 as components
# File Processing pkgs
from PIL import Image
import requests
# import xml.dom.minidom
from bs4 import BeautifulSoup
# import json
import docx2txt
# import textract
from PyPDF2 import PdfFileReader
import pdfplumber
import os
# ---- LOAD ASSETS ----
img_page_icon = Image.open("./olive_webscrapping.jpg")
# Find more emojis here: https://www.webfx.com/tools/emoji-cheat-sheet/
st.set_page_config(page_title="OdiaGenAI ", page_icon=img_page_icon, layout="wide")
# Load CSS file
def load_css(file_path):
    with open(file_path) as f:
        st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
# Load CSS file
# load_css('styles2.css')
# ----- FUNCTIONS ----
# function to check whether the url is a sitemap or not
def check_sitemap(url):
    # Check the URL's ending
    if url.lower().endswith(('sitemap.xml', 'sitemap_index.xml', 'sitemap')):
        try:
            # Parse the content as XML
            response = requests.get(url)
            xml_content = etree.fromstring(response.content)
            # Check for sitemap-specific root elements, ignoring the sitemap XML namespace
            if etree.QName(xml_content).localname in ('urlset', 'sitemapindex'):
                return True
        except etree.XMLSyntaxError:
            pass
    # Additional condition for identifying sitemaps
    if 'sitemap' in url.lower():
        # Perform additional checks specific to the website's structure or naming conventions
        return True
    return False
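
# Note: real-world sitemaps normally declare xmlns="http://www.sitemaps.org/schemas/sitemap/0.9",
# so lxml reports the root tag as "{namespace}urlset" rather than a bare "urlset".
# Comparing etree.QName(...).localname above keeps the check working for both
# namespaced and namespace-free sitemap files.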
# Function to collect page URLs from a sitemap, recursing into nested sitemaps
def extract_urls_from_sitemaps(xml_url):
    # Make a GET request to the URL and parse the XML content
    response = requests.get(xml_url)
    soup = BeautifulSoup(response.text, 'xml')
    extracted_urls = []

    # Check if the sitemap contains nested sitemaps
    sitemap_tags = soup.find_all('sitemap')
    if sitemap_tags:
        # Process nested sitemaps recursively
        for sitemap_tag in sitemap_tags:
            print("sitemap_tag:", sitemap_tag)
            nested_url = sitemap_tag.find('loc').text
            print('nested_url:', nested_url)
            nested_urls = extract_urls_from_sitemaps(nested_url)
            extracted_urls.extend(nested_urls)
    else:
        # Extract URLs from the current sitemap
        loc_tags = soup.find_all('loc')
        for loc_tag in loc_tags:
            # if loc_tag.parent.name != 'image':
            url = loc_tag.text
            if url.endswith('.pdf') or url.endswith('.jpg') or url.endswith('.jpeg'):
                print(f"url skipped because it is a {url.split('.')[-1]}")
            else:
                print('url:', url)
                extracted_urls.append(url)

    return extracted_urls
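
# For reference, the two sitemap shapes handled above (per the sitemaps.org protocol):
# a sitemap index, whose root is <sitemapindex> and whose <sitemap><loc> children point
# at further sitemap files (the recursive branch), and a regular sitemap, whose root is
# <urlset> and whose <url><loc> children are the page URLs collected here.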
# function to check whether the entered url is valid
def valid_url(url):
    try:
        # Make a GET request to the URL and check that it responds successfully
        response = requests.get(url)
        if response.status_code == 200:
            return True
        return False
    except requests.exceptions.RequestException:
        return False
# function to create a custom stoplist for justext
def custom_stoplist():
    odia_stopwords = [
"ଏହି", "ଏକ", "ଏକାଉଣଟ", "ମୁଁ", "ମୋର", "ମୁଁ ନିଜେ", "ଆମେ", "ଆମର", "ଆମର", "ଆମେ ନିଜେ", "ତୁମେ", "ତୁମର", "ତୁମର",
"ନିଜେ", "ନିଜେ", "ସେ", "ତାଙ୍କୁ", "ତାଙ୍କର",
"ନିଜେ", "ସେ", "ତାଙ୍କୁ", "ତାଙ୍କର", "ନିଜେ", "ଏହା", "ଏହାର", "ନିଜେ |", "ସେମାନେ", "ସେଗୁଡିକ", "ସେମାନଙ୍କର",
"ସେମାନଙ୍କର", "ନିଜେ |", "କଣ", "ଯାହା", "କିଏ", "କାହାକୁ",
"ଏହା", "ତାହା", "ଏଗୁଡ଼ିକ", "ସେଗୁଡ଼ିକ", "ମୁଁ", "ହେଉଛି", "ହେଉଛି |", "ଥିଲା", "ଥିଲା |", "ହୁଅ", "ହୋଇସାରିଛି |", "ହେବା",
"ଅଛି", "ଅଛି", "ଥିଲା", "ଅଛି", "କର", "କରେ |",
"କରିଛନ୍ତି", "କରିବା", "ଏବଂ", "କିନ୍ତୁ", "ଯଦି", "କିମ୍ବା", "କାରଣ", "ଯେପରି", "ପର୍ଯ୍ୟନ୍ତ", "ଯେତେବେଳେ", "ର", "ପାଇଁ",
"ସହିତ", "ବିଷୟରେ", "ବିପକ୍ଷରେ", "ମଧ୍ୟରେ", "ଭିତରକୁ", "ମାଧ୍ୟମରେ",
"ସମୟରେ", "ପୂର୍ବରୁ", "ପରେ", "ଉପରେ", "ନିମ୍ନରେ |", "କୁ", "ଠାରୁ", "ଅପ୍", "ତଳକୁ", "ଭିତରେ", "ବାହାରେ", "ଉପରେ", "ବନ୍ଦ",
"ସମାପ୍ତ", "ତଳେ |", "ପୁନର୍ବାର", "ଆଗକୁ",
"ତାପରେ", "ଥରେ |", "ଏଠାରେ", "ସେଠାରେ", "କେବେ", "କେଉଁଠାରେ", "କିପରି", "ସମସ୍ତ", "ଉଭୟ", "ପ୍ରତ୍ୟେକ", "ଅଳ୍ପ", "ଅଧିକ",
"ଅଧିକାଂଶ", "ଅନ୍ୟ", "କେତେକ", "ଏହିପରି",
"ନୁହେଁ |", "କେବଳ", "ନିଜର", "ସମାନ", "ତେଣୁ", "ଅପେକ୍ଷା", "ମଧ୍ୟ", "ବହୁତ", "କରିପାରିବେ |", "ଇଚ୍ଛା", "କେବଳ",
"କରିବା ଉଚିତ", "ବର୍ତ୍ତମାନ"
    ]
    return frozenset(odia_stopwords)
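
# jusText classifies paragraphs as content or boilerplate partly by their stopword density,
# so it needs a stoplist for the page language. The hand-built Odia list above is passed in
# because the stoplists bundled with jusText do not appear to cover Odia.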
# function to extract data from url using justext
def extract_data_from_url_(url):
    response = requests.get(url)
    response.raise_for_status()
    page = response.content
    data_url = ""
    para = ""
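    # The positional arguments below follow justext.justext(html_text, stoplist, length_low,
    # length_high, stopwords_low, stopwords_high, max_link_density, max_heading_distance,
    # no_headings), i.e. length_low=70, length_high=140, stopwords_low=0.0, stopwords_high=0.02,
    # max_link_density=0.5, max_heading_distance=150, no_headings=False. The near-zero stopword
    # thresholds loosen the stopword-density test, which suits the custom Odia stoplist above.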
    paragraphs = justext.justext(page, custom_stoplist(), 70, 140, 0.0, 0.02, 0.5, 150, False)
    for paragraph in paragraphs:
        if not paragraph.is_boilerplate:
            para = para + '\n' + paragraph.text

    data_url = ('\n\nFrom url:' + url + '\n' + para + '\n')
    return data_url
sitemap_data = ""
# Function to get the number of pages of a PDF using PyPDF2 (text extraction is commented out)
def read_pdf(file):
    pdfReader = PdfFileReader(file)
    count = pdfReader.numPages
    # all_page_text = ""
    # for i in range(count):
    #     page = pdfReader.getPage(i)
    #     all_page_text += page.extractText()
    #
    # return all_page_text
    return count
# Function executed when the Enter button is pressed
def run_function(url, documents):
    data = ""
    # Check if the user has provided a URL
    if url:
        if valid_url(url):
            data = extract_data_from_url_(url)
            st.text_area("Extracted Text", value=data, height=200)
            # Return the extraction status and the extracted data
            return True, data
        else:
            return False, data
    # Check if the user has provided documents
    elif documents is not None:
        for document in documents:
            document_details = {
                "filename": document.name,
                "filetype": document.type,
                "filesize": document.size
            }
            st.write(document_details)

            # Extract content from the txt file
            if document.type == "text/plain":
                # Read the uploaded bytes and decode them as UTF-8
                data += str(document.read(), "utf-8")
            # Extract content from the pdf file
            elif document.type == "application/pdf":
                # using PyPDF2
                # data += read_pdf(document)
                # using pdfplumber
                try:
                    with pdfplumber.open(document) as pdf:
                        all_text = ""
                        for page in pdf.pages:
                            # extract_text() may return None for image-only pages
                            text = page.extract_text()
                            all_text += (text or "") + "\n"
                        data += all_text
                except Exception:
                    st.write("None")
            # Extract content from the docx file
            elif document.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
                data += docx2txt.process(document)

        # Display the text content extracted from the files
        st.write("attached")
        st.text_area("Extracted Text", value=data, height=200)
        # Return the extraction status and the extracted data
        return True, data
    else:
        st.error("Error: An error occurred while fetching content.")
        # Return the extraction status and the extracted data
        return False, data
def main():
    # ---- HEADER SECTION ----
    with st.container():
        st.subheader("Hi!! :wave:")
        st.write("##")
        st.markdown("<h5 class='text'>OdiaGenAI is a collaborative initiative that conducts research on </h5>",
                    unsafe_allow_html=True)
        st.markdown("<h5>Generative AI and LLM for the Odia Language.</h5>", unsafe_allow_html=True)
        # st.title("Odia Generative AI")
        st.markdown("<h1 class='title'>Odia Generative AI</h1>", unsafe_allow_html=True)

    # ---- BODY SECTION ----
    with st.container():
        st.subheader("Collecting monolingual data (Odia or any Indic Languages)")

        # Divide the body section into 3 columns: URL input, file uploader, and Enter button
        col1, col2, col3 = st.columns([0.6, 0.2, 0.2])

        # URL / sitemap XML input
        with col1:
            url_or_xml = st.text_input(label='', placeholder="Enter URL")
            is_a_sitemap = check_sitemap(url_or_xml)

        # Attached files
        with col2:
            documents = st.file_uploader("", type=["pdf", "txt", "docx"], accept_multiple_files=True)
            if not documents:
                documents = None
            else:
                for doc in documents:
                    if doc.name.split(".")[-1].lower() not in ["pdf", "txt", "docx"]:
                        # The document is not a supported file type
                        st.error("Unsupported file: " + doc.name)
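
        # Streamlit reruns this script from top to bottom on every widget interaction, so
        # plain local variables do not survive a button click. st.session_state is the
        # persistent store used below to remember that Enter was pressed and that data has
        # already been extracted across those reruns.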
        # Initialize the state of the Enter button
        with col3:
            st.write('##')
            if "button_enter" not in st.session_state:
                st.session_state.button_enter = False
            if st.button("Enter"):
                st.session_state.button_enter = True
                # st.write("session state true")

        if "extracted" not in st.session_state:
            st.session_state.extracted = False
        data = ""
        # Handle the Enter button
        if st.session_state.button_enter:
            # Check whether the input is a sitemap or a plain URL
            if is_a_sitemap:
                if "Initial" not in st.session_state:
                    st.session_state.Initial = True
                # Check whether this is the initial run after pressing Enter
                if st.session_state.Initial:
                    # print("\n\n\n\n1)Initial State", st.session_state.Initial, "\n\n\n\n\n")
                    xml = url_or_xml
                    st.write("It is a sitemap")
                    stored_sitemap_urls = extract_urls_from_sitemaps(xml)
                    print('\nno. of urls: ', len(stored_sitemap_urls))

                    if stored_sitemap_urls:
                        print(stored_sitemap_urls)
                        for sitemap_url in stored_sitemap_urls:
                            if valid_url(sitemap_url):
                                print(sitemap_url)
                                # Use justext to extract data from each page
                                data = data + extract_data_from_url_(sitemap_url)
                            else:
                                st.error("Couldn't extract data from " + sitemap_url)
                        if "sitemap_data" not in st.session_state:
                            st.session_state.sitemap_data = data
                        # print("\n\n\nst.session.data ", st.session_state.sitemap_data)
                        # print("\n\n\n\nRUNNING \n\n\n\n")
                        st.session_state.Initial = False
                        print("\n\n\n\n2)Initial State", st.session_state.Initial, "\n\n\n\n\n")
                        st.session_state.extracted = True
                        # st.text_area("Extracted Text", value=st.session_state.sitemap_data, height=300)
                    else:
                        st.error("Error: Invalid sitemap.")
            else:
                url = url_or_xml
                st.session_state.extracted, data = run_function(url, documents)
            if st.session_state.extracted:
                if is_a_sitemap:
                    st.text_area("Extracted Text", value=st.session_state.sitemap_data, height=300)

                col1, col2 = st.columns([0.5, 0.5])
                with col1:
                    saved_button = False
                    if st.button("Save", key="b_save"):
                        file_name = "output.txt"
                        # Define the folder path
                        folder_path = "extracted data"
                        # Create the folder if it doesn't exist
                        os.makedirs(folder_path, exist_ok=True)
                        # Define the file path
                        file_path = os.path.join(folder_path, file_name)
                        if is_a_sitemap:
                            saved_data = st.session_state.sitemap_data
                            # Save the sitemap data to the file
                            with open(file_path, "w", encoding="utf-8") as file:
                                file.write(saved_data)
                        else:
                            with open(file_path, "w", encoding="utf-8") as file:
                                file.write(data)
                        saved_button = True
                with col2:
                    if st.button("Clear"):
                        st.session_state.button_enter = False
                        st.session_state.Initial = True
                        st.session_state.extracted = False
                        if 'sitemap_data' in st.session_state:
                            del st.session_state['sitemap_data']
                        st.session_state.button_enter = False
                        st.experimental_rerun()
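                        # st.experimental_rerun() restarts the script from the top, so the
                        # cleared session_state takes effect and the widgets render fresh.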
                if saved_button:
                    # Confirmation message
                    st.success(f"File saved as {file_name} in the '{folder_path}' folder.")
            else:
                st.warning("Data not extracted")
if __name__ == "__main__":
    main()