import requests
from bs4 import BeautifulSoup
from io import BytesIO
import streamlit as st

url = "https://www.ireland.ie/en/india/newdelhi/services/visas/processing-times-and-decisions/"

# Browser-like User-Agent so the site does not reject the scripted request.
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
    )
}


@st.cache_data(ttl=3600, max_entries=1)
def load_data_file():
    """Scrape the visa decisions page and return the decisions file as an in-memory buffer."""
    response = requests.get(url, headers=headers, timeout=30)
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, 'html.parser')
        links = soup.find_all('a')

        # Find the anchor whose text announces the current visa decisions file.
        file_url = None
        file_name = None
        for link in links:
            link_text = link.get_text(strip=True)
            if "Visa decisions made from 1 January 2024 to" in link_text:
                file_url = link.get('href')
                file_name = link_text
                break

        if file_url:
            # Resolve relative links against the page URL before downloading.
            if not file_url.startswith('http'):
                file_url = requests.compat.urljoin(url, file_url)

            file_response = requests.get(file_url, headers=headers, timeout=30)
            if file_response.status_code == 200:
                return BytesIO(file_response.content), file_name
            else:
                st.error(f"Failed to download the file. Status code: {file_response.status_code}")
        else:
            st.error("Could not find the visa decisions file link on the page.")
    else:
        st.error(f"Failed to retrieve the webpage. Status code: {response.status_code}")
    return None, None
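
# --- Hypothetical usage sketch (not part of the scraper above) ---
# A minimal example of how load_data_file() might be consumed downstream,
# assuming the downloaded file is a spreadsheet that pandas can read.
# The read_excel call and the display logic below are assumptions about the
# file's format, not something the page or the function above guarantees.
import pandas as pd

file_bytes, file_name = load_data_file()
if file_bytes is not None:
    st.subheader(file_name)
    # Reading may need an extra engine (openpyxl for .xlsx, odfpy for .ods).
    decisions_df = pd.read_excel(file_bytes)
    st.dataframe(decisions_df)
else:
    st.stop()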
|