|
|
|
|
|
|
|
|
|
|
|
|
|
import requests
|
|
import pandas as pd
|
|
from io import BytesIO
|
|
from bs4 import BeautifulSoup
|
|
|
|
|
|
# Page listing visa processing times and decision files for the New Delhi embassy.
url = "https://www.ireland.ie/en/india/newdelhi/services/visas/processing-times-and-decisions/"

# Browser-like User-Agent: the site refuses requests that look like bots.
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
    )
}

# Link text that identifies the 2024 visa-decisions spreadsheet on the page.
DECISIONS_LINK_TEXT = "Visa decisions made from 1 January 2024 to"

# Fail fast instead of hanging forever on a stalled connection.
REQUEST_TIMEOUT = 30  # seconds


def find_decisions_link(soup):
    """Return the href of the visa-decisions file link, or None if absent.

    Scans every anchor on the parsed page for the known link text
    (DECISIONS_LINK_TEXT) and returns the first match's href attribute.
    """
    for link in soup.find_all('a'):
        if DECISIONS_LINK_TEXT in link.get_text(strip=True):
            return link.get('href')
    return None


def clean_decisions_frame(df):
    """Normalize the raw .ods DataFrame into two columns.

    The spreadsheet has two empty leading columns and a preamble above the
    real header row ("Application Number" / "Decision"). This drops the
    empty columns, locates the header row, keeps only the data rows below
    it, renames the columns, and removes fully-empty rows.

    Raises IndexError if no "Application Number" header row is found.
    """
    df = df.drop(columns=["Unnamed: 0", "Unnamed: 1"])
    # First row whose third column holds the literal header text.
    header_row_index = df[df['Unnamed: 2'] == 'Application Number'].index[0]
    # Keep only rows below the header; the columns are known, so name
    # them directly instead of copying the header row in first.
    df = df[header_row_index + 1:].reset_index(drop=True)
    df.columns = ['Application Number', 'Decision']
    return df.dropna(how='all')


def main():
    """Fetch the decisions page, download the .ods file, and print the data."""
    response = requests.get(url, headers=headers, timeout=REQUEST_TIMEOUT)
    if response.status_code != 200:
        print(f"Failed to retrieve the webpage. Status code: {response.status_code}")
        return

    soup = BeautifulSoup(response.content, 'html.parser')
    file_url = find_decisions_link(soup)
    if not file_url:
        print("The specified link was not found.")
        return

    # Resolve relative hrefs against the page URL.
    if not file_url.startswith('http'):
        file_url = requests.compat.urljoin(url, file_url)

    file_response = requests.get(file_url, headers=headers, timeout=REQUEST_TIMEOUT)
    if file_response.status_code != 200:
        print("Failed to download the file. Status code:", file_response.status_code)
        return

    ods_file = BytesIO(file_response.content)
    try:
        # .ods spreadsheets need the 'odf' engine (odfpy).
        df = pd.read_excel(ods_file, engine='odf')
        df = clean_decisions_frame(df)
    except Exception as e:
        # Boundary handler: report and stop rather than crash the script.
        print("Error reading the .ods file:", e)
        return

    # Previously the cleaned frame was silently discarded; surface it.
    print(df)


if __name__ == "__main__":
    main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|