#!/usr/bin/env python
# coding: utf-8

# In[4]:


import requests
import pandas as pd
from io import BytesIO
from bs4 import BeautifulSoup

# URL of the website to scrape
url = "https://www.ireland.ie/en/india/newdelhi/services/visas/processing-times-and-decisions/"

# Headers to mimic a browser request
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
    )
}

# Send an HTTP GET request to the website
response = requests.get(url, headers=headers)

# Check if the request was successful
if response.status_code == 200:
    soup = BeautifulSoup(response.content, 'html.parser')
    
    # Find all anchor tags
    links = soup.find_all('a')
    
    # Search for the link that contains the specific text
    file_url = None
    for link in links:
        link_text = link.get_text(strip=True)
        if "Visa decisions made from 1 January 2024 to" in link_text:
            file_url = link.get('href')
            break
    
    if file_url:
        # Make the link absolute if it's relative
        if not file_url.startswith('http'):
            file_url = requests.compat.urljoin(url, file_url)
        
        # print(f"Found link: {file_url}")  # uncomment to log the resolved file URL
        
        # Download the file into memory
        file_response = requests.get(file_url, headers=headers)
        
        if file_response.status_code == 200:
            ods_file = BytesIO(file_response.content)
            
            # Read the .ods file into a DataFrame
            # (the 'odf' engine requires the odfpy package to be installed)
            try:
                df = pd.read_excel(ods_file, engine='odf')
                
                # Step 1: Drop the unnecessary leading columns ("Unnamed: 0" and "Unnamed: 1");
                # errors='ignore' keeps the script working if the file layout changes
                df = df.drop(columns=["Unnamed: 0", "Unnamed: 1"], errors='ignore')
                
                # Step 2: Locate the row that holds the real header ("Application Number")
                header_row_index = df[df['Unnamed: 2'] == 'Application Number'].index[0]
                
                # Step 3: Set new headers and skip the rows before actual data
                df.columns = df.iloc[header_row_index]
                df = df[header_row_index + 1:].reset_index(drop=True)
                
                # Step 4: Rename the columns for clarity
                df.columns = ['Application Number', 'Decision']
                
                # Step 5: Drop any rows with all NaN values (optional cleanup)
                df = df.dropna(how='all')
                
                # The cleaned DataFrame (df) now holds one row per application,
                # with 'Application Number' and 'Decision' columns
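                # Illustrative sketch (not part of the original script): one plausible
                # next step is to look up a single application number in the cleaned
                # table. APPLICATION_NUMBER below is a placeholder, not a real value.
                APPLICATION_NUMBER = "12345678"
                match = df[df['Application Number'].astype(str) == APPLICATION_NUMBER]
                if not match.empty:
                    print(match.to_string(index=False))
                else:
                    print(f"No decision published for application {APPLICATION_NUMBER}.")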
                
            except Exception as e:
                print("Error reading the .ods file:", e)
        else:
            print("Failed to download the file. Status code:", file_response.status_code)
    else:
        print("The specified link was not found.")
else:
    print(f"Failed to retrieve the webpage. Status code: {response.status_code}")

