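"""
Web Content Scraper with Preserved Formatting

A Streamlit app that fetches a web page, keeps its HTML structure for
display, downloads up to ten of its images, and offers the images as a
zip archive.

Run with (assuming this file is saved as app.py):
    streamlit run app.py

Dependencies: streamlit, requests, beautifulsoup4
"""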
import os
import zipfile
from urllib.parse import urljoin

import requests
import streamlit as st
import streamlit.components.v1 as components
from bs4 import BeautifulSoup

def scrape_web_content(url):
    """
    Scrape the web content while preserving its original formatting
    
    Args:
        url (str): URL of the webpage
    
    Returns:
        dict: Extracted content with text, HTML, and images
    """
    try:
        # Send a request to the URL (with a timeout so a slow site can't hang the app)
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        
        # Parse the HTML content
        soup = BeautifulSoup(response.content, 'html.parser')
        
        # Create a directory to save images if it doesn't exist
        os.makedirs('downloaded_images', exist_ok=True)
        
        # Download up to the first ten images on the page
        downloaded_images = []
        img_tags = soup.find_all('img', src=True)
        for i, img in enumerate(img_tags[:10], 1):
            try:
                # Get the image source URL
                img_url = img['src']
                
                # Handle relative URLs
                if not img_url.startswith(('http://', 'https://')):
                    img_url = urljoin(url, img_url)
                
                # Download the image
                img_response = requests.get(img_url, timeout=10)
                img_response.raise_for_status()
                
                # Derive the extension from the URL (query string stripped);
                # fall back to .jpg when the URL has no extension
                ext = os.path.splitext(img_url.split('?')[0])[1] or '.jpg'
                filename = f'downloaded_images/image_{i}{ext}'
                
                # Save the image
                with open(filename, 'wb') as f:
                    f.write(img_response.content)
                
                # Point the tag at the local copy so the saved HTML references
                # the downloaded file (note: these local paths will generally
                # not render inside the components.html preview iframe)
                img['src'] = filename
                downloaded_images.append(filename)
            
            except Exception as img_error:
                st.warning(f"Could not download image {i}: {img_error}")
        
        # Remove unwanted tags
        for tag in soup(["script", "style", "meta", "link", "noscript"]):
            tag.decompose()
        
        # Convert remaining soup to HTML string
        formatted_html = str(soup)
        
        # Extract plain text for preview
        plain_text = soup.get_text(separator='\n', strip=True)
        
        return {
            'html': formatted_html,
            'plain_text': plain_text,
            'images': downloaded_images
        }
    
    except Exception as e:
        st.error(f"Error occurred while scraping the content: {e}")
        return None
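
# Example usage of scrape_web_content outside the UI (a sketch; the URL is
# hypothetical, and st.warning/st.error expect a running Streamlit session):
#
#   content = scrape_web_content("https://example.com")
#   if content:
#       print(content['plain_text'][:200])
#       print("Saved images:", content['images'])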

def main():
    """
    Main Streamlit application
    """
    st.title("Web Content Scraper with Preserved Formatting")
    
    # Get the URL from the user
    url_input = st.text_input("Enter the URL of the web page:", "")
    
    # Option to choose display mode
    display_mode = st.radio("Display Mode:", 
                             ["Full HTML", "Plain Text", "Side-by-Side"])
    
    if st.button("Scrape Content"):
        if url_input:
            # Scrape the content
            scraped_content = scrape_web_content(url_input)
            
            if scraped_content:
                st.success("Content successfully scraped!")
                
                # Display content based on selected mode
                if display_mode == "Full HTML":
                    # Display full HTML with preserved formatting
                    st.markdown("### Formatted Web Content")
                    components.html(scraped_content['html'], height=600, scrolling=True)
                
                elif display_mode == "Plain Text":
                    # Display plain text
                    st.markdown("### Plain Text Content")
                    st.text_area("Scraped Text:", scraped_content['plain_text'], height=400)
                
                else:  # Side-by-Side
                    # Split the screen to show HTML and plain text
                    col1, col2 = st.columns(2)
                    
                    with col1:
                        st.markdown("### Formatted HTML")
                        components.html(scraped_content['html'], height=600, scrolling=True)
                    
                    with col2:
                        st.markdown("### Plain Text")
                        st.text_area("Scraped Text:", scraped_content['plain_text'], height=600)
                
                # Display images
                if scraped_content['images']:
                    st.subheader("Downloaded Images")
                    cols = st.columns(min(len(scraped_content['images']), 3))
                    for i, img_path in enumerate(scraped_content['images']):
                        with cols[i % len(cols)]:
                            # use_container_width supersedes the deprecated use_column_width
                            st.image(img_path, use_container_width=True)
                    
                    # Zip the images and offer them as a single download
                    zip_path = 'downloaded_images.zip'
                    with zipfile.ZipFile(zip_path, 'w') as zip_file:
                        for img_path in scraped_content['images']:
                            zip_file.write(img_path, os.path.basename(img_path))
                    
                    with open(zip_path, 'rb') as f:
                        st.download_button(
                            label="Download All Images",
                            data=f.read(),
                            file_name=zip_path,
                            mime='application/zip'
                        )
            else:
                st.warning("Failed to scrape content from the URL.")
        else:
            st.warning("Please enter a valid URL.")

if __name__ == "__main__":
    main()