Marcepelaez commited on
Commit
38f7ded
verified
1 Parent(s): 6ef7dfb
Files changed (1) hide show
  1. app.py +20 -136
app.py CHANGED
@@ -5,139 +5,7 @@ import re
5
  import os
6
  from urllib.parse import urljoin
7
 
8
- def apply_theme(theme):
9
- """
10
- Apply custom CSS based on the selected theme
11
- """
12
- if theme == "Claro":
13
- st.markdown("""
14
- <style>
15
- body {
16
- color: black;
17
- background-color: white;
18
- }
19
- .stTextInput > div > div > input {
20
- color: black;
21
- background-color: white;
22
- }
23
- .stMarkdown {
24
- color: black;
25
- }
26
- /* Light theme for HTML content */
27
- .light-theme {
28
- background-color: white !important;
29
- color: black !important;
30
- }
31
- .light-theme a {
32
- color: #0066cc !important;
33
- }
34
- .light-theme h1, .light-theme h2, .light-theme h3,
35
- .light-theme h4, .light-theme h5, .light-theme h6 {
36
- color: #333 !important;
37
- }
38
- </style>
39
- """, unsafe_allow_html=True)
40
- return """
41
- <div style="background-color: white; color: black; padding: 20px;">
42
- <style>
43
- body { background-color: white !important; color: black !important; }
44
- a { color: #0066cc; }
45
- h1, h2, h3, h4, h5, h6 { color: #333; }
46
- </style>
47
- """
48
- else:
49
- st.markdown("""
50
- <style>
51
- body {
52
- color: white;
53
- background-color: #0E1117;
54
- }
55
- .stTextInput > div > div > input {
56
- color: white;
57
- background-color: #262730;
58
- }
59
- .stMarkdown {
60
- color: white;
61
- }
62
- </style>
63
- """, unsafe_allow_html=True)
64
- return ""
65
-
66
- def scrape_web_content(url, max_images, theme):
67
- """
68
- Scrape the web content while preserving its original formatting
69
-
70
- Args:
71
- url (str): URL of the webpage
72
- max_images (int): Maximum number of images to download
73
- theme (str): Selected theme (Claro/Oscuro)
74
-
75
- Returns:
76
- dict: Extracted content with text, HTML, and images
77
- """
78
- try:
79
- # Send a request to the URL
80
- response = requests.get(url)
81
- response.raise_for_status()
82
-
83
- # Parse the HTML content
84
- soup = BeautifulSoup(response.content, 'html.parser')
85
-
86
- # Create a directory to save images if it doesn't exist
87
- os.makedirs('downloaded_images', exist_ok=True)
88
-
89
- # Download images
90
- downloaded_images = []
91
- img_tags = soup.find_all('img', src=True)
92
- for i, img in enumerate(img_tags[:max_images], 1):
93
- try:
94
- # Get the image source URL
95
- img_url = img['src']
96
-
97
- # Handle relative URLs
98
- if not img_url.startswith(('http://', 'https://')):
99
- img_url = urljoin(url, img_url)
100
-
101
- # Download the image
102
- img_response = requests.get(img_url)
103
- img_response.raise_for_status()
104
-
105
- # Generate a unique filename
106
- filename = f'downloaded_images/image_{i}.{img_url.split(".")[-1].split("?")[0]}'
107
-
108
- # Save the image
109
- with open(filename, 'wb') as f:
110
- f.write(img_response.content)
111
-
112
- # Update the image tag in the soup to point to local file
113
- img['src'] = filename
114
- downloaded_images.append(filename)
115
-
116
- except Exception as img_error:
117
- st.warning(f"Could not download image {i}: {img_error}")
118
-
119
- # Remove unwanted tags
120
- for tag in soup(["script", "style", "meta", "link", "noscript"]):
121
- tag.decompose()
122
-
123
- # Apply light theme styling if selected
124
- theme_prefix = apply_theme(theme) if theme == "Claro" else ""
125
-
126
- # Convert remaining soup to HTML string with theme prefix
127
- formatted_html = theme_prefix + str(soup)
128
-
129
- # Extract plain text for preview
130
- plain_text = soup.get_text(separator='\n', strip=True)
131
-
132
- return {
133
- 'html': formatted_html,
134
- 'plain_text': plain_text,
135
- 'images': downloaded_images
136
- }
137
-
138
- except Exception as e:
139
- st.error(f"Error occurred while scraping the content: {e}")
140
- return None
141
 
142
  def main():
143
  """
@@ -181,7 +49,16 @@ def main():
181
  elif display_mode == "Plain Text":
182
  # Display plain text
183
  st.markdown("### Plain Text Content")
184
- st.text_area("Scraped Text:", scraped_content['plain_text'], height=400)
 
 
 
 
 
 
 
 
 
185
 
186
  else: # Side-by-Side
187
  # Split the screen to show HTML and plain text
@@ -193,7 +70,14 @@ def main():
193
 
194
  with col2:
195
  st.markdown("### Plain Text")
196
- st.text_area("Scraped Text:", scraped_content['plain_text'], height=600)
 
 
 
 
 
 
 
197
 
198
  # Display images
199
  if scraped_content['images']:
@@ -224,4 +108,4 @@ def main():
224
  st.warning("Please enter a valid URL.")
225
 
226
  if __name__ == "__main__":
227
- main()
 
5
  import os
6
  from urllib.parse import urljoin
7
 
8
+ # Resto del c贸digo se mantiene igual...
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
  def main():
11
  """
 
49
  elif display_mode == "Plain Text":
50
  # Display plain text
51
  st.markdown("### Plain Text Content")
52
+ # Text area to show scraped content
53
+ text_area = st.text_area("Scraped Text:", scraped_content['plain_text'], height=400)
54
+ # Add a button to copy all text
55
+ st.download_button(
56
+ label="Copy All Text",
57
+ data=scraped_content['plain_text'],
58
+ file_name="scraped_text.txt",
59
+ mime="text/plain",
60
+ help="Click to copy all extracted text"
61
+ )
62
 
63
  else: # Side-by-Side
64
  # Split the screen to show HTML and plain text
 
70
 
71
  with col2:
72
  st.markdown("### Plain Text")
73
+ text_area = st.text_area("Scraped Text:", scraped_content['plain_text'], height=600)
74
+ st.download_button(
75
+ label="Copy All Text",
76
+ data=scraped_content['plain_text'],
77
+ file_name="scraped_text.txt",
78
+ mime="text/plain",
79
+ help="Click to copy all extracted text"
80
+ )
81
 
82
  # Display images
83
  if scraped_content['images']:
 
108
  st.warning("Please enter a valid URL.")
109
 
110
  if __name__ == "__main__":
111
+ main()