import os
import time
import hashlib
import logging
import datetime
import gradio as gr
import csv
from urllib.parse import urlparse
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from transformers import pipeline
import feedparser
from bs4 import BeautifulSoup
import threading
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Define constants
DEFAULT_FILE_PATH = "scraped_data"
PURPOSE = "You monitor Culver's sites, continuously checking them for changes since your last observation. Anything new is logged and dumped into a CSV stored in your log folder at user/app/scraped_data."
HISTORY = []
CURRENT_TASK = None
STOP_THREADS = False
# Define a function to monitor URLs for changes
def monitor_urls(storage_location, urls, scrape_interval, content_type, stop_scraping_flag):
    """Poll each URL on a fixed interval and log a CSV row whenever its content hash changes."""
    global HISTORY
    previous_hashes = {url: "" for url in urls}  # Track the last seen content hash per URL
    try:
        with webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=Options()) as driver:
            while not stop_scraping_flag[0]:
                for url in urls:
                    try:
                        driver.get(url)
                        time.sleep(2)  # Wait for the page to load
                        if content_type == "text":
                            current_content = driver.page_source
                        elif content_type == "media":
                            current_content = driver.find_elements(By.TAG_NAME, "img")
                        else:
                            current_content = driver.page_source
                        current_hash = hashlib.md5(str(current_content).encode('utf-8')).hexdigest()
                        if current_hash != previous_hashes[url]:
                            previous_hashes[url] = current_hash
                            date_time_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                            HISTORY.append(f"Change detected at {url} on {date_time_str}")
                            with open(os.path.join(storage_location, f"{urlparse(url).hostname}_changes.csv"), "a", newline="") as csvfile:
                                csv_writer = csv.DictWriter(csvfile, fieldnames=["date", "time", "url", "change"])
                                if csvfile.tell() == 0:
                                    csv_writer.writeheader()  # Write the header once, when the file is new
                                csv_writer.writerow({"date": date_time_str.split()[0], "time": date_time_str.split()[1], "url": url, "change": "Content changed"})
                            logging.info(f"Change detected at {url} on {date_time_str}")
                    except Exception as e:
                        logging.error(f"Error accessing {url}: {e}")
                time.sleep(scrape_interval * 60)  # Check again after scrape_interval minutes
    except Exception as e:
        logging.error(f"Error starting ChromeDriver: {e}")
# Define a function to start scraping
def start_scraping(storage_location, urls, scrape_interval, content_type, stop_scraping_flag):
    """Record an initial observation of each URL, then launch a background monitoring thread."""
    global CURRENT_TASK, HISTORY
    CURRENT_TASK = f"Monitoring URLs: {', '.join(urls)}"
    HISTORY.append(f"Task started: {CURRENT_TASK}")
    for url in urls:
        # Create a folder for the URL
        hostname = urlparse(url).hostname
        folder_path = os.path.join(storage_location, hostname)
        os.makedirs(folder_path, exist_ok=True)
        # Log the initial observation
        try:
            with webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=Options()) as driver:
                driver.get(url)
                time.sleep(2)  # Wait for the page to load
                if content_type == "text":
                    initial_content = driver.page_source
                elif content_type == "media":
                    initial_content = driver.find_elements(By.TAG_NAME, "img")
                else:
                    initial_content = driver.page_source
                initial_hash = hashlib.md5(str(initial_content).encode('utf-8')).hexdigest()
                HISTORY.append(f"Initial observation at {url}: {initial_hash}")
                with open(os.path.join(folder_path, f"{hostname}_initial_observation.txt"), "w") as file:
                    file.write(f"Initial observation at {url}: {initial_hash}")
        except Exception as e:
            HISTORY.append(f"Error accessing {url}: {e}")
    # Start a daemon thread for monitoring URLs so it does not block interpreter exit
    threading.Thread(target=monitor_urls, args=(storage_location, urls, scrape_interval, content_type, stop_scraping_flag), daemon=True).start()
    return f"Started scraping {', '.join(urls)} every {scrape_interval} minutes."
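# The Gradio wiring below calls chat_interface, which this file never defines.
# A minimal sketch follows, assuming a small text2text model; the model name
# ("google/flan-t5-small") and the decision to ignore the scraper settings in the
# reply are assumptions, not part of the original Space.
def chat_interface(message, history, system_message, max_tokens, temperature, top_p,
                   storage_location, urls, scrape_interval, content_type):
    # Recreating the pipeline per call keeps the sketch simple; cache it in practice.
    generator = pipeline("text2text-generation", model="google/flan-t5-small")
    prompt = f"{system_message}\n{message}"
    reply = generator(prompt, max_new_tokens=int(max_tokens), do_sample=True,
                      temperature=temperature, top_p=top_p)[0]["generated_text"]
    # Append the exchange as a (user, bot) tuple, the format gr.Chatbot expects here
    history = (history or []) + [(message, reply)]
    return history, reply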
# Define a function to stop scraping
def stop_scraping(stop_scraping_flag):
    """Signal the monitoring thread to exit after its current polling cycle."""
    stop_scraping_flag[0] = True
    return "Scraping stopped."
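# create_interface wires buttons to display_csv and generate_rss_feed, neither of
# which exists in this file. The sketches below are assumptions: they read back the
# per-host change log that monitor_urls writes under the default storage location.
def display_csv(url):
    # Return the raw CSV change log for the given URL, if one has been written.
    hostname = urlparse(url).hostname
    csv_path = os.path.join(DEFAULT_FILE_PATH, f"{hostname}_changes.csv")
    if not os.path.exists(csv_path):
        return f"No CSV log found for {url}"
    with open(csv_path, "r") as csvfile:
        return csvfile.read()

def generate_rss_feed(url):
    # Build a simple RSS 2.0 document from the change log; the feed layout here is
    # an assumption, since the original never specifies one.
    hostname = urlparse(url).hostname
    csv_path = os.path.join(DEFAULT_FILE_PATH, f"{hostname}_changes.csv")
    if not os.path.exists(csv_path):
        return f"No change log found for {url}"
    items = []
    with open(csv_path, "r") as csvfile:
        for row in csv.DictReader(csvfile):
            items.append(
                f"<item><title>{row['change']}</title><link>{row['url']}</link>"
                f"<pubDate>{row['date']} {row['time']}</pubDate></item>"
            )
    return (
        f"<rss version=\"2.0\"><channel><title>Changes for {hostname}</title>"
        f"<link>{url}</link>{''.join(items)}</channel></rss>"
    )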
# Define the Gradio interface
def create_interface():
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                message = gr.Textbox(label="Message")
                system_message = gr.Textbox(value="You are a helpful assistant.", label="System message")
                max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
                temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
                top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
                storage_location = gr.Textbox(value="scraped_data", label="Storage Location")
                urls = gr.Textbox(label="URLs (comma separated)")
                scrape_interval = gr.Slider(minimum=1, maximum=60, value=5, step=1, label="Scrape Interval (minutes)")
                content_type = gr.Radio(choices=["text", "media", "both"], value="text", label="Content Type")
                start_button = gr.Button("Start Scraping")
                stop_button = gr.Button("Stop Scraping")
                csv_output = gr.Textbox(label="CSV Output", interactive=False)
            with gr.Column():
                chat_history = gr.Chatbot(label="Chat History")
                response_box = gr.Textbox(label="Response")
        # The stop flag is a plain Python list, not a Gradio component, so it is
        # captured in closures here rather than passed through inputs=[...]
        stop_scraping_flag = [False]
        start_button.click(
            lambda loc, url_str, interval, ctype: start_scraping(
                loc, [u.strip() for u in url_str.split(",") if u.strip()], interval, ctype, stop_scraping_flag
            ),
            inputs=[storage_location, urls, scrape_interval, content_type],
            outputs=csv_output,
        )
        stop_button.click(lambda: stop_scraping(stop_scraping_flag), inputs=[], outputs=[csv_output])
        message.submit(chat_interface, inputs=[message, chat_history, system_message, max_tokens, temperature, top_p, storage_location, urls, scrape_interval, content_type], outputs=[chat_history, response_box])
        # Add a row to display the CSV content for a selected URL
        with gr.Row():
            csv_url = gr.Textbox(label="Select URL for CSV Content")
            csv_button = gr.Button("Display CSV Content")
            csv_content_output = gr.Textbox(label="CSV Content Output", interactive=False)
        csv_button.click(display_csv, inputs=[csv_url], outputs=csv_content_output)
        # Add a row to generate the RSS feed for a selected URL
        with gr.Row():
            rss_url = gr.Textbox(label="Select URL for RSS Feed")
            rss_button = gr.Button("Generate RSS Feed")
            rss_output = gr.Textbox(label="RSS Feed Output", interactive=False)
        rss_button.click(generate_rss_feed, inputs=[rss_url], outputs=rss_output)
    return demo
if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
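# Dependency note (an assumption inferred from the imports above, not pinned by the
# original Space): gradio, selenium, webdriver-manager, transformers (with torch),
# feedparser, and beautifulsoup4 must be installed for this file to run.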