"""Monitor Culver's web pages for changes and log them to CSV, with a Gradio front end."""

import csv
import datetime
import hashlib
import logging
import os
import threading
import time
from urllib.parse import urlparse

import gradio as gr
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from transformers import pipeline
from webdriver_manager.chrome import ChromeDriverManager  # resolves a matching ChromeDriver binary

# Configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# Define constants
DEFAULT_FILE_PATH = "scraped_data"
PURPOSE = (
    "You visit Culver's sites and continuously check them for changes since your last "
    "observation. Anything new is logged and appended to a CSV file in your log folder "
    "at user/app/scraped_data."
)
HISTORY = []
CURRENT_TASK = None


def _create_driver():
    """Build a headless Chrome driver, installing a matching ChromeDriver if needed."""
    options = Options()
    options.add_argument("--headless")  # run without a visible browser window
    return webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options)


def _extract_content(driver, content_type):
    """Pull comparable content from the current page for the given content type."""
    if content_type == "text":
        # Hash the visible text rather than raw HTML, so markup noise is ignored.
        return BeautifulSoup(driver.page_source, "html.parser").get_text()
    if content_type == "media":
        # Compare image sources, not WebElement objects (whose reprs change every run).
        return [img.get_attribute("src") for img in driver.find_elements(By.TAG_NAME, "img")]
    return driver.page_source


# Define a function to monitor URLs for changes
def monitor_urls(storage_location, urls, scrape_interval, content_type, stop_scraping_flag):
    global HISTORY
    previous_hashes = {url: "" for url in urls}  # Use a dictionary for better organization
    try:
        with _create_driver() as driver:
            while not stop_scraping_flag[0]:
                for url in urls:
                    try:
                        driver.get(url)
                        time.sleep(2)  # Wait for the page to load
                        current_content = _extract_content(driver, content_type)
                        current_hash = hashlib.md5(str(current_content).encode("utf-8")).hexdigest()
                        if current_hash != previous_hashes[url]:
                            previous_hashes[url] = current_hash
                            date_time_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                            HISTORY.append(f"Change detected at {url} on {date_time_str}")
                            csv_path = os.path.join(storage_location, f"{urlparse(url).hostname}_changes.csv")
                            write_header = not os.path.exists(csv_path) or os.path.getsize(csv_path) == 0
                            with open(csv_path, "a", newline="") as csvfile:
                                csv_writer = csv.DictWriter(csvfile, fieldnames=["date", "time", "url", "change"])
                                if write_header:
                                    csv_writer.writeheader()
                                csv_writer.writerow({
                                    "date": date_time_str.split()[0],
                                    "time": date_time_str.split()[1],
                                    "url": url,
                                    "change": "Content changed",
                                })
                            logging.info(f"Change detected at {url} on {date_time_str}")
                    except Exception as e:
                        logging.error(f"Error accessing {url}: {e}")
                time.sleep(scrape_interval * 60)  # Check every scrape_interval minutes
    except Exception as e:
        logging.error(f"Error starting ChromeDriver: {e}")


# Define a function to start scraping
def start_scraping(storage_location, urls, scrape_interval, content_type, stop_scraping_flag):
    global CURRENT_TASK, HISTORY
    stop_scraping_flag[0] = False  # allow restarting after a previous stop
    CURRENT_TASK = f"Monitoring URLs: {', '.join(urls)}"
    HISTORY.append(f"Task started: {CURRENT_TASK}")
    for url in urls:
        # Create a folder for the URL
        hostname = urlparse(url).hostname
        folder_path = os.path.join(storage_location, hostname)
        os.makedirs(folder_path, exist_ok=True)
        # Log the initial observation
        try:
            with _create_driver() as driver:
                driver.get(url)
                time.sleep(2)  # Wait for the page to load
                initial_content = _extract_content(driver, content_type)
                initial_hash = hashlib.md5(str(initial_content).encode("utf-8")).hexdigest()
                HISTORY.append(f"Initial observation at {url}: {initial_hash}")
                with open(os.path.join(folder_path, f"{hostname}_initial_observation.txt"), "w") as file:
                    file.write(f"Initial observation at {url}: {initial_hash}")
        except Exception as e:
            HISTORY.append(f"Error accessing {url}: {e}")
    # Start a new thread for monitoring URLs
    threading.Thread(
        target=monitor_urls,
        args=(storage_location, urls, scrape_interval, content_type, stop_scraping_flag),
        daemon=True,  # don't keep the process alive after the UI exits
    ).start()
    return f"Started scraping {', '.join(urls)} every {scrape_interval} minutes."


# Define a function to stop scraping
def stop_scraping(stop_scraping_flag):
    stop_scraping_flag[0] = True
    return "Scraping stopped."
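
# The UI below wires three callbacks that the original script references but never
# defines: chat_interface, display_csv, and generate_rss_feed. The implementations
# here are minimal, hedged sketches -- the model name, storage-location default, and
# RSS fields are assumptions, not part of the original code.

_chat_pipeline = None  # lazily initialized so the model only loads on first use


def chat_interface(message, history, system_message, max_tokens, temperature, top_p):
    """Chat callback sketch; assumes a small seq2seq model (google/flan-t5-small)."""
    global _chat_pipeline
    if _chat_pipeline is None:
        _chat_pipeline = pipeline("text2text-generation", model="google/flan-t5-small")
    prompt = f"{system_message}\n{message}"
    result = _chat_pipeline(
        prompt,
        max_new_tokens=int(max_tokens),
        do_sample=True,
        temperature=float(temperature),
        top_p=float(top_p),
    )
    response = result[0]["generated_text"]
    # Chatbot history as (user, bot) pairs, Gradio's classic tuple format.
    history = (history or []) + [(message, response)]
    return history, response


def display_csv(url):
    """Read back the change log written by monitor_urls for the given URL's hostname.

    Assumes changes were written under the default storage location.
    """
    hostname = urlparse(url).hostname or url
    csv_path = os.path.join(DEFAULT_FILE_PATH, f"{hostname}_changes.csv")
    if not os.path.exists(csv_path):
        return f"No CSV content found for {url}."
    with open(csv_path, newline="") as csvfile:
        return "\n".join(", ".join(row) for row in csv.reader(csvfile))


def generate_rss_feed(url):
    """Build a minimal RSS 2.0 document from the logged changes (hand-rolled XML sketch)."""
    hostname = urlparse(url).hostname or url
    csv_path = os.path.join(DEFAULT_FILE_PATH, f"{hostname}_changes.csv")
    if not os.path.exists(csv_path):
        return f"No change log found for {url}."
    items = []
    with open(csv_path, newline="") as csvfile:
        for row in csv.DictReader(csvfile):
            items.append(
                f"  <item><title>{row['change']}</title><link>{row['url']}</link>"
                f"<pubDate>{row['date']} {row['time']}</pubDate></item>"
            )
    return (
        f'<rss version="2.0"><channel><title>Changes for {hostname}</title>\n'
        + "\n".join(items)
        + "\n</channel></rss>"
    )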

# Define the Gradio interface
def create_interface():
    with gr.Blocks() as demo:
        stop_scraping_flag = [False]  # shared mutable flag read by the monitor thread
        with gr.Row():
            with gr.Column():
                message = gr.Textbox(label="Message")
                system_message = gr.Textbox(value="You are a helpful assistant.", label="System message")
                max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
                temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
                top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
                storage_location = gr.Textbox(value=DEFAULT_FILE_PATH, label="Storage Location")
                urls = gr.Textbox(label="URLs (comma separated)")
                scrape_interval = gr.Slider(minimum=1, maximum=60, value=5, step=1, label="Scrape Interval (minutes)")
                content_type = gr.Radio(choices=["text", "media", "both"], value="text", label="Content Type")
                start_button = gr.Button("Start Scraping")
                stop_button = gr.Button("Stop Scraping")
                csv_output = gr.Textbox(label="CSV Output", interactive=False)
            with gr.Column():
                chat_history = gr.Chatbot(label="Chat History")
                response_box = gr.Textbox(label="Response")

        # The URLs textbox holds a comma-separated string, so split it into a list
        # before handing it to start_scraping; the stop flag is captured by closure
        # rather than passed as a Gradio input (a plain list is not a valid component).
        def _start(location, url_str, interval, ctype):
            url_list = [u.strip() for u in url_str.split(",") if u.strip()]
            return start_scraping(location, url_list, interval, ctype, stop_scraping_flag)

        start_button.click(
            _start,
            inputs=[storage_location, urls, scrape_interval, content_type],
            outputs=csv_output,
        )
        stop_button.click(lambda: stop_scraping(stop_scraping_flag), inputs=[], outputs=[csv_output])
        message.submit(
            chat_interface,
            inputs=[message, chat_history, system_message, max_tokens, temperature, top_p],
            outputs=[chat_history, response_box],
        )

        # Add a row to display the CSV content for a selected URL
        with gr.Row():
            csv_url = gr.Textbox(label="Select URL for CSV Content")
            csv_button = gr.Button("Display CSV Content")
            csv_content_output = gr.Textbox(label="CSV Content Output", interactive=False)
        csv_button.click(display_csv, inputs=[csv_url], outputs=csv_content_output)

        # Add a row to generate an RSS feed for a selected URL
        with gr.Row():
            rss_url = gr.Textbox(label="Select URL for RSS Feed")
            rss_button = gr.Button("Generate RSS Feed")
            rss_output = gr.Textbox(label="RSS Feed Output", interactive=False)
        rss_button.click(generate_rss_feed, inputs=[rss_url], outputs=rss_output)

    return demo


if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
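
# Usage sketch (assumed environment, not from the original script): install the
# dependencies with `pip install gradio selenium webdriver-manager beautifulsoup4
# transformers torch`, make sure a local Chrome is available, then run the file
# (e.g. `python app.py`) and open the printed Gradio URL. Enter full URLs such as
# "https://www.culvers.com" in the URLs box and click "Start Scraping"; change rows
# accumulate in scraped_data/<hostname>_changes.csv.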