# agent.py

import os
import time
import hashlib
import logging
import datetime
import csv
from urllib.parse import urlparse
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from webdriver_manager.chrome import ChromeDriverManager
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Define constants
DEFAULT_FILE_PATH = "scraped_data"
PURPOSE = "Visit Culvers sites and continuously look for changes since the last observation. Anything new is logged and dumped into a CSV file stored in the log folder at user/app/scraped_data."
HISTORY = []
CURRENT_TASK = None

# Function to monitor URLs for changes
def monitor_urls(storage_location, urls, scrape_interval, content_type, selector=None):
    global HISTORY
    previous_hashes = {url: "" for url in urls} 

    try:
        with webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=Options()) as driver:
            while True:
                for url in urls:
                    try:
                        driver.get(url)
                        WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.TAG_NAME, 'body')))  # Wait for basic page load
                        time.sleep(2)  # Additional wait for dynamic content

                        if content_type == "text":
                            current_content = driver.page_source
                        elif content_type == "media":
                            if selector:
                                try:
                                    elements = WebDriverWait(driver, 5).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, selector)))
                                    current_content = [element.get_attribute('src') for element in elements]
                                except TimeoutException:
                                    logging.warning(f"Timeout waiting for media elements with selector '{selector}' on {url}")
                                    current_content = []
                            else:
                                # Hash the src attributes rather than the WebElement objects, whose reprs change on every page load
                                current_content = [element.get_attribute('src') for element in driver.find_elements(By.TAG_NAME, "img")]
                        else:
                            current_content = driver.page_source

                        current_hash = hashlib.md5(str(current_content).encode('utf-8')).hexdigest()
                        if current_hash != previous_hashes[url]:
                            previous_hashes[url] = current_hash
                            date_time_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                            HISTORY.append(f"Change detected at {url} on {date_time_str}")
                            # Write into the per-host folder so display_csv() and generate_rss_feed() find the file
                            hostname = urlparse(url).hostname
                            csv_path = os.path.join(storage_location, hostname, f"{hostname}_changes.csv")
                            os.makedirs(os.path.dirname(csv_path), exist_ok=True)
                            write_header = not os.path.exists(csv_path)
                            with open(csv_path, "a", newline="") as csvfile:
                                csv_writer = csv.DictWriter(csvfile, fieldnames=["date", "time", "url", "change"])
                                if write_header:
                                    csv_writer.writeheader()
                                csv_writer.writerow({"date": date_time_str.split()[0], "time": date_time_str.split()[1], "url": url, "change": "Content changed"})
                            logging.info(f"Change detected at {url} on {date_time_str}")
                    except Exception as e:
                        logging.error(f"Error accessing {url}: {e}")
                time.sleep(scrape_interval * 60)  # Check every scrape_interval minutes
    except Exception as e:
        logging.error(f"Error starting ChromeDriver: {e}")

# Function to define the chat response function using the Mistral model
def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Note: loading the model and tokenizer on every call is expensive; a real
    # deployment would load them once and reuse them.
    model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    output = pipe(f"User: {message}\nHistory: {history}\nSystem: {system_message}",
                  max_new_tokens=max_tokens, temperature=temperature, top_p=top_p)
    return output[0]["generated_text"]
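
# Example (an assumption, for illustration only): a minimal call to respond().
# It presumes the Mixtral weights can be downloaded and fit in memory; the
# argument values are placeholders, not settings used elsewhere in this file.
#
#     reply = respond(
#         message="Summarize today's detected changes.",
#         history=[],
#         system_message=PURPOSE,
#         max_tokens=256,
#         temperature=0.7,
#         top_p=0.95,
#     )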

# Function to start scraping
def start_scraping(storage_location, urls, scrape_interval, content_type, selector=None):
    global CURRENT_TASK, HISTORY

    CURRENT_TASK = f"Monitoring URLs: {', '.join(urls)}"
    HISTORY.append(f"Task started: {CURRENT_TASK}")

    for url in urls:
        # Create a folder for the URL
        hostname = urlparse(url).hostname
        folder_path = os.path.join(storage_location, hostname)
        os.makedirs(folder_path, exist_ok=True)

        # Log the initial observation
        try:
            with webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=Options()) as driver:
                driver.get(url)
                WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.TAG_NAME, 'body')))  # Wait for basic page load
                time.sleep(2)  # Additional wait for dynamic content

                if content_type == "text":
                    initial_content = driver.page_source
                elif content_type == "media":
                    if selector:
                        try:
                            elements = WebDriverWait(driver, 5).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, selector)))
                            initial_content = [element.get_attribute('src') for element in elements]
                        except TimeoutException:
                            logging.warning(f"Timeout waiting for media elements with selector '{selector}' on {url}")
                            initial_content = []
                    else:
                        # Hash the src attributes rather than the WebElement objects, whose reprs change on every page load
                        initial_content = [element.get_attribute('src') for element in driver.find_elements(By.TAG_NAME, "img")]
                else:
                    initial_content = driver.page_source

                initial_hash = hashlib.md5(str(initial_content).encode('utf-8')).hexdigest()
                HISTORY.append(f"Initial observation at {url}: {initial_hash}")
                with open(os.path.join(folder_path, f"{hostname}_initial_observation.txt"), "w") as file:
                    file.write(f"Initial observation at {url}: {initial_hash}")
        except Exception as e:
            HISTORY.append(f"Error accessing {url}: {e}")

    # Monitor the URLs
    monitor_urls(storage_location, urls, scrape_interval, content_type, selector)

    return f"Started scraping {', '.join(urls)} every {scrape_interval} minutes."

# Function to display CSV content
def display_csv(storage_location, url):
    hostname = urlparse(url).hostname
    folder_path = os.path.join(storage_location, hostname)
    csv_path = os.path.join(folder_path, f"{hostname}_changes.csv")
    if os.path.exists(csv_path):
        with open(csv_path, "r") as file:
            return file.read()
    else:
        return "No data available."

# Function to generate RSS feed for a given URL
def generate_rss_feed(storage_location, url):
    hostname = urlparse(url).hostname
    folder_path = os.path.join(storage_location, hostname)
    csv_path = os.path.join(folder_path, f"{hostname}_changes.csv")
    if os.path.exists(csv_path):
        with open(csv_path, "r") as file:
            reader = csv.DictReader(file)
            # feedparser only parses existing feeds, so the feed is assembled
            # as a plain dictionary instead of through feedparser.parse().
            feed = {
                "title": f"Changes for {hostname}",
                "link": url,
                "description": "Recent changes detected on the website.",
                "entries": [],
            }
            for row in reader:
                feed["entries"].append({
                    "title": f"Change detected at {row['url']}",
                    "link": row['url'],
                    "description": f"Content changed on {row['date']} at {row['time']}",
                    "published": datetime.datetime.strptime(f"{row['date']} {row['time']}", "%Y-%m-%d %H:%M:%S").isoformat(),
                })
            return feed["entries"]
    else:
        return "No data available."

# Function to handle user input and generate response
def chat_interface(message, history, system_message, max_tokens, temperature, top_p, storage_location, urls, scrape_interval, content_type, selector):
    response = respond(message, history, system_message, max_tokens, temperature, top_p)
    history.append((message, response))
    return history, response
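
# Illustrative entry point (an assumption, not part of the original file): the
# URL, interval, and storage path below are placeholders showing one way
# start_scraping() might be invoked. Running it requires Chrome, chromedriver
# (via webdriver-manager), and network access; monitor_urls() loops
# indefinitely, so the call does not return under normal operation.
if __name__ == "__main__":
    start_scraping(
        storage_location=DEFAULT_FILE_PATH,
        urls=["https://www.culvers.com/"],  # placeholder URL
        scrape_interval=5,                  # minutes between checks
        content_type="text",
    )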