import copy
import json
import logging
import os
import time
import traceback
import urllib.parse as en
import warnings
from concurrent.futures import ThreadPoolExecutor
from itertools import zip_longest

import requests
from zenrows import ZenRowsClient

from llmsearch import utilityV2 as ut

logger = logging.getLogger("agent_logger")


# TODO: drop blocked pages (see the original llmsearch code)
# TODO: use the chat condense-mode query instead of the new GPT query

def search(msg, query_phrase):
    """Search Google for the user message, scrape the result pages, and print them."""
    try:
        # this call extracts keywords from the statement and rewrites it into a better search phrase with gpt-3.5
        # query_phrase, keywords = ut.get_search_phrase_and_keywords(msg, [])
        google_text = []
        try:
            print(f"asking google {msg}; rephrased: {query_phrase}")
            google_text = search_google(msg, query_phrase)
        except Exception:
            traceback.print_exc()

        print("\n\nFinal response: ")

        for item in google_text:
            print(
                f"\n##############################################################################################\nSource: {item['source']}"
            )
            print(f"{item['text']}")
            print(f"URL: {item['url']}")
        return google_text
    except KeyboardInterrupt:
        traceback.print_exc()
        raise
    except Exception:
        traceback.print_exc()
    return []


# Phrases that indicate a page returned an error or a bot block instead of content.
BLOCK_PHRASES = (
    "an error has occurred",
    "permission to view this page",
    "403 error",
    "have been blocked",
    "too many requests",
)


# Make a single URL request through ZenRows and process the response
def process_url(url):
    processed_page = {}
    start_time = time.time()
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            client = ZenRowsClient(os.getenv("zenrows_api_key"))
            response = client.get(url)
            print(f"got response, status: {response.status_code}")
            result = response.text
            if len(result) > 0 and not any(phrase in result.lower() for phrase in BLOCK_PHRASES):
                processed_page = {
                    "source": ut.extract_domain(url),
                    "url": url,
                    "text": result,
                }
            print(f"Processed {url}: {len(result)} {int((time.time() - start_time) * 1000)} ms")
    except Exception:
        traceback.print_exc()
    # always return a dict; it is empty when the page was blocked or the request failed
    return processed_page


def process_urls(urls):
    """Fetch every URL in parallel and keep only the pages that returned usable text."""
    print(f"entering process urls: {len(urls)} found. {urls}")
    start_time = time.time()
    results = []

    if not urls:
        return results

    try:
        with ThreadPoolExecutor(max_workers=len(urls)) as pool:
            for result in pool.map(process_url, urls):
                print(f"returned {result}")
                if result:  # skip pages that were blocked or failed
                    results.append(result)
    except Exception:
        traceback.print_exc()

    print(
        f"\n*****processed all urls {len(results)}  {int(time.time() - start_time)} secs"
    )
    return results


def extract_subtext(text):
    return ut.reform(text)


def request_google(query_phrase):
    """Query the Google Custom Search API and return a filtered list of result URLs."""
    print(f"***** search {query_phrase}")
    # prefer recent results; adjust the date sort when the query asks for current news
    sort = "&sort=date-sdate:d:w"
    if "today" in query_phrase or "latest" in query_phrase:
        sort = "&sort=date-sdate:d:s"
    print(f"search for: {query_phrase}")
    google_query = en.quote(query_phrase)
    response = []
    try:
        start_wall_time = time.time()
        url = (
            "https://www.googleapis.com/customsearch/v1?key="
            + ut.google_key
            + "&cx="
            + ut.google_cx
            + "&num=4"
            + sort
            + "&q="
            + google_query
        )
        response = requests.get(url)
        response_json = json.loads(response.text)
        print(f"***** google search {int((time.time() - start_wall_time) * 10) / 10} sec")
    except Exception:
        traceback.print_exc()
        return []

    # see if we got anything useful from Google
    if "items" not in response_json:
        print("no return from google ...", response, response_json.keys())
        return []

    urls = []
    for item in response_json["items"]:
        url = item["link"].strip()
        site = ut.extract_site(url)
        if site not in ut.sites or ut.sites[site] == 1:
            # don't use these sources (reddit, youtube, facebook block or resist bots)
            if "reddit" not in url and "youtube" not in url and "facebook" not in url:
                urls.append(url)
    return urls


def search_google(original_query, query_phrase):
    """Search with both the original query and the rephrased query, then scrape the merged URL list."""
    full_text = []

    try:  # query google for recent info
        orig_phrase_urls = []
        if len(original_query) > 0:
            orig_phrase_urls = request_google(original_query[:128])
        gpt_phrase_urls = []
        if len(query_phrase) > 0:
            gpt_phrase_urls = request_google(query_phrase)
        if len(orig_phrase_urls) == 0 and len(gpt_phrase_urls) == 0:
            return []

        # drop duplicates from the second list
        for url in orig_phrase_urls:
            if url in gpt_phrase_urls:
                gpt_phrase_urls.remove(url)

        # interleave both lists now that duplicates are removed
        urls = [
            val
            for tup in zip_longest(orig_phrase_urls, gpt_phrase_urls)
            for val in tup
            if val is not None
        ]
        all_urls = copy.deepcopy(urls)
        # scan the Google urls in parallel
        start_wall_time = time.time()
        full_text = process_urls(all_urls)
        print(f"***** urls_processed {int((time.time() - start_wall_time) * 10) / 10} sec")
    except Exception:
        traceback.print_exc()
    return full_text
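

# Minimal usage sketch (an assumption, not part of the original module): running this file
# directly requires the zenrows_api_key environment variable plus google_key, google_cx,
# and sites configured in llmsearch.utilityV2. The message and query phrase below are
# hypothetical examples of what a caller might pass in.
if __name__ == "__main__":
    example_msg = "What happened in the chip industry this week?"
    example_phrase = "semiconductor industry news this week"
    pages = search(example_msg, example_phrase)
    print(f"retrieved {len(pages)} usable pages")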