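"""Google search helpers for llmsearch: query the Google Custom Search API,
fetch each result page through ZenRows, and return the collected page text."""
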
import copy
import json
import logging
import os
import time
import traceback
import urllib.parse as en
import warnings
from itertools import zip_longest

import requests
from unstructured.partition.html import partition_html
from zenrows import ZenRowsClient

from llmsearch import site_stats
# this import style works in pycharm
from llmsearch import utilityV2 as ut

# this import style works on server + vs code
# import utils
# from llmsearch import google_search_concurrent as gs
# from llmsearch import meta as mt
# from llmsearch import utilityV2 as ut

logger = logging.getLogger("agent_logger")


# todo drop blocked pages > see og llmsearch code
# todo use the chat condense mode query instead of the new gpt query

def search(msg, query_phrase):
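    """Search Google for `msg` (with `query_phrase` as the rewritten search
    phrase), print each retrieved page, and return the list of
    {'source', 'url', 'text'} dicts, or "" on failure."""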
    try:
        # optionally extract keywords and rewrite the statement into a better search phrase with gpt-3.5:
        # query_phrase, keywords = ut.get_search_phrase_and_keywords(msg, [])
        google_text = ""
        try:
            print(f"asking google {msg}; rephrased: {query_phrase}")
            google_text, urls_all, urls_used, tried_index, urls_tried = search_google(msg, query_phrase)
        except:
            traceback.print_exc()

        print("\n\nFinal response: ")

        for item in google_text:
            print(
                f"\n##############################################################################################\nSource: {item['source']}"
            )
            print(f"{item['text']}")
            print(f"URL: {item['url']}")
        return google_text
    except KeyboardInterrupt:
        traceback.print_exc()
        raise KeyboardInterrupt
    except:
        traceback.print_exc()
    return ""


# Define a function to make a single URL request and process the response
def process_url(url):
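    """Fetch a single url through ZenRows and return (page_text, url);
    returns ("", url) if the request fails."""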
    start_time = time.time()
    site = ut.extract_site(url)
    result = ""
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            result = ""
            try:
                client = ZenRowsClient(os.getenv('zenrows_api_key'))
                response = client.get(url)
                print(f'got response, status: {response.status_code}')
                result = response.text
            except Exception:
                traceback.print_exc()
                return "", url
    except Exception:
        traceback.print_exc()
        print(f"{site} err")
        return "", url
    print(f"Processed {site}: {len(result)} chars in {int((time.time() - start_time) * 1000)} ms")
    return result, url


def process_urls(urls):
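    """Fetch pages for the given urls and return
    (responses, urls_used, tried_index, urls_tried), where responses is a
    list of {'source', 'url', 'text'} dicts."""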
    # fetch urls one at a time, guided by site_stats, until enough text is collected or time runs out
    response = []
    print("entering process urls")
    full_text = ""
    used_index = 0
    urls_used = ["" for i in range(30)]
    tried_index = 0
    urls_tried = ["" for i in range(30)]
    start_time = time.time()
    in_process = []

    try:
        while (len(urls) > 0
               # no sense starting if not much time left
               and (len(full_text) < 4800 and len(in_process) < 10 and time.time() - start_time < 8)
        ):
            recommendation = site_stats.get_next(urls, sample_unknown=True)
            # remaining time budget; currently only reported in the log, not passed to the request
            timeout = 12 - int(time.time() - start_time)
            url = recommendation[1]
            result, url = process_url(url)
            urls_tried[tried_index] = url
            tried_index += 1
            urls.remove(url)
            print(f"queued {ut.extract_site(url)}, {timeout}")
            if len(result) > 0:
                urls_used[used_index] = url
                used_index += 1
                print(
                    f"adding {len(result)} chars from {ut.extract_site(url)} to {len(response)} prior responses"
                )
                if "an error has occurred" not in result.lower() and "permission to view this page" not in result.lower() and "403 ERROR" not in result.lower() and "have been blocked" not in result.lower() and "too many requests" not in result.lower():
                    response.append(
                        {
                            "source": ut.extract_domain(url),
                            "url": url,
                            "text": result,
                        }
                    )

        if (len(urls) == 0 and len(in_process) == 0) or (time.time() - start_time > 28):
            print(
                f"\n****** exiting process urls early {len(response)} {int(time.time() - start_time)} secs\n"
            )
            return response, urls_used, tried_index, urls_tried
    except:
        traceback.print_exc()
    print(
        f"\n*****processed all urls {len(response)}  {int(time.time() - start_time)} secs"
    )
    return response, urls_used, tried_index, urls_tried


def extract_subtext(text):
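    """Post-process extracted text via ut.reform."""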
    return ut.reform(text)


def request_google(query_phrase):
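    """Query the Google Custom Search API for `query_phrase` and return a
    list of result urls, skipping reddit/youtube/facebook and sites marked
    unusable in ut.sites."""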
    print(f"***** search {query_phrase}")
    sort = "&sort=date-sdate:d:w"
    if "today" in query_phrase or "latest" in query_phrase:
        sort = "&sort=date-sdate:d:s"
    print(f"search for: {query_phrase}")
    google_query = en.quote(query_phrase)
    response = []
    try:
        start_wall_time = time.time()
        url = (
                "https://www.googleapis.com/customsearch/v1?key="
                + ut.google_key
                + "&cx="
                + ut.google_cx
                + "&num=4"
                + sort
                + "&q="
                + google_query
        )
        response = requests.get(url)
        response_json = json.loads(response.text)
        print(f"***** google search {int((time.time() - start_wall_time) * 10) / 10} sec")
    except:
        traceback.print_exc()
        return []

    # see if we got anything useful from Google
    if "items" not in response_json.keys():
        print("no return from google ...", response, response_json.keys())
        return []

    urls = []
    for i in range(len(response_json["items"])):
        url = response_json["items"][i]["link"].lstrip().rstrip()
        site = ut.extract_site(url)
        if site not in ut.sites or ut.sites[site] == 1:
            # don't use these sources (reddit because it blocks bots)
            if "reddit" not in url and "youtube" not in url and "facebook" not in url:
                urls.append(url)
    return urls


# def response_text_extract(url, response):
#     extract_text = ""
#     if url.endswith("pdf"):
#         pass
#     else:
#         if response is not None:
#             elements = partition_html(text=response)
#             str_elements = []
#             logger.info('\n***** elements')
#             for e in elements:
#                 stre = str(e).replace("  ", " ")
#                 str_elements.append(stre)
#             extract_text = ''.join(extract_subtext(str_elements))
#             logger.info(
#                 f"***** unstructured found {len(elements)} elements, {sum([len(str(e)) for e in elements])} raw chars, {len(extract_text)} extract"
#             )
#
#     if len(extract_text.strip()) < 8:
#         return ""
#     else:
#         return extract_text


# def extract_items_from_numbered_list(text):
#     items = ""
#     elements = text.split("\n")
#     for candidate in elements:
#         candidate = candidate.lstrip(". \t")
#         if len(candidate) > 4 and candidate[0].isdigit():
#             candidate = candidate[1:].lstrip(". ")
#             if (
#                     len(candidate) > 4 and candidate[0].isdigit()
#             ):  # strip second digit if more than 10 items
#                 candidate = candidate[1:].lstrip(". ")
#             logger.info("E {}".format(candidate))
#             items += candidate + " "
#     return items


def search_google(original_query, query_phrase):
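    """Search Google for both the original query and the rewritten phrase,
    interleave the de-duplicated result urls, fetch their contents, and
    return (responses, all_urls, urls_used, tried_index, urls_tried)."""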
    all_urls = []
    urls_used = []
    urls_tried = []
    tried_index = 0
    full_text = ""

    try:  # query google for recent info
        orig_phrase_urls = []
        if len(original_query) > 0:
            orig_phrase_urls = request_google(original_query[: min(len(original_query), 128)])
        gpt_phrase_urls = []
        if len(query_phrase) > 0:
            gpt_phrase_urls = request_google(query_phrase)
        if len(orig_phrase_urls) == 0 and len(gpt_phrase_urls) == 0:
            return "", [], 0, [""], 0, [""]

        for url in orig_phrase_urls:
            if url in gpt_phrase_urls:
                gpt_phrase_urls.remove(url)

        # interleave both lists now that duplicates are removed
        urls = [
            val
            for tup in zip_longest(orig_phrase_urls, gpt_phrase_urls)
            for val in tup
            if val is not None
        ]
        all_urls = copy.deepcopy(urls)
        # initialize scan of Google urls
        start_wall_time = time.time()
        full_text, urls_used, tried_index, urls_tried = process_urls(all_urls)
        print(f"***** urls_processed {int((time.time() - start_wall_time) * 10) / 10} sec")
        print("return from url processsing")
    except:
        traceback.print_exc()
    return full_text, all_urls, urls_used, tried_index, urls_tried
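

# Minimal usage sketch (an assumption added here, not part of the original
# module): it presumes ut.google_key / ut.google_cx are configured in
# utilityV2 and the 'zenrows_api_key' environment variable is set; the query
# strings below are illustrative only.
if __name__ == "__main__":
    results = search(
        "what happened in AI research this week",
        "latest AI research news",
    )
    for item in results:
        print(item["source"], item["url"])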