import requests
import threading
import re
from googletrans import Translator
from flask import request
from datetime import datetime
from requests import get
from freeGPT import gpt3
from server.auto_proxy import get_random_proxy, remove_proxy, update_working_proxies
from server.config import special_instructions


class Backend_Api:
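    """Backend route table and request handler for the chat completion endpoint."""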
    def __init__(self, app, config: dict) -> None:
        self.app = app
        self.use_auto_proxy = config['use_auto_proxy']
        self.routes = {
            '/backend-api/v2/conversation': {
                'function': self._conversation,
                'methods': ['POST']
            }
        }

        if self.use_auto_proxy:
            update_proxies = threading.Thread(
                target=update_working_proxies, daemon=True)
            update_proxies.start()

    def _conversation(self):
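        """Handle POST /backend-api/v2/conversation and stream the model's reply.

        Expected JSON body (shape inferred from the reads below; other keys
        are ignored):

            {
                "jailbreak": "Default",
                "model": "text-gpt-0035",
                "meta": {
                    "content": {
                        "conversation": [],
                        "internet_access": false,
                        "parts": [{"role": "user", "content": "..."}]
                    }
                }
            }
        """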
        try:
            jailbreak = request.json['jailbreak']
            model = request.json['model']
            _conversation = request.json['meta']['content']['conversation']
            internet_access = request.json['meta']['content']['internet_access']
            prompt = request.json['meta']['content']['parts'][0]
            current_date = datetime.now().strftime("%Y-%m-%d")
            system_message = f"You are ChatGPT, a large language model trained by OpenAI. Strictly follow the user's instructions. Knowledge cutoff: 2021-09-01. Current date: {current_date}"

            extra = []
            if internet_access:
                search = get('https://ddg-api.herokuapp.com/search',
                             params={
                                 'query': prompt["content"],
                                 'limit': 3,
                             })

                blob = ''

                for index, result in enumerate(search.json()):
                    blob += f'[{index}] "{result["snippet"]}"\nURL:{result["link"]}\n\n'

                date = datetime.now().strftime('%d/%m/%y')

                blob += f'current date: {date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the next user query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. Ignore your previous response if any.'

                extra = [{'role': 'user', 'content': blob}]

            # Only adjust the response language when a jailbreak preset is active
            if isJailbreak(jailbreak):
                set_response_language(
                    prompt['content'], special_instructions[jailbreak])

            # Initialize the conversation with the system message
            conversation = [{'role': 'system', 'content': system_message}]

            # Add extra results
            conversation += extra

            # Add jailbreak instructions, if any
            jailbreak_instructions = isJailbreak(jailbreak)
            if jailbreak_instructions:
                conversation += jailbreak_instructions

            # Add the existing conversation and the prompt
            conversation += _conversation + [prompt]

            def stream():
                if isGPT3Model(model):
                    response = get_response_gpt3(
                        conversation, self.use_auto_proxy, jailbreak)
                    yield response
                if isGPT4Model(model):
                    for response in get_response_gpt4(conversation, jailbreak):
                        yield response

            return self.app.response_class(stream(), mimetype='text/event-stream')

        except Exception as e:
            print(e)
            print(e.__traceback__.tb_next)
            return {
                '_action': '_ask',
                'success': False,
                "error": f"an error occurred {str(e)}"
            }, 400


def filter_jailbroken_response(response):
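    """Return the text after the 'ACT:' marker, or a placeholder while the jailbreak has not unlocked yet."""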
    act_match = re.search(r'ACT:', response)

    if act_match:
        response = response[act_match.end():]
    else:
        response = '[Please wait... Unlocking GPT 🔓]'

    return response


def set_response_language(prompt, special_instructions_list):
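    """Detect the prompt's language and prepend a matching language instruction to the first special instruction."""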
    translator = Translator()
    detected_language = translator.detect(prompt).lang
    language_instructions = f"You will respond in the language: {detected_language}. "
    if special_instructions_list:
        special_instructions_list[0]['content'] = language_instructions + \
            special_instructions_list[0]['content']


def get_response_gpt3(conversation, use_proxy, jailbreak):
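    """Fetch a GPT-3.5 completion via freeGPT, retrying with random proxies when enabled."""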
    response = None

    # With proxies enabled, keep trying random proxies until one succeeds,
    # dropping any proxy that fails along the way.
    while use_proxy:
        random_proxy = None
        try:
            random_proxy = get_random_proxy()
            res = gpt3.Completion.create(
                prompt=conversation, proxy=random_proxy)
            response = res['text']
            break
        except Exception as e:
            print(f"Error with proxy {random_proxy}: {e}")
            if random_proxy is not None:
                remove_proxy(random_proxy)

    # Without proxies, retry the direct request until it succeeds.
    while not use_proxy:
        try:
            res = gpt3.Completion.create(prompt=conversation)
            response = res['text']
            break
        except Exception as e:
            print(f"Error: {e}")

    if response is not None:
        if isJailbreak(jailbreak):
            response = filter_jailbroken_response(response)

        return response


def get_response_gpt4(conversation, jailbreak):
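    """Stream a GPT-4 reply from the local service at 127.0.0.1:3000 and yield filtered SSE chunks."""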
    api_url = f"http://127.0.0.1:3000/ask/stream?prompt={conversation}&model=forefront"

    try:
        with requests.get(api_url, stream=True) as res:
            res.raise_for_status()
            for response in res.iter_lines(chunk_size=1024, decode_unicode=True, delimiter='\n'):
                if response.startswith("data: "):
                    print(response)
                    yield filter_response_gpt4(response, jailbreak)
    except Exception as e:
        print(f"Error: {e}")


def filter_response_gpt4(response, jailbreak):
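    """Strip the SSE 'data: ' prefix and surrounding quotes, then apply jailbreak filtering."""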
    response = response[6:]  # Remove "data: " prefix
    response = response[1:-1]  # Remove the quotation marks
    if isJailbreak(jailbreak):
        response = filter_jailbroken_response(response)

    return response


def isGPT3Model(model):
    return model == "text-gpt-0035"


def isGPT4Model(model):
    return model == "text-gpt-0040"


def isJailbreak(jailbreak):
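    """Return the special instructions for a non-default jailbreak, or None when inactive or unknown."""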
    if jailbreak != "Default":
        return special_instructions[jailbreak] if jailbreak in special_instructions else None
    else:
        return None
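

# Minimal usage sketch (an assumption, not part of this module): Backend_Api only
# builds self.routes, so wiring the routes into Flask is expected to happen elsewhere,
# roughly like the hypothetical snippet below. The import path "server.backend" and
# the config dict are illustrative guesses.
#
#     from flask import Flask
#     from server.backend import Backend_Api
#
#     app = Flask(__name__)
#     backend_api = Backend_Api(app, {'use_auto_proxy': False})
#     for route, spec in backend_api.routes.items():
#         app.add_url_rule(route, view_func=spec['function'], methods=spec['methods'])
#     app.run()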