Merge pull request #253 from RongkangXiong/dev
Files changed:
- crazy_functions/__init__.py  +0 -0
- crazy_functions/解析项目源代码.py  +63 -0
- functional_crazy.py  +18 -0
- request_llm/README.md  +36 -0
- request_llm/bridge_tgui.py  +167 -0
- project_self_analysis.md → self_analysis.md  +0 -0 (renamed)
crazy_functions/__init__.py
ADDED
Empty file (no content).
crazy_functions/解析项目源代码.py
CHANGED
@@ -148,3 +148,66 @@ def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
         return
     yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
 
+
+@CatchException
+def 解析一个Java项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+    history = []    # clear the history to avoid overflowing the input
+    import glob, os
+    if os.path.exists(txt):
+        project_folder = txt
+    else:
+        if txt == "": txt = '空空如也的输入栏'
+        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        yield chatbot, history, '正常'
+        return
+    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \
+                    [f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \
+                    [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \
+                    [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)]
+    if len(file_manifest) == 0:
+        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}")
+        yield chatbot, history, '正常'
+        return
+    yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
+
+
+@CatchException
+def 解析一个Rect项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+    history = []    # clear the history to avoid overflowing the input
+    import glob, os
+    if os.path.exists(txt):
+        project_folder = txt
+    else:
+        if txt == "": txt = '空空如也的输入栏'
+        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        yield chatbot, history, '正常'
+        return
+    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \
+                    [f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \
+                    [f for f in glob.glob(f'{project_folder}/**/*.json', recurs=True)] + \
+                    [f for f in glob.glob(f'{project_folder}/**/*.js', recursive=True)] + \
+                    [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
+    if len(file_manifest) == 0:
+        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何Rect文件: {txt}")
+        yield chatbot, history, '正常'
+        return
+    yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
+
+
+@CatchException
+def 解析一个Golang项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+    history = []    # clear the history to avoid overflowing the input
+    import glob, os
+    if os.path.exists(txt):
+        project_folder = txt
+    else:
+        if txt == "": txt = '空空如也的输入栏'
+        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        yield chatbot, history, '正常'
+        return
+    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)]
+    if len(file_manifest) == 0:
+        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
+        yield chatbot, history, '正常'
+        return
+    yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
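All three additions follow the same pattern: validate the input path, build a file manifest with recursive globs, and delegate to 解析源代码. A minimal standalone sketch of that manifest step, with the folder path and extension list hypothetical and the chatbot/history plumbing omitted:

```python
import glob
import os

def build_manifest(project_folder, extensions):
    # Collect matching source files recursively, one glob per extension,
    # mirroring the chained glob.glob calls in the diff above.
    manifest = []
    for ext in extensions:
        manifest += glob.glob(f'{project_folder}/**/*.{ext}', recursive=True)
    return manifest

# Hypothetical usage: the Java variant above globs exactly these four extensions.
if os.path.exists('./demo_project'):
    print(build_manifest('./demo_project', ['java', 'jar', 'xml', 'sh']))
```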
functional_crazy.py
CHANGED
@@ -9,6 +9,9 @@ def get_crazy_functionals():
     from crazy_functions.解析项目源代码 import 解析一个Python项目
     from crazy_functions.解析项目源代码 import 解析一个C项目的头文件
     from crazy_functions.解析项目源代码 import 解析一个C项目
+    from crazy_functions.解析项目源代码 import 解析一个Golang项目
+    from crazy_functions.解析项目源代码 import 解析一个Java项目
+    from crazy_functions.解析项目源代码 import 解析一个Rect项目
     from crazy_functions.高级功能函数模板 import 高阶功能模板函数
     from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
 
@@ -30,6 +33,21 @@ def get_crazy_functionals():
             "AsButton": False,  # goes into the dropdown menu
             "Function": 解析一个C项目
         },
+        "解析整个Go项目": {
+            "Color": "stop",    # button color
+            "AsButton": False,  # goes into the dropdown menu
+            "Function": 解析一个Golang项目
+        },
+        "解析整个Java项目": {
+            "Color": "stop",    # button color
+            "AsButton": False,  # goes into the dropdown menu
+            "Function": 解析一个Java项目
+        },
+        "解析整个React项目": {
+            "Color": "stop",    # button color
+            "AsButton": False,  # goes into the dropdown menu
+            "Function": 解析一个Rect项目
+        },
         "读Tex论文写摘要": {
             "Color": "stop",    # button color
             "Function": 读文章写摘要
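Each new registry entry maps a menu label to a button color, an AsButton flag, and the generator function to invoke. A minimal sketch of how a consumer could route these entries; `route_entries` is a hypothetical stand-in, not the actual Gradio wiring in main.py:

```python
# Hypothetical consumer of the registry returned by get_crazy_functionals():
# entries with "AsButton": False go to the dropdown, the rest become buttons.
def route_entries(crazy_fns):
    for label, spec in crazy_fns.items():
        target = "button" if spec.get("AsButton", True) else "dropdown"
        print(f"{label}: {target}, color={spec.get('Color', 'secondary')}")

route_entries({
    "解析整个Go项目": {"Color": "stop", "AsButton": False, "Function": None},
})
```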
request_llm/README.md
ADDED
@@ -0,0 +1,36 @@
+# How to use other large language models
+
+## 1. Run text-generation first
+``` sh
+# download the model server (text-generation is an impressive project, don't forget to give it a star)
+git clone https://github.com/oobabooga/text-generation-webui.git
+
+# install text-generation's extra dependencies
+pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers
+
+# change into the repo
+cd text-generation-webui
+
+# download a model
+python download-model.py facebook/galactica-1.3b
+# other options include facebook/opt-1.3b
+#                       facebook/galactica-6.7b
+#                       facebook/galactica-120b
+#                       facebook/pygmalion-1.3b etc.
+# see https://github.com/oobabooga/text-generation-webui for details
+
+# launch text-generation; note that the slash in the model name becomes an underscore
+python server.py --cpu --listen --listen-port 7860 --model facebook_galactica-1.3b
+```
+
+## 2. Edit config.py
+``` sh
+# the LLM_MODEL format is a bit involved: TGUI:[model]@[ws address]:[ws port]; the port must match the one given above
+LLM_MODEL = "TGUI:galactica-1.3b@localhost:7860"
+```
+
+## 3. Run!
+``` sh
+cd chatgpt-academic
+python main.py
+```
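The bridge in request_llm/bridge_tgui.py (next section) splits this value at import time. A minimal sketch of that parse, shown here for reference; note that as the code is written, the `TGUI:` prefix stays attached to the model name:

```python
# Mirrors the LLM_MODEL parsing in request_llm/bridge_tgui.py.
llm_model = "TGUI:galactica-1.3b@localhost:7860"
model_name, addr_port = llm_model.split('@')
assert ':' in addr_port, "malformed LLM_MODEL: " + llm_model
addr, port = addr_port.split(':')
print(model_name, addr, port)   # TGUI:galactica-1.3b localhost 7860
```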
request_llm/bridge_tgui.py
ADDED
@@ -0,0 +1,167 @@
+'''
+Contributed by SagsMug. Modified by binary-husky
+https://github.com/oobabooga/text-generation-webui/pull/175
+'''
+
+import asyncio
+import json
+import random
+import string
+import websockets
+import logging
+import time
+import threading
+import importlib
+from toolbox import get_conf
+LLM_MODEL, = get_conf('LLM_MODEL')
+
+# "TGUI:galactica-1.3b@localhost:7860"
+model_name, addr_port = LLM_MODEL.split('@')
+assert ':' in addr_port, "LLM_MODEL 格式不正确!" + LLM_MODEL
+addr, port = addr_port.split(':')
+
+def random_hash():
+    letters = string.ascii_lowercase + string.digits
+    return ''.join(random.choice(letters) for i in range(9))
+
+async def run(context, max_token=512):
+    params = {
+        'max_new_tokens': max_token,
+        'do_sample': True,
+        'temperature': 0.5,
+        'top_p': 0.9,
+        'typical_p': 1,
+        'repetition_penalty': 1.05,
+        'encoder_repetition_penalty': 1.0,
+        'top_k': 0,
+        'min_length': 0,
+        'no_repeat_ngram_size': 0,
+        'num_beams': 1,
+        'penalty_alpha': 0,
+        'length_penalty': 1,
+        'early_stopping': True,
+        'seed': -1,
+    }
+    session = random_hash()
+
+    async with websockets.connect(f"ws://{addr}:{port}/queue/join") as websocket:
+        while content := json.loads(await websocket.recv()):
+            # Python 3.10 syntax; replace with if/elif on older versions
+            if content["msg"] == "send_hash":
+                await websocket.send(json.dumps({
+                    "session_hash": session,
+                    "fn_index": 12
+                }))
+            elif content["msg"] == "estimation":
+                pass
+            elif content["msg"] == "send_data":
+                await websocket.send(json.dumps({
+                    "session_hash": session,
+                    "fn_index": 12,
+                    "data": [
+                        context,
+                        params['max_new_tokens'],
+                        params['do_sample'],
+                        params['temperature'],
+                        params['top_p'],
+                        params['typical_p'],
+                        params['repetition_penalty'],
+                        params['encoder_repetition_penalty'],
+                        params['top_k'],
+                        params['min_length'],
+                        params['no_repeat_ngram_size'],
+                        params['num_beams'],
+                        params['penalty_alpha'],
+                        params['length_penalty'],
+                        params['early_stopping'],
+                        params['seed'],
+                    ]
+                }))
+            elif content["msg"] == "process_starts":
+                pass
+            elif content["msg"] in ["process_generating", "process_completed"]:
+                yield content["output"]["data"][0]
+                # You can search for your desired end indicator and
+                # stop generation by closing the websocket here
+                if (content["msg"] == "process_completed"):
+                    break
+
+
+
+
+def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='', stream=True, additional_fn=None):
+    """
+    Send to chatGPT and fetch the output as a stream.
+    Used for the basic conversation feature.
+    inputs is the input of this query
+    top_p, temperature are internal tuning parameters of chatGPT
+    history is the list of earlier conversation rounds (note: overly long inputs or history will trigger a token-overflow error)
+    chatbot is the conversation list shown in the WebUI; modify it, then yield it out to update the chat interface directly
+    additional_fn indicates which button was clicked; see functional.py for the buttons
+    """
+    if additional_fn is not None:
+        import functional
+        importlib.reload(functional)    # hot-reload the prompts
+        functional = functional.get_functionals()
+        if "PreProcess" in functional[additional_fn]: inputs = functional[additional_fn]["PreProcess"](inputs)  # fetch the pre-processing function (if any)
+        inputs = functional[additional_fn]["Prefix"] + inputs + functional[additional_fn]["Suffix"]
+
+    raw_input = "What I would like to say is the following: " + inputs
+    logging.info(f'[raw_input] {raw_input}')
+    history.extend([inputs, ""])
+    chatbot.append([inputs, ""])
+    yield chatbot, history, "等待响应"
+
+    prompt = inputs
+    tgui_say = ""
+
+    mutable = ["", time.time()]
+    def run_coorotine(mutable):
+        async def get_result(mutable):
+            async for response in run(prompt):
+                print(response[len(mutable[0]):])
+                mutable[0] = response
+                if (time.time() - mutable[1]) > 3:
+                    print('exit when no listener')
+                    break
+        asyncio.run(get_result(mutable))
+
+    thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True)
+    thread_listen.start()
+
+    while thread_listen.is_alive():
+        time.sleep(1)
+        mutable[1] = time.time()
+        # Print intermediate steps
+        if tgui_say != mutable[0]:
+            tgui_say = mutable[0]
+            history[-1] = tgui_say
+            chatbot[-1] = (history[-2], history[-1])
+            yield chatbot, history, "status_text"
+
+    logging.info(f'[response] {tgui_say}')
+
+
+
+def predict_tgui_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""):
+    raw_input = "What I would like to say is the following: " + inputs
+    prompt = inputs
+    tgui_say = ""
+    mutable = ["", time.time()]
+    def run_coorotine(mutable):
+        async def get_result(mutable):
+            async for response in run(prompt, max_token=20):
+                print(response[len(mutable[0]):])
+                mutable[0] = response
+                if (time.time() - mutable[1]) > 3:
+                    print('exit when no listener')
+                    break
+        asyncio.run(get_result(mutable))
+    thread_listen = threading.Thread(target=run_coorotine, args=(mutable,))
+    thread_listen.start()
+    while thread_listen.is_alive():
+        time.sleep(1)
+        mutable[1] = time.time()
+    tgui_say = mutable[0]
+    return tgui_say
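To exercise the bridge end to end, a minimal smoke test could look like the following sketch. It assumes the repo environment (toolbox, a config.py with a TGUI:... value for LLM_MODEL) and a text-generation-webui server already listening on the configured address; the prompt text is arbitrary:

```python
# Hypothetical smoke test: requires a running text-generation-webui server
# and a matching "TGUI:..." LLM_MODEL in config.py.
from request_llm.bridge_tgui import predict_tgui_no_ui

reply = predict_tgui_no_ui("Say hello in one short sentence.",
                           top_p=0.9, temperature=0.5)
print(reply)
```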
project_self_analysis.md → self_analysis.md
RENAMED
File without changes