Spaces:
Running
Running
File size: 6,887 Bytes
39cd651 009d48a 744538e 39cd651 744538e f85da29 000fcec f85da29 009d48a f85da29 009d48a ee163cc f3f614f 65681d8 ee163cc 7a87942 ee163cc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 |
"""Space launcher: install dependencies, then start the MindSearch backend.

Runs ``install.sh`` to completion, then launches ``mindsearch.app`` as a
detached background process (equivalent to the old ``... &`` shell form).
"""
import os
import subprocess
import sys

# Run the install script to completion; check=True surfaces a non-zero
# exit status as CalledProcessError instead of silently continuing.
subprocess.run(["bash", "install.sh"], check=True)

# Launch the MindSearch server without blocking (replaces `os.system("... &")`).
# sys.executable guarantees the same interpreter that is running this script.
subprocess.Popen(
    [
        sys.executable,
        "-m",
        "mindsearch.app",
        "--lang",
        "en",
        "--model_format",
        "internlm_server",
    ]
)
# from flask import Flask, send_from_directory
# app = Flask(__name__, static_folder='dist')
# @app.route('/')
# def serve_index():
# return send_from_directory(app.static_folder, 'index.html')
# @app.route('/<path:path>')
# def serve_file(path):
# return send_from_directory(app.static_folder, path)
# if __name__ == '__main__':
# subprocess.Popen(["python", "-m", "mindsearch.app", "--lang", "en", "--model_format", "internlm_server"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# app.run(debug=False, port=7860, host="0.0.0.0")
# import json
# import gradio as gr
# import requests
# from lagent.schema import AgentStatusCode
# PLANNER_HISTORY = []
# SEARCHER_HISTORY = []
# def rst_mem(history_planner: list, history_searcher: list):
# '''
# Reset the chatbot memory.
# '''
# history_planner = []
# history_searcher = []
# if PLANNER_HISTORY:
# PLANNER_HISTORY.clear()
# return history_planner, history_searcher
# def format_response(gr_history, agent_return):
# if agent_return['state'] in [
# AgentStatusCode.STREAM_ING, AgentStatusCode.ANSWER_ING
# ]:
# gr_history[-1][1] = agent_return['response']
# elif agent_return['state'] == AgentStatusCode.PLUGIN_START:
# thought = gr_history[-1][1].split('```')[0]
# if agent_return['response'].startswith('```'):
# gr_history[-1][1] = thought + '\n' + agent_return['response']
# elif agent_return['state'] == AgentStatusCode.PLUGIN_END:
# thought = gr_history[-1][1].split('```')[0]
# if isinstance(agent_return['response'], dict):
# gr_history[-1][
# 1] = thought + '\n' + f'```json\n{json.dumps(agent_return["response"], ensure_ascii=False, indent=4)}\n```' # noqa: E501
# elif agent_return['state'] == AgentStatusCode.PLUGIN_RETURN:
# assert agent_return['inner_steps'][-1]['role'] == 'environment'
# item = agent_return['inner_steps'][-1]
# gr_history.append([
# None,
# f"```json\n{json.dumps(item['content'], ensure_ascii=False, indent=4)}\n```"
# ])
# gr_history.append([None, ''])
# return
# def predict(history_planner, history_searcher):
# def streaming(raw_response):
# for chunk in raw_response.iter_lines(chunk_size=8192,
# decode_unicode=False,
# delimiter=b'\n'):
# if chunk:
# decoded = chunk.decode('utf-8')
# if decoded == '\r':
# continue
# if decoded[:6] == 'data: ':
# decoded = decoded[6:]
# elif decoded.startswith(': ping - '):
# continue
# response = json.loads(decoded)
# yield (response['response'], response['current_node'])
# global PLANNER_HISTORY
# PLANNER_HISTORY.append(dict(role='user', content=history_planner[-1][0]))
# new_search_turn = True
# url = 'http://localhost:8002/solve'
# headers = {'Content-Type': 'application/json'}
# data = {'inputs': PLANNER_HISTORY}
# raw_response = requests.post(url,
# headers=headers,
# data=json.dumps(data),
# timeout=20,
# stream=True)
# for resp in streaming(raw_response):
# agent_return, node_name = resp
# if node_name:
# if node_name in ['root', 'response']:
# continue
# agent_return = agent_return['nodes'][node_name]['detail']
# if new_search_turn:
# history_searcher.append([agent_return['content'], ''])
# new_search_turn = False
# format_response(history_searcher, agent_return)
# if agent_return['state'] == AgentStatusCode.END:
# new_search_turn = True
# yield history_planner, history_searcher
# else:
# new_search_turn = True
# format_response(history_planner, agent_return)
# if agent_return['state'] == AgentStatusCode.END:
# PLANNER_HISTORY = agent_return['inner_steps']
# yield history_planner, history_searcher
# return history_planner, history_searcher
# with gr.Blocks() as demo:
# gr.HTML("""<h1 align="center">WebAgent Gradio Simple Demo</h1>""")
# with gr.Row():
# with gr.Column(scale=10):
# with gr.Row():
# with gr.Column():
# planner = gr.Chatbot(label='planner',
# height=700,
# show_label=True,
# show_copy_button=True,
# bubble_full_width=False,
# render_markdown=True)
# with gr.Column():
# searcher = gr.Chatbot(label='searcher',
# height=700,
# show_label=True,
# show_copy_button=True,
# bubble_full_width=False,
# render_markdown=True)
# with gr.Row():
# user_input = gr.Textbox(show_label=False,
# placeholder='inputs...',
# lines=5,
# container=False)
# with gr.Row():
# with gr.Column(scale=2):
# submitBtn = gr.Button('Submit')
# with gr.Column(scale=1, min_width=20):
# emptyBtn = gr.Button('Clear History')
# def user(query, history):
# return '', history + [[query, '']]
# submitBtn.click(user, [user_input, planner], [user_input, planner],
# queue=False).then(predict, [planner, searcher],
# [planner, searcher])
# emptyBtn.click(rst_mem, [planner, searcher], [planner, searcher],
# queue=False)
# # subprocess.Popen(["python", "-m", "mindsearch.app", "--lang", "en", "--model_format", "internlm_server"], shell=True, stdout=sys.stdout, stderr=sys.stderr)
# demo.queue()
# demo.launch(server_name='0.0.0.0',
# server_port=7860,
# inbrowser=True,
# share=True)
# pass