# -*- coding: utf-8 -*-
# ===================================================
#
#   Author        : Fan Zhang
#   Email         : zhangfan@baai.ac.cn
#   Institute     : Beijing Academy of Artificial Intelligence (BAAI)
#   Create On     : 2023-12-11 15:34
#   Last Modified : 2023-12-20 14:15
#   File Name     : app.py
#   Description   :
#
# ===================================================

import argparse

import gradio as gr

from demo.generation_frontend import build_generation
from demo.chat_frontend import build_chat

parser = argparse.ArgumentParser()
parser.add_argument("--title", type=str, default='Emu')
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int, default=9002)
parser.add_argument("--share", action="store_true")
parser.add_argument("--controller-url", type=str, default="http://218.91.113.230:9003")
parser.add_argument("--concurrency-count", type=int, default=2)
parser.add_argument("--disable-chat", action="store_true")
parser.add_argument("--disable-generate", action="store_true")
args = parser.parse_args()


if __name__ == "__main__":
    title = """Emu2: Generative Multimodal Models are In-Context Learners

[paper] | [code]

[使用说明] | [User Guide]"""

    # collect the enabled demo frontends and their tab names
    interface_list, tab_names = [], []
    if not args.disable_generate:
        demo_generation = build_generation(args)
        interface_list.append(demo_generation)
        tab_names.append("Multi-modal Generation")
    if not args.disable_chat:
        demo_chat = build_chat(args)
        interface_list.append(demo_chat)
        tab_names.append("Multi-modal Chat")

    # assemble the enabled demos into a single tabbed UI
    demo_all = gr.TabbedInterface(
        interface_list=interface_list,
        tab_names=tab_names,
        title=title,
        theme=gr.themes.Default(primary_hue="blue", secondary_hue="blue"),
    )

    # Gradio 4.x form of the same queue/launch call (uses default_concurrency_limit),
    # kept commented out:
    # demo_all.queue(
    #     max_size=20,
    #     status_update_rate=3,
    #     api_open=False,
    #     default_concurrency_limit=args.concurrency_count,
    # ).launch(
    #     share=args.share,
    # )

    # Gradio 3.x queue/launch API
    demo_all.queue(
        concurrency_count=args.concurrency_count,
        status_update_rate=3,
        api_open=False,
    ).launch(
        enable_queue=True,
        server_name=args.host,
        server_port=args.port,
        share=args.share,
    )
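
# A typical invocation might look like the following; every flag maps to an
# argparse option defined at the top of this file, and the controller address
# shown here is a hypothetical placeholder for wherever the model controller
# actually runs:
#
#   python app.py --host 0.0.0.0 --port 9002 \
#       --controller-url http://localhost:9003 --concurrency-count 2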