# Source: Hugging Face Space "Limour/llama-python-streamingllm"
# (file-viewer chrome from the original page removed)
import os
from huggingface_hub import HfApi
# Hub API client; token read from the HF_TOKEN env var (None if unset).
API = HfApi(token=os.environ.get("HF_TOKEN"))
# The Space this app runs in — used by restart_space() to restart itself.
REPO_ID = "Limour/llama-python-streamingllm"
def restart_space():
    """Restart this Space via the Hub API (used as a crash-recovery hook)."""
    token = os.environ.get("HF_TOKEN")
    API.restart_space(repo_id=REPO_ID, token=token)
def init(cfg):
    """Wire up the shared ("common") Gradio button handlers.

    ``cfg`` is a dict carrying the model, UI components and shared state.
    Keys read here: 'model', 's_info', 'session_lock', 'btn_stop',
    'btn_reset', 'setting_cache_path', 'btn_concurrency', 'btn_finish',
    'btn_debug', 'vo'. Keys written by the handlers at click time:
    'btn_stop_status', 'session_active'.
    """
    # ========== common ==========
    model = cfg['model']
    s_info = cfg['s_info']

    def btn_reset(_cache_path):
        """Reload the saved session cache and mark the session active.

        Returns the model's venv info for display in ``s_info``. On any
        failure the whole Space is restarted (best-effort recovery) and
        the error is re-raised.
        """
        try:
            with cfg['session_lock']:
                _tmp = model.load_session(_cache_path)
                print(f'load cache from {_cache_path} {_tmp}')
                cfg['session_active'] = True
                return model.venv_info
        except Exception:
            restart_space()
            raise  # bare raise preserves the original traceback (was `raise e`)

    def btn_stop():
        # Cooperative stop flag, presumably polled by a generation loop
        # elsewhere in the app — confirm against the caller.
        cfg['btn_stop_status'] = True

    cfg['btn_stop'].click(
        fn=btn_stop
    )
    # Reset: request a stop first, then (on success) reload the cache
    # and finally run the configured finish step.
    cfg['btn_reset'].click(
        fn=btn_stop
    ).success(
        fn=btn_reset,
        inputs=cfg['setting_cache_path'],
        outputs=s_info,
        **cfg['btn_concurrency']
    ).success(
        **cfg['btn_finish']
    )
    # Debug: dump the model's raw input-token buffer as text.
    cfg['btn_debug'].click(
        fn=lambda: model.str_detokenize(model._input_ids),
        outputs=cfg['vo']
    )