Commit 82f1bf5: init
zetavg committed
Parent(s): (none; initial commit)

Files changed:
- .gitignore +4 -0
- __init__.py +0 -0
- app.py +46 -0
- llama_lora/__init__.py +0 -0
- llama_lora/globals.py +14 -0
- llama_lora/ui/__init__.py +0 -0
- llama_lora/ui/inference_tab.py +10 -0
- llama_lora/ui/main_page.py +21 -0
- requirements.txt +12 -0
.gitignore
ADDED
@@ -0,0 +1,4 @@
+__pycache__/
+.venv
+/venv
+.vscode
__init__.py
ADDED
File without changes
app.py
ADDED
@@ -0,0 +1,46 @@
+import os
+import sys
+
+import fire
+import gradio as gr
+
+from llama_lora.globals import Global
+from llama_lora.ui.main_page import main_page
+
+
+def main(
+    load_8bit: bool = False,
+    base_model: str = "",
+    data_dir: str = "",
+    # Allows to listen on all interfaces by providing '0.0.0.0'.
+    server_name: str = "127.0.0.1",
+    share: bool = False,
+    ui_show_sys_info: bool = True,
+):
+    base_model = base_model or os.environ.get("BASE_MODEL", "")
+    assert (
+        base_model
+    ), "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
+
+    assert (
+        data_dir
+    ), "Please specify a --data_dir, e.g. --data_dir='./data'"
+
+    Global.base_model = base_model
+    Global.data_dir = data_dir
+    Global.data_dir = data_dir
+    Global.load_8bit = load_8bit
+
+    Global.ui_show_sys_info = ui_show_sys_info
+
+    os.makedirs(data_dir, exist_ok=True)
+    os.makedirs(f"{data_dir}/lora", exist_ok=True)
+
+    with gr.Blocks(title=Global.ui_title) as demo:
+        main_page()
+
+    demo.queue().launch(server_name=server_name, share=share)
+
+
+if __name__ == "__main__":
+    fire.Fire(main)
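Usage sketch (illustration, not part of the commit): `fire.Fire(main)` exposes `main()`'s keyword arguments as command-line flags, so the app is launched with the flag values suggested by the assert messages above. Note the duplicated `Global.data_dir = data_dir` assignment is redundant but harmless.

    python app.py --base_model='decapoda-research/llama-7b-hf' --data_dir='./data'

Passing `--server_name='0.0.0.0'` makes the server listen on all interfaces, and `--share` requests a public Gradio link.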
llama_lora/__init__.py
ADDED
File without changes
llama_lora/globals.py
ADDED
@@ -0,0 +1,14 @@
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+
+class Global:
+    base_model: str = ""
+    data_dir: str = ""
+    load_8bit: bool = False
+
+    loaded_tokenizer: Any = None
+    loaded_base_model: Any = None
+
+    # UI related
+    ui_title: str = "LLaMA-LoRA"
+    ui_show_sys_info: bool = True
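`Global` acts as a process-wide singleton: all state lives in class attributes and the class is never instantiated, so every module that imports it sees the same values. A minimal sketch of the pattern, mirroring how app.py writes these fields and main_page.py reads them (the values below are placeholders):

    from llama_lora.globals import Global

    # The launcher writes configuration once at startup...
    Global.base_model = "decapoda-research/llama-7b-hf"
    Global.load_8bit = True

    # ...and any other module reads it later, with no instance needed.
    def describe() -> str:
        return f"{Global.ui_title}: {Global.base_model} (8-bit: {Global.load_8bit})"

    print(describe())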
llama_lora/ui/__init__.py
ADDED
File without changes
llama_lora/ui/inference_tab.py
ADDED
@@ -0,0 +1,10 @@
+import gradio as gr
+
+from ..globals import Global
+
+
+def inference_tab():
+    with gr.Tab("Inference"):
+        gr.Markdown("""
+            WIP.
+        """)
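The tab is only a placeholder for now. As a purely hypothetical sketch of how components could later slot into the same `with gr.Tab(...)` block (the names `prompt`, `output`, and `generate_btn` are invented for illustration and do not appear in this commit):

    import gradio as gr

    def inference_tab():
        with gr.Tab("Inference"):
            prompt = gr.Textbox(label="Prompt")    # hypothetical input field
            output = gr.Textbox(label="Output")    # hypothetical output field
            generate_btn = gr.Button("Generate")   # hypothetical trigger
            # Echo handler stands in for real model inference.
            generate_btn.click(lambda p: p, inputs=prompt, outputs=output)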
llama_lora/ui/main_page.py
ADDED
@@ -0,0 +1,21 @@
+import gradio as gr
+
+from ..globals import Global
+
+from .inference_tab import inference_tab
+
+
+def main_page():
+    with gr.Blocks(
+            title="LLaMA-LoRA",
+            css="") as demo:
+        gr.Markdown(f"""
+        # {Global.ui_title}
+
+        Hello world!
+        """)
+        inference_tab()
+        if Global.ui_show_sys_info:
+            gr.Markdown(f"""
+            <small>Data dir: `{Global.data_dir}`</small>
+            """)
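`main_page()` builds its own `gr.Blocks` but never launches it; app.py nests the call inside another `gr.Blocks` and launches that outer demo. A minimal standalone preview, mirroring what app.py does (the `Global` values are assumptions for the sketch):

    import gradio as gr

    from llama_lora.globals import Global
    from llama_lora.ui.main_page import main_page

    # app.py normally sets these; assumed here for a standalone preview.
    Global.base_model = "decapoda-research/llama-7b-hf"
    Global.data_dir = "./data"

    with gr.Blocks(title=Global.ui_title) as demo:
        main_page()

    demo.launch()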
requirements.txt
ADDED
@@ -0,0 +1,12 @@
+accelerate
+appdirs
+bitsandbytes
+black
+black[jupyter]
+datasets
+fire
+git+https://github.com/huggingface/peft.git
+git+https://github.com/huggingface/transformers.git
+gradio
+loralib
+sentencepiece
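Note that `peft` and `transformers` are pulled straight from their GitHub default branches rather than from PyPI releases, presumably because the LLaMA and LoRA support this project needs had not yet shipped in tagged versions; being unpinned, these two dependencies will drift as upstream moves. Installation is the usual:

    pip install -r requirements.txt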