faisalhr1997 and mikeee committed on
Commit 79772ab
0 Parent(s):

Duplicate from mikeee/falcon-7b-ggml


Co-authored-by: mikeee <mikeee@users.noreply.huggingface.co>

Files changed (5)
  1. .gitattributes +35 -0
  2. .ruff.toml +18 -0
  3. README.md +13 -0
  4. app.py +262 -0
  5. requirements.txt +6 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.ruff.toml ADDED
@@ -0,0 +1,18 @@
+ # Assume Python 3.10.
+ target-version = "py310"
+ # Raise the maximum line length to 1000 characters.
+ line-length = 1000
+
+ # pyflakes, pycodestyle, isort
+ # flake8 YTT, pydocstyle D, pylint PLC
+ select = ["F", "E", "W", "I001", "YTT", "D", "PLC"]
+ # select = ["ALL"]
+
+ # D100 Missing docstring in public module
+ # D103 Missing docstring in public function
+ # D101 Missing docstring in public class
+ # `multi-line-summary-first-line` (D212)
+ # `one-blank-line-before-class` (D203)
+ extend-ignore = ["D100", "D103", "D101", "D212", "D203"]
+
+ exclude = [".venv"]
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: falcon-7b-ggml
+ emoji: 🦅
+ colorFrom: yellow
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.36.1
+ app_file: app.py
+ pinned: true
+ duplicated_from: mikeee/falcon-7b-ggml
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,262 @@
+ from pathlib import Path
+ from urllib.parse import urlparse
+
+ import gradio as gr
+ import psutil
+ from ctransformers import AutoModelForCausalLM
+ from huggingface_hub import hf_hub_download
+
+ _ = """
+ snapshot_download(
+     repo_id="TheBloke/falcon-7b-instruct-GGML",
+     allow_patterns="falcon7b-instruct.ggmlv3.q4_0.bin",
+     revision="ggmlv3",
+     local_dir="models",
+     local_dir_use_symlinks=False,  # default "auto"
+ )
+
+ hf_hub_download(
+     repo_id=repo_id,
+     filename=model_filename,
+     local_dir=local_path,
+     local_dir_use_symlinks=True,
+ )
+ # """
+ # 4.06G
+
+ _ = """
+ llm = AutoModelForCausalLM.from_pretrained(
+     "TheBloke/falcon-7b-instruct-GGML",
+     model_file="falcon7b-instruct.ggmlv3.q4_0.bin",
+     model_type="falcon", gpu_layers=32, threads=2,
+ )
+ # """
+ # _ = Path("models", "falcon7b-instruct.ggmlv3.q4_0.bin").absolute().as_posix()
+ # assert Path(_).exists(), f"{_} does not exist, perhaps snapshot_download failed?"
+
+ URL = "https://huggingface.co/TheBloke/falcon-7b-instruct-GGML/blob/main/falcon-7b-instruct.ggccv1.q4_1.bin"
+ URL = "https://huggingface.co/TheBloke/falcon-7b-instruct-GGML/blob/ggmlv3/falcon7b-instruct.ggmlv3.q4_1.bin"
+ repo_id = "/".join(urlparse(URL).path.strip("/").split("/")[:2])
+
+
+ model_file = Path(URL).name
+
+ _ = hf_hub_download(
+     repo_id=repo_id,
+     revision="ggmlv3",
+     filename=model_file,
+     local_dir="models",
+     # local_dir_use_symlinks=True,
+ )
+
+ llm = AutoModelForCausalLM.from_pretrained(
+     # repo_id,  # "TheBloke/falcon-7b-instruct-GGML",
+     # model_file=model_file,
+     # model_file=_,
+     _,
+     model_type="falcon",
+     threads=psutil.cpu_count(logical=False),
+ )
+
+ TITLE = f"""<h2 align="center">🦅 Falcon-Chat ggml ({model_file}) 🦅"""
+ USER_NAME = "User"
+ BOT_NAME = "Falcon"
+ DEFAULT_INSTRUCTIONS = """The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Falcon, and a human user, called User. In the following interactions, User and Falcon will converse in natural language, and Falcon will answer User's questions. Falcon was built to be respectful, polite and inclusive. Falcon was built by the Technology Innovation Institute in Abu Dhabi. Falcon will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins.
+ """
+ RETRY_COMMAND = "/retry"
+ STOP_STR = f"\n{USER_NAME}:"
+ STOP_SUSPECT_LIST = [":", "\n", "User"]
+
+
+ def chat_accordion():
+     with gr.Accordion("Parameters", open=False):
+         temperature = gr.Slider(
+             minimum=0.1,
+             maximum=2.0,
+             value=0.8,
+             step=0.1,
+             interactive=True,
+             label="Temperature",
+         )
+         top_p = gr.Slider(
+             minimum=0.1,
+             maximum=0.99,
+             value=0.9,
+             step=0.01,
+             interactive=True,
+             label="p (nucleus sampling)",
+         )
+     return temperature, top_p
+
+
+ def format_chat_prompt(message: str, chat_history, instructions: str) -> str:
+     instructions = instructions.strip(" ").strip("\n")
+     prompt = instructions
+     for turn in chat_history:
+         user_message, bot_message = turn
+         prompt = f"{prompt}\n{USER_NAME}: {user_message}\n{BOT_NAME}: {bot_message}"
+     prompt = f"{prompt}\n{USER_NAME}: {message}\n{BOT_NAME}:"
+     return prompt
+
+
+ def chat():
+     with gr.Column(elem_id="chat_container"):
+         with gr.Row():
+             chatbot = gr.Chatbot(elem_id="chatbot")
+         with gr.Row():
+             inputs = gr.Textbox(
+                 placeholder=f"Hello {BOT_NAME} !!",
+                 label="Type an input and press Enter",
+                 max_lines=3,
+             )
+
+     with gr.Row(elem_id="button_container"):
+         with gr.Column():
+             retry_button = gr.Button("♻️ Retry last turn")
+         with gr.Column():
+             delete_turn_button = gr.Button("🧽 Delete last turn")
+         with gr.Column():
+             clear_chat_button = gr.Button("✨ Delete all history")
+
+     gr.Examples(
+         [
+             ["Hey Falcon! Any recommendations for my holidays in Abu Dhabi?"],
+             ["What's the Everett interpretation of quantum mechanics?"],
+             [
+                 "Give me a list of the top 10 dive sites you would recommend around the world."
+             ],
+             ["Can you tell me more about deep-water soloing?"],
+             [
+                 "Can you write a short tweet about the Apache 2.0 release of our latest AI model, Falcon LLM?"
+             ],
+         ],
+         inputs=inputs,
+         label="Click on any example and press Enter in the input textbox!",
+     )
+
+     with gr.Row(elem_id="param_container"):
+         with gr.Column():
+             temperature, top_p = chat_accordion()
+         with gr.Column():
+             with gr.Accordion("Instructions", open=False):
+                 instructions = gr.Textbox(
+                     placeholder="LLM instructions",
+                     value=DEFAULT_INSTRUCTIONS,
+                     lines=10,
+                     interactive=True,
+                     label="Instructions",
+                     max_lines=16,
+                     show_label=False,
+                 )
+
+     def run_chat(
+         message: str, chat_history, instructions: str, temperature: float, top_p: float
+     ):
+         if not message or (message == RETRY_COMMAND and len(chat_history) == 0):
+             yield chat_history
+             return
+
+         if message == RETRY_COMMAND and chat_history:
+             prev_turn = chat_history.pop(-1)
+             user_message, _ = prev_turn
+             message = user_message
+
+         prompt = format_chat_prompt(message, chat_history, instructions)
+         chat_history = chat_history + [[message, ""]]
+         stream = llm(
+             prompt,
+             max_new_tokens=1024,
+             stop=[STOP_STR, "<|endoftext|>"],
+             temperature=temperature,
+             top_p=top_p,
+             stream=True,
+         )
+         acc_text = ""
+         for idx, response in enumerate(stream):
+             text_token = response
+
+             if text_token in STOP_SUSPECT_LIST:
+                 acc_text += text_token
+                 continue
+
+             if idx == 0 and text_token.startswith(" "):
+                 text_token = text_token[1:]
+
+             acc_text += text_token
+             last_turn = list(chat_history.pop(-1))
+             last_turn[-1] += acc_text
+             chat_history = chat_history + [last_turn]
+             yield chat_history
+             acc_text = ""
+
+     def delete_last_turn(chat_history):
+         if chat_history:
+             chat_history.pop(-1)
+         return {chatbot: gr.update(value=chat_history)}
+
+     def run_retry(
+         message: str, chat_history, instructions: str, temperature: float, top_p: float
+     ):
+         yield from run_chat(
+             RETRY_COMMAND, chat_history, instructions, temperature, top_p
+         )
+
+     def clear_chat():
+         return []
+
+     inputs.submit(
+         run_chat,
+         [inputs, chatbot, instructions, temperature, top_p],
+         outputs=[chatbot],
+         show_progress="minimal",
+     )
+     inputs.submit(lambda: "", inputs=None, outputs=inputs)
+     delete_turn_button.click(delete_last_turn, inputs=[chatbot], outputs=[chatbot])
+     retry_button.click(
+         run_retry,
+         [inputs, chatbot, instructions, temperature, top_p],
+         outputs=[chatbot],
+         show_progress="minimal",
+     )
+     clear_chat_button.click(clear_chat, [], chatbot)
+
+
+ def get_demo():
+     with gr.Blocks(
+         # css=None
+         # css="""#chat_container {width: 700px; margin-left: auto; margin-right: auto;}
+         # #button_container {width: 700px; margin-left: auto; margin-right: auto;}
+         # #param_container {width: 700px; margin-left: auto; margin-right: auto;}"""
+         css="""#chatbot {
+     font-size: 14px;
+     min-height: 300px;
+ }"""
+     ) as demo:
+         gr.HTML(TITLE)
+
+         with gr.Row():
+             with gr.Column():
+                 gr.Markdown(
+                     """**Chat with [Falcon-7b-Instruct](https://huggingface.co/tiiuae/falcon-7b-instruct), brainstorm ideas, discuss your holiday plans, and more!**
+
+ ✨ This demo is powered by [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b), finetuned on the [Baize](https://github.com/project-baize/baize-chatbot) dataset, and running here as a quantized ggml model via [ctransformers](https://github.com/marella/ctransformers). [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b) is a state-of-the-art large language model built by the [Technology Innovation Institute](https://www.tii.ae) in Abu Dhabi. It is trained on 1 trillion tokens (including [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)) and available under the Apache 2.0 license. It currently holds the 🥇 1st place on the [🤗 Open LLM leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). This demo is made available by the [HuggingFace H4 team](https://huggingface.co/HuggingFaceH4).
+
+ 🧪 This is only a **first experimental preview**: the [H4 team](https://huggingface.co/HuggingFaceH4) intends to provide increasingly capable versions of Falcon Chat in the future, based on improved datasets and RLHF/RLAIF.
+
+ 👀 **Learn more about Falcon LLM:** [falconllm.tii.ae](https://falconllm.tii.ae/)
+
+ ➡️️ **Intended Use**: this demo is intended to showcase an early finetuning of [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b), to illustrate the impact (and limitations) of finetuning on a dataset of conversations and instructions. We encourage the community to further build upon the base model, and to create even better instruct/chat versions!
+
+ ⚠️ **Limitations**: the model can and will produce factually incorrect information, hallucinating facts and actions. As it has not undergone any advanced tuning/alignment, it can produce problematic outputs, especially if prompted to do so. Finally, this demo is limited to a session length of about 1,000 words.
+ """
+                 )
+
+         chat()
+
+     return demo
+
+
+ if __name__ == "__main__":
+     demo = get_demo()
+     demo.queue(max_size=64, concurrency_count=8)
+     demo.launch(server_name="0.0.0.0", server_port=7860)
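
As a quick illustration of the prompt layout that format_chat_prompt() in app.py produces, here is a standalone sketch reusing the same constants; the sample history and messages are made up for illustration:

    USER_NAME = "User"
    BOT_NAME = "Falcon"

    def format_chat_prompt(message, chat_history, instructions):
        # Same logic as app.py: instructions, then alternating turns.
        prompt = instructions.strip(" ").strip("\n")
        for user_message, bot_message in chat_history:
            prompt = f"{prompt}\n{USER_NAME}: {user_message}\n{BOT_NAME}: {bot_message}"
        return f"{prompt}\n{USER_NAME}: {message}\n{BOT_NAME}:"

    history = [["Hi!", "Hello! How can I help?"]]
    print(format_chat_prompt("Tell me about Abu Dhabi.", history, "Be concise."))
    # Be concise.
    # User: Hi!
    # Falcon: Hello! How can I help?
    # User: Tell me about Abu Dhabi.
    # Falcon:

Generation then continues after the trailing "Falcon:" until the model emits STOP_STR ("\nUser:") or <|endoftext|>.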
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ markdown
+ loguru
+ ctransformers
+ langchain
+ # gradio
+ psutil
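
To try the Space locally, a minimal sketch (assuming a fresh virtual environment; gradio is commented out above because the Space runtime provides it, pinned to 3.36.1 in README.md):

    pip install -r requirements.txt "gradio==3.36.1"
    python app.py  # serves on http://localhost:7860 per demo.launch()

Note that the first run downloads the roughly 4 GB ggml model file from TheBloke/falcon-7b-instruct-GGML.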