coeuslearning committed
Commit 5c46523 · Parent(s): 0005f3b
Update app.py
app.py
CHANGED
@@ -15,10 +15,7 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-
-This Space demonstrates model [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, a Llama 2 model with 7B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
-For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
-Looking for an even more powerful model? Check out the [13B version](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat) or the large [70B model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
+Let not your LLM learn Personal Identifiable Information. Protecto is here to help you.
 """
 
 LICENSE = """
@@ -86,59 +83,13 @@ def generate(
 
 chat_interface = gr.ChatInterface(
     fn=generate,
-    additional_inputs=[
-        gr.Textbox(label="System prompt", lines=6),
-        gr.Slider(
-            label="Max new tokens",
-            minimum=1,
-            maximum=MAX_MAX_NEW_TOKENS,
-            step=1,
-            value=DEFAULT_MAX_NEW_TOKENS,
-        ),
-        gr.Slider(
-            label="Temperature",
-            minimum=0.1,
-            maximum=4.0,
-            step=0.1,
-            value=0.6,
-        ),
-        gr.Slider(
-            label="Top-p (nucleus sampling)",
-            minimum=0.05,
-            maximum=1.0,
-            step=0.05,
-            value=0.9,
-        ),
-        gr.Slider(
-            label="Top-k",
-            minimum=1,
-            maximum=1000,
-            step=1,
-            value=50,
-        ),
-        gr.Slider(
-            label="Repetition penalty",
-            minimum=1.0,
-            maximum=2.0,
-            step=0.05,
-            value=1.2,
-        ),
-    ],
     stop_btn=None,
-    examples=[
-        ["Hello there! How are you doing?"],
-        ["Can you explain briefly to me what is the Python programming language?"],
-        ["Explain the plot of Cinderella in a sentence."],
-        ["How many hours does it take a man to eat a Helicopter?"],
-        ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
-    ],
 )
 
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
     chat_interface.render()
-    gr.Markdown(LICENSE)
 
 if __name__ == "__main__":
     demo.queue(max_size=20).launch()
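For context, the app that results from this commit reduces to roughly the sketch below. It is assembled only from the unchanged context lines in the hunks above; the model loading code and the body of `generate()` fall outside the visible diff, so the stub used here is a placeholder assumption, not the Space's actual implementation.

```python
import os

import gradio as gr

MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

DESCRIPTION = """\
Let not your LLM learn Personal Identifiable Information. Protecto is here to help you.
"""


def generate(message, chat_history):
    # Placeholder: the real generation logic (model setup, streaming, any PII
    # handling) is not shown in this diff and is assumed here for illustration.
    yield "..."


# After this commit the chat UI exposes no extra sliders or example prompts.
chat_interface = gr.ChatInterface(
    fn=generate,
    stop_btn=None,
)

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    chat_interface.render()

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
```

The net effect of the commit is to replace the Llama-2 description with the Protecto PII message and to drop the generation-parameter sliders, example prompts, and license block, leaving a bare chat interface.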