AMKCode committed
Commit 5ce249a · 1 parent: 201e9f0

quit button reloads page

Files changed (1):
  1. app.py: +4 -28
app.py CHANGED
@@ -103,12 +103,8 @@ SUPPORTED_MODEL_TYPES = ['llama',
                          'cohere',
                          'minicpm']
 
-global is_cancelled
 
 def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuthToken | None, progress=gr.Progress()):
-    global is_cancelled
-    is_cancelled = False
-
     if oauth_token.token is None:
         return "Log in to Huggingface to use this"
     elif not hf_model_id:
@@ -145,11 +141,6 @@ def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuth
     except BaseException as error:
         os.system("rm -rf dist/")
         return error
-
-    if is_cancelled:
-        is_cancelled = False
-        os.system("rm -rf dist/")
-        return "Conversion cancelled"
 
     progress(0.5, desc="Converting weight to MLC")
 
@@ -160,11 +151,6 @@ def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuth
         os.system("rm -rf dist/")
         return convert_weight_result.stderr
 
-    if is_cancelled:
-        is_cancelled = False
-        os.system("rm -rf dist/")
-        return "Conversion cancelled"
-
     progress(0.8, desc="Generating config...")
 
     gen_config_result = subprocess.run(["mlc_llm gen_config ./dist/models/" + model_dir_name + "/" + \
@@ -174,11 +160,6 @@ def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuth
         os.system("rm -rf dist/")
         return gen_config_result.stderr
 
-    if is_cancelled:
-        is_cancelled = False
-        os.system("rm -rf dist/")
-        return "Conversion cancelled"
-
     progress(0.9, desc="Creating your Huggingface repo...")
 
     # push to HF
@@ -224,11 +205,6 @@ def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuth
     os.system("rm -rf dist/")
     return "Successful, please find your compiled LLM model on your personal account"
 
-def quit_button_click():
-    global is_cancelled
-    is_cancelled = True
-
-
 with gr.Blocks() as demo:
     gr.LoginButton()
     gr.Markdown(
@@ -244,9 +220,9 @@ with gr.Blocks() as demo:
     conv = gr.Dropdown(CONV_TEMPLATES, label="Conversation Template")
     quant = gr.Dropdown(QUANTIZATIONS, label="Quantization Method", info="The format of the code is qAfB(_id), where A represents the number of bits for storing weights and B represents the number of bits for storing activations. The _id is an integer identifier to distinguish different quantization algorithms (e.g. symmetric, non-symmetric, AWQ, etc).")
     btn = gr.Button("Convert to MLC")
-    btn2 = gr.Button("Quit")
+    btn2 = gr.Button("Cancel Conversion")
     out = gr.Textbox(label="Conversion Result")
-    btn.click(fn=button_click , inputs=[model_id, conv, quant], outputs=out)
-    btn2.click(fn=quit_button_click)
+    click_event = btn.click(fn=button_click , inputs=[model_id, conv, quant], outputs=out)
+    btn2.click(fn=None, inputs=None, outputs=None, cancels=[click_event], js="window.location.reload()")
 
-demo.launch()
+demo.queue(max_size=5).launch()
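
The new cancel path leans on two Gradio features instead of the old `is_cancelled` flag: the event handle returned by `btn.click(...)` is passed to `cancels=[...]` so the in-flight conversion event is aborted, and `js="window.location.reload()"` reloads the page so the UI resets. Below is a minimal, self-contained sketch of that pattern; it is not the Space's code, the names `long_task`, `run_btn`, and `cancel_btn` are illustrative only, and it assumes a Gradio version that accepts the `js` argument on event listeners, as the updated app.py does.

```python
import time

import gradio as gr


def long_task(progress=gr.Progress()):
    # Stand-in for the Space's real conversion pipeline: it just sleeps in
    # small steps so the cancel button has something to interrupt.
    for i in range(10):
        progress(i / 10, desc="Working...")
        time.sleep(1)
    return "Done"


with gr.Blocks() as demo:
    run_btn = gr.Button("Run")
    cancel_btn = gr.Button("Cancel")
    out = gr.Textbox(label="Result")

    # Keep a handle on the click event so it can be cancelled later.
    run_event = run_btn.click(fn=long_task, inputs=None, outputs=out)

    # fn=None: no Python callback runs. `cancels` aborts the queued run_event,
    # and `js` reloads the page so the UI returns to a clean state.
    cancel_btn.click(fn=None, inputs=None, outputs=None,
                     cancels=[run_event], js="window.location.reload()")

# Cancellation only works for events that go through the queue, which is why
# the commit also switches from demo.launch() to demo.queue(...).launch().
demo.queue(max_size=5).launch()
```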