Commit 69fa725 by zetavg
1 Parent(s): 06d2e3a
llama_lora/ui/finetune/finetune_ui.py CHANGED
@@ -502,6 +502,27 @@ def finetune_ui():
                 )
                 gr.HTML(elem_classes="flex_vertical_grow_area")
 
+                with gr.Accordion("Advanced Options", open=False, elem_id="finetune_advance_options_accordion"):
+                    with gr.Row(elem_id="finetune_advanced_options_checkboxes"):
+                        load_in_8bit = gr.Checkbox(
+                            label="8bit", value=Config.load_8bit)
+                        fp16 = gr.Checkbox(label="FP16", value=True)
+                        bf16 = gr.Checkbox(label="BF16", value=False)
+                        gradient_checkpointing = gr.Checkbox(
+                            label="gradient_checkpointing", value=False)
+                    with gr.Column(variant="panel", elem_id="finetune_additional_training_arguments_box"):
+                        gr.Textbox(
+                            label="Additional Training Arguments",
+                            info="Additional training arguments to be passed to the Trainer. Note that this can override ALL other arguments set elsewhere. See https://bit.ly/hf20-transformers-training-arguments for more details.",
+                            elem_id="finetune_additional_training_arguments_textbox_for_label_display"
+                        )
+                        additional_training_arguments = gr.Code(
+                            label="JSON",
+                            language="json",
+                            value="",
+                            lines=2,
+                            elem_id="finetune_additional_training_arguments")
+
                 with gr.Box(elem_id="finetune_continue_from_model_box"):
                     with gr.Row():
                         continue_from_model = gr.Dropdown(
@@ -537,27 +558,6 @@ def finetune_ui():
                         )
                     )
 
-                with gr.Accordion("Advanced Options", open=False, elem_id="finetune_advance_options_accordion"):
-                    with gr.Row(elem_id="finetune_advanced_options_checkboxes"):
-                        load_in_8bit = gr.Checkbox(
-                            label="8bit", value=Config.load_8bit)
-                        fp16 = gr.Checkbox(label="FP16", value=True)
-                        bf16 = gr.Checkbox(label="BF16", value=False)
-                        gradient_checkpointing = gr.Checkbox(
-                            label="gradient_checkpointing", value=False)
-                    with gr.Column(variant="panel", elem_id="finetune_additional_training_arguments_box"):
-                        gr.Textbox(
-                            label="Additional Training Arguments",
-                            info="Additional training arguments to be passed to the Trainer. Note that this can override ALL other arguments set elsewhere. See https://bit.ly/hf20-transformers-training-arguments for more details.",
-                            elem_id="finetune_additional_training_arguments_textbox_for_label_display"
-                        )
-                        additional_training_arguments = gr.Code(
-                            label="JSON",
-                            language="json",
-                            value="",
-                            lines=2,
-                            elem_id="finetune_additional_training_arguments")
-
                 with gr.Column():
                     lora_r = gr.Slider(
                         minimum=1, maximum=16, step=1, value=8,
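This change moves the "Advanced Options" accordion (8bit/FP16/BF16/gradient-checkpointing checkboxes plus a free-form JSON box) earlier in the layout, ahead of the "continue from model" box. Per its info text, keys in that JSON box can override any other training argument, but the diff does not show how the JSON is consumed. The sketch below only illustrates one plausible way to merge it over the app's own defaults when constructing a Hugging Face TrainingArguments object; the helper name build_training_args and the default values are assumptions, not code from this repository.

import json
from transformers import TrainingArguments

def build_training_args(additional_training_arguments: str, **defaults) -> TrainingArguments:
    # Hypothetical helper: parse the JSON from the "Additional Training Arguments"
    # box and let it override every default, matching the warning in the UI's info text.
    overrides = json.loads(additional_training_arguments) if additional_training_arguments.strip() else {}
    return TrainingArguments(**{**defaults, **overrides})

# Example: JSON a user might paste into the gr.Code box, overriding the defaults below.
training_args = build_training_args(
    '{"warmup_steps": 100, "logging_steps": 10, "weight_decay": 0.01}',
    output_dir="./lora_output",
    per_device_train_batch_size=4,
    gradient_checkpointing=False,
)

Merging the overrides last is what makes the "can override ALL other arguments set elsewhere" warning accurate.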
llama_lora/ui/finetune/training.py CHANGED
@@ -66,7 +66,7 @@ def do_train(
     continue_from_checkpoint,
     progress=gr.Progress(track_tqdm=False),
 ):
-    if Global.is_training:
+    if Global.is_training or Global.is_train_starting:
         return render_training_status()
 
     reset_training_status()
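The training.py change widens the re-entrancy guard in do_train: a second click on the train button is now rejected not only while a run is active (Global.is_training) but also while one is still starting up, before is_training has been set. How the repository sets and clears these flags is not shown in this diff; the following is a minimal sketch of the pattern the guard implies, where start_training, worker, and the returned messages are hypothetical stand-ins.

import threading

class Global:
    # The two flags referenced in the diff; everything else below is assumed.
    is_training = False
    is_train_starting = False

def start_training(train_fn):
    # Same guard as the diff: refuse a second run while one is active
    # or still in its (possibly slow) startup phase.
    if Global.is_training or Global.is_train_starting:
        return "A training run is already in progress."

    Global.is_train_starting = True
    try:
        # ...slow preparation would happen here (load base model, tokenize data)...

        def worker():
            try:
                train_fn()
            finally:
                Global.is_training = False

        Global.is_training = True
        threading.Thread(target=worker, daemon=True).start()
    finally:
        # Release the "starting" flag whether startup succeeded or raised;
        # an active run is tracked by is_training from here on.
        Global.is_train_starting = False
    return "Training started."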