taufiqsyed committed
Commit e678938
1 Parent(s): 19bbaca

End of training

README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
 license: cc-by-nc-4.0
 base_model: facebook/musicgen-small
 tags:
+- text-to-audio
+- taufiqsyed/salami_cleaned_sampled
 - generated_from_trainer
 model-index:
 - name: salami_truncsplit_model_trial
@@ -14,7 +16,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # salami_truncsplit_model_trial
 
-This model is a fine-tuned version of [facebook/musicgen-small](https://huggingface.co/facebook/musicgen-small) on an unknown dataset.
+This model is a fine-tuned version of [facebook/musicgen-small](https://huggingface.co/facebook/musicgen-small) on the TAUFIQSYED/SALAMI_CLEANED_SAMPLED - DEFAULT dataset.
+It achieves the following results on the evaluation set:
+- Loss: 3.5308
+- Clap: 0.2386
 
 ## Model description
 
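The updated card reports the fine-tuning dataset along with an eval loss of 3.5308 and a CLAP score of 0.2386. As a minimal sketch (not part of this commit), the checkpoint could be loaded for inference as below; the Hub repo id is an assumption inferred from the commit author and the model name.

```python
# Minimal inference sketch; the repo id is assumed, not confirmed by this commit.
from transformers import AutoProcessor, MusicgenForConditionalGeneration

repo_id = "taufiqsyed/salami_truncsplit_model_trial"  # assumed Hub id
processor = AutoProcessor.from_pretrained(repo_id)
model = MusicgenForConditionalGeneration.from_pretrained(repo_id)

# Text-to-audio generation; ~256 new tokens is roughly five seconds of audio for MusicGen.
inputs = processor(text=["lo-fi piano with soft drums"], padding=True, return_tensors="pt")
audio_values = model.generate(**inputs, do_sample=True, max_new_tokens=256)
```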
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 0.8,
+    "eval_clap": 0.2385900318622589,
+    "eval_loss": 3.5307514667510986,
+    "eval_runtime": 37.3546,
+    "eval_samples": 16,
+    "eval_samples_per_second": 0.428,
+    "eval_steps_per_second": 0.428,
+    "total_flos": 3295460764752.0,
+    "train_loss": 10.123604774475098,
+    "train_runtime": 9.4139,
+    "train_samples": 20,
+    "train_samples_per_second": 2.125,
+    "train_steps_per_second": 0.106
+}
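all_results.json (and the eval/train splits added below) are the standard metric dumps written by `Trainer.save_metrics`; the derived throughput fields are simply samples divided by runtime, which can be checked directly with the values copied from this file.

```python
# Consistency check on the reported throughput numbers (values copied from all_results.json).
eval_samples, eval_runtime = 16, 37.3546
train_samples, train_runtime = 20, 9.4139

print(round(eval_samples / eval_runtime, 3))    # 0.428 -> eval_samples_per_second
print(round(train_samples / train_runtime, 3))  # 2.125 -> train_samples_per_second
```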
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 0.8,
+    "eval_clap": 0.2385900318622589,
+    "eval_loss": 3.5307514667510986,
+    "eval_runtime": 37.3546,
+    "eval_samples": 16,
+    "eval_samples_per_second": 0.428,
+    "eval_steps_per_second": 0.428
+}
generation_config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_from_model_config": true,
   "bos_token_id": 2048,
   "decoder_start_token_id": 2048,
   "do_sample": true,
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 0.8,
+    "total_flos": 3295460764752.0,
+    "train_loss": 10.123604774475098,
+    "train_runtime": 9.4139,
+    "train_samples": 20,
+    "train_samples_per_second": 2.125,
+    "train_steps_per_second": 0.106
+}
trainer_state.json ADDED
@@ -0,0 +1,49 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.8,
+  "eval_steps": 25,
+  "global_step": 1,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.8,
+      "grad_norm": Infinity,
+      "learning_rate": 0.0002,
+      "loss": 10.1236,
+      "step": 1
+    },
+    {
+      "epoch": 0.8,
+      "step": 1,
+      "total_flos": 3295460764752.0,
+      "train_loss": 10.123604774475098,
+      "train_runtime": 9.4139,
+      "train_samples_per_second": 2.125,
+      "train_steps_per_second": 0.106
+    }
+  ],
+  "logging_steps": 1.0,
+  "max_steps": 1,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 1,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 3295460764752.0,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}
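trainer_state.json records a single optimizer step (max_steps: 1) whose logged gradient norm is Infinity. A minimal sketch for inspecting the logged history, assuming the file has been downloaded locally:

```python
# Sketch: reading the training log history from a local trainer_state.json.
# Python's json module parses the bare "Infinity" literal seen in grad_norm as float("inf").
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(entry.get("step"), entry.get("loss"), entry.get("grad_norm"))
# Expected for this run:
#   1 10.1236 inf
#   1 None None
```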