autotrain-data-processor committed on
Commit
6bb4c98
1 Parent(s): 55e529e

Processed data from AutoTrain data processor (2023-06-16 14:07)

README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ task_categories:
+ - summarization
+
+ ---
+ # AutoTrain Dataset for project: musicprompt
+
+ ## Dataset Description
+
+ This dataset has been automatically processed by AutoTrain for project musicprompt.
+
+ ### Languages
+
+ The BCP-47 code for the dataset's language is unk.
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ A sample from this dataset looks as follows:
+
+ ```json
+ [
+ {
+ "feat_ytid": "XILyHZyyCik",
+ "feat_start_s": 140,
+ "feat_end_s": 150,
+ "feat_audioset_positive_labels": "/m/015lz1,/m/028sqc,/m/04rlf",
+ "text": "['pop', 'low quality', 'live performance', 'flat male vocal', 'passionate female vocal', 'wide harmonizing vocals', 'punchy kick', 'punchy snare', 'shimmering hi hats', 'groovy bass', 'crowd cheering', 'crowd singing', 'energetic', 'groovy', 'emotional', 'passionate']",
+ "target": "The low quality recording features a live performance of a pop song that consists of flat male vocal talking, passionate female vocal, alongside harmonizing wide female vocals, singing over punchy kick and snare hits, shimmering hi hats and groovy bass. There are crowd singing and cheering sounds in the background. It sounds groovy, emotional, energetic and passionate.",
+ "feat_author_id": 4,
+ "feat_is_balanced_subset": false,
+ "feat_is_audioset_eval": false
+ },
+ {
+ "feat_ytid": "t9aSL2MwEDM",
+ "feat_start_s": 30,
+ "feat_end_s": 40,
+ "feat_audioset_positive_labels": "/m/015lz1,/m/04rlf,/m/04wptg,/m/05w3f,/m/064t9,/m/06rqw",
+ "text": "['pop', 'amateur recording', 'e-bass', 'e-guitar', 'acoustic drums', 'synth', 'female voice singing', 'mid range', 'mideum to uptempo']",
+ "target": "This song contains a female singer singing with a lower voice. An acoustic drum is playing a simple groove with a catchy bassline. An e-guitar is playing the notes along with the bass. A keyboard is emulating an e-guitar sound by playing a little lick. This song may be playing at a live concert.",
+ "feat_author_id": 6,
+ "feat_is_balanced_subset": false,
+ "feat_is_audioset_eval": false
+ }
+ ]
+ ```
+
+ ### Dataset Fields
+
+ The dataset has the following fields (also called "features"):
+
+ ```json
+ {
+ "feat_ytid": "Value(dtype='string', id=None)",
+ "feat_start_s": "Value(dtype='int64', id=None)",
+ "feat_end_s": "Value(dtype='int64', id=None)",
+ "feat_audioset_positive_labels": "Value(dtype='string', id=None)",
+ "text": "Value(dtype='string', id=None)",
+ "target": "Value(dtype='string', id=None)",
+ "feat_author_id": "Value(dtype='int64', id=None)",
+ "feat_is_balanced_subset": "Value(dtype='bool', id=None)",
+ "feat_is_audioset_eval": "Value(dtype='bool', id=None)"
+ }
+ ```
+
+ ### Dataset Splits
+
+ This dataset is split into train and validation splits. The split sizes are as follows:
+
+ | Split name | Num samples |
+ | ------------ | ------------------- |
+ | train | 4416 |
+ | valid | 1105 |
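+
+ ### Loading the Data
+
+ The `processed/` directory in this repository appears to follow the layout written by `datasets`' `save_to_disk`, so a minimal way to load it is `load_from_disk`. The sketch below assumes the repository has been cloned locally with `git lfs` enabled (the `.arrow` files are stored as LFS pointers); the local path is an assumption, not something produced by AutoTrain.
+
+ ```python
+ from datasets import load_from_disk
+
+ # Assumed path: a local clone of this repository, run from the repo root.
+ ds = load_from_disk("processed")
+
+ print(ds)                  # DatasetDict with "train" (4416 rows) and "valid" (1105 rows)
+
+ sample = ds["train"][0]
+ print(sample["text"])      # aspect tags, stored as a single string
+ print(sample["target"])    # free-text caption used as the summarization target
+ ```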
processed/dataset_dict.json ADDED
@@ -0,0 +1 @@
+ {"splits": ["train", "valid"]}
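
This `dataset_dict.json` is the split manifest that `DatasetDict.save_to_disk` writes at the root of its output directory; `load_from_disk` reads it to discover the `train` and `valid` sub-directories. As a rough sketch only (the tiny in-memory splits below are hypothetical stand-ins, not the actual AutoTrain data), a layout like `processed/` could be produced with:

```python
from datasets import Dataset, DatasetDict

# Hypothetical toy splits standing in for the real AutoTrain output.
train = Dataset.from_dict({"text": ["['pop']"], "target": ["A pop song."]})
valid = Dataset.from_dict({"text": ["['rock']"], "target": ["A rock song."]})

# Writes processed/dataset_dict.json plus one sub-directory per split,
# each containing data-*.arrow, dataset_info.json and state.json.
DatasetDict({"train": train, "valid": valid}).save_to_disk("processed")
```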
processed/train/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b91907ceefc40d4b793160385ee4d93d5cf250fcfefa16852106e631baacdab
+ size 2397232
processed/train/dataset_info.json ADDED
@@ -0,0 +1,52 @@
+ {
+ "citation": "",
+ "description": "AutoTrain generated dataset",
+ "features": {
+ "feat_ytid": {
+ "dtype": "string",
+ "_type": "Value"
+ },
+ "feat_start_s": {
+ "dtype": "int64",
+ "_type": "Value"
+ },
+ "feat_end_s": {
+ "dtype": "int64",
+ "_type": "Value"
+ },
+ "feat_audioset_positive_labels": {
+ "dtype": "string",
+ "_type": "Value"
+ },
+ "text": {
+ "dtype": "string",
+ "_type": "Value"
+ },
+ "target": {
+ "dtype": "string",
+ "_type": "Value"
+ },
+ "feat_author_id": {
+ "dtype": "int64",
+ "_type": "Value"
+ },
+ "feat_is_balanced_subset": {
+ "dtype": "bool",
+ "_type": "Value"
+ },
+ "feat_is_audioset_eval": {
+ "dtype": "bool",
+ "_type": "Value"
+ }
+ },
+ "homepage": "",
+ "license": "",
+ "splits": {
+ "train": {
+ "name": "train",
+ "num_bytes": 2392850,
+ "num_examples": 4416,
+ "dataset_name": null
+ }
+ }
+ }
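
The "features" block in this `dataset_info.json` is the serialized `datasets` schema for the train split. As a minimal sketch, the equivalent schema reconstructed by hand from the values shown above (not read from the file) would be:

```python
from datasets import Features, Value

# Hand-written equivalent of the "features" entry above.
features = Features({
    "feat_ytid": Value("string"),
    "feat_start_s": Value("int64"),
    "feat_end_s": Value("int64"),
    "feat_audioset_positive_labels": Value("string"),
    "text": Value("string"),
    "target": Value("string"),
    "feat_author_id": Value("int64"),
    "feat_is_balanced_subset": Value("bool"),
    "feat_is_audioset_eval": Value("bool"),
})
```

After `load_from_disk`, the same schema is also available directly as `ds["train"].features`.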
processed/train/state.json ADDED
@@ -0,0 +1,23 @@
+ {
+ "_data_files": [
+ {
+ "filename": "data-00000-of-00001.arrow"
+ }
+ ],
+ "_fingerprint": "672b764c805d620e",
+ "_format_columns": [
+ "feat_audioset_positive_labels",
+ "feat_author_id",
+ "feat_end_s",
+ "feat_is_audioset_eval",
+ "feat_is_balanced_subset",
+ "feat_start_s",
+ "feat_ytid",
+ "target",
+ "text"
+ ],
+ "_format_kwargs": {},
+ "_format_type": null,
+ "_output_all_columns": false,
+ "_split": null
+ }
processed/valid/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:080a34793afc008557727ce279461355762b57ce60fc72232ebc6b58b2f5d9d4
+ size 605448
processed/valid/dataset_info.json ADDED
@@ -0,0 +1,52 @@
+ {
+ "citation": "",
+ "description": "AutoTrain generated dataset",
+ "features": {
+ "feat_ytid": {
+ "dtype": "string",
+ "_type": "Value"
+ },
+ "feat_start_s": {
+ "dtype": "int64",
+ "_type": "Value"
+ },
+ "feat_end_s": {
+ "dtype": "int64",
+ "_type": "Value"
+ },
+ "feat_audioset_positive_labels": {
+ "dtype": "string",
+ "_type": "Value"
+ },
+ "text": {
+ "dtype": "string",
+ "_type": "Value"
+ },
+ "target": {
+ "dtype": "string",
+ "_type": "Value"
+ },
+ "feat_author_id": {
+ "dtype": "int64",
+ "_type": "Value"
+ },
+ "feat_is_balanced_subset": {
+ "dtype": "bool",
+ "_type": "Value"
+ },
+ "feat_is_audioset_eval": {
+ "dtype": "bool",
+ "_type": "Value"
+ }
+ },
+ "homepage": "",
+ "license": "",
+ "splits": {
+ "valid": {
+ "name": "valid",
+ "num_bytes": 602996,
+ "num_examples": 1105,
+ "dataset_name": null
+ }
+ }
+ }
processed/valid/state.json ADDED
@@ -0,0 +1,23 @@
+ {
+ "_data_files": [
+ {
+ "filename": "data-00000-of-00001.arrow"
+ }
+ ],
+ "_fingerprint": "1adb1497d93e4d01",
+ "_format_columns": [
+ "feat_audioset_positive_labels",
+ "feat_author_id",
+ "feat_end_s",
+ "feat_is_audioset_eval",
+ "feat_is_balanced_subset",
+ "feat_start_s",
+ "feat_ytid",
+ "target",
+ "text"
+ ],
+ "_format_kwargs": {},
+ "_format_type": null,
+ "_output_all_columns": false,
+ "_split": null
+ }