Upload folder using huggingface_hub
- fleurs_cleaned_kk_not_translated.tar.gz.partaa +3 -0
- readme.md +87 -0
fleurs_cleaned_kk_not_translated.tar.gz.partaa
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:55e04d91d51f06a8d0cd0f4b24f2e2c53161444c01510bfe111f9a3ff8872970
size 2753859229
readme.md
ADDED
@@ -0,0 +1,87 @@
# tar
```bash
cd /home/vladimir_albrekht/projects/2025_sep_22_qwen3omni/ms_swift_training/approach_2_transformers_based/data/fleurs_data
# stream-compress the folder and cut the stream into 4 GB parts
tar -czf - fleurs_cleaned_kk_not_translated | split -b 4G - fleurs_cleaned_kk_not_translated.tar.gz.part
```

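If GNU `split` is unavailable, the same parts can be produced in Python. A minimal sketch, assuming the archive has first been written to a single file `fleurs_cleaned_kk_not_translated.tar.gz` (a hypothetical name; the command above streams instead):

```python
from pathlib import Path

PART_SIZE = 4 * 1024**3  # 4 GiB, matching `split -b 4G`

# hypothetical single-file archive produced by `tar -czf` without the pipe
src = Path("fleurs_cleaned_kk_not_translated.tar.gz")
with src.open("rb") as f:
    for i, chunk in enumerate(iter(lambda: f.read(PART_SIZE), b"")):
        # reproduce split's default two-letter suffixes: aa, ab, ac, ...
        suffix = chr(97 + i // 26) + chr(97 + i % 26)
        # each part is held in memory once; stream in smaller chunks if that is too much
        Path(f"{src.name}.part{suffix}").write_bytes(chunk)
```
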
# cat

```bash
# reassemble the parts in name order and extract
cat fleurs_cleaned_kk_not_translated.tar.gz.part* | tar -xzf -
```

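Before extracting, a downloaded part can be checked against its LFS pointer. A minimal sketch using the oid and size recorded above for `.partaa`:

```python
import hashlib
import os

part = "fleurs_cleaned_kk_not_translated.tar.gz.partaa"
expected_oid = "55e04d91d51f06a8d0cd0f4b24f2e2c53161444c01510bfe111f9a3ff8872970"
expected_size = 2753859229

h = hashlib.sha256()
with open(part, "rb") as f:
    # hash in 1 MiB chunks so the 2.7 GB file is never fully in memory
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(part) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("partaa OK")
```
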
## processing logic

```python
# 1. data part
import pandas as pd
from pathlib import Path

def load_fleurs_split(split_name, base_path="/home/vladimir_albrekht/projects/2025_sep_22_qwen3omni/ms_swift_training/approach_2_transformers_based/data/fleurs_data/fleurs/data/kk_kz"):
    # FLEURS TSVs ship without a header row; assign the known column names
    df = pd.read_csv(
        f"{base_path}/{split_name}.tsv",
        sep="\t",
        header=None,
        names=["id", "file_name", "raw_transcription", "transcription", "phonemes", "num_samples", "gender"]
    )
    df['num_samples'] = pd.to_numeric(df['num_samples'], errors='coerce')
    df['duration_seconds'] = df['num_samples'] / 16000  # FLEURS audio is 16 kHz
    df['dataset_type'] = split_name
    df['audio_dir'] = f"{base_path}/audio/{split_name}"
    return df

df_test = load_fleurs_split("test")
df_dev = load_fleurs_split("dev")
df_train = load_fleurs_split("train")

df = pd.concat([df_test, df_dev, df_train], ignore_index=True)
# keep clips under ~30 s; .copy() avoids SettingWithCopyWarning on the edits below
df_cleaned = df[df['duration_seconds'] <= 29.9].copy()

duplicate_files = df_cleaned['file_name'].duplicated().sum()
unique_files = df_cleaned['file_name'].nunique()
total_files = len(df_cleaned)
print(f"Total samples: {total_files} ({unique_files} unique file names, {duplicate_files} duplicated)")
df_cleaned = df_cleaned.drop(columns=['phonemes', 'num_samples'])
# swap the two text columns so the raw (cased, punctuated) text is used as 'transcription'
df_cleaned = df_cleaned.rename(columns={
    'raw_transcription': 'transcription',
    'transcription': 'raw_transcription'
})


# 2. convert to .jsonl format
import shutil

output_dir = Path("/home/vladimir_albrekht/projects/2025_sep_22_qwen3omni/ms_swift_training/approach_2_transformers_based/data/fleurs_data/fleurs_cleaned_kk")
output_dir.mkdir(exist_ok=True)
audio_output_dir = output_dir / "audios"
audio_output_dir.mkdir(exist_ok=True)

jsonl_data = []
for idx, row in df_cleaned.iterrows():
    src_audio = Path(row['audio_dir']) / row['file_name']
    # prefix the split name so identical file names from different splits cannot collide
    new_audio_name = f"{row['dataset_type']}_{row['file_name']}"
    dst_audio = audio_output_dir / new_audio_name

    shutil.copy(src_audio, dst_audio)

    jsonl_data.append({
        "transcription": row['transcription'],
        "audio_path": f"audios/{new_audio_name}",
        "meta_data": {
            "id": int(row['id']),  # numpy int64 is not JSON serializable
            "raw_transcription": row['raw_transcription'],
            "duration_seconds": row['duration_seconds'],
            "gender": row['gender'],
            "dataset_type": row['dataset_type']
        }
    })

import json
with open(output_dir / "data.jsonl", 'w', encoding='utf-8') as f:
    for item in jsonl_data:
        f.write(json.dumps(item, ensure_ascii=False) + '\n')

print(f"Saved {len(jsonl_data)} samples to {output_dir}")
```
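
To sanity-check the output, `data.jsonl` can be read back and its metadata aggregated. A minimal sketch, assuming it runs from inside `fleurs_cleaned_kk`:

```python
import json
from collections import Counter
from pathlib import Path

with Path("data.jsonl").open(encoding="utf-8") as f:
    rows = [json.loads(line) for line in f]

total_hours = sum(r["meta_data"]["duration_seconds"] for r in rows) / 3600
by_split = Counter(r["meta_data"]["dataset_type"] for r in rows)
print(f"{len(rows)} samples, {total_hours:.1f} h of audio, per split: {dict(by_split)}")
```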