litagin committed on
Commit
1b4e426
1 Parent(s): 41983b0

Add Python script

Files changed (1)
  1. normalize_text_and_filter.py +153 -0
normalize_text_and_filter.py ADDED
@@ -0,0 +1,153 @@
import json
import re
from collections import defaultdict
from pathlib import Path

from tqdm import tqdm

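# Regex replacements applied first: strip tabs, [n] markers, spaces, and
# decorative symbols; drop dash-like characters; map wave dashes to ー,
# full-width ？/！ to half-width, and unify circle/heart variants.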
REPLACE_MAP = {
    r"\t": "",
    r"\[n\]": "",
    r"　": "",  # full-width space
    r" ": "",  # half-width space
    r"[;▼♀♂《》≪≫①②③④⑤⑥]": "",
    r"[\u02d7\u2010-\u2015\u2043\u2212\u23af\u23e4\u2500\u2501\u2e3a\u2e3b]": "",  # dashes
    r"[\uff5e\u301C]": "ー",  # wave dashes
    r"？": "?",
    r"！": "!",
    r"[●◯〇]": "○",
    r"♥": "♡",
}

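# Translation table: full-width ASCII letters (Ａ-Ｚ, ａ-ｚ) to half-width.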
FULLWIDTH_ALPHA_TO_HALFWIDTH = str.maketrans(
    {
        chr(full): chr(half)
        for full, half in zip(
            list(range(0xFF21, 0xFF3B)) + list(range(0xFF41, 0xFF5B)),
            list(range(0x41, 0x5B)) + list(range(0x61, 0x7B)),
        )
    }
)

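# Translation table mapping the half-width katakana block (U+FF61-U+FF9E)
# onto U+30A1 onward by codepoint offset.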
HALFWIDTH_KATAKANA_TO_FULLWIDTH = str.maketrans(
    {
        chr(half): chr(full)
        for half, full in zip(range(0xFF61, 0xFF9F), range(0x30A1, 0x30FB))
    }
)

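# Translation table: full-width digits (０-９) to half-width.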
FULLWIDTH_DIGITS_TO_HALFWIDTH = str.maketrans(
    {
        chr(full): chr(half)
        for full, half in zip(range(0xFF10, 0xFF1A), range(0x30, 0x3A))
    }
)

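# Whitelist check: anything outside hiragana, katakana, CJK ideographs
# (including extension A and the iteration mark 々), ASCII alphanumerics,
# and the punctuation 。、!?…♪♡○ marks an entry as invalid.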
INVALID_PATTERN = re.compile(
    r"[^\u3040-\u309F\u30A0-\u30FF\u4E00-\u9FFF\u3400-\u4DBF\u3005"
    r"\u0041-\u005A\u0061-\u007A"
    r"\u0030-\u0039"
    r"。、!?…♪♡○]"
)


def normalize(text: str) -> str:
    for pattern, replacement in REPLACE_MAP.items():
        text = re.sub(pattern, replacement, text)

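    # Width normalization: full-width ASCII letters and digits become
    # half-width; half-width katakana becomes full-width.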
    text = text.translate(FULLWIDTH_ALPHA_TO_HALFWIDTH)
    text = text.translate(FULLWIDTH_DIGITS_TO_HALFWIDTH)
    text = text.translate(HALFWIDTH_KATAKANA_TO_FULLWIDTH)

    text = re.sub(r"…{2,}", "…", text)
    text = re.sub(r"ー{2,}", "ー", text)

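    # Collapse runs of two or more of !?♪♡: a run of one repeated character
    # keeps a single copy; a mixed run keeps only its first and last characters.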
    def replace_special_chars(match):
        seq = match.group(0)
        unique_chars = set(seq)
        if len(unique_chars) == 1:
            return seq[0]
        else:
            return seq[0] + seq[-1]

    text = re.sub(r"[!?♪♡]{2,}", replace_special_chars, text)

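    # Cap runs of small kana, vowel kana, and ん/ン at two repetitions.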
    characters_to_limit = "ッっあいうえおんぁぃぅぇぉゃゅょアイウエオンァィゥェォャュョ"
    pattern = f"([{characters_to_limit}])\\1{{2,}}"
    text = re.sub(pattern, r"\1\1", text)

    return text


def is_allowed(text: str) -> bool:
    return not INVALID_PATTERN.search(text)


def extract_and_clean_transcriptions(
    metadata_file: Path,
    output_file_valid: Path,
    output_file_invalid: Path,
    output_file_dup: Path,
) -> None:

    valid_entries: list[str] = []
    invalid_entries: list[str] = []
    duplicate_entries: list[str] = []

    # Dictionary storing the transcriptions for each file name
    file_transcriptions = defaultdict(list)

    # Count lines up front so tqdm can report progress over the whole file
    total_lines = sum(1 for _ in metadata_file.open("r", encoding="utf-8"))

    with metadata_file.open("r", encoding="utf-8") as f:
        for line in tqdm(f, total=total_lines, desc="Processing metadata.jsonl"):
            metadata = json.loads(line)
            filename = metadata.get("file_name", "")
            text = metadata.get("transcription", "")
            normalized_text = normalize(text)

            if is_allowed(normalized_text):
                metadata["transcription"] = normalized_text
                file_transcriptions[filename].append(metadata)
            else:
                invalid_entries.append(json.dumps(metadata, ensure_ascii=False))

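    # A file name that appears more than once is kept only when all of its
    # transcriptions agree; conflicting entries are routed to the dup file.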
    print(f"Total entries: {total_lines}")
    print("Checking for duplicates...")
    for filename, entries in file_transcriptions.items():
        if len(entries) == 1:
            valid_entries.append(json.dumps(entries[0], ensure_ascii=False))
        else:
            unique_transcriptions = {entry["transcription"] for entry in entries}
            if len(unique_transcriptions) == 1:
                valid_entries.append(json.dumps(entries[0], ensure_ascii=False))
            else:
                for entry in entries:
                    duplicate_entries.append(json.dumps(entry, ensure_ascii=False))

    with output_file_valid.open("w", encoding="utf-8") as out_file:
        for entry in valid_entries:
            out_file.write(f"{entry}\n")
    print(f"Valid: {len(valid_entries)} saved to {output_file_valid}")

    with output_file_invalid.open("w", encoding="utf-8") as out_file:
        for entry in invalid_entries:
            out_file.write(f"{entry}\n")
    print(f"Invalid: {len(invalid_entries)} saved to {output_file_invalid}")

    with output_file_dup.open("w", encoding="utf-8") as out_file:
        for entry in duplicate_entries:
            out_file.write(f"{entry}\n")
    print(f"Duplicate: {len(duplicate_entries)} saved to {output_file_dup}")


# Usage
if __name__ == "__main__":
    metadata_file = Path("data_renamed/metadata.jsonl")
    output_file_valid = Path("cleaned_metadata_valid.jsonl")
    output_file_invalid = Path("cleaned_metadata_invalid.jsonl")
    output_file_dup = Path("cleaned_metadata_dup.jsonl")

    extract_and_clean_transcriptions(
        metadata_file, output_file_valid, output_file_invalid, output_file_dup
    )
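As a quick sanity check of the rules above, a throwaway snippet along these lines (the sample strings are hypothetical, not taken from the dataset, and the asserts are not part of the commit) illustrates the intended behavior:

    from normalize_text_and_filter import normalize, is_allowed

    assert normalize("ＡＢＣ１２３") == "ABC123"  # full-width ASCII/digits to half-width
    assert normalize("えええええ") == "ええ"  # repeated kana capped at two
    assert normalize("すごい!!!!") == "すごい!"  # homogeneous !/? run collapses to one
    assert normalize("わあ♪♪♡") == "わあ♪♡"  # mixed run keeps first and last
    assert is_allowed("こんにちは。")  # only whitelisted characters
    assert not is_allowed("Hello, world")  # ASCII comma and space are not whitelisted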