test
- langid/langid_datafilesfin_dev.tsv → correct/correct_datafiles/correct_dev.tsv +2 -2
- langid/langid_datafilesdan_dev.tsv → correct/correct_datafiles/correct_test.tsv +2 -2
- langid/langid_datafileseng_dev.tsv → correct/correct_datafiles/correct_train.tsv +2 -2
- correct/create_correct_dataset.py +140 -0
- create_dataset_from_tsv.py → create_training_files.py +1 -1
- langid/create_langid_trainset.py +1 -1
- langid/langid_datafilesfao_dev.tsv +0 -3
- langid/langid_datafilesisl_dev.tsv +0 -3
- langid/langid_datafilesnno_dev.tsv +0 -3
- langid/langid_datafilesnob_dev.tsv +0 -3
- langid/langid_datafilessma_dev.tsv +0 -3
- langid/langid_datafilessme_dev.tsv +0 -3
- langid/langid_datafilessmj_dev.tsv +0 -3
- langid/langid_datafilessmn_dev.tsv +0 -3
- langid/langid_datafilessms_dev.tsv +0 -3
- langid/langid_datafilesswe_dev.tsv +0 -3
- {tsv_source_target → translate_dataset}/finnob.tsv +0 -0
- {tsv_source_target → translate_dataset}/finsma.tsv +0 -0
- {tsv_source_target → translate_dataset}/finsme.tsv +0 -0
- {tsv_source_target → translate_dataset}/finsmn.tsv +0 -0
- {tsv_source_target → translate_dataset}/finsms.tsv +0 -0
- {tsv_source_target → translate_dataset}/nobnyn.tsv +0 -0
- {tsv_source_target → translate_dataset}/nobsma.tsv +0 -0
- {tsv_source_target → translate_dataset}/nobsme.tsv +0 -0
- {tsv_source_target → translate_dataset}/nobsmj.tsv +0 -0
- {tsv_source_target → translate_dataset}/smanob.tsv +0 -0
- {tsv_source_target → translate_dataset}/smenob.tsv +0 -0
- {tsv_source_target → translate_dataset}/smesma.tsv +0 -0
- {tsv_source_target → translate_dataset}/smesmj.tsv +0 -0
- {tsv_source_target → translate_dataset}/smesmn.tsv +0 -0
- tsv_all_source_target/dev.tsv +0 -3
- tsv_all_source_target/test.tsv +0 -3
- tsv_all_source_target/train.tsv +0 -3
langid/langid_datafilesfin_dev.tsv → correct/correct_datafiles/correct_dev.tsv
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:fec0f2575d5925b993aefdb9715ace66f3a4948258c8cfa1600fd9b264fd8963
+size 2320597
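The three-line bodies in these diffs are Git LFS pointer files rather than the TSV data itself: the oid is the SHA-256 digest of the tracked file's contents and the size is its length in bytes. A minimal sketch of how those two fields are derived (the path value here is hypothetical):

import hashlib
import os

path = "correct_datafiles/correct_dev.tsv"  # hypothetical local path

# An LFS pointer records the SHA-256 of the file's bytes and its size;
# the actual contents live in LFS storage, keyed by that digest.
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

print("version https://git-lfs.github.com/spec/v1")
print(f"oid sha256:{digest}")
print(f"size {os.path.getsize(path)}")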
langid/langid_datafilesdan_dev.tsv → correct/correct_datafiles/correct_test.tsv
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:5e7595989509c671e3bc088129a080ab4292ba472ef21bc8c92d470ebc4f1196
+size 2339422
langid/langid_datafileseng_dev.tsv → correct/correct_datafiles/correct_train.tsv
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d42cbf75385b1918f7dd9fa479e65519ecbe9bfcf1996132c4236ea695cd9fdf
+size 18565347
correct/create_correct_dataset.py
ADDED
@@ -0,0 +1,140 @@
from datasets import load_dataset
import random
import argparse
import csv
import glob
import pandas as pd
from sklearn.model_selection import train_test_split


def main(args):
    trans_chars = ",.:;!"

    filelist = glob.glob('../langid/sentences/*.txt')

    data = pd.DataFrame()

    for tsvfile in filelist:
        print(f"Processing {tsvfile}")
        tmp = pd.read_csv(tsvfile, sep='\t', on_bad_lines='skip', engine='python', encoding='utf8')
        if len(tmp.columns) == 1:
            tmp.insert(0, 'id', '')

        tmp.columns = ['id', 'source']
        data = pd.concat([data, tmp])

    # Trim surrounding whitespace
    data['source'] = data['source'].str.strip()

    # Drop rows that do not end with punctuation
    data = data[data['source'].str[-1:].isin([".", ",", ":", ";", "!"])]

    # To avoid creating chaos later, simply drop examples containing an ellipsis
    data = data[~data['source'].str.contains("...", regex=False)]
    data = data[~data['source'].str.contains(". . .", regex=False)]

    # Drop the id column
    data = data.drop(['id'], axis=1)

    # Duplicate the column: 'target' keeps the clean text, 'source' is corrupted below
    data['target'] = data['source']

    # Apply each corruption to 10% of the corpus; set a stop index
    stop = int(len(data) / 10)
    stop_small = int(stop / 2)

    # Main shuffling
    data = data.sample(frac=1).reset_index(drop=True)

    # Lowercase in 10% of the cases
    data.loc[:stop, 'source'] = data['source'].str.lower()
    print(f"Lower casing 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
    data = data.sample(frac=1).reset_index(drop=True)

    # Uppercase in 5% of the cases
    data.loc[:stop_small, 'source'] = data['source'].str.upper()
    print(f"Upper casing 5% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
    data = data.sample(frac=1).reset_index(drop=True)

    # Remove all spaces in 10% of the cases
    data.loc[:stop, 'source'] = data['source'].str.replace(" ", "")
    print(f"Removing spaces 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
    data = data.sample(frac=1).reset_index(drop=True)

    # Remove all spaces and lowercase in 10% of the cases
    data.loc[:stop, 'source'] = data['source'].str.replace(" ", "")
    data.loc[:stop, 'source'] = data['source'].str.lower()
    print(f"Removing spaces and lowercasing 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
    data = data.sample(frac=1).reset_index(drop=True)

    # Remove a random number of spaces in 10% of the cases
    for index, row in data[0:stop].iterrows():
        source = row['source']
        # Find the spaces
        spacepos = [pos for pos, char in enumerate(source) if char == " "]
        random.shuffle(spacepos)

        # Reduce to a random number of positions
        spacepos = spacepos[0:random.randint(0, len(spacepos))]

        # Sort in reverse order so deletions do not shift the remaining positions
        spacepos.sort(reverse=True)

        # Loop and delete
        for s in spacepos:
            source = source[:s] + source[s+1:]

        data.loc[index, 'source'] = source

    print(f"Removing a random number of spaces 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
    data = data.sample(frac=1).reset_index(drop=True)

    # Remove all punctuation in 10% of the cases
    trans_table = str.maketrans("", "", trans_chars)
    data.loc[:stop, 'source'] = data['source'].str.translate(trans_table)
    print(f"Removing punctuation 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
    data = data.sample(frac=1).reset_index(drop=True)

    # Remove a random number of commas in 10% of the cases
    for index, row in data[0:stop].iterrows():
        source = row['source']
        # Find the commas
        commapos = [pos for pos, char in enumerate(source) if char == ","]
        random.shuffle(commapos)

        # Reduce to a random number of positions
        commapos = commapos[0:random.randint(0, len(commapos))]

        # Sort in reverse order
        commapos.sort(reverse=True)

        # Loop and delete
        for s in commapos:
            source = source[:s] + source[s+1:]

        data.loc[index, 'source'] = source

    print(f"Removing a random number of commas 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
    data = data.sample(frac=1).reset_index(drop=True)

    # Train - test - dev split
    train, test = train_test_split(data, test_size=0.2)
    test, dev = train_test_split(test, test_size=0.5)

    # Write the datasets to disk
    train.to_csv('correct_datafiles/correct_train.tsv', index=False, header=False, sep='\t')
    test.to_csv('correct_datafiles/correct_test.tsv', index=False, header=False, sep='\t')
    dev.to_csv('correct_datafiles/correct_dev.tsv', index=False, header=False, sep='\t')


def parse_args():
    # Parse the command line
    parser = argparse.ArgumentParser()
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parse_args()
    main(args)
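The trickiest pattern in the script above is deleting characters by position: the index list is sorted in reverse so that each deletion only shifts characters to the right of it, which have already been handled. A self-contained sketch of that pattern, factored into a function (the function name and example sentence are illustrative, not part of the commit):

import random

def drop_random_spaces(text, rng):
    # Collect the positions of all spaces and keep a random subset of them...
    positions = [pos for pos, char in enumerate(text) if char == " "]
    rng.shuffle(positions)
    positions = positions[:rng.randint(0, len(positions))]
    # ...then delete from the highest position downward, so earlier
    # deletions never invalidate the indices still to be processed.
    for pos in sorted(positions, reverse=True):
        text = text[:pos] + text[pos + 1:]
    return text

print(drop_random_spaces("dette er en setning.", random.Random(0)))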
create_dataset_from_tsv.py → create_training_files.py
RENAMED
@@ -5,7 +5,7 @@ import glob
 import sys
 import os
 
-filelist = glob.glob('tsv_source_target/*.tsv')
+filelist = glob.glob('translate_dataset/*.tsv')
 
 data = pd.DataFrame()
 
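Only the glob path changes in this hunk, but the renamed script presumably gathers every pair file in the new directory much as create_correct_dataset.py does above. A hedged sketch of that gathering step, assuming the same pandas conventions (the real script's column handling may differ):

import glob
import pandas as pd

# Assumed reconstruction: read every translation-pair TSV (finnob.tsv,
# nobsme.tsv, ...) from the renamed directory into one frame.
# Only the glob path itself is confirmed by the diff above.
filelist = glob.glob('translate_dataset/*.tsv')
data = pd.DataFrame()
for tsvfile in filelist:
    tmp = pd.read_csv(tsvfile, sep='\t', on_bad_lines='skip',
                      engine='python', encoding='utf8')
    data = pd.concat([data, tmp])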
langid/create_langid_trainset.py
CHANGED
@@ -32,7 +32,7 @@ for tsvfile in filelist:
     # Write the datasets to disk
     train.to_csv('langid_datafiles/'+lang+'_train.tsv', index=False, header=False, sep='\t')
     test.to_csv('langid_datafiles/'+lang+'_test.tsv', index=False, header=False, sep='\t')
-    dev.to_csv('langid_datafiles'+lang+'_dev.tsv', index=False, header=False, sep='\t')
+    dev.to_csv('langid_datafiles/'+lang+'_dev.tsv', index=False, header=False, sep='\t')
 
 
 print("Finished")
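This one-character fix is also the source of the stray files deleted below: with the separator missing, the dev split of every language was written to paths like langid_datafilesfao_dev.tsv beside the script instead of into langid_datafiles/. A small sketch of how building the path with os.path.join avoids this class of typo (the variable values are illustrative):

import os

lang = "fao"  # illustrative; the script builds one such path per language code
out_dir = "langid_datafiles"

# Plain string concatenation silently yields 'langid_datafilesfao_dev.tsv'
# when the '/' is forgotten; os.path.join always inserts the separator.
dev_path = os.path.join(out_dir, lang + "_dev.tsv")
print(dev_path)  # langid_datafiles/fao_dev.tsv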
langid/langid_datafilesfao_dev.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9e9862ecdd770235ee1e6e1389cd13c941c1fc67e347206addb5ceef8d8303ce
-size 104101

langid/langid_datafilesisl_dev.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:431256015e7ecd8c7db47eb24de3030a9f0dfafcb6e473ad62f961ecb1ab02b2
-size 116070

langid/langid_datafilesnno_dev.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b98781d8bfcf20efab1fc3ad723cc72a2edcb825b77ac87ac0ba55d71b395ea8
-size 105195

langid/langid_datafilesnob_dev.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:40ec96b3a9056efe07930ffaa869289c443d7181878e2b30fb40bf635bb0da88
-size 123710

langid/langid_datafilessma_dev.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d401a7e7325abde28f0616f5da413a776cbf0cf71acd472d0da429dc6dd54868
-size 113845

langid/langid_datafilessme_dev.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:978a1bb983d7def98d552fdf0b2456b70cae30b350ecbcbbeb0711b95c0cb322
-size 97202

langid/langid_datafilessmj_dev.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:726f6ba67de111833253522028cd84c2a1d725ed627b62e35827875aa5889b38
-size 107928

langid/langid_datafilessmn_dev.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fd8f7b55bf174a1a442f00e6e7ea5ba6af5378cd6bf01209ab195acc6a06a21f
-size 111870

langid/langid_datafilessms_dev.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:19960f2c00c8058a2b4eb96d8639ee9fe08a1dbe99ae41ab6a6809aeafdc9e39
-size 70369

langid/langid_datafilesswe_dev.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:80dea5798eef38ae4e84010f6c21581ae0a229a03cd476647e5497473e44a750
-size 100161
{tsv_source_target → translate_dataset}/finnob.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/finsma.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/finsme.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/finsmn.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/finsms.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/nobnyn.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/nobsma.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/nobsme.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/nobsmj.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/smanob.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/smenob.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/smesma.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/smesmj.tsv
RENAMED
File without changes

{tsv_source_target → translate_dataset}/smesmn.tsv
RENAMED
File without changes
tsv_all_source_target/dev.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f5de7e7fc05d9697ff9863f4846364878492e847d236c33d4e55495c66b3c2ae
-size 25212526

tsv_all_source_target/test.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:53096012b9e502826ee0028367316aa624d6a1b847eb255f173b09d9475af4ce
-size 25054929

tsv_all_source_target/train.tsv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1f28b52818b79814f12c32e2ddf11f434e52439497a11bb8f251a89d682ac91e
-size 201104421