File size: 4,980 Bytes
df907da 381bc18 df907da |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 |
from datasets import load_dataset
import random
import argparse
import csv
import glob
import pandas as pd
from sklearn.model_selection import train_test_split
def main(args):
trans_chars = ",.:;!"
filelist = glob.glob('../langid/sentences/*.txt')
data = pd.DataFrame()
for tsvfile in filelist:
print(f"Processing {tsvfile}")
tmp = pd.read_csv(tsvfile, sep='\t',on_bad_lines='skip',engine='python',encoding='utf8')
if len(tmp.columns)==1:
tmp.insert(0,'id','')
tmp.columns=['id','source']
data=pd.concat([data,tmp])
# Trim
data['source'] = data['source'].str.strip()
# Drop rows that does not end with punctation
data = data[data['source'].str[-1:].isin([".",",",":",";","!"])]
# For not creating chaos later with . . . Just remove examples with elipsis
data = data[~data['source'].str.contains("...", regex=False)]
data = data[~data['source'].str.contains(". . .", regex=False)]
#Drop the id
data = data.drop(['id'],axis=1)
# Duplicate column
data['target'] = data['source']
# Apply each method to 10% of the corpus
# set a stop
stop =int(len(data)/10)
stop_small = int(stop/2)
#Main shuffling
data = data.sample(frac=1).reset_index(drop=True)
# Lowercase in 10% of the cases
data.loc[:stop,'source'] = data['source'].str.lower()
print(f"Lower casing 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Uppercase in 5% of the cases
data.loc[:stop_small,'source'] = data['source'].str.upper()
print(f"Upper casing 5% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Remove all spaces in 10% of the cases
data.loc[:stop,'source'] = data['source'].str.replace(" ","")
print(f"Removing space 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Remove both spaces and do lowercasing in 10%
data.loc[:stop,'source'] = data['source'].str.replace(" ","")
data.loc[:stop,'source'] = data['source'].str.lower()
print(f"Removing space and doing lovercase 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Remove a random number of spaces in 10% of the cases
for index, row in data[0:stop].iterrows():
source = row['source']
#Find the spaces
spacepos = [pos for pos, char in enumerate(source) if char == " "]
random.shuffle(spacepos)
#Reduce to a random number
spacepos = spacepos[0:random.randint(0,len(spacepos))]
##Sort in reverse order
spacepos.sort(reverse=True)
##Loop and replace
for s in spacepos:
source = source[:s] + source[s+1:]
data.loc[index,'source'] = source
print(f"Removing a random number of spaces 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Remove all punctation in 10% of the cases
trans_table = source.maketrans("", "", trans_chars)
data.loc[:stop,'source'] = data['source'].str.translate(trans_table)
print(f"Removing punctation 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Remove a random number of commas in 10% of the cases
for index, row in data[0:stop].iterrows():
source = row['source']
#Find the spaces
spacepos = [pos for pos, char in enumerate(source) if char == ", "]
random.shuffle(spacepos)
#Reduce to a random number
spacepos = spacepos[0:random.randint(0,len(spacepos))]
##Sort in reverse order
spacepos.sort(reverse=True)
##Loop and replace
for s in spacepos:
source = source[:s] + " " +source[s+1:]
data.loc[index,'source'] = source
print(f"Removing a random number of commas 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
data.loc[:,'source'] = "correct: "+data['source']
# Train - test - dev
train, test = train_test_split(data, test_size=0.2)
test, dev = train_test_split(test, test_size=0.5)
# Write the datasets to disk
train.to_csv('correct_datafiles/correct_train.tsv', index=False, header=False, sep='\t')
test.to_csv('correct_datafiles/correct_test.tsv', index=False, header=False, sep='\t')
dev.to_csv('correct_datafiles/correct_dev.tsv', index=False, header=False, sep='\t')
def parse_args():
# Parse commandline
parser = argparse.ArgumentParser()
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|