# Builds per-language train/test/dev language-ID datasets from sentences/*.txt
import pandas as pd
import xmltodict
from sklearn.model_selection import train_test_split
import glob
import sys
import os
# Build per-language train/test/dev splits for a T5-style language-ID task.
# Each file in sentences/ is assumed to be a headerless TSV (either
# "id<TAB>sentence" or just "sentence") whose file name starts with the
# three-letter language code — TODO confirm against the actual dumps.
# Output rows are "lang: <sentence>" paired with the language code.
filelist = glob.glob('sentences/*.txt')
# Make sure the output directory exists before the first to_csv call.
os.makedirs('langid_datafiles', exist_ok=True)
for tsvfile in filelist:
    print(f"Processing {tsvfile}")
    # header=None: the files have no header row; pandas' default
    # (header='infer') would silently consume the first sentence of every
    # file as column names.
    data = pd.read_csv(tsvfile, sep='\t', header=None,
                       on_bad_lines='skip', engine='python', encoding='utf8')
    # Language code = first three chars of the file name. os.path.basename is
    # portable, unlike splitting the path on '/'.
    lang = os.path.basename(tsvfile)[0:3]
    # Some files carry only the sentence column; pad with an empty id so the
    # two-column layout below is uniform.
    if len(data.columns) == 1:
        data.insert(0, 'id', '')
    data.columns = ['id', 'source']
    data['target'] = lang
    data['source'] = "lang: " + data['source']  # T5 task prefix
    # Defensive: strip any tabs that survived parsing so the output TSV
    # stays two columns.
    data['source'] = data['source'].str.replace('\t', ' ')
    # Shuffle, then keep only the model-facing columns.
    data = data.sample(frac=1).reset_index(drop=True)
    data = data[['source', 'target']]
    # Train - test - dev split: 80 / 10 / 10.
    train, test = train_test_split(data, test_size=0.2)
    test, dev = train_test_split(test, test_size=0.5)
    # Write the datasets to disk (no index, no header, tab-separated).
    train.to_csv('langid_datafiles/' + lang + '_train.tsv', index=False, header=False, sep='\t')
    test.to_csv('langid_datafiles/' + lang + '_test.tsv', index=False, header=False, sep='\t')
    dev.to_csv('langid_datafiles/' + lang + '_dev.tsv', index=False, header=False, sep='\t')
print("Finished")