"""Build a bidirectional translation dataset with target-language prefixes.

Reads parallel TSV files from ``translate_dataset/`` (filenames encode the
language pair as two 3-letter codes, e.g. ``engfra.tsv``), augments the data
with the reverse direction, strips the source-language half of the prefix so
only the target tag remains, splits into train/dev/test, appends language-id
training data to the train split, and writes the three splits to
``tsv_all_target/``.
"""
import pandas as pd
import xmltodict
from sklearn.model_selection import train_test_split
import glob
import sys
import os

filelist = glob.glob('translate_dataset/*.tsv')

# Collect per-file frames and concatenate once at the end; concatenating
# inside the loop re-copies the accumulated frame every iteration (O(n^2)).
frames = []
for tsvfile in filelist:
    tmp = pd.read_csv(tsvfile, sep='\t')
    tmp.columns = ['source', 'target']

    # Reverse direction: swap the columns.
    tmp['rev_source'] = tmp['target']
    tmp['rev_target'] = tmp['source']

    # The filename encodes the pair as <src><tgt>, two 3-letter codes
    # (e.g. "engfra.tsv"). basename() is portable across path separators.
    pair = os.path.basename(tsvfile)
    source = pair[0:3]
    target = pair[3:6]

    # Prefix each example with "<src>_<tgt>: " (and the reverse for the
    # swapped direction).
    tmp['source'] = f"{source}_{target}: " + tmp['source']
    tmp['rev_source'] = f"{target}_{source}: " + tmp['rev_source']

    frames.append(tmp)

data = pd.concat(frames)

# Shuffle
data = data.sample(frac=1).reset_index(drop=True)

# Add both directions. copy() the reversed slice before renaming its
# columns — mutating a slice of `data` in place raises
# SettingWithCopyWarning and is unreliable across pandas versions.
original = data[['source', 'target']]
reverse = data[['rev_source', 'rev_target']].copy()
reverse.columns = ['source', 'target']
data = pd.concat([original, reverse])

# Strip the leading "<src>_" (3-letter code + underscore = 4 chars),
# keeping only the target-language tag, e.g. "eng_fra: x" -> "fra: x".
data['source'] = data['source'].str[4:]
data = data.sample(frac=1).reset_index(drop=True)

# Train / test / dev: 80% train, then split the remainder evenly.
train, test = train_test_split(data, test_size=0.2)
test, dev = train_test_split(test, test_size=0.5)

# Write the held-out datasets to disk.
test.to_csv('tsv_all_target/test.tsv', index=False, header=False, sep='\t')
dev.to_csv('tsv_all_target/dev.tsv', index=False, header=False, sep='\t')

# Add the language-detection examples to the training dataset only.
filelist = glob.glob('langid/langid_datafiles/*train.tsv')
print(len(train))
langid_frames = [train]
for tsvfile in filelist:
    tmp = pd.read_csv(tsvfile, sep='\t')
    tmp.columns = ['source', 'target']
    langid_frames.append(tmp)
train = pd.concat(langid_frames)
print(len(train))

train.to_csv('tsv_all_target/train.tsv', index=False, header=False, sep='\t')
print("Finished")