from pywiki_custom import *

import csv
import io
import json
import time
from urllib.parse import unquote

# wikiHow language editions to scrape.
langs = set("en ru pt it es fr de nl".split())

# Number of random articles fetched per language.
n = 2200


def cleandata(lang, input_file=None, output_file=None):
    """Deduplicate a scraped wikiHow CSV and rewrite its URL column as JSON.

    Reads ``data_{lang}.csv`` (or *input_file*), drops every row whose URL
    was already seen, replaces column 3 with a JSON metadata string
    ``{"url": ..., "language": ...}`` and writes the result to
    *output_file* (by default a name derived from the module-level row
    count ``n``).

    Args:
        lang: Language code used in the default filenames and metadata.
        input_file: Optional override for the input CSV path.
        output_file: Optional override for the output CSV path.
    """
    if input_file is None:
        input_file = f'data_{lang}.csv'
    if output_file is None:
        # ``n`` is the module-level per-language row count; 2200 -> "2.2k".
        output_file = f'{lang}-wikihow-qa-dataset-{n / 1000}k.csv'

    seen_urls = set()

    with open(input_file, 'r', newline='') as f_input, \
            open(output_file, 'w', newline='') as f_output:
        csv_input = csv.reader(f_input)
        csv_output = csv.writer(f_output)

        # The scraper writes the article URL in column 3; the cleaned file
        # carries JSON metadata there instead.
        header = next(csv_input)
        header[3] = 'METADATA'
        csv_output.writerow(header)

        for row in csv_input:
            try:
                url = row[3]
            except IndexError:
                # Short/malformed row - report it and skip, don't crash.
                print(f"\033[91mBroken found, url: {row}\033[0m")
                continue
            if url in seen_urls:
                print(f"\033[91mDuplicate row found, url: {url}\033[0m")
                continue
            # json.dumps escapes quotes/backslashes that the previous raw
            # f-string template would have emitted verbatim (invalid JSON).
            row[3] = json.dumps({"url": url, "language": lang})
            csv_output.writerow(row)
            seen_urls.add(url)

def getrandom(language=None):
    """Fetch one random wikiHow article and return its QA triple.

    Args:
        language: wikiHow language code. Defaults to the module-level
            ``lang`` variable bound by the generation loop, preserving the
            original implicit-global behavior for existing callers.

    Returns:
        Tuple ``(question, answer, url)`` where *question* is the
        percent-decoded article title and *answer* is the extended text.
    """
    if language is None:
        # NOTE(review): falls back to the global ``lang`` set by the loop
        # below - new callers should pass the code explicitly.
        language = lang
    how_to = RandomHowTo(language)
    wkhowto_url = how_to.url

    # The title may contain percent-escapes; unquote() takes a str directly.
    # The old ``title.encode('utf-8')`` passed bytes, which unquote() only
    # accepts on Python >= 3.9 (TypeError before that) - and is redundant.
    wkhowto_q = unquote(how_to.title)
    wkhowto_a = how_to.print(extended=True)
    return wkhowto_q, wkhowto_a, wkhowto_url

# Scrape n random how-to articles per language into data_{lang}.csv.
# NOTE: getrandom() reads the global ``lang`` bound by this loop, so the
# loop variable name must stay ``lang``.
for lang in langs:
    print(f"\33[34mGenerating {lang}...\033[0m")
    with open(f'data_{lang}.csv', mode='w', newline='') as fh:
        csv_out = csv.writer(fh)
        csv_out.writerow(['INSTRUCTION', 'RESPONSE', 'SOURCE', 'URL'])
        for done in range(1, n + 1):
            question, answer, article_url = getrandom()
            csv_out.writerow([question, answer, f'{lang}.wikihow.com', article_url])
            print(f"{done} out of {n}\033[0m")
            time.sleep(3)  # throttle requests to wikiHow
        print(f"\33[92mDone for {lang}!\033[0m\n")

# Post-process every language file: dedupe rows and add JSON metadata.
for language_code in langs:
    cleandata(language_code)

print("\33[32mDone for all!\033[0m")