#!/usr/bin/python3
import urllib.request
from urllib.error import HTTPError, URLError
import os
import re

def download_file(url, domain, savedir):
    """Download ``domain + url`` into ``savedir``, skipping existing files.

    Args:
        url: Path portion of the resource (e.g. "/img/foo.png?cache").
        domain: Scheme + host prefix joined directly onto ``url``.
        savedir: Directory to save into; created if missing.
    """
    # Basename of the URL with the "?cache" cache-buster suffix stripped.
    filename = url.rsplit("/", 1)[1].replace("?cache", "")

    # exist_ok avoids the check-then-create race of exists()+mkdir().
    os.makedirs(savedir, exist_ok=True)

    # BUG FIX: the original interpolated the literal string "(unknown)"
    # instead of {filename}, so every download landed on one path and all
    # later files were wrongly "skipped".
    dest = os.path.join(savedir, filename)
    if os.path.exists(dest):
        print(f"Skipping {filename} as file exists")
        return

    print(f"Downloading {filename}")
    with urllib.request.urlopen(domain + url) as response:
        with open(dest, mode="wb") as local_file:
            local_file.write(response.read())
    print(f"Downloaded {filename}")

# Category pages to scrape; each one lists the sticker images to mirror.
CATEGORY_URLS = [
    "https://transmemes.netlify.app/c/menhera-chan",
    "https://transmemes.netlify.app/c/menhera-kun",
    "https://transmemes.netlify.app/c/yurundara-chan",
    "https://transmemes.netlify.app/c/yurundara-kun",
    "https://transmemes.netlify.app/c/onii-chan-is-done-for",
]

for url in CATEGORY_URLS:
    print(f"Scraping {url}")  # fixed typo: "Scrapping" -> "Scraping"
    # Split once instead of three times per match:
    # domain = "https://transmemes.netlify.app", savedir = category slug.
    domain, _, savedir = url.rsplit("/", 2)
    with urllib.request.urlopen(url) as page:
        file_contents = page.read().decode("utf-8")

    # Every src="..." attribute value on the page. (re.MULTILINE was a
    # no-op here — the pattern contains no ^/$ anchors — so it is dropped.)
    for match in re.findall(r"src=\"(.*?)\"", file_contents):
        print(match)
        # GIFs have no high-resolution variant; for other images fetch
        # the 3x version by rewriting "white" -> "white-3x".
        target = match if "gif" in match else match.replace("white", "white-3x")
        try:
            download_file(target, domain, savedir)
        except (HTTPError, URLError) as e:
            # Best-effort: log the failure and continue with the next image.
            print(e)