import markdown
import os
import time
import shutil
import subprocess
from html import escape

from const import *


def getTemplateHTML(name):
    html = ""
    with open(os.path.join(templates, name), "r") as file:
        html = file.read()
    return html


def lowerHeadings(html):
    # This is a dumb lol
    # Knock every heading in the rendered summary down one level, starting at
    # h5 so nothing gets demoted twice.
    return html.replace("<h5>", "<h6>")\
        .replace("</h5>", "</h6>")\
        .replace("<h4>", "<h5>")\
        .replace("</h4>", "</h5>")\
        .replace("<h3>", "<h4>")\
        .replace("</h3>", "</h4>")\
        .replace("<h2>", "<h3>")\
        .replace("</h2>", "</h3>")\
        .replace("<h1>", "<h2>")\
        .replace("</h1>", "</h2>")


def listPages():
    return [
        (lambda path:
            (lambda content:
                (lambda timestamp:
                    (lambda name: {
                        "source_file": path,
                        "source_content": content,
                        "html": markdown.markdown("\n".join(content.split("\n...\n"))),
                        "summary": lowerHeadings(markdown.markdown(content.split("\n...\n")[0])),
                        "timestamp": timestamp,
                        "date": time.strftime(date_format, time.localtime(timestamp)),
                        "name": name,
                        "url": f"entries/{name}.html"
                    })(".".join(p.split(".")[:-1]))
                )(os.stat(path).st_ctime)
            )(open(path, "r").read())
        )(os.path.join(source, p))
        for p in os.listdir(source)
    ]

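# Entries are plain markdown files in the source directory. listPages() splits
# each file on lines containing only "...": everything above the first such
# line becomes the summary shown on the index page, and anything below it is
# rendered only on the full entry page (which gets a "read more..." link).
# A hypothetical entry, e.g. <source>/hello-world.md, maps to
# entries/hello-world.html:
#
#     # Hello world
#
#     Short intro paragraph that ends up in the index summary.
#
#     ...
#
#     The rest of the post, rendered only on the entry page.
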
def formatEntry(content, page):
    return content.replace("%date%", page["date"])\
        .replace("%name%", page["name"])\
        .replace("%time%", str(page["timestamp"]))\
        .replace("%source%", site_index + page["source_file"])\
        .replace("%url%", site_index + page["url"])


def make():
    try:
        os.makedirs(os.path.join(dist, "entries"))
    except:
        print("Already have content")
    try:
        shutil.rmtree(os.path.join(dist, "src"))
    except:
        pass
    try:
        shutil.rmtree(os.path.join(dist, "images"))
    except:
        pass
    try:
        shutil.copytree(source, os.path.join(dist, "src"))
    except:
        pass
    try:
        shutil.copytree(images, os.path.join(dist, "images"))
    except:
        pass

    pages = listPages()
    pages = sorted(pages, key=lambda p: p["timestamp"])

    summary_templ = getTemplateHTML("summary.html")
    summariesHTML = getTemplateHTML("about.html") + "\n<hr/>\n" + "\n<hr/>\n".join(
        [
            formatEntry(summary_templ, page)
            .replace(
                "%content%",
                page["summary"] + (
                    f"<a href=\"{site_index + page['url']}\">read more...</a>"
                    if len(page["source_content"].split("\n...\n")) > 1
                    else ""
                )
            )
            for page in pages
        ][::-1]
    )

    entry_templ = getTemplateHTML("page.html")
    for page in pages:
        with open(os.path.join(dist, page["url"]), "w") as entry:
            entry.write(
                formatEntry(entry_templ, page)
                .replace("%content%", page["html"])
            )

    index_templ = getTemplateHTML("page.html")
    with open(os.path.join(dist, "index.html"), "w") as index:
        index.write(
            index_templ.replace("%content%", summariesHTML)
        )

    item_templ = getTemplateHTML("item.xml")
    rss_templ = getTemplateHTML("rss.xml")
    itemsXML = "\n".join(
        [
            formatEntry(item_templ, page).replace("%content%", page["html"])
            for page in pages
        ][::-1]
    )
    with open(os.path.join(dist, "rss.xml"), "w") as index:
        index.write(
            rss_templ.replace("%items%", itemsXML)
        )

    for f in os.listdir(resources):
        shutil.copy(os.path.join(resources, f), dist)

    print(f"built {len(pages)} pages")


def get_repos():
    repos = []
    if os.path.exists("git_repos.txt"):
        with open("git_repos.txt", "r") as file:
            repos = [l.strip() for l in file.readlines() if l.startswith("http")]
    return repos


def list_files(path):
    # Recursively collect every file under path.
    files = []
    for (dirpath, dirnames, filenames) in os.walk(path):
        files.extend(os.path.join(dirpath, f) for f in filenames)
    print(len(files))
    return files


def linkify_path(path):
    # Turn "a/b/c" into a breadcrumb where each segment links to its directory.
    output = []
    full = "/"
    for s in path.split("/"):
        full += s + "/"
        output.append(f"<a href=\"{full}\">{s}</a>")
    return "/" + "/".join(output)


def format_file(page_templ, content, v):
    return page_templ.replace("%title%", v["name"])\
        .replace("%up%", v["above"])\
        .replace("%filename%", linkify_path(v["filename"]))\
        .replace("%commit%", str(v["commit"]))\
        .replace("%url%", str(v["url"]))\
        .replace("%content%", content)

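# format_file() fills placeholder tokens in page.html combined with file.html.
# A hypothetical file.html sketch showing where each token could land; the real
# templates live in the templates directory and may differ:
#
#     <h1>%title%</h1>
#     <p>%filename% (<a href="/%up%">up one level</a>)</p>
#     <p>cloned from <a href="%url%">%url%</a>, last commit: %commit%</p>
#     %content%
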

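# Everything above and below reads its paths and settings from const.py via
# `from const import *`. A minimal const.py sketch; the names match how they
# are used in this script, the values are only examples:
#
#     templates = "templates"              # page.html, summary.html, about.html, item.xml, rss.xml, file.html
#     source = "src"                       # markdown entries
#     images = "images"                    # copied verbatim to dist/images
#     resources = "resources"              # static files copied straight into dist/
#     dist = "dist"                        # build output directory
#     site_index = "https://example.com/"  # base URL prefixed to %source% and %url%
#     date_format = "%d %b %Y"             # any strftime format
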
def traverse_repo(path, name, commit, url):
    page_templ = getTemplateHTML("page.html")
    page_templ = page_templ.replace("%content%", getTemplateHTML("file.html"))
    date = time.strftime(date_format, time.localtime())
    footer = f"<hr/><p>This repo has been compiled for web view on {date} and may not be the latest version</p>"

    for root, dirs, files in os.walk(path):
        filename = "/".join(root.split("/")[1:])
        index_content = "<div class=\"readme\">"
        readme = os.path.join(root, "README.md")
        if os.path.exists(readme):
            with open(readme) as file:
                readme_content = markdown.markdown(file.read())
                # massive hack: prefix absolute links in the README so they
                # resolve inside this repo's directory under dist
                readme_content = readme_content.replace("\"/", "\"/" + filename + "/")
                index_content += readme_content
        index_content += "</div>"
        index_content += footer
        index_content = format_file(page_templ, index_content, {
            "name": name,
            "commit": commit,
            "url": url,
            "filename": filename,
            "above": "/".join(root.split("/")[1:-1]),
        })
        with open(os.path.join(root, "index.html"), "w") as file:
            file.write(index_content)


def create_repos():
    try:
        shutil.rmtree(os.path.join(dist, "git"))
    except:
        pass
    git_path = os.path.join(dist, "git")
    try:
        os.makedirs(git_path)
    except:
        print("Already have git path")

    for repo in get_repos():
        print(repo)
        name = ".".join(repo.split("/")[-1].split(".")[:-1])
        os.system(f"mkdir -p {dist}/git/{name} ; cd {dist}/git/{name} ; git pull || git clone {repo} .")
        command = subprocess.run(
            f"cd {dist}/git/{name} && git log --pretty=format:'%h%x09%an%x09%ad%x09%s' --no-decorate -1",
            stdout=subprocess.PIPE,
            shell=True
        )
        commit = command.stdout.decode()
        traverse_repo(os.path.join(git_path, name), name, commit, repo)


make()
create_repos()