add stuff

Luna 2023-02-22 00:01:21 -03:00
parent 0fcec72f21
commit 5122caf74f
9 changed files with 706 additions and 0 deletions

1
requirements.txt Normal file

@@ -0,0 +1 @@
privy==6.0.0

27
tasks/aproxy.py Normal file

@@ -0,0 +1,27 @@
from pyinfra import host
from pyinfra.operations import dnf, server, files, systemd, postgresql
from pyinfra.api import deploy
from pyinfra.facts.server import Which
from .operations.git import repo
@deploy("install aproxy")
def install():
main_path = "/opt/aproxy"
repo_output = repo(
name="clone aproxy repo",
src="https://gitdab.com/luna/aproxy",
dest=f"{main_path}/src",
branch="mistress",
)
config_directory = "/etc/aproxy"
files.directory(config_directory)
remote_config_path = f"{config_directory}/conf.lua"
config_output = files.template(
"./files/aproxy/conf.lua.j2",
dest=remote_config_path,
mode=555,
cfg=host.data,
)
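
install() above stores repo_output and config_output but never uses them in this commit; tasks/pleroma.py uses the same pattern to restart a systemd unit when either changes. A minimal sketch of that likely follow-up, assuming an aproxy unit template exists (the template path and env_dict keys below are guesses, not part of this commit):

    # hypothetical continuation of install(), mirroring tasks/pleroma.py; not in this commit
    # assumes: from .install_consul_server import template_and_install_systemd
    template_and_install_systemd(
        "./files/aproxy/aproxy.service.j2",  # assumed template path
        env_dict={
            "main_path": main_path,
            "config_path": remote_config_path,
        },
        restarted=repo_output.changed or config_output.changed,
    )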

28
tasks/croc.py Normal file

@@ -0,0 +1,28 @@
from packaging import version
from pyinfra.operations import apk, server, files
from pyinfra.facts.server import LinuxName
from pyinfra.api import deploy
from pyinfra import host

CROC_ALPINE_VERSION = version.parse("3.14")


@deploy("install croc")
def install_croc():
# alpine provides croc in-repo as of 3.14
if host.get_fact(LinuxName) == "Alpine":
host_alpine_version = version.parse(host.data.alpine_version)
if host_alpine_version >= CROC_ALPINE_VERSION:
apk.packages(name="install croc via apk", packages=["croc"])
return
# for everyone else, install manually
files.directory("/opt/croc")
files.download(
"https://github.com/schollz/croc/releases/download/v9.6.3/croc_9.6.3_Linux-64bit.tar.gz",
"/opt/croc/croc.tar.gz",
md5sum="5550b0bfb50d0541cba790562c180bd7",
)
server.shell("tar xvf /opt/croc/croc.tar.gz", _chdir="/opt/croc")
server.shell("mv /opt/croc/croc /usr/bin/croc", _chdir="/opt/croc")

172
tasks/docker.py Normal file

@@ -0,0 +1,172 @@
import logging
import subprocess
import json
import tempfile
from pathlib import Path
from typing import Optional
from pyinfra import host
from pyinfra.api import deploy, FactBase, operation, FunctionCommand
from pyinfra.operations import files, apt, dnf, systemd, python, server
from pyinfra.facts.server import LinuxName
from .install_consul_server import template_and_install_systemd

DEFAULTS = {
    "docker_registry_image": "registry:2.8.1",
}


@deploy("install docker")
def install_docker():
linux_name = host.get_fact(LinuxName)
if linux_name == "Fedora":
dnf.packages(["docker", "docker-compose"])
systemd.service("docker", enabled=True, running=True)
else:
apt.packages(["docker.io", "docker-compose"])


class TailscaleIPs(FactBase):
requires_command = "tailscale"
command = "tailscale ip"
def process(self, output):
# TODO provide ipaddress for nicer formatting in conf tools
for line in output:
if ":" in line:
continue
return [line]


class DockerImage(FactBase):
requires_command = "docker"
def command(self, object_id):
return f"docker image inspect {object_id} || true"
def process(self, output):
joined_out = "".join(output)
return json.loads(joined_out)


class DockerManifestInspect(FactBase):
    requires_command = "docker"

    def command(self, object_id):
        # inspect the manifest (not the local image), matching the
        # "no such manifest" check in process() below
        return f"docker manifest inspect {object_id} || true"
def process(self, output):
if "no such manifest" in output:
return None
joined_out = "".join(output)
return json.loads(joined_out)


log = logging.getLogger(__name__)


def docker_image_from_host_to_target(image_reference: str):
assert image_reference
username = host.data.ssh_user
hostname = host.name
log.warning(
"hello, sending image %r to host %s@%s", image_reference, username, hostname
)
with tempfile.NamedTemporaryFile() as f:
cmdline = f"docker save {image_reference} | gzip | pv > {f.name}"
log.warning("exec %r", cmdline)
subprocess.check_output(cmdline, shell=True)
with subprocess.Popen(["croc", "send", f.name], stderr=subprocess.PIPE) as proc:
transfer_code = None
for line_in in proc.stderr:
line = line_in.decode()
log.warning("got stdin line: %r", line)
TRANSFER_CODE_PHRASE = "Code is: "
transfer_code_index = line.find(TRANSFER_CODE_PHRASE)
if transfer_code_index == -1:
continue
transfer_code = line[
transfer_code_index + len(TRANSFER_CODE_PHRASE) :
].strip()
assert len(transfer_code) > 10
log.warning("extracted transfer code: %r", transfer_code)
break
assert transfer_code
target_path = Path(f.name).name
send_cmdline = f"croc --yes {transfer_code}"
server.shell(send_cmdline, _chdir="/tmp")
server.shell(
f"cat {target_path} | docker load", _chdir="/tmp", name="load image file"
)
server.shell(f"rm /tmp/{target_path}", name="remove image file after importing")


@operation
def docker_image(image_reference: str):
images = host.get_fact(DockerImage, image_reference)
if not images:
name, *_version = image_reference.split(":")
if name in host.data.manual_docker_images:
# get it from my machine lmao
log.warning(
"this deploy script wants image %r, taking it from host system and sending it",
image_reference,
)
yield FunctionCommand(
docker_image_from_host_to_target, (image_reference,), {}
)
else:
# take it from given image ref
yield f"docker pull {image_reference}"


def template_and_install_compose(
compose_template_path: str,
env_dict: Optional[dict] = None,
*,
systemd_service: Optional[str] = None,
):
env_dict = env_dict or {}
compose_template = Path(compose_template_path)
systemd_service = systemd_service or compose_template.name.split(".")[0]
assert systemd_service != "compose"
assert systemd_service.endswith(".service")
systemd_service_name = systemd_service.split(".")[0]
working_directory = f"/opt/{systemd_service_name}"
files.template(
compose_template_path,
f"{working_directory}/compose.yaml",
env_dict=env_dict,
name=f"sending compose file {systemd_service_name}",
)
template_and_install_systemd(
"files/compose.service.j2",
env_dict={
"service_name": systemd_service_name,
"working_directory": working_directory,
},
service_name=systemd_service,
)
@deploy("install docker registry", data_defaults=DEFAULTS)
def install_registry():
install_docker()
docker_image(host.data.docker_registry_image)
template_and_install_compose(
"files/registry/compose.yaml.j2",
{
"docker_registry_image": host.data.docker_registry_image,
},
systemd_service="registry.service",
)
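
install_registry() and docker_image() read several values from host.data: ssh_user, manual_docker_images and docker_registry_image (the last one falls back to DEFAULTS). A minimal sketch of matching pyinfra group data, assuming the usual group_data/<group>.py layout; the file name and values are illustrative, not part of this commit:

# group_data/all.py (hypothetical): values consumed by tasks/docker.py
ssh_user = "root"

# image names that are docker-saved locally and sent over croc instead of pulled
manual_docker_images = ["registry"]

# overrides the "docker_registry_image" entry in DEFAULTS when set
docker_registry_image = "registry:2.8.1"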

76
tasks/elixir.py Normal file

@@ -0,0 +1,76 @@
from typing import Optional
from pyinfra import host
from pyinfra.api import deploy
from pyinfra.operations import apt, files, server, dnf
from pyinfra.facts.server import LinuxName
from pyinfra.facts import server as server_facts

ELIXIR_DEFAULTS = {
    "elixir_version": "1.13.4",
    "erlang_version": "25",
}


def install_for_ubuntu():
elixir_is_updated = False
erlang_is_updated = False
wanted_elixir_version = host.data.elixir_version
wanted_erlang_version = host.data.erlang_version
elixir_command = host.get_fact(server_facts.Which, command="elixir")
if elixir_command:
elixir_version = host.get_fact(ElixirVersion)
elixir_is_updated = elixir_version == wanted_elixir_version
erlang_command = host.get_fact(server_facts.Which, command="erl")
if erlang_command:
erlang_version = host.get_fact(ErlangVersion)
erlang_is_updated = erlang_version == wanted_erlang_version
if elixir_is_updated and erlang_is_updated:
return
# elixir is non trivial to install because we can't
# rely on the ubuntu package repo to be updated
#
# so we use the Erlang Solutions repository as recommended by elixir themselves.
erlang_repo_deb_path = "/tmp/erlang-solutions.deb"
files.download(
name="download erlang solutions repo deb file",
src="https://packages.erlang-solutions.com/erlang-solutions_2.0_all.deb",
dest=erlang_repo_deb_path,
)
apt.deb(
name="install erlang solutions repo",
src=erlang_repo_deb_path,
)
# TODO: we don't need to update if we already installed the deb
apt.update(cache_time=3600)
# its in two separate steps as recommended by readme. who am i to judge
apt.packages(
name="install erlang",
packages=[
f"erlang={otp_version}" if otp_version else f"erlang",
f"erlang-manpages={otp_version}" if otp_version else f"erlang-manpages",
],
)
apt.packages(
name="install elixir",
packages=[f"elixir={elixir_version}" if elixir_version else "elixir"],
)
@deploy("Install Elixir", data_defaults=ELIXIR_DEFAULTS)
def install():
linux_name = host.get_fact(LinuxName)
if linux_name == "Fedora":
dnf.packages(["erlang", "elixir"])
else:
install_for_ubuntu()
server.shell(name="test elixir exists", commands=["elixir -v"])

163
tasks/operations/git.py Normal file

@@ -0,0 +1,163 @@
from pyinfra.operations import apk, files, server, git, systemd, python
from pyinfra import host
from pyinfra.facts.files import Directory
from pyinfra.facts.git import GitBranch
from pyinfra.api import deploy, operation, FactBase
from pyinfra.operations.util.files import chown, unix_path_join


class CoolerGitBranch(FactBase):
requires_command = "git"
@staticmethod
def command(repo):
# TODO should inject _sudo / _sudo_user if user is provided in repo()
return "! test -d {0} || (cd {0} && git rev-parse --abbrev-ref HEAD)".format(
repo
)


class GitFetch(FactBase):
def command(self, repo: str):
return f"git -C {repo} fetch"
def process(self, output):
return output


class GitRevListComparison(FactBase):
def command(self, repo: str, branch: str):
return f"git -C {repo} rev-list HEAD..origin/{branch} | wc -l"
def process(self, output):
return output


class RawCommandOutput(FactBase):
"""
Returns the raw output of a command.
"""
def command(self, command):
return command
def process(self, output):
return "\n".join(output) # re-join and return the output lines


@operation(
pipeline_facts={
"git_branch": "target",
}
)
def repo(
src,
dest,
branch=None,
pull=True,
rebase=False,
user=None,
group=None,
ssh_keyscan=False,
update_submodules=False,
recursive_submodules=False,
):
"""
Clone/pull git repositories.
+ src: the git source URL
+ dest: directory to clone to
+ branch: branch to pull/checkout
+ pull: pull any changes for the branch
+ rebase: when pulling, use ``--rebase``
+ user: chown files to this user after
+ group: chown files to this group after
+ ssh_keyscan: keyscan the remote host if not in known_hosts before clone/pull
+ update_submodules: update any git submodules
+ recursive_submodules: update git submodules recursively
Example:
.. code:: python
git.repo(
name='Clone repo',
src='https://github.com/Fizzadar/pyinfra.git',
dest='/usr/local/src/pyinfra',
)
"""
# Ensure our target directory exists
yield from files.directory(dest)
if ssh_keyscan:
raise NotImplementedError("TODO copypaste ssh_keyscan code")
# Store git commands for directory prefix
git_commands = []
git_dir = unix_path_join(dest, ".git")
is_repo = host.get_fact(Directory, path=git_dir)
# Cloning new repo?
if not is_repo:
if branch:
git_commands.append("clone {0} --branch {1} .".format(src, branch))
else:
git_commands.append("clone {0} .".format(src))
host.create_fact(GitBranch, kwargs={"repo": dest}, data=branch)
host.create_fact(CoolerGitBranch, kwargs={"repo": dest}, data=branch)
host.create_fact(
Directory,
kwargs={"path": git_dir},
data={"user": user, "group": group},
)
# Ensuring existing repo
else:
current_branch = host.get_fact(CoolerGitBranch, repo=dest)
# always fetch upstream branches (that way we can compare if the latest
# commit has changed, and then we don't need to execute anything!)
host.get_fact(GitFetch, repo=dest)
stdout = host.get_fact(GitRevListComparison, repo=dest, branch=branch)
repository_has_updates = stdout[0] != "0"
# since we immediately always fetch, we will always be modifying the
# .git folder, and that folder MUST be owned by the correct user afterwards.
if user or group:
chown_command = chown(dest, user, group, recursive=True)
host.get_fact(RawCommandOutput, command=chown_command.get_masked_value())
if branch and current_branch != branch:
git_commands.append("checkout {0}".format(branch))
host.create_fact(GitBranch, kwargs={"repo": dest}, data=branch)
host.create_fact(CoolerGitBranch, kwargs={"repo": dest}, data=branch)
repository_has_updates = True
if repository_has_updates and pull:
if rebase:
git_commands.append("pull --rebase")
else:
git_commands.append("pull")
if update_submodules:
if recursive_submodules:
git_commands.append("submodule update --init --recursive")
else:
git_commands.append("submodule update --init")
# Attach prefixes for directory
command_prefix = "cd {0} && git".format(dest)
git_commands = [
"{0} {1}".format(command_prefix, command) for command in git_commands
]
for cmd in git_commands:
yield cmd
# Apply any user or group if we did anything
if git_commands and (user or group):
yield chown(dest, user, group, recursive=True)

184
tasks/pleroma.py Normal file

@@ -0,0 +1,184 @@
from pyinfra import host
from pyinfra.operations import dnf, server, files, systemd, postgresql
from pyinfra.api import deploy
from pyinfra.facts.server import Which
from .secrets import secrets
from .operations.git import repo
from tasks.elixir import install as install_elixir
from .install_consul_server import template_and_install_systemd
from tasks.rpmfusion import install as install_rpmfusion
from tasks.postgresql import install as install_postgresql


class WithSecrets:
def __init__(self, secrets_fields):
self._secrets_values = {}
for field in secrets_fields:
secret_value = secrets.field(field)
self._secrets_values[field] = secret_value
def __getattr__(self, field):
if field in self._secrets_values:
return self._secrets_values[field]
return getattr(host.data, field)
@deploy("install pleroma")
def install():
install_elixir()
install_rpmfusion()
install_postgresql()
dnf.packages(
name="install system depedencies",
packages=[
"sudo",
"git",
"make",
"automake",
"gcc",
"gcc-c++",
"kernel-devel",
"cmake",
"file-libs",
"file-devel",
"ImageMagick",
"ImageMagick-libs",
"ffmpeg",
"perl-Image-ExifTool",
"erlang-parsetools",
],
)
files.directory(path="/opt/pleroma", present=True, mode=755, recursive=True)
runner_user = "pleroma"
server.group(runner_user)
remote_main_home_path = f"/opt/pleroma"
remote_main_pleroma_path = f"/opt/pleroma/pleroma"
server.user(
user=runner_user,
present=True,
home=remote_main_home_path,
shell="/bin/false",
group=runner_user,
ensure_home=True,
)
# commit pinning is done by having a separate branch on a mirror repo
repo_output = repo(
name="clone pleroma repo",
src="https://gitlab.com/luna/pleroma.git",
dest=remote_main_pleroma_path,
branch="securomoe/develop",
user=runner_user,
group=runner_user,
)
remote_config_path = f"{remote_main_pleroma_path}/config/prod.secret.exs"
with_secrets = WithSecrets(
(
"pleroma_secret_key_base",
"pleroma_db_password",
"pleroma_webpush_public_key",
"pleroma_webpush_private_key",
)
)
config_output = files.template(
"./files/pleroma/prod.secret.exs",
dest=remote_config_path,
user=runner_user,
group=runner_user,
mode=500,
cfg=with_secrets,
)
# download pleroma deps via mix
server.shell(
name="download pleroma deps",
_chdir=remote_main_pleroma_path,
_sudo=True,
_sudo_user=runner_user,
_env={"MIX_ENV": "prod"},
commands=[
"mix local.hex --if-missing --force",
"mix local.rebar --if-missing --force",
"mix deps.get",
],
)
# compile deps and compile pleroma
server.shell(
name="compile pleroma",
_chdir=remote_main_pleroma_path,
_sudo=True,
_sudo_user=runner_user,
_env={"MIX_ENV": "prod"},
commands=["mix deps.compile", "mix compile"],
)
# map the following sql script into pyinfra
# CREATE USER pleroma WITH ENCRYPTED PASSWORD 'aaa' CREATEDB;
# CREATE DATABASE pleroma_dev;
# ALTER DATABASE pleroma_dev OWNER TO pleroma;
# \c pleroma_dev;
# --Extensions made by ecto.migrate that need superuser access
# CREATE EXTENSION IF NOT EXISTS citext;
# CREATE EXTENSION IF NOT EXISTS pg_trgm;
# hacky as we need postgres user but also the fact will fail if postgres
# isnt initialized...
has_postgres = host.get_fact(Which, command="psql")
postgres_kwargs = {}
if has_postgres:
postgres_kwargs = {"_sudo": True, "_sudo_user": "postgres"}
postgresql.role(
role=host.data.pleroma_db_user,
password=with_secrets.pleroma_db_password,
login=True,
**postgres_kwargs,
)
db_result = postgresql.database(
database=host.data.pleroma_db_name,
owner=host.data.pleroma_db_user,
encoding="UTF8",
**postgres_kwargs,
)
# is it possible to configure pg_hba.conf to add md5 auth to local v4/v6
if db_result.changed:
postgresql.sql(
"""
CREATE EXTENSION IF NOT EXISTS citext;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
""",
database=host.data.pleroma_db_name,
**postgres_kwargs,
)
server.shell(
name="migrate database",
_chdir=remote_main_pleroma_path,
_sudo=True,
_sudo_user=runner_user,
_env={"MIX_ENV": "prod"},
commands=["mix ecto.migrate"],
)
template_and_install_systemd(
"./files/pleroma/pleroma.service.j2",
env_dict={
"user": runner_user,
"remote_main_home_path": remote_main_home_path,
"remote_main_pleroma_path": remote_main_pleroma_path,
},
restarted=repo_output.changed or config_output.changed,
)
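
On the pg_hba.conf question in the comment above: one option is a pair of files.line replacements that flip the local IPv4/IPv6 host entries from ident to md5, followed by a postgres restart. A hedged sketch, written as a continuation of install(); the data directory path and replacement lines are assumptions tied to the PGDG packages, not part of this commit:

    # hypothetical pg_hba.conf tweak; path and patterns are assumptions, not in this commit
    pg_hba = f"/var/lib/pgsql/{host.data.postgresql_version}/data/pg_hba.conf"
    for match, replacement in (
        ("127.0.0.1/32", "host    all             all             127.0.0.1/32            md5"),
        ("::1/128", "host    all             all             ::1/128                 md5"),
    ):
        files.line(
            name=f"pg_hba: md5 auth for {match}",
            path=pg_hba,
            line=match,
            replace=replacement,
        )
    systemd.service(
        service=f"postgresql-{host.data.postgresql_version}",
        restarted=True,
    )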

42
tasks/postgresql.py Normal file

@@ -0,0 +1,42 @@
from typing import Optional
from pyinfra import host
from pyinfra.api import deploy
from pyinfra.operations import dnf, systemd, server
from pyinfra.facts.server import LinuxName, LinuxDistribution
@deploy("Install PostgreSQL")
def install():
linux_name = host.get_fact(LinuxName)
version = host.data.postgresql_version
if linux_name == "Fedora":
fedora_release = host.get_fact(LinuxDistribution)["major"]
dnf.rpm(
f"https://download.postgresql.org/pub/repos/yum/reporpms/F-{fedora_release}-x86_64/pgdg-fedora-repo-latest.noarch.rpm"
)
result = dnf.packages(
name=f"Install psql {version} packages",
packages=[
f"postgresql{version}",
f"postgresql{version}-server",
f"postgresql{version}-contrib",
],
)
if result.changed:
server.shell(
name="initialize pgsql db",
commands=[
f"/usr/pgsql-{version}/bin/postgresql-{version}-setup initdb"
],
)
systemd.service(
name="install psql {version} unit",
service=f"postgresql-{version}",
running=True,
enabled=True,
daemon_reload=True,
)
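
Unlike ELIXIR_DEFAULTS in tasks/elixir.py and DEFAULTS in tasks/docker.py, this deploy has no data_defaults, so host.data.postgresql_version must be set in inventory or group data. A hedged sketch of the same defaults pattern applied here; the version value is illustrative, not part of this commit:

# hypothetical defaults, mirroring the pattern in tasks/elixir.py; not in this commit
POSTGRESQL_DEFAULTS = {
    "postgresql_version": "14",
}


@deploy("Install PostgreSQL", data_defaults=POSTGRESQL_DEFAULTS)
def install():
    ...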

13
tasks/rpmfusion.py Normal file

@@ -0,0 +1,13 @@
from pyinfra import host
from pyinfra.api import deploy
from pyinfra.operations import dnf
from pyinfra.facts.server import LinuxDistribution
@deploy("install RPM fusion")
def install():
fedora_release = host.get_fact(LinuxDistribution)["major"]
free_url = f"https://mirrors.rpmfusion.org/free/fedora/rpmfusion-free-release-{fedora_release}.noarch.rpm"
nonfree_url = f"https://mirrors.rpmfusion.org/nonfree/fedora/rpmfusion-nonfree-release-{fedora_release}.noarch.rpm"
dnf.rpm(free_url)
dnf.rpm(nonfree_url)