initial commit

This commit is contained in:
Tobias Petrich 2024-07-15 16:03:01 +02:00
commit cf658a2846
No known key found for this signature in database
GPG Key ID: D99301AD0515015F
36 changed files with 769 additions and 0 deletions

18
.gitignore vendored Normal file
View File

@ -0,0 +1,18 @@
### Example user template template
### Example user template
# IntelliJ project files
.idea
*.iml
out
gen
### Ansible template
*.retry
# custom
inventory.txt
*.iso
gitea-db.container
gitea-srv.container
nextcloud-db.container
nextcloud-srv.container

27
ansible/README.md Normal file
View File

@ -0,0 +1,27 @@
# Ansible MicroOS VM setup
1. Install devsec hardening collection
```shell
ansible-galaxy collection install devsec.hardening
```
2. Create the inventory.txt file for the server
3. Run the hardening playbook. Does not run completely through because of MicroOS immutability. At some point, a PR properly supporting MicroOS could be opened to https://github.com/dev-sec/ansible-os-hardening
```shell
ansible-playbook -i inventory.txt hardening.yml
```
4. Run the custom_hardening playbook. This mostly sets SSH parameters to best practice values.
```shell
ansible-playbook -i inventory.txt custom_hardening.yml
```
5. Run the allow_privileged_ports_rootless playbook. This allows a rootless traefik container to use ports 80 and 443.
```shell
ansible-playbook -i inventory.txt allow_privileged_ports_rootless.yml
```
6. Run the deploy_services playbook. This creates groups and users for each service, creates a btrfs subvolume for data and copies the quadlet files to the correct location, then activates the service.
```shell
ansible-playbook -i inventory.txt deploy_services.yml
```
7. Run the deploy_traefik_config playbook. This copies the traefik configuration to the correct location.
```shell
ansible-playbook -i inventory.txt deploy_traefik_config.yml
```

View File

@ -0,0 +1,18 @@
---
# Allows rootless (non-root) processes to bind to privileged ports starting at
# 80, so a rootless Traefik container can listen on 80/443
# (see ansible/README.md, step 5).
- name: Allow normal users to bind to port 80
  hosts: all
  become: true
  tasks:
    - name: Set sysctl to allow normal users to bind to ports starting from 80
      sysctl:
        name: net.ipv4.ip_unprivileged_port_start
        value: '80'  # quoted: the sysctl module expects string values
        state: present
        reload: true  # apply immediately and persist
    - name: Verify the sysctl setting
      command: sysctl net.ipv4.ip_unprivileged_port_start
      register: sysctl_result
      changed_when: false  # read-only check, never report a change
    - name: Show the effective setting
      debug:
        msg: "net.ipv4.ip_unprivileged_port_start: {{ sysctl_result.stdout }}"

View File

@ -0,0 +1,67 @@
---
# Deploys SSH best-practice settings as a drop-in under /etc/ssh/sshd_config.d
# and then verifies the effective configuration with 'sshd -T'.
- name: Apply SSH best practices configuration
  hosts: all
  become: true
  tasks:
    - name: Ensure the sshd_config.d directory exists
      file:
        path: /etc/ssh/sshd_config.d
        state: directory
        owner: root
        group: root
        mode: '0755'
    # NOTE(review): 'Protocol' was removed in modern OpenSSH and
    # 'ChallengeResponseAuthentication' is a deprecated alias of
    # KbdInteractiveAuthentication — confirm the target's sshd accepts both.
    - name: Create a configuration file to apply SSH best practices
      copy:
        content: |
          # Disable password authentication
          PasswordAuthentication no
          # Disable challenge-response authentication
          ChallengeResponseAuthentication no
          # Allow root login
          PermitRootLogin yes
          # Disable empty passwords
          PermitEmptyPasswords no
          # Disable X11 forwarding
          X11Forwarding no
          # Use only protocol 2
          Protocol 2
          # Log more verbosely
          LogLevel VERBOSE
          # Keep-alive packets to ensure connection stability
          TCPKeepAlive yes
          ClientAliveInterval 60
          ClientAliveCountMax 10
        dest: /etc/ssh/sshd_config.d/best_practices.conf
        owner: root
        group: root
        mode: '0644'
    - name: Restart SSH service to apply changes
      service:
        name: sshd
        state: restarted
    - name: Verify SSH configuration settings
      shell: "sshd -T"
      register: ssh_config_result
      changed_when: false  # inspection only
    # Each filter prints the matching 'sshd -T' line (empty list = not in effect).
    # Fixed: the config above sets ClientAliveCountMax 10, but the original
    # check grepped for 'clientalivecountmax 3' and could never match.
    - name: Check specific SSH settings
      debug:
        msg: "{{ ssh_config_result.stdout_lines | select('search', 'passwordauthentication no') | list }}\n
          {{ ssh_config_result.stdout_lines | select('search', 'challengeresponseauthentication no') | list }}\n
          {{ ssh_config_result.stdout_lines | select('search', 'permitrootlogin yes') | list }}\n
          {{ ssh_config_result.stdout_lines | select('search', 'permitemptypasswords no') | list }}\n
          {{ ssh_config_result.stdout_lines | select('search', 'x11forwarding no') | list }}\n
          {{ ssh_config_result.stdout_lines | select('search', 'protocol 2') | list }}\n
          {{ ssh_config_result.stdout_lines | select('search', 'loglevel verbose') | list }}\n
          {{ ssh_config_result.stdout_lines | select('search', 'clientaliveinterval 60') | list }}\n
          {{ ssh_config_result.stdout_lines | select('search', 'clientalivecountmax 10') | list }}\n
          {{ ssh_config_result.stdout_lines | select('search', 'tcpkeepalive yes') | list }}"

View File

@ -0,0 +1,82 @@
---
# Installs the container runtime (MicroOS-style, via transactional-update) and
# deploys every service through the rootless-podman-service role.
- name: Deploy services
  hosts: all
  become: true
  tasks:
    # Install base software for rootless podman containers.
    # 'zypper se -i <pkg>' exits non-zero when the package is not installed.
    - name: Check if systemd-container is installed
      ansible.builtin.command:
        cmd: "zypper se -i systemd-container"
      register: systemd_container_installed
      ignore_errors: true
      changed_when: false  # read-only query
    - name: Check if podman is installed
      ansible.builtin.command:
        cmd: "zypper se -i podman"
      register: podman_installed
      ignore_errors: true
      changed_when: false  # read-only query
    # MicroOS has an immutable root: transactional-update installs into a new
    # snapshot, which only takes effect after the reboot below.
    - name: Install software if not installed
      ansible.builtin.command:
        cmd: "transactional-update --non-interactive pkg in systemd-container podman"
      become: true
      when: systemd_container_installed.rc != 0 or podman_installed.rc != 0
      register: software_installed
    - name: Reboot if software was installed
      ansible.builtin.reboot:
      when: software_installed.changed
    # Deploy services as rootless containers
    - name: Deploy traefik
      include_role:
        name: rootless-podman-service
      vars:
        service_name: "traefik"
        systemd_service_name: "traefik"
        quadlet_template_src: "./my_service_templates"
    - name: Deploy wekantesting
      include_role:
        name: rootless-podman-service
      vars:
        # NOTE(review): "null" is the literal string, not YAML null —
        # enable_service.yml will act on a user unit literally named "null".
        # Presumably a deliberate way to skip unit management while the pod
        # unit name is commented out; confirm.
        service_name: "wekantesting"
        systemd_service_name: "null" #"wekantesting-pod"
        quadlet_template_src: "./my_service_templates"
    - name: Deploy wekan
      include_role:
        name: rootless-podman-service
      vars:
        service_name: "wekan"
        systemd_service_name: "null" #"wekan-pod"
        quadlet_template_src: "./my_service_templates"
    - name: Deploy gitea
      include_role:
        name: rootless-podman-service
      vars:
        service_name: "gitea"
        systemd_service_name: "gitea-pod"
        quadlet_template_src: "./my_service_templates"
    - name: Deploy mumble
      include_role:
        name: rootless-podman-service
      vars:
        service_name: "mumble"
        systemd_service_name: "mumble"
        quadlet_template_src: "./my_service_templates"
    - name: Deploy bitwarden
      include_role:
        name: rootless-podman-service
      vars:
        service_name: "bitwarden"
        systemd_service_name: "bitwarden"
        quadlet_template_src: "./my_service_templates"
    - name: Deploy actual
      include_role:
        name: rootless-podman-service
      vars:
        service_name: "actual"
        systemd_service_name: "actual"
        quadlet_template_src: "./my_service_templates"
    - name: Deploy nextcloud
      include_role:
        name: rootless-podman-service
      vars:
        service_name: "nextcloud"
        systemd_service_name: "nextcloud-pod"
        quadlet_template_src: "./my_service_templates"

View File

@ -0,0 +1,11 @@
---
# Pushes the local traefik_config/ directory to /var/vol/traefik/ on the
# target. The synchronize module wraps rsync, so rsync must be available on
# both controller and target.
- name: Copy Traefik configuration files to the server
  hosts: all
  become: true
  tasks:
    - name: Synchronize Traefik configuration files
      synchronize:
        src: ./traefik_config/
        dest: /var/vol/traefik/
        rsync_opts:
          - "--chown=traefik:traefik"  # rootless traefik user must own its config

89
ansible/hardening.yml Normal file
View File

@ -0,0 +1,89 @@
---
# Applies the devsec.hardening os_hardening role with overrides for openSUSE
# MicroOS (treated as Tumbleweed). Per ansible/README.md this run does not
# complete on MicroOS because of the immutable root filesystem.
- name: Apply DevSec hardening
  hosts: all
  become: true
  vars:
    sysctl_overwrite:
      # Enable IPv4 traffic forwarding. Needed for containers.
      net.ipv4.ip_forward: 1
    os_security_users_allow:
      - "change_user" # Ensure this user is allowed to avoid modifying /bin/su (does not work on read-only filesystems)
    os_family: "Suse"
    os_release: "Tumbleweed" # Treat MicroOS as Tumbleweed
    os_version: "{{ ansible_distribution_version }}"
    os_vars:
      packages:
        - sudo
        - openssh
      ignore_fs_types:
        - squashfs
        - iso9660
        - vfat
      auth_pam:
        - common-password
        - common-auth
        - common-account
        - common-session
      pam_passwords:
        - password requisite pam_pwquality.so retry=3
        - password required pam_unix.so use_authtok remember=5 sha512 shadow
      securetty: [console, tty1, tty2, tty3, tty4, tty5, tty6]
      sshd:
        package: openssh
        service: sshd
        config: /etc/ssh/sshd_config
    kernel_modules_disabled:
      - cramfs
      - freevxfs
      - jffs2
      - hfs
      - hfsplus
      - squashfs
      - udf
      - vfat
    auditd_package: audit # This is the correct package name for auditd in openSUSE
    os_env_umask: "027" # Setting a default umask value
    os_auth_uid_min: "1000" # Setting the minimum user ID for non-system users
    os_auth_uid_max: "60000" # Setting the maximum user ID for non-system users
    os_auth_gid_min: 1000
    os_auth_gid_max: 60000
    os_auth_sys_uid_min: "100" # Setting the minimum user ID for system users
    os_auth_sys_uid_max: "499" # Setting the maximum user ID for system users
    os_auth_sys_gid_min: 100
    os_auth_sys_gid_max: 499
    os_auth_sub_uid_min: 100000
    os_auth_sub_uid_max: 600100000
    os_auth_sub_uid_count: 65536
    os_auth_sub_gid_min: 100000
    os_auth_sub_gid_max: 600100000
    os_auth_sub_gid_count: 65536
    os_shadow_perms:
      owner: root
      group: shadow
      mode: "0640"
    os_passwd_perms:
      owner: root
      group: root
      mode: "0644"
    hidepid_option: "2" # allowed values: 0, 1, 2
    # /boot, /dev, /home, ... mount points stay root-owned.
    os_mnt_boot_group: 'root'
    os_mnt_boot_owner: 'root'
    os_mnt_dev_group: 'root'
    os_mnt_dev_owner: 'root'
    os_mnt_dev_shm_group: 'root'
    os_mnt_dev_shm_owner: 'root'
    os_mnt_home_group: 'root'
    os_mnt_home_owner: 'root'
    os_mnt_run_group: 'root'
    os_mnt_run_owner: 'root'
    os_mnt_tmp_group: 'root'
    os_mnt_tmp_owner: 'root'
    os_mnt_var_group: 'root'
    os_mnt_var_owner: 'root'
    os_mnt_var_log_group: 'root'
    os_mnt_var_log_owner: 'root'
    os_mnt_var_log_audit_group: 'root'
    os_mnt_var_log_audit_owner: 'root'
    os_mnt_var_tmp_group: 'root'
    os_mnt_var_tmp_owner: 'root'
  roles:
    - devsec.hardening.os_hardening

View File

@ -0,0 +1,17 @@
[Unit]
Description=Actual deployment
[Container]
ContainerName=actual
Image=docker.io/actualbudget/actual-server:latest
# Bound to localhost only; Traefik proxies actual.rohrschacht.de to port 8500
PublishPort=127.0.0.1:8500:5006
# :Z relabels the volume for SELinux
Volume=/var/vol/actual:/data:Z
AutoUpdate=registry
[Service]
# Restart the container automatically if it exits with a failure
Restart=on-failure
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

View File

@ -0,0 +1,17 @@
[Unit]
Description=Bitwarden deployment
[Container]
ContainerName=bitwarden
Image=docker.io/vaultwarden/server:latest
# Bound to localhost only; Traefik proxies vault.rohrschacht.de to port 8400
PublishPort=127.0.0.1:8400:80
# :Z relabels the volume for SELinux
Volume=/var/vol/bitwarden:/data:Z
AutoUpdate=registry
[Service]
# Restart the container automatically if it exits with a failure
Restart=on-failure
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

View File

@ -0,0 +1,13 @@
[Unit]
Description=Gitea database
[Container]
ContainerName=gitea-db
Image=docker.io/postgres:14
# :Z relabels the volume for SELinux
Volume=/var/vol/gitea/db:/var/lib/postgresql/data:Z
Environment=LANG=en_US.utf8
# Keep the cluster in a subdirectory of the mounted volume
Environment=PGDATA=/var/lib/postgresql/data/pgdata
Environment=POSTGRES_USER=<REDACTED>
Environment=POSTGRES_PASSWORD=<REDACTED>
AutoUpdate=registry
# Joins the gitea pod, so gitea-srv reaches it on 127.0.0.1:5432
Pod=gitea.pod

View File

@ -0,0 +1,16 @@
[Unit]
Description=Gitea server
[Container]
ContainerName=gitea-srv
Image=docker.io/gitea/gitea:latest
Volume=/var/vol/gitea/data:/data:Z
Environment=USER_UID=1000
Environment=USER_GID=1000
Environment=GITEA__database__DB_TYPE=postgres
# Database container runs in the same pod, hence reachable via localhost
Environment=GITEA__database__DB_HOST=127.0.0.1:5432
Environment=GITEA__database__DB_NAME=<REDACTED>
Environment=GITEA__database__DB_USER=<REDACTED>
Environment=GITEA__database__DB_PASSWD=<REDACTED>
AutoUpdate=registry
Pod=gitea.pod

View File

@ -0,0 +1,10 @@
[Unit]
Description=Gitea deployment
[Pod]
PodName=gitea
# One mapping per PublishPort line: podman's --publish takes a single mapping
# per flag, so the comma-separated list "127.0.0.1:8300:3000,7722:22" does not
# parse; quadlet allows repeating the key instead.
# 8300 = HTTP (localhost-only, proxied by Traefik), 7722 = container SSH (22).
PublishPort=127.0.0.1:8300:3000
PublishPort=7722:22
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

View File

@ -0,0 +1,17 @@
[Unit]
Description=Mumble deployment
[Container]
ContainerName=mumble
Image=docker.io/mumblevoip/mumble-server:latest
# One mapping per PublishPort line: podman's --publish takes a single mapping
# per flag, so the comma-separated "64738:64738,64738:64738/udp" does not
# parse; quadlet allows repeating the key instead.
PublishPort=64738:64738
PublishPort=64738:64738/udp
# :Z relabels the volume for SELinux
Volume=/var/vol/mumble:/data:Z
AutoUpdate=registry
[Service]
# Restart the container automatically if it exits with a failure
Restart=on-failure
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

View File

@ -0,0 +1,13 @@
[Unit]
Description=Nextcloud database
[Container]
ContainerName=nextcloud-db
Image=docker.io/postgres:12
# :Z relabels the volume for SELinux
Volume=/var/vol/nextcloud/db:/var/lib/postgresql/data:Z
Environment=LANG=en_US.utf8
# Keep the cluster in a subdirectory of the mounted volume
Environment=PGDATA=/var/lib/postgresql/data/pgdata
Environment=POSTGRES_USER=<REDACTED>
Environment=POSTGRES_PASSWORD=<REDACTED>
AutoUpdate=registry
# Joins the nextcloud pod, so nextcloud-srv reaches it on 127.0.0.1:5432
Pod=nextcloud.pod

View File

@ -0,0 +1,16 @@
[Unit]
Description=Nextcloud server
[Container]
ContainerName=nextcloud-srv
Image=docker.io/nextcloud:27
Volume=/var/vol/nextcloud/data:/var/www/html:Z
Environment=USER_UID=1000
Environment=USER_GID=1000
Environment=PHP_MEMORY_LIMIT=4G
# Database container runs in the same pod, hence reachable via localhost
Environment=POSTGRES_HOST=127.0.0.1:5432
Environment=POSTGRES_DB=<REDACTED>
Environment=POSTGRES_USER=<REDACTED>
Environment=POSTGRES_PASSWORD=<REDACTED>
AutoUpdate=registry
Pod=nextcloud.pod

View File

@ -0,0 +1,10 @@
[Unit]
Description=Nextcloud deployment
[Pod]
PodName=nextcloud
# Bound to localhost only; Traefik proxies nextcloud.rohrschacht.de to 8600
PublishPort=127.0.0.1:8600:80
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

View File

@ -0,0 +1,17 @@
[Unit]
Description=Ingress for this server
[Container]
ContainerName=traefik
Image=docker.io/traefik:latest
# Host networking: traefik binds 80/443 directly (see
# allow_privileged_ports_rootless.yml) and reaches the published
# localhost ports of the other services
Network=host
# :Z relabels the volume for SELinux
Volume=/var/vol/traefik:/etc/traefik:Z
AutoUpdate=registry
[Service]
# Restart the container automatically if it exits with a failure
Restart=on-failure
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

View File

@ -0,0 +1,9 @@
[Unit]
Description=Wekan database
[Container]
ContainerName=wekan-db
Image=docker.io/mongo:6
# :Z relabels the volume for SELinux
Volume=/var/vol/wekan/db:/data/db:Z
AutoUpdate=registry
# Joins the wekan pod, so wekan-srv reaches it on 127.0.0.1:27017
Pod=wekan.pod

View File

@ -0,0 +1,10 @@
[Unit]
Description=Wekan server
[Container]
ContainerName=wekan-srv
# NOTE(review): pins an arch-specific tag of one version; AutoUpdate=registry
# will only pick up re-pushes of this exact tag — confirm intended
Image=ghcr.io/wekan/wekan:v7.09-arm64
Environment=ROOT_URL=https://wekan.rohrschacht.de
# Database container runs in the same pod, hence reachable via localhost
Environment=MONGO_URL=mongodb://127.0.0.1:27017/wekan
AutoUpdate=registry
Pod=wekan.pod

View File

@ -0,0 +1,10 @@
[Unit]
Description=Wekan deployment
[Pod]
PodName=wekan
# NOTE(review): binds on all interfaces, unlike the localhost-only pods
# (gitea, nextcloud) — confirm the direct exposure of 8100 is intended
PublishPort=8100:8080
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

View File

@ -0,0 +1,9 @@
[Unit]
Description=Wekan testing database
[Container]
ContainerName=wekantesting-db
Image=docker.io/mongo:6
# :Z relabels the volume for SELinux
Volume=/var/vol/wekantesting/db:/data/db:Z
AutoUpdate=registry
# Joins the wekantesting pod, so wekantesting-srv reaches it on 127.0.0.1:27017
Pod=wekantesting.pod

View File

@ -0,0 +1,10 @@
[Unit]
Description=Wekan testing server
[Container]
ContainerName=wekantesting-srv
# NOTE(review): pins an arch-specific tag of one version; AutoUpdate=registry
# will only pick up re-pushes of this exact tag — confirm intended
Image=ghcr.io/wekan/wekan:v7.09-arm64
Environment=ROOT_URL=https://wekantesting.rohrschacht.de
# Database container runs in the same pod, hence reachable via localhost
Environment=MONGO_URL=mongodb://127.0.0.1:27017/wekan
AutoUpdate=registry
Pod=wekantesting.pod

View File

@ -0,0 +1,10 @@
[Unit]
Description=Wekan testing deployment
[Pod]
PodName=wekantesting
# NOTE(review): binds on all interfaces, unlike the localhost-only pods
# (gitea, nextcloud) — confirm the direct exposure of 8200 is intended
PublishPort=8200:8080
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

View File

@ -0,0 +1,6 @@
---
# Default variables for the rootless-podman-service role.
# Callers override these per include_role (see ansible/deploy_services.yml).
service_name: "default_service"  # Unix user/group the service runs as
systemd_service_name: "default_service"  # user-level systemd unit to start/restart
btrfs_base_path: "/var/vol"  # parent directory for the per-service btrfs subvolume
quadlet_template_src: "./templates"  # controller-side directory holding quadlet files
force_systemd_restart: false  # restart the unit even when no quadlet file changed

View File

@ -0,0 +1,18 @@
---
# Installs the service's quadlet unit files into the per-user systemd
# generator directory so podman can create the user-level units.
- name: Ensure Quadlet configuration directory exists
  ansible.builtin.file:
    path: "/home/{{ service_name }}/.config/containers/systemd"
    state: directory
    owner: "{{ service_name }}"
    group: "{{ service_name }}"
    mode: '0755'
- name: Copy Quadlet files to the user's systemd directory
  ansible.builtin.copy:
    src: "{{ quadlet_template_src }}/{{ service_name }}/"
    dest: "/home/{{ service_name }}/.config/containers/systemd/"
    owner: "{{ service_name }}"
    group: "{{ service_name }}"
    mode: '0644'
    remote_src: false  # source files live on the controller, not the target
  register: quadlet_files_copied  # consumed by enable_service.yml to decide start vs restart

View File

@ -0,0 +1,18 @@
---
# Provision the service's data directory as a dedicated btrfs subvolume
# under {{ btrfs_base_path }} and hand it to the service user.
- name: Ensure Btrfs base path exists
  ansible.builtin.file:
    path: "{{ btrfs_base_path }}"
    state: directory
    mode: '0755'
- name: Create Btrfs subvolume
  ansible.builtin.command:
    cmd: "btrfs subvolume create {{ btrfs_base_path }}/{{ service_name }}"
    # 'creates' keeps the task idempotent: skip once the subvolume exists
    creates: "{{ btrfs_base_path }}/{{ service_name }}"
- name: Set permissions for Btrfs subvolume
  ansible.builtin.file:
    path: "{{ btrfs_base_path }}/{{ service_name }}"
    owner: "{{ service_name }}"
    group: "{{ service_name }}"

View File

@ -0,0 +1,14 @@
---
# Creates the dedicated group and user that one service's rootless
# containers run as.
- name: Ensure group is present
  ansible.builtin.group:
    name: "{{ service_name }}"
    state: present
- name: Ensure user is present
  ansible.builtin.user:
    name: "{{ service_name }}"
    group: "{{ service_name }}"
    state: present
    shell: /bin/bash  # enable_service.yml runs commands via 'machinectl shell ... /bin/bash'
    home: "/home/{{ service_name }}"
    create_home: true  # quadlet files go under ~/.config/containers/systemd

View File

@ -0,0 +1,4 @@
---
# Enables systemd lingering so the user's services run without an active
# login session (required for rootless quadlet units to start at boot).
- name: Enable linger for the user
  ansible.builtin.command:
    cmd: "loginctl enable-linger {{ service_name }}"
    # enable-linger drops a flag file here; 'creates' makes the task idempotent
    creates: "/var/lib/systemd/linger/{{ service_name }}"

View File

@ -0,0 +1,16 @@
---
# Starts (or restarts) the service's user-level systemd unit. machinectl is
# used to obtain a proper login session for the service user.
- name: Check if service is already running
  # Must be the shell module: ansible.builtin.command does not interpret '|',
  # so with 'command' the pipe and 'grep -qv inactive' were passed to
  # machinectl as literal arguments instead of filtering the output.
  ansible.builtin.shell:
    cmd: "machinectl shell {{ service_name }}@ /bin/bash -c 'systemctl --user is-active {{ systemd_service_name }}' | grep -qv inactive"
  register: service_status
  ignore_errors: true
  changed_when: false  # read-only probe
# rc != 0 means the unit was inactive: do a first start.
- name: Enable and start the main service
  ansible.builtin.command:
    cmd: "machinectl shell {{ service_name }}@ /bin/bash -c 'systemctl --user daemon-reload && systemctl --user start {{ systemd_service_name }}'"
  become: true
  when: service_status.rc != 0 and (quadlet_files_copied.changed or force_systemd_restart)
# rc == 0 means the unit was running: restart it to pick up new quadlet files.
- name: Restart the main service
  ansible.builtin.command:
    cmd: "machinectl shell {{ service_name }}@ /bin/bash -c 'systemctl --user daemon-reload && systemctl --user restart {{ systemd_service_name }}'"
  become: true
  when: service_status.rc == 0 and (quadlet_files_copied.changed or force_systemd_restart)

View File

@ -0,0 +1,15 @@
---
# Entry point of the rootless-podman-service role: provisions a dedicated
# user, btrfs storage and quadlet units for one service, then (re)starts it.
- name: Create user and group
  include_tasks: create_user.yml
- name: Create Btrfs subvolume
  include_tasks: create_btrfs_subvolume.yml
- name: Enable linger for the user
  include_tasks: enable_linger.yml
- name: Copy Quadlet files
  include_tasks: copy_quadlet_files.yml
- name: Enable and start main service
  include_tasks: enable_service.yml

View File

@ -0,0 +1,92 @@
# Traefik dynamic configuration: one router + one localhost backend service
# per application; the backend ports match the quadlet PublishPort mappings.
http:
  routers:
    # Router for wekan.rohrschacht.de
    wekan-router:
      rule: "Host(`wekan.rohrschacht.de`)"
      entryPoints:
        - websecure
      tls:
        certResolver: letsencrypt
      service: wekan-service
    # Router for wekantesting.rohrschacht.de
    wekantesting-router:
      rule: "Host(`wekantesting.rohrschacht.de`)"
      entryPoints:
        - websecure
      tls:
        certResolver: letsencrypt
      service: wekantesting-service
    # Router for git.rohrschacht.de
    git-router:
      rule: "Host(`git.rohrschacht.de`) || Host(`gitea.rohrschacht.de`)"
      entryPoints:
        - websecure
      tls:
        certResolver: letsencrypt
      service: gitea-service
    # Router for vault.rohrschacht.de
    vault-router:
      rule: "Host(`vault.rohrschacht.de`)"
      entryPoints:
        - websecure
      tls:
        certResolver: letsencrypt
      service: vault-service
    # Router for actual.rohrschacht.de
    actual-router:
      rule: "Host(`actual.rohrschacht.de`)"
      entryPoints:
        - websecure
      tls:
        certResolver: letsencrypt
      service: actual-service
    # Router for nextcloud.rohrschacht.de
    nextcloud-router:
      rule: "Host(`nextcloud.rohrschacht.de`)"
      entryPoints:
        - websecure
      tls:
        certResolver: letsencrypt
      service: nextcloud-service
  services:
    # Service for wekan.rohrschacht.de
    wekan-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8100"
    # Service for wekantesting.rohrschacht.de
    wekantesting-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8200"
    # Service for gitea.rohrschacht.de
    gitea-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8300"
    # Service for vault.rohrschacht.de
    vault-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8400"
    # Service for actual.rohrschacht.de
    actual-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8500"
    # Service for nextcloud.rohrschacht.de
    nextcloud-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8600"

View File

@ -0,0 +1,27 @@
# Traefik static configuration: HTTP-to-HTTPS redirect, Let's Encrypt
# certificate resolver, and a file provider watching /etc/traefik for the
# dynamic configuration.
entryPoints:
  web:
    address: ":80"
    http:
      # Permanently redirect all plain-HTTP traffic to the TLS entrypoint
      redirections:
        entryPoint:
          to: websecure
          scheme: https
          permanent: true
  websecure:
    address: ":443"
certificatesResolvers:
  letsencrypt:
    acme:
      email: tobias@rohrschacht.de
      # Issued certificates are persisted in the mounted config volume
      storage: /etc/traefik/acme.json
      httpChallenge:
        entryPoint: web
api:
  dashboard: false
providers:
  file:
    # Dynamic config (routers/services) lives next to this file
    directory: /etc/traefik
    watch: true

12
ignition/README.md Normal file
View File

@ -0,0 +1,12 @@
# Ignition public SSH key setup
1. Configure ssh public key in ignition-config.yml
2. Run butane to generate the ignition file
```shell
podman run --interactive --rm quay.io/coreos/butane:release --pretty --strict < ignition-config.yml > disk/ignition/config.ign
```
3. Create the disk image
```shell
./generate-iso.sh
```
4. Load ignition.iso as DVD image into the VM after setting it up with the MicroOS VM image, before the first boot

View File

@ -0,0 +1,15 @@
{
"ignition": {
"version": "3.0.0"
},
"passwd": {
"users": [
{
"name": "root",
"sshAuthorizedKeys": [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH+lOLplrdWardSEdw3aiEKj/P59UZwxpqQShfSVID/b"
]
}
]
}
}

8
ignition/generate-iso.sh Executable file
View File

@ -0,0 +1,8 @@
#!/bin/bash
# Builds the ignition ISO (volume label "ignition") from the disk/ directory,
# for attachment as a DVD image on the VM's first boot (see ignition/README.md).
# Output path defaults to ignition.iso; pass an alternative path as $1.
# The filename options keep "config.ign" intact on the ISO9660 filesystem.
mkisofs -max-iso9660-filenames \
  -untranslated-filenames \
  -allow-multidot \
  -omit-period \
  -V ignition \
  -o "${1:-ignition.iso}" disk

View File

@ -0,0 +1,8 @@
variant: fcos
version: 1.0.0
passwd:
users:
- name: root
ssh_authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH+lOLplrdWardSEdw3aiEKj/P59UZwxpqQShfSVID/b