From 7c136306d16e78839b861c9264b42e65ef53fec6 Mon Sep 17 00:00:00 2001
From: Saibotk
Date: Thu, 12 Sep 2024 22:51:03 +0200
Subject: [PATCH] feat(podman): add role

Copied from Histalek <3
Based on https://git.histalek.de/histalek-de/infrastructure/-/tree/b17a8f117bd2ce6e7b9c64dcffb2f24294306519/roles/podman
---
 playbooks/podman.yml                          |   7 +
 roles/podman/defaults/main.yml                |  23 ++
 roles/podman/meta/main.yml                    |  22 ++
 roles/podman/tasks/Fedora.yml                 | 123 ++++++++++++++++++
 roles/podman/tasks/main.yml                   |   9 ++
 roles/podman/templates/podman-network.json.j2 |  23 ++
 6 files changed, 207 insertions(+)
 create mode 100644 playbooks/podman.yml
 create mode 100644 roles/podman/defaults/main.yml
 create mode 100644 roles/podman/meta/main.yml
 create mode 100644 roles/podman/tasks/Fedora.yml
 create mode 100644 roles/podman/tasks/main.yml
 create mode 100644 roles/podman/templates/podman-network.json.j2

diff --git a/playbooks/podman.yml b/playbooks/podman.yml
new file mode 100644
index 0000000..5addfa3
--- /dev/null
+++ b/playbooks/podman.yml
@@ -0,0 +1,7 @@
+- name: Install and configure podman.
+
+  hosts: podman
+
+  roles:
+    - role: podman
+      become: true
diff --git a/roles/podman/defaults/main.yml b/roles/podman/defaults/main.yml
new file mode 100644
index 0000000..5d1e5a9
--- /dev/null
+++ b/roles/podman/defaults/main.yml
@@ -0,0 +1,23 @@
+podman_install_machine_packages: false
+
+## The following defaults should work on most systems.
+## They allocate the UIDs/GIDs 2147483647-4294967294 which is the upper half of UIDs/GIDs intended for
+## 'normal' users and don't conflict with special systemd UIDs/GIDs.
+## This is also incidentally the example given by the podman man page
+## (Ref.: https://docs.podman.io/en/latest/markdown/podman-run.1.html#userns-mode)
+# What should be the first allocated UID/GID available for usernamespaced containers
+podman_usernamespace_uid_start: 2147483647
+
+# What should be the amount of allocated UIDs/GIDs available for usernamespaced containers
+podman_usernamespace_uid_amount: 2147483648
+# If this is set the default podman network will be manually configured to enable dual stack.
+# This should not include the prefix-length, so the setting should end with "::"
+# NOTE: This needs the default network to be recreated. Rebooting works and so should stopping
+# all containers.
+# podman_default_network_ipv6_prefix: "fdfc:ace7:1f7c:4ff3::"
+
+# Podman allows to set a timezone (--tz flag) for each container. A default can be set
+# in any of the containers.conf config files.
+# If the following option is set it will be added to the system-wide /etc/containers/containers.conf
+# Has to be an IANA timezone or "local" (the latter matches the timezone of the host)
+# podman_default_timezone: "local" # "Europe/Berlin"
diff --git a/roles/podman/meta/main.yml b/roles/podman/meta/main.yml
new file mode 100644
index 0000000..3daa07a
--- /dev/null
+++ b/roles/podman/meta/main.yml
@@ -0,0 +1,22 @@
+galaxy_info:
+  author: histalek
+  description: Install podman via system package.
+
+  issue_tracker_url: https://git.histalek.de/histalek-de/infrastructure/-/issues
+
+  license: GPL-3.0-only
+
+  min_ansible_version: "2.10"
+
+  platforms:
+    - name: Fedora
+      versions:
+        - "38"
+        - "39"
+        - "40"
+
+  standalone: true
+
+  galaxy_tags: []
+
+dependencies: []
diff --git a/roles/podman/tasks/Fedora.yml b/roles/podman/tasks/Fedora.yml
new file mode 100644
index 0000000..0bddf12
--- /dev/null
+++ b/roles/podman/tasks/Fedora.yml
@@ -0,0 +1,123 @@
+- name: Ensure podman is installed.
+  ansible.builtin.package:
+    name:
+      - "podman"
+    state: "present"
+  become: true
+
+- name: Ensure needed packages for podman machine are installed.
+  ansible.builtin.package:
+    name:
+      - "qemu-system-x86-core"
+      - "qemu-img"
+      - "podman-gvproxy"
+    state: "present"
+  become: true
+  when: podman_install_machine_packages
+
+- name: Enable sebool container_manage_cgroup.
+  ansible.posix.seboolean:
+    name: container_manage_cgroup
+    state: true
+    persistent: true
+  become: true
+
+- name: Ensure 'containers' system user exists
+  ansible.builtin.user:
+    name: "containers"
+    comment: "system user which holds subuids/subgids used by podman for rootful usernamespaced containers"
+    create_home: false
+    password: "*"
+    state: present
+    system: true
+  become: true
+
+- name: Ensure the 'containers' user has subuids/subgids configured
+  ansible.builtin.lineinfile:
+    path: "{{ item.path }}"
+    regexp: "^containers:[0-9]+:[0-9]+$"
+    line: "containers:{{ podman_usernamespace_uid_start }}:{{ podman_usernamespace_uid_amount }}"
+  loop:
+    - path: "/etc/subuid"
+    - path: "/etc/subgid"
+  become: true
+
+- name: Setup default container timezone
+  when: podman_default_timezone is defined
+  block:
+    - name: Ensure timezone is set in containers.conf
+      community.general.ini_file:
+        path: /etc/containers/containers.conf
+        backup: true
+        create: true
+        state: present
+        mode: "0644"
+        owner: root
+        group: root
+        option: tz
+        section: containers
+        value: "'{{ podman_default_timezone }}'"
+      register: podman_updated_containers_conf
+      become: true
+    - name: Validate containers.conf
+      ansible.builtin.command:
+        cmd: podman info
+      changed_when: false
+      become: true
+  rescue:
+    # This is needed if there was no containers.conf to begin with.
+    # In that case there would be no backup file and the bad containers.conf would stay behind
+    # even after the `copy` module below.
+    - name: Remove bad containers.conf
+      ansible.builtin.file:
+        path: "/etc/containers/containers.conf"
+        state: absent
+      become: true
+      when: podman_updated_containers_conf is changed # noqa: no-handler
+    - name: Restore backup file
+      ansible.builtin.copy:
+        remote_src: true
+        dest: /etc/containers/containers.conf
+        src: "{{ podman_updated_containers_conf.backup_file }}"
+        mode: "0644"
+        owner: root
+        group: root
+      become: true
+      when: podman_updated_containers_conf.backup_file is defined
+    - name: Containers.conf could not be validated after setting default timezone
+      ansible.builtin.debug:
+        msg: Please make sure that `podman_default_timezone` is either an IANA timezone or 'local'
+  always:
+    - name: Remove backup file
+      ansible.builtin.file:
+        path: "{{ podman_updated_containers_conf.backup_file }}"
+        state: absent
+      become: true
+      when: podman_updated_containers_conf.backup_file is defined
+
+- name: Ensure default network configuration exists
+  when: podman_default_network_ipv6_prefix is defined
+  block:
+    - name: Ensure default network config directory exists
+      ansible.builtin.file:
+        path: "/etc/containers/networks"
+        state: directory
+        owner: root
+        group: root
+        mode: "0755"
+      become: true
+    - name: Ensure default network config file exists
+      ansible.builtin.template:
+        src: "podman-network.json.j2"
+        dest: "/etc/containers/networks/podman.json"
+        owner: root
+        group: root
+        mode: "0600"
+      become: true
+
+- name: Ensure podman auto update is enabled
+  ansible.builtin.systemd:
+    name: podman-auto-update.timer
+    enabled: true
+    state: started
+  become: true
diff --git a/roles/podman/tasks/main.yml b/roles/podman/tasks/main.yml
new file mode 100644
index 0000000..e97cefb
--- /dev/null
+++ b/roles/podman/tasks/main.yml
@@ -0,0 +1,9 @@
+- name: Select tasks for detected distribution
+  ansible.builtin.include_tasks: "{{ distro_file }}"
+  with_first_found:
+    - "{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml"
+    - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
+    - "{{ ansible_distribution }}.yml"
+    - "{{ ansible_os_family }}.yml"
+  loop_control:
+    loop_var: distro_file
diff --git a/roles/podman/templates/podman-network.json.j2 b/roles/podman/templates/podman-network.json.j2
new file mode 100644
index 0000000..86ebbbe
--- /dev/null
+++ b/roles/podman/templates/podman-network.json.j2
@@ -0,0 +1,23 @@
+{
+  "name": "podman",
+  "id": "2f259bab93aaaaa2542ba43ef33eb990d0999ee1b9924b557b7be53c0b7a1bb9",
+  "driver": "bridge",
+  "network_interface": "podman0",
+  "created": "2023-09-17T00:00:00.0Z",
+  "subnets": [
+    {
+      "subnet": "10.88.0.0/16",
+      "gateway": "10.88.0.1"
+    },
+    {
+      "subnet": "{{ podman_default_network_ipv6_prefix }}/64",
+      "gateway": "{{ podman_default_network_ipv6_prefix }}1"
+    }
+  ],
+  "ipv6_enabled": true,
+  "internal": false,
+  "dns_enabled": false,
+  "ipam_options": {
+    "driver": "host-local"
+  }
+}