!fix(lvm_self_backup): Rename vars according to lint rule
parent 64da9630c3
commit 824af595f2
5 changed files with 56 additions and 56 deletions
@@ -19,12 +19,12 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 # The directory to put the status flag in
-backup_status_directory: "/monitoring"
+lvm_self_backup_status_directory: "/monitoring"
 
 # Status directory user & group id
 # This can be used for example to give telegraf access to the status files.
-backup_status_directory_uid: 100
-backup_status_directory_gid: 101
+lvm_self_backup_status_directory_uid: 100
+lvm_self_backup_status_directory_gid: 101
 
 # The LVM volumes to backup
 # Available fields:
@@ -32,32 +32,32 @@ backup_status_directory_gid: 101
 # lv_name - The logical volume name that should be backed up
 # fstype - Optional. Set to "xfs" to mount the snapshots with the `nouuid` flag when the XFS filesystem is used.
 #
-backup_vols: []
+lvm_self_backup_vols: []
 
 # The backup target (see duplicity for valid inputs)
-backup_target: ""
+lvm_self_backup_target: ""
 
 # The duplicity image & version
-backup_duplicity_image: "quay.io/sheogorath/duplicity"
+lvm_self_backup_duplicity_image: "quay.io/sheogorath/duplicity"
 # renovate: depName=quay.io/sheogorath/duplicity
-backup_duplicity_version: "1.2.3"
-backup_duplicity_image_version: "{{ backup_duplicity_version }}"
+lvm_self_backup_duplicity_version: "1.2.3"
+lvm_self_backup_duplicity_image_version: "{{ lvm_self_backup_duplicity_version }}"
 
 # The GPG options to be used when backing up
 # Generate a GPG key as the root user before using this role and enter the details here!
-backup_gpg:
+lvm_self_backup_gpg:
   id: ""
   passphrase: "PASSWORD"
-  sign_key: "{{ backup_gpg.id }}"
+  sign_key: "{{ lvm_self_backup_gpg.id }}"
   encryption_keys:
-    - "{{ backup_gpg.id }}"
+    - "{{ lvm_self_backup_gpg.id }}"
 
 # Should the systemd timer be enabled to automatically backup every day?
-backup_timer_enabled: true
-backup_timer_state: 'started'
+lvm_self_backup_timer_enabled: true
+lvm_self_backup_timer_state: "started"
 
 # Management for backup retention if enabled, backups will be dropped.
-backup_retention:
+lvm_self_backup_retention:
   # Number of incremental backups to keep.
   # Incremental backups require a full backup as base.
   # Therefore the incremental count must be lower
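The defaults above are meant to be overridden per host or group. A minimal sketch of inventory variables using the renamed names — the volume group, logical volume, target URL, key id, and retention counts are illustrative placeholders, not values from this commit, and the retention structure is inferred from the backup script template further down:

# group_vars/backup_hosts.yml — hypothetical path, sketch only
lvm_self_backup_vols:
  - vg_name: "vg0"       # placeholder volume group
    lv_name: "data"      # placeholder logical volume
    fstype: "xfs"        # optional; mounts the snapshot with the nouuid flag
lvm_self_backup_target: "sftp://backup@backup.example.com/lvm"   # any duplicity target URL
lvm_self_backup_gpg:
  id: "0xDEADBEEF"       # placeholder GPG key id, generated as root beforehand
  passphrase: "{{ vault_backup_passphrase }}"   # placeholder vault variable
lvm_self_backup_retention:
  incremental:
    enabled: true
    count: 2             # keep incrementals belonging to the last two full backups
  full:
    enabled: true
    count: 6             # keep the last six full backups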
@@ -19,35 +19,35 @@
 
 - name: Create backup mount directories
   ansible.builtin.file:
-    path: "/backup_snapshots/{{ item.vg_name }}-{{ item.lv_name }}"
+    path: "/lvm_self_backup_snapshots/{{ item.vg_name }}-{{ item.lv_name }}"
     state: directory
-    mode: '0700'
-    owner: 'root'
-    group: 'root'
+    mode: "0700"
+    owner: "root"
+    group: "root"
     recurse: true
   with_items:
-    - "{{ backup_vols }}"
+    - "{{ lvm_self_backup_vols }}"
   become: true
 
 - name: Create backup status directory
   ansible.builtin.file:
     path: "{{ item }}"
     state: directory
-    mode: '0700'
-    owner: "{{ backup_status_directory_uid }}"
-    group: "{{ backup_status_directory_gid }}"
+    mode: "0700"
+    owner: "{{ lvm_self_backup_status_directory_uid }}"
+    group: "{{ lvm_self_backup_status_directory_gid }}"
     recurse: true
   with_items:
-    - "{{ backup_status_directory }}"
+    - "{{ lvm_self_backup_status_directory }}"
   become: true
 
 - name: Install backup scripts.
   ansible.builtin.template:
     src: "{{ item.src }}"
     dest: "{{ item.dest }}"
-    mode: '0700'
-    owner: 'root'
-    group: 'root'
+    mode: "0700"
+    owner: "root"
+    group: "root"
   with_items:
     - src: "backup-lvm.sh"
       dest: "/usr/local/bin/backup-lvm"
@@ -59,9 +59,9 @@
   ansible.builtin.template:
     src: "{{ item }}"
     dest: /etc/systemd/system/
-    mode: '0600'
-    owner: 'root'
-    group: 'root'
+    mode: "0600"
+    owner: "root"
+    group: "root"
   with_items:
     - "backup-lvm.service"
     - "backup-lvm.timer"
@@ -72,6 +72,6 @@
   ansible.builtin.systemd:
     daemon_reload: true
     name: backup-lvm.timer
-    enabled: '{{ backup_timer_enabled | bool }}'
-    state: '{{ backup_timer_state }}'
+    enabled: "{{ lvm_self_backup_timer_enabled | bool }}"
+    state: "{{ lvm_self_backup_timer_state }}"
   become: true
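The tasks above escalate with become on each step; a minimal playbook sketch that applies the role with the renamed defaults — the play name, host group, and role name are assumptions based on the commit scope, not taken from this diff:

# playbook.yml — sketch only
- name: Back up LVM volumes with duplicity
  hosts: backup_hosts          # placeholder inventory group
  roles:
    - role: lvm_self_backup    # role name assumed from the commit scope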
@@ -26,12 +26,12 @@ docker run --name="duplicity-backup-lvm" \
 -v "$HOME/.cache/duplicity/:/archive" \
 -v "/root/.gnupg/:/root/.gnupg/" \
 -v "/backup_snapshots/:/backup/:ro" \
--e "PASSPHRASE={{ backup_gpg.passphrase }}" \
+-e "PASSPHRASE={{ lvm_self_backup_gpg.passphrase }}" \
 --hostname {{ ansible_fqdn }} \
-{{ backup_duplicity_image }}:{{ backup_duplicity_image_version }} \
+{{ lvm_self_backup_duplicity_image }}:{{ lvm_self_backup_duplicity_image_version }} \
 list-current-files \
---sign-key "{{ backup_gpg.sign_key | default(backup_gpg.id) }}" \
-{% for encryption_key in backup_gpg.encryption_keys | default([backup_gpg.id]) %}
+--sign-key "{{ lvm_self_backup_gpg.sign_key | default(lvm_self_backup_gpg.id) }}" \
+{% for encryption_key in lvm_self_backup_gpg.encryption_keys | default([lvm_self_backup_gpg.id]) %}
 --encrypt-key "{{ encryption_key }}" \
 {% endfor %}
-"{{ backup_target }}"
+"{{ lvm_self_backup_target }}"
@@ -7,13 +7,13 @@ Type=oneshot
 TimeoutSec=20h
 
 # Creating snapshots
-{% for item in backup_vols %}
+{% for item in lvm_self_backup_vols %}
 ExecStartPre=/usr/sbin/lvcreate -L 1G -n {{ item.lv_name }}_snap -s {{ item.vg_name }}/{{ item.lv_name }}
 {% endfor %}
 
 # Mounting snapshots
-{% for item in backup_vols %}
-ExecStartPre=/usr/bin/mount -o ro,nosuid,noexec{% if item.fstype is defined and item.fstype == "xfs" %},nouuid{% endif %} -t {{ item.fstype | default("ext4") }} /dev/{{ item.vg_name }}/{{ item.lv_name }}_snap /backup_snapshots/{{ item.vg_name }}-{{ item.lv_name }}
+{% for item in lvm_self_backup_vols %}
+ExecStartPre=/usr/bin/mount -o ro,nosuid,noexec{% if item.fstype is defined and item.fstype == "xfs" %},nouuid{% endif %} -t {{ item.fstype | default("ext4") }} /dev/{{ item.vg_name }}/{{ item.lv_name }}_snap /lvm_self_backup_snapshots/{{ item.vg_name }}-{{ item.lv_name }}
 {% endfor %}
 
 ExecStart=/usr/local/bin/backup-lvm
@@ -22,12 +22,12 @@ ExecStart=/usr/local/bin/backup-lvm
 ExecStopPost=/usr/bin/docker rm --force duplicity-backup-lvm
 
 # Unmount snapshots
-{% for item in backup_vols %}
+{% for item in lvm_self_backup_vols %}
 ExecStopPost=/usr/bin/umount /dev/{{ item.vg_name }}/{{ item.lv_name }}_snap
 {% endfor %}
 
 # Remove snapshots
-{% for item in backup_vols %}
+{% for item in lvm_self_backup_vols %}
 ExecStopPost=/usr/sbin/lvremove -f {{ item.vg_name }}/{{ item.lv_name }}_snap
 {% endfor %}
 
@@ -30,20 +30,20 @@ function backup () {(
 -v "$HOME/.cache/duplicity/:/archive" \
 -v "/root/.gnupg/:/root/.gnupg/" \
 -v "/backup_snapshots/:/backup/:ro" \
--e "PASSPHRASE={{ backup_gpg.passphrase }}" \
+-e "PASSPHRASE={{ lvm_self_backup_gpg.passphrase }}" \
 --hostname {{ ansible_fqdn }} \
-{{ backup_duplicity_image }}:{{ backup_duplicity_image_version }} \
+{{ lvm_self_backup_duplicity_image }}:{{ lvm_self_backup_duplicity_image_version }} \
 --full-if-older-than 1M \
 --progress \
 --progress-rate 60 \
---sign-key "{{ backup_gpg.sign_key | default(backup_gpg.id) }}" \
-{% for encryption_key in backup_gpg.encryption_keys | default([backup_gpg.id]) %}
+--sign-key "{{ lvm_self_backup_gpg.sign_key | default(lvm_self_backup_gpg.id) }}" \
+{% for encryption_key in lvm_self_backup_gpg.encryption_keys | default([lvm_self_backup_gpg.id]) %}
 --encrypt-key "{{ encryption_key }}" \
 {% endfor %}
 /backup/ \
-"{{ backup_target }}"
+"{{ lvm_self_backup_target }}"
 
-{% if backup_retention.incremental.enabled %}
+{% if lvm_self_backup_retention.incremental.enabled %}
 echo "Remove old incremental backups"
 docker run --name="duplicity-backup-lvm" \
 --security-opt "label=disable" \
@@ -53,12 +53,12 @@ function backup () {(
 --rm \
 -v "$HOME/.cache/duplicity/:/archive" \
 --hostname {{ ansible_fqdn }} \
-{{ backup_duplicity_image }}:{{ backup_duplicity_image_version }} \
-remove-all-inc-of-but-n-full {{ backup_retention.incremental.count }} --force \
-"{{ backup_target }}"
+{{ lvm_self_backup_duplicity_image }}:{{ lvm_self_backup_duplicity_image_version }} \
+remove-all-inc-of-but-n-full {{ lvm_self_backup_retention.incremental.count }} --force \
+"{{ lvm_self_backup_target }}"
 {% endif %}
 
-{% if backup_retention.full.enabled %}
+{% if lvm_self_backup_retention.full.enabled %}
 echo "Remove old full backups"
 docker run --name="duplicity-backup-lvm" \
 --security-opt "label=disable" \
@@ -68,9 +68,9 @@ function backup () {(
 --rm \
 -v "$HOME/.cache/duplicity/:/archive" \
 --hostname {{ ansible_fqdn }} \
-{{ backup_duplicity_image }}:{{ backup_duplicity_image_version }} \
-remove-all-but-n-full {{ backup_retention.full.count }} --force \
-"{{ backup_target }}"
+{{ lvm_self_backup_duplicity_image }}:{{ lvm_self_backup_duplicity_image_version }} \
+remove-all-but-n-full {{ lvm_self_backup_retention.full.count }} --force \
+"{{ lvm_self_backup_target }}"
 {% endif %}
 )}
 
@@ -82,8 +82,8 @@ if [ $success -gt 0 ]; then
 echo "ERROR: An error occured during backup! $success"
 fi
 
-echo $success > {{ backup_status_directory }}/backup.status
+echo $success > {{ lvm_self_backup_status_directory }}/backup.status
 
-chown {{ backup_status_directory_uid }}:{{ backup_status_directory_gid }} {{ backup_status_directory }}/backup.status
+chown {{ lvm_self_backup_status_directory_uid }}:{{ lvm_self_backup_status_directory_gid }} {{ lvm_self_backup_status_directory }}/backup.status
 
 exit $success
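The lint rule that motivated the rename is not part of this diff; assuming it is ansible-lint's var-naming check, which expects role variables to start with the role name (here lvm_self_backup_), a minimal .ansible-lint sketch related to it could look like this — the profile value and rule id are assumptions, not repository content:

# .ansible-lint — sketch only, assumes ansible-lint is the linter in use
profile: production
# Renaming the variables satisfies the check; skipping it instead would look like:
# skip_list:
#   - var-naming[no-role-prefix]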