diff --git a/.ansible-lint-ignore b/.ansible-lint-ignore
new file mode 100644
index 0000000000000000000000000000000000000000..2c6e4f10a1b2d178f49657941e94ed9571d7ef93
--- /dev/null
+++ b/.ansible-lint-ignore
@@ -0,0 +1,194 @@
+# This file contains ignored rule violations for ansible-lint
+playbooks/celerity.yml name[casing]
+playbooks/live/functions/create-live-app.yml name[template]
+playbooks/live/live.yml name[play]
+playbooks/live/subplays/ha-case.yml name[casing]
+playbooks/live/subplays/ha-case.yml name[play]
+playbooks/live/subplays/standard-case.yml name[play]
+playbooks/mediacache.yml name[casing]
+playbooks/mediacache/mediacache.yml name[casing]
+playbooks/mediaimport.yml name[casing]
+playbooks/mediaserver.yml name[casing]
+playbooks/mediavault/deploy.yml name[casing]
+playbooks/mediavault/ressources/add_backup_task.yml name[casing]
+playbooks/mediavault/ressources/add_backup_task.yml name[template]
+playbooks/mediaworker.yml name[casing]
+playbooks/mirismanager.yml name[casing]
+playbooks/monitor/all.yml name[play]
+playbooks/monitor/msmonitor.yml name[casing]
+playbooks/monitor/msmonitor.yml role-name[path]
+playbooks/monitor/munin_node.yml role-name[path]
+playbooks/monitor/munin_server.yml name[casing]
+playbooks/monitor/munin_server.yml role-name[path]
+playbooks/netcapture.yml name[casing]
+playbooks/postgres-ha.yml name[casing]
+playbooks/postgres-maintenance.yml name[play]
+playbooks/postgres-maintenance/fenced_to_standby.yml name[casing]
+playbooks/postgres-maintenance/rephacheck_status.yml name[casing]
+playbooks/postgres-maintenance/restart_repmgrd.yml name[casing]
+playbooks/postgres-maintenance/standby_to_primary.yml name[casing]
+playbooks/postgres.yml name[casing]
+playbooks/site.yml name[casing]
+playbooks/site.yml name[play]
+playbooks/tests/data-partition.yml name[casing]
+playbooks/tests/exec-tester.yml name[casing]
+playbooks/tests/firewall-rules.yml name[casing]
+playbooks/tests/ressources/firewall/listen.yml name[casing]
+playbooks/tests/ressources/firewall/listen.yml name[template]
+roles/celerity/defaults/main.yml var-naming[no-role-prefix]
+roles/celerity/handlers/main.yml name[casing]
+roles/celerity/tasks/configure/celerity-conf.yml name[casing]
+roles/celerity/tasks/install.yml name[casing]
+roles/celerity/vars/main.yml var-naming[no-role-prefix]
+roles/celerity/vars/main.yml yaml[line-length]
+roles/fail2ban/handlers/main.yml name[casing]
+roles/fail2ban/tasks/base.yml name[casing]
+roles/fail2ban/tasks/configure/f2b-configure.yml name[casing]
+roles/fail2ban/tasks/install.yml name[casing]
+roles/haproxy/handlers/main.yml name[casing]
+roles/haproxy/tasks/base.yml name[casing]
+roles/haproxy/tasks/install.yml name[casing]
+roles/haproxy/vars/main.yml var-naming[no-role-prefix]
+roles/letsencrypt/defaults/main.yml yaml[line-length]
+roles/letsencrypt/handlers/main.yml name[casing]
+roles/letsencrypt/tasks/configure/letsencrypt-configure.yml name[casing]
+roles/letsencrypt/tasks/install.yml name[casing]
+roles/live/defaults/main.yml yaml[truthy]
+roles/live/handlers/main.yml name[casing]
+roles/live/tasks/base.yml name[casing]
+roles/live/vars/main.yml var-naming[no-role-prefix]
+roles/lxc/handlers/main.yml name[casing]
+roles/lxc/tasks/base.yml name[casing]
+roles/mediacache/defaults/main.yml var-naming[no-role-prefix]
+roles/mediacache/handlers/main.yml name[casing]
+roles/mediacache/tasks/base.yml name[casing]
+roles/mediacache/tasks/configure/live.yml name[casing]
+roles/mediacache/tasks/configure/local-resolution.yml name[casing]
+roles/mediacache/tasks/configure/nginx-live-upstream.yml name[casing]
+roles/mediacache/tasks/configure/nginx-vhost.yml name[casing]
+roles/mediacache/tasks/configure/nginx-vod-upstream.yml name[casing]
+roles/mediacache/tasks/configure/vod.yml name[casing]
+roles/mediacache/vars/main.yml var-naming[no-role-prefix]
+roles/mediaimport/handlers/main.yml name[casing]
+roles/mediaimport/tasks/base.yml name[casing]
+roles/mediaimport/tasks/install.yml name[casing]
+roles/mediaimport/vars/main.yml var-naming[no-role-prefix]
+roles/mediaserver/defaults/main.yml var-naming[no-role-prefix]
+roles/mediaserver/defaults/main.yml yaml[line-length]
+roles/mediaserver/handlers/main.yml name[casing]
+roles/mediaserver/tasks/base.yml name[casing]
+roles/mediaserver/tasks/configure/email.yml name[casing]
+roles/mediaserver/tasks/configure/instance.yml name[casing]
+roles/mediaserver/tasks/configure/synchronize.yml name[casing]
+roles/mediaserver/tasks/install.yml name[casing]
+roles/mediaserver/vars/main.yml var-naming[no-role-prefix]
+roles/mediavault/defaults/main.yml var-naming[no-role-prefix]
+roles/mediavault/defaults/main.yml yaml[truthy]
+roles/mediavault/handlers/main.yml name[casing]
+roles/mediavault/tasks/base.yml name[casing]
+roles/mediavault/tasks/configure/mailer.yml name[casing]
+roles/mediavault/tasks/install.yml name[casing]
+roles/mediavault/vars/main.yml var-naming[no-role-prefix]
+roles/mediaworker/defaults/main.yml var-naming[no-role-prefix]
+roles/mediaworker/handlers/main.yml name[casing]
+roles/mediaworker/tasks/configure/celerity-conf.yml name[casing]
+roles/mediaworker/tasks/install.yml name[casing]
+roles/mediaworker/vars/main.yml var-naming[no-role-prefix]
+roles/mediaworker/vars/main.yml yaml[line-length]
+roles/mirismanager/defaults/main.yml var-naming[no-role-prefix]
+roles/mirismanager/handlers/main.yml name[casing]
+roles/mirismanager/tasks/base.yml name[casing]
+roles/mirismanager/tasks/configure/apt-cacher-proxy.yml name[casing]
+roles/mirismanager/tasks/configure/email.yml name[casing]
+roles/mirismanager/tasks/install.yml name[casing]
+roles/mirismanager/vars/main.yml var-naming[no-role-prefix]
+roles/monitor/msmonitor/handlers/main.yml name[casing]
+roles/monitor/msmonitor/tasks/install.yml name[casing]
+roles/monitor/msmonitor/vars/main.yml var-naming[no-role-prefix]
+roles/monitor/munin_node/defaults/main.yml var-naming[no-role-prefix]
+roles/monitor/munin_node/handlers/main.yml name[casing]
+roles/monitor/munin_node/tasks/base.yml name[casing]
+roles/monitor/munin_node/tasks/configure/main-configuration.yml name[casing]
+roles/monitor/munin_node/tasks/install.yml name[casing]
+roles/monitor/munin_node/vars/main.yml var-naming[no-role-prefix]
+roles/monitor/munin_server/defaults/main.yml var-naming[no-role-prefix]
+roles/monitor/munin_server/handlers/main.yml name[casing]
+roles/monitor/munin_server/tasks/base.yml name[casing]
+roles/monitor/munin_server/tasks/configure/main-configuration.yml name[casing]
+roles/monitor/munin_server/tasks/install.yml name[casing]
+roles/monitor/munin_server/vars/main.yml var-naming[no-role-prefix]
+roles/netcapture/defaults/main.yml yaml[truthy]
+roles/netcapture/tasks/configure/main-configure.yml name[casing]
+roles/netcapture/tasks/configure/miris-configure.yml name[casing]
+roles/netcapture/tasks/install.yml name[casing]
+roles/netcapture/vars/main.yml yaml[truthy]
+roles/nftables/tasks/install.yml name[casing]
+roles/nginx/handlers/main.yml name[casing]
+roles/nginx/tasks/base.yml name[casing]
+roles/nginx/tasks/install.yml name[casing]
+roles/postfix/handlers/main.yml name[casing]
+roles/postfix/tasks/configure/aliases.yml name[casing]
+roles/postfix/tasks/configure/mailname.yml name[casing]
+roles/postfix/tasks/configure/postfix_authentication.yml name[casing]
+roles/postfix/tasks/configure/postfix_generic.yml name[casing]
+roles/postfix/tasks/configure/postfix_main.yml name[casing]
+roles/postfix/tasks/configure/postfix_virtual.yml name[casing]
+roles/postfix/tasks/install.yml name[casing]
+roles/postgres-ha role-name
+roles/postgres-ha/handlers/main.yml name[casing]
+roles/postgres-ha/tasks/base.yml name[casing]
+roles/postgres-ha/tasks/base/rephacheck.yml name[casing]
+roles/postgres-ha/tasks/base/rephacheck.yml var-naming[no-role-prefix]
+roles/postgres-ha/tasks/base/repmgr.yml name[casing]
+roles/postgres-ha/tasks/base/swappiness.yml name[casing]
+roles/postgres-ha/tasks/configure.yml name[casing]
+roles/postgres-ha/tasks/configure/postgres-role.yml name[casing]
+roles/postgres-ha/tasks/configure/postgres-role.yml var-naming[no-role-prefix]
+roles/postgres-ha/tasks/configure/register-primary.yml name[casing]
+roles/postgres-ha/tasks/configure/register-standby.yml name[casing]
+roles/postgres-ha/tasks/configure/register-witness.yml name[casing]
+roles/postgres-ha/tasks/configure/ssh-key.yml name[casing]
+roles/postgres-ha/tasks/install.yml name[casing]
+roles/postgres-ha/vars/main.yml yaml[line-length]
+roles/postgres/defaults/main.yml var-naming[no-role-prefix]
+roles/postgres/handlers/main.yml name[casing]
+roles/postgres/tasks/base.yml name[casing]
+roles/postgres/tasks/base/logrotate.yml name[casing]
+roles/postgres/tasks/configure/authentication.yml name[casing]
+roles/postgres/tasks/configure/custom-settings.yml name[casing]
+roles/postgres/tasks/configure/databases.yml name[casing]
+roles/postgres/tasks/configure/users.yml name[casing]
+roles/postgres/tasks/install.yml name[casing]
+roles/postgres/vars/main.yml var-naming[no-role-prefix]
+roles/proxy/tasks/configure/proxy-configure.yml name[casing]
+roles/shared/handlers/celerity.yml name[casing]
+roles/shared/handlers/nftables.yml name[casing]
+roles/shared/tasks/celerity_base_config.yml name[casing]
+roles/shared/tasks/celerity_base_config.yml name[template]
+roles/sysconfig/defaults/main.yml var-naming[no-role-prefix]
+roles/sysconfig/defaults/main.yml yaml[line-length]
+roles/sysconfig/defaults/main.yml yaml[truthy]
+roles/sysconfig/handlers/main.yml name[casing]
+roles/sysconfig/tasks/base/apt_ubicast.yml name[casing]
+roles/sysconfig/tasks/base/apt_ubicast.yml name[template]
+roles/sysconfig/tasks/base/logs.yml name[casing]
+roles/sysconfig/tasks/base/prompt.yml name[casing]
+roles/sysconfig/tasks/base/ssh-client.yml name[casing]
+roles/sysconfig/tasks/base/ssh-server.yml name[casing]
+roles/sysconfig/tasks/base/unattended_upgrades.yml name[casing]
+roles/sysconfig/tasks/configure/locale.yml name[casing]
+roles/sysconfig/tasks/configure/ntp.yml name[casing]
+roles/sysconfig/tasks/configure/proxy.yml name[casing]
+roles/sysconfig/tasks/configure/timezone.yml name[casing]
+roles/sysconfig/tasks/install.yml name[casing]
+roles/sysconfig/vars/main.yml var-naming[no-role-prefix]
+roles/sysuser/defaults/main.yml var-naming[no-role-prefix]
+roles/sysuser/defaults/main.yml yaml[truthy]
+roles/sysuser/tasks/base/sudoers.yml name[casing]
+roles/sysuser/tasks/common/dotfiles.yml name[template]
+roles/sysuser/tasks/configure/create_user.yml name[template]
+roles/sysuser/tasks/install.yml name[casing]
+roles/sysuser/vars/main.yml var-naming[no-role-prefix]
+roles/tester/defaults/main.yml var-naming[no-role-prefix]
+roles/tester/tasks/configure/tester-configure.yml name[casing]
+roles/tester/tasks/install.yml name[casing]
diff --git a/.lint/ansible-lint.conf b/.lint/ansible-lint.conf
index 5ef088041ab1f5110870677255c8b95a7eb7ab58..78ce6b741ffae972fbd766ceef38b5c6b6118f79 100644
--- a/.lint/ansible-lint.conf
+++ b/.lint/ansible-lint.conf
@@ -7,11 +7,12 @@ exclude_paths:
   - .cache/
   - ${HOME}/.cache/
 
-skip_list:
-  - role-name                  # Skip role name pattern verification ("-" should not be used)
-  - name[play]                 # Skip the rule dictating that all play should have a name
-  - name[casing]               # Skip the rule dictating that all task name should begin with uppercase
-  - name[template]             # Skip forcing to use jinja var at the end of a task name
-  - var-naming[no-role-prefix] # Skip necessity to prefix role vars with role name
+# Moved into the ".ansible-lint-ignore" file
+#skip_list:
+#  - role-name                  # Skip role name pattern verification ("-" should not be used)
+#  - name[play]                 # Skip the rule dictating that all play should have a name
+#  - name[casing]               # Skip the rule dictating that all task name should begin with uppercase
+#  - name[template]             # Skip forcing to use jinja var at the end of a task name
+#  - var-naming[no-role-prefix] # Skip necessity to prefix role vars with role name
 
 ...
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 03103daceaf4c4e097aeb2f25ae325aa8c5a9e7b..5f68b592147e6ea593651710794e12240013cb7e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,25 +1,40 @@
+# 2024-02-16
+
+* Rework roles in 3 distinct stages. Stages can be used with the ansible tags of the same name.
+  * `install` for package installations,
+  * `base` for basic configuration (without variable),
+  * `configure` for case specific configurations.  
+* Remove some unused variables in roles.
+* Migrate variables from roles `defaults` to `vars`.
+* Add variables to `munin_server` and `munin_node` roles to allow a standalone installation of the roles.
+* Add documentation for ansible `vars` and `default`.
+
 # 2024-02-15
 
 * Remove obsolete `DATA_DIRS` configuration in Nudgis Front configuration template
 * Add `MSCONTROLLER_LOCKS_DIR` configuration in Nudgis Front configuration template (use the `nudgis_front_instances_dir` value as default value)
 
+# 2024-01-24
+
+* Change the firewall solution from `ferm`/`iptables` to `nftables`.
+
 # 2023-10-23
 
-Remove benchmark solution deployment.
+* Remove benchmark solution deployment.
 
 # 2023-10-20
 
-Avoid multiple `base` role execution.  
+* Avoid multiple `base` role execution.  
 Now you have to create a meta group `[base:children]` containing all defined groups (`mediaserver`, `mirismanager`, ... see example inventories).
 
 # 2023-10-18
 
-Ansible installation documentation has been enhanced.  
+* Ansible installation documentation has been enhanced.  
 Makefile has been removed in favor of the more complete `ansible` and `ansible-playbook` default commands.
 
 # 2023-08-16
 
-Many changes have been made with the arrival of debian 12. 
+* Many changes have been made with the arrival of debian 12.  
 Variables have been completely restructured.
 
 To help you make the transition : 
diff --git a/doc/deploy.md b/doc/deploy.md
index c8d5e493729e5dedfa4e53f8576ac8aa1ad6052b..35d12b7c4511b3929c6255aa1a67ff066d050ca7 100644
--- a/doc/deploy.md
+++ b/doc/deploy.md
@@ -17,26 +17,11 @@ To deploy all components, execute:
 ansible-playbook -i inventories/customer playbooks/site.yml 
 ```
 
-If you want to limit and deploy specific part, you can add a `tag`:
+Additionally, each role is split into 3 tags:
+* "install" to install the required application packages
+* "base" to do the base configuration of the application
+* "configure" to deploy specific configurations for the role
 
-```sh
-ansible-playbook -i inventories/customer -t <tag> playbooks/site.yml 
-```
-
-The avalaible tags are:
-
-| Component     | Tag            |
-|---------------|----------------|
-| mediaserver   | `server`       |
-| mediaworker   | `worker`       |
-| mirismanager  | `manager`      |
-| mediaimport   | `import`       |
-| mediavault    | `vault`        |
-| celerity      | `celerity`     |
-| ...           | ...            |
+You can limit your deployment to these tags by adding `--tags <tag_name>` to your ansible command.
 
-
-To view all tags avalaible, run: 
-```
-awk '/tags:/ && !/always/ {print $2}' ./playbooks/site.yml
-```
+To personalise components to install/configure during the deployment, the best method is to duplicate and edit the `site.yml` playbook to suit your needs.
diff --git a/playbooks/base.yml b/playbooks/base.yml
index 5c2719e68d9eee2b9a1c6af8249215ae0af681a1..207aae30a1fb77634b01cef49f21c535f95d9c29 100755
--- a/playbooks/base.yml
+++ b/playbooks/base.yml
@@ -3,7 +3,6 @@
 
 - name: BASE
   hosts: base
-  tags: all
   roles:
     - base
 
diff --git a/playbooks/celerity.yml b/playbooks/celerity.yml
index 62277e88887a3b23221ef0202242982ed2a31be5..ce00bb099d7984c4f2ba0bfb15d83e268f544785 100755
--- a/playbooks/celerity.yml
+++ b/playbooks/celerity.yml
@@ -3,7 +3,6 @@
 
 - name: CELERITY SERVER
   hosts: celerity
-  tags: celerity
   roles:
     - celerity
   post_tasks:
diff --git a/playbooks/firewall.yml b/playbooks/firewall.yml
index a6999f8185f906ebcaf1b0ded8053ff6b6cb56a6..8941d9175f05931fb5f1554ca9a5ae4a4f2ec047 100755
--- a/playbooks/firewall.yml
+++ b/playbooks/firewall.yml
@@ -3,7 +3,6 @@
 
 - name: FIREWALL
   hosts: firewall
-  tags: firewall
   roles:
     - nftables
 
diff --git a/playbooks/letsencrypt.yml b/playbooks/letsencrypt.yml
index 571d59282b9a0bc534ad935fa899d4d4cf98938f..191415079c09742fb8430e906b10ffe64f3b92c2 100755
--- a/playbooks/letsencrypt.yml
+++ b/playbooks/letsencrypt.yml
@@ -3,7 +3,6 @@
 
 - name: Let's encrypt
   hosts: all
-  tags: all
   roles:
     - letsencrypt
 
diff --git a/playbooks/live/functions/create-live-app.yml b/playbooks/live/functions/create-live-app.yml
index ed68a3f820a034d45a5add6a640249fa3f9c514a..e84b5bb64d67de031c3b1c142516f14e68d8938e 100644
--- a/playbooks/live/functions/create-live-app.yml
+++ b/playbooks/live/functions/create-live-app.yml
@@ -44,7 +44,7 @@
 
     - name: Extracting the application secret
       ansible.builtin.set_fact:
-        live_secret: "{{ ms_live_config.content | b64decode | from_json | json_query('RTMP_APP') }}"
+        live_secret: "{{ (ms_live_config.content | b64decode | from_json).RTMP_APP }}"
       when: ms_conf_live.stat.exists
 
     - name: Declaring the application secret
@@ -97,7 +97,10 @@
       failed_when: false
 
     - name: (Re)create the RTMP app configuration
-      notify: Reload nginx
+      notify:
+        - Reload nginx
+        - Setup munin-node plugins link
+        - Restart munin-node service
       ansible.builtin.command:
         cmd: ubicast-livectl add {{ live_app_name }} {{ hostvars['localhost'].live_app_secret }}
         creates: /etc/nginx/rtmp.d/{{ live_app_name }}.conf
@@ -127,6 +130,18 @@
         name: nginx
         state: reloaded
 
+    - name: Setup munin-node plugins link
+      ansible.builtin.shell:
+        munin-node-configure --shell --remove-also 2>&1 | sh -x  # noqa risky-shell-pipe
+      # sh -x print executed cmd to stderr
+      register: munin_plugin_linked
+      changed_when: munin_plugin_linked.stderr | length > 0
+
+    - name: Restart munin-node service
+      ansible.builtin.service:
+        name: munin-node
+        state: restarted
+
 - name: MediaServer(s) - "{{ live_app_name }}" live application configuration
   hosts: mediaserver
   gather_facts: false
diff --git a/playbooks/live/live.yml b/playbooks/live/live.yml
index ab3afe3659ca058fee4b3a001232714d7b73b7d4..05bb1257e0d59803ca2cff3df6d62a36c0c96452 100644
--- a/playbooks/live/live.yml
+++ b/playbooks/live/live.yml
@@ -3,16 +3,17 @@
 - name: LIVE
   hosts: live
   gather_facts: false
-  tags: live
   roles:
     - live
 
 - import_playbook: subplays/standard-case.yml
   # playbook target mediaserver and live hosts
   when: groups['live'] | d('') | length >= 1 and ( hostvars[groups['live'][0]].live_ha is undefined or hostvars[groups['live'][0]].live_ha == false )
+  tags: configure
 
 - import_playbook: subplays/ha-case.yml
   # playbook target mediaserver and live hosts
   when: groups['live'] | d('') | length >= 1 and ( hostvars[groups['live'][0]].live_ha is defined and hostvars[groups['live'][0]].live_ha == true )
+  tags: configure
 
 ...
diff --git a/playbooks/mediacache.yml b/playbooks/mediacache.yml
index c17acc27baa336af666b5b7c0c615bac57e92c2b..c166af0356ad97af044d175da4453d3639d3ef47 100755
--- a/playbooks/mediacache.yml
+++ b/playbooks/mediacache.yml
@@ -3,7 +3,6 @@
 
 - name: MEDIACACHE
   hosts: mediacache
-  tags: mediacache
   roles:
     - mediacache
   post_tasks:
diff --git a/playbooks/mediacache/mediacache.yml b/playbooks/mediacache/mediacache.yml
index 21ee8d0e8bbf85827ddaf5ee8adc75c05e2584d7..cb904b5f6a889b818eac17bbc008e4db99bd50f1 100644
--- a/playbooks/mediacache/mediacache.yml
+++ b/playbooks/mediacache/mediacache.yml
@@ -2,13 +2,13 @@
 
 - name: MEDIACACHE
   hosts: mediacache
-  tags: mediacache
   roles:
     - mediacache
   tasks:
     - name: Getting the IP to trust in term of securelink
       ansible.builtin.set_fact:
         securelink_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+      tags: configure
 
     - name: authorize mediacache on mediaserver
       notify: restart nginx on mediaservers
@@ -19,6 +19,7 @@
       delegate_to: "{{ item }}"
       delegate_facts: true
       loop: "{{ groups['mediaserver'] }}"
+      tags: configure
 
   handlers:
     - name: restart nginx on mediaservers
diff --git a/playbooks/mediaimport.yml b/playbooks/mediaimport.yml
index 60016ca70b48938ff035d3cfc80ff6c0998e445d..618d30903d85f3c7528a847a2bd85c09d275a3f1 100755
--- a/playbooks/mediaimport.yml
+++ b/playbooks/mediaimport.yml
@@ -3,7 +3,6 @@
 
 - name: MEDIAIMPORT
   hosts: mediaimport
-  tags: mediaimport
   roles:
     - mediaimport
   post_tasks:
diff --git a/playbooks/mediaserver.yml b/playbooks/mediaserver.yml
index ab8b5d6db6b371877a2f196e4495ba0d33292349..3f3778062ce133cc94b1401fea81d58c17938fee 100755
--- a/playbooks/mediaserver.yml
+++ b/playbooks/mediaserver.yml
@@ -3,7 +3,6 @@
 
 - name: MEDIASERVER
   hosts: mediaserver
-  tags: mediaserver
   roles:
     - mediaserver
   post_tasks:
diff --git a/playbooks/mediaworker.yml b/playbooks/mediaworker.yml
index 6649a6ffeecde04595d5938c2434252906aa866c..924136543af6e3b993ff407f5345f72f05bdfbe5 100755
--- a/playbooks/mediaworker.yml
+++ b/playbooks/mediaworker.yml
@@ -3,7 +3,6 @@
 
 - name: MEDIAWORKER
   hosts: mediaworker
-  tags: mediaworker
   roles:
     - mediaworker
   post_tasks:
diff --git a/playbooks/mirismanager.yml b/playbooks/mirismanager.yml
index 771fe6916d5c308e367db1c4bbc6f8bb0b534e55..d3df46c20ff157a3030f096bc4b617321169a5bf 100755
--- a/playbooks/mirismanager.yml
+++ b/playbooks/mirismanager.yml
@@ -3,7 +3,6 @@
 
 - name: MIRIS MANAGER
   hosts: mirismanager
-  tags: mirismanager
   roles:
     - mirismanager
   post_tasks:
diff --git a/playbooks/netcapture.yml b/playbooks/netcapture.yml
index 5bceaf40106cfec058bac5921e4792e67673c1a0..d1244788bcea085a58f36c9e5a2001b56a8afb15 100755
--- a/playbooks/netcapture.yml
+++ b/playbooks/netcapture.yml
@@ -3,7 +3,6 @@
 
 - name: NETCAPTURE
   hosts: netcapture
-  tags: netcapture
   roles:
     - netcapture
   post_tasks:
diff --git a/playbooks/postfix.yml b/playbooks/postfix.yml
deleted file mode 100755
index 719fba051f559046933342f416bab88ee0a94377..0000000000000000000000000000000000000000
--- a/playbooks/postfix.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: POSTFIX
-  hosts: all
-  tags: all
-  roles:
-    - conf
-    - postfix
-
-...
diff --git a/playbooks/postgres-ha.yml b/playbooks/postgres-ha.yml
index 0e79d651f07af20285b535d09cb877266d25b75c..5ea790facd59d92f2287112a935b7dae8f63d906 100755
--- a/playbooks/postgres-ha.yml
+++ b/playbooks/postgres-ha.yml
@@ -3,7 +3,6 @@
 
 - name: POSTGRES HA
   hosts: postgres
-  tags: postgres
   pre_tasks:
     - name: check that repmgr_node_id is set
       ansible.builtin.assert:
@@ -26,7 +25,6 @@
 
 - name: POSTGRES HA CLIENTS
   hosts: mediaserver
-  tags: [postgres, mediaserver]
   # pre_tasks:
   #   - name: check that haproxy is configured
   #     ansible.builtin.assert:
diff --git a/playbooks/postgres-maintenance.yml b/playbooks/postgres-maintenance.yml
index 4fd236e494d46f706f1c5601fd58b9b9503526a0..d6a3d388c9ae7b01351fcff7a7ac51151f4c2af0 100755
--- a/playbooks/postgres-maintenance.yml
+++ b/playbooks/postgres-maintenance.yml
@@ -3,10 +3,13 @@
 
 - import_playbook: postgres-maintenance/rephacheck_status.yml
   tags: [always]
+
 - import_playbook: postgres-maintenance/fenced_to_standby.yml
   tags: [never, fenced-to-standby]
+
 - import_playbook: postgres-maintenance/standby_to_primary.yml
   tags: [never, standby-to-primary]
+
 - import_playbook: postgres-maintenance/restart_repmgrd.yml
   tags: [never, restart-repmgrd, standby-to-primary]
 
diff --git a/playbooks/postgres.yml b/playbooks/postgres.yml
index 677d89418f4ccc6a3ffe14056f91cfcbf0522891..778212133a9b024764e2b1e4388a3e452003cdef 100755
--- a/playbooks/postgres.yml
+++ b/playbooks/postgres.yml
@@ -3,7 +3,6 @@
 
 - name: POSTGRESQL
   hosts: postgres
-  tags: postgres
   roles:
     - postgres
   post_tasks:
diff --git a/playbooks/site.yml b/playbooks/site.yml
index 7332bd40b7c00e25b88ed49748e73b7a991e2cca..dc158cbc15e1b8cd018993e96b7e4fca84376a8f 100755
--- a/playbooks/site.yml
+++ b/playbooks/site.yml
@@ -4,54 +4,50 @@
 - name: PYTHON
   hosts: all
   gather_facts: false
-
   tasks:
     - name: ensure python3 is installed
       register: python_install
       changed_when: "'es_pyinstall' in python_install.stdout_lines"
       ansible.builtin.raw: command -v python3 || echo es_pyinstall && apt update && apt install -y python3-minimal python3-apt iproute2
-      tags: always
+
+# Should be in a installation role (see #39283)
+- name: UPDATE PACKAGES
+  hosts: base
+  gather_facts: false
+  tags: install
+  tasks:
+    - name: Update apt cache
+      apt:
+        update_cache: true
+        cache_valid_time: 86400
+      changed_when: false
 
 - import_playbook: base.yml
-  tags: base
 
 - import_playbook: firewall.yml
-  tags: firewall
 
 - import_playbook: "{{ 'postgres-ha' if groups['postgres']|d('') | length > 1 else 'postgres' }}.yml"
-  tags: postgres
 
 - import_playbook: mirismanager.yml
-  tags: manager
 
 - import_playbook: celerity.yml
-  tags: celerity
 
 - import_playbook: mediaworker.yml
-  tags: worker
 
 - import_playbook: mediaserver.yml
-  tags: server
 
 - import_playbook: live/live.yml
-  tags: live
 
 - import_playbook: mediacache/mediacache.yml
-  tags: mediacache
 
 - import_playbook: mediavault/deploy.yml
-  tags: vault
 
 - import_playbook: mediaimport.yml
-  tags: import
 
 - import_playbook: netcapture/netcapture.yml
-  tags: netcapture
 
 - import_playbook: monitor/all.yml
-  tags: monitor
 
 - import_playbook: tester.yml
-  tags: tester
 
 ...
diff --git a/playbooks/tester.yml b/playbooks/tester.yml
index 4626bbc0b835f2dc0f8e630f68423b65d14adaab..e38acea5066341f0519f5050dcea4cbf1d43ccd4 100755
--- a/playbooks/tester.yml
+++ b/playbooks/tester.yml
@@ -3,7 +3,6 @@
 
 - name: Install UbiCast tester
   hosts: tester
-  tags: all
   roles:
     - tester
 
diff --git a/playbooks/users.yml b/playbooks/users.yml
deleted file mode 100755
index 5bbd75b39f5bd8048044444e0bdee33d33bfc1c8..0000000000000000000000000000000000000000
--- a/playbooks/users.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: USERS
-  hosts: all
-  tags: all
-  roles:
-    - conf
-    - sysuser
-
-...
diff --git a/roles/base/meta/main.yml b/roles/base/meta/main.yml
index e16b3f7b2f87bcf9a306272fa9f5110739749c25..cb50f1f076f5c17b9808166729a6241a6d1bc147 100644
--- a/roles/base/meta/main.yml
+++ b/roles/base/meta/main.yml
@@ -1,8 +1,6 @@
 ---
 
 dependencies:
-  - role: conf
-  - role: init
   - role: sysconfig
   - role: sysuser
   - role: postfix
diff --git a/roles/celerity/README.md b/roles/celerity/README.md
index a6dfbd0df0496455b2f3e0f4836dff44abb9979e..7f501a9c969ed8e82b132f3a27f32ce54bf8fa33 100644
--- a/roles/celerity/README.md
+++ b/roles/celerity/README.md
@@ -9,11 +9,6 @@ The celerity group is used to configure the hosts that balance the transcoding t
 
 Available variables are listed below, along with the descriptions and the default values.
 
-`nudgis_front_system_user`: Nudgis system username for the application portal, used as a JSON key in celerity config for nudgis API usage (see also nudgis_front_api_key) (Optional)
-```
-nudgis_front_system_user: "msuser"
-```
-
 `nudgis_front_api_key`: Nudgis API key, used to communicate with the nudgis portal
 ```
 nudgis_front_api_key: "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX"
diff --git a/roles/celerity/defaults/main.yml b/roles/celerity/defaults/main.yml
index 27e5f6f691c48ed410ade32ab9f55839644ed594..762a7802587db28922b410af8a067bfcedafee9b 100644
--- a/roles/celerity/defaults/main.yml
+++ b/roles/celerity/defaults/main.yml
@@ -1,8 +1,15 @@
 ---
-celerity_signing_key: "changeit"
-celerity_server_domain: "celerity.example.com"
 
-nudgis_front_domain: "nudgis.example.com"
+# Nudgis API key, used to communicate with the nudgis portal
 nudgis_front_api_key: "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX"
-nudgis_front_system_user: "msuser"
+
+# Defines the default deployed Nudgis portal domain (will be the URL of the portal when adding the HTTP(S) prefix)
+nudgis_front_domain: "nudgis.example.com"
+
+# IP or domain on which the celerity server service can be joined
+celerity_server_domain: "celerity.example.com"
+
+# Secret key shared between celerity server and workers for communications (should be the same everywhere for communication)
+celerity_signing_key: "changeit"
+
 ...
diff --git a/roles/celerity/tasks/base.yml b/roles/celerity/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fb96aaff0d855ecacec4c0a0699a29ed0e7f81c9
--- /dev/null
+++ b/roles/celerity/tasks/base.yml
@@ -0,0 +1,5 @@
+---
+
+- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+
+...
diff --git a/roles/celerity/tasks/configure.yml b/roles/celerity/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ff326c7aae80666999bf888a8e61ef08ebe1e729
--- /dev/null
+++ b/roles/celerity/tasks/configure.yml
@@ -0,0 +1,11 @@
+---
+
+- name: CONFIGURE CELERITY
+  ansible.builtin.include_tasks: "configure/celerity-conf.yml"
+  when:
+    - nudgis_front_api_key is defined
+    - nudgis_front_domain is defined
+    - celerity_server_domain is defined
+    - celerity_signing_key is defined
+
+...
diff --git a/roles/mediaworker/tasks/celerity_base_config.yml b/roles/celerity/tasks/configure/celerity-conf.yml
similarity index 83%
rename from roles/mediaworker/tasks/celerity_base_config.yml
rename to roles/celerity/tasks/configure/celerity-conf.yml
index fde98bae52859161bb826e0b2dfaf1f77519a587..aac3c49a96492345149ada9f845e4fc094dc874a 100644
--- a/roles/mediaworker/tasks/celerity_base_config.yml
+++ b/roles/celerity/tasks/configure/celerity-conf.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: copy celerity example configuration
-  notify: "restart {{ _celerity_service }}"
+  notify: "restart celerity-server"
   ansible.builtin.copy:
     remote_src: true
     src: /etc/celerity/config.example.py
@@ -10,7 +10,7 @@
     force: false
 
 - name: celerity base configuration
-  notify: "restart {{ _celerity_service }}"
+  notify: "restart celerity-server"
   ansible.builtin.replace:
     path: /etc/celerity/config.py
     regexp: '^(\s*)#?{{ item.name }}\s*=.*$'
@@ -21,7 +21,7 @@
   when: item.value != "" and item.value != '""'
 
 - name: celerity add nudgis portal
-  notify: "restart {{ _celerity_service }}"
+  notify: "restart celerity-server"
   ansible.builtin.command:
     cmd: >
       celerity-configurator add-portal
@@ -31,9 +31,9 @@
   register: celerity_add_portal
   changed_when: celerity_add_portal.stdout != 'The configuration file is already up to date.'
 
-- name: "ensure {{ _celerity_service }} is running"
+- name: "ensure celerity-server is running"
   ansible.builtin.service:
-    name: "{{ _celerity_service }}"
+    name: "celerity-server"
     enabled: true
     state: started
 
diff --git a/roles/init/tasks/main.yml b/roles/celerity/tasks/install.yml
similarity index 55%
rename from roles/init/tasks/main.yml
rename to roles/celerity/tasks/install.yml
index 5f4c3895a3216e64cf00118d6d4f4be7d79480c3..36270c8378f326332212155be70fd0ada9d7c798 100644
--- a/roles/init/tasks/main.yml
+++ b/roles/celerity/tasks/install.yml
@@ -1,18 +1,13 @@
 ---
 
-- name: install initial packages
+- name: celerity server install
+  notify: "reload fail2ban"
   ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
-    name: "{{ init_packages }}"
+    name: ubicast-celerity-server
   register: apt_status
   retries: 60
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
-- name: configure proxy
-  when: proxy_when is not defined or proxy_when != "end"
-  ansible.builtin.include_role:
-    name: proxy
-    allow_duplicates: true
-
 ...
diff --git a/roles/celerity/tasks/main.yml b/roles/celerity/tasks/main.yml
index f6458f786097662fe872349b711897bc87bdc9a2..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/celerity/tasks/main.yml
+++ b/roles/celerity/tasks/main.yml
@@ -1,23 +1,33 @@
 ---
 
-- name: celerity server install
-  notify: "reload fail2ban"
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: ubicast-celerity-server
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: configure celerity
-  ansible.builtin.include_tasks: celerity_base_config.yml
-  vars:
-    _celerity_service: "celerity-server"
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
-- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
-
-- name: flush handlers
-  ansible.builtin.meta: flush_handlers
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/celerity/vars/main.yml b/roles/celerity/vars/main.yml
index 45dfc0788516bdc7e26bf4dbfd8328131aa1e2ec..6b054628db9263bd6afc6b274a340864008e6377 100644
--- a/roles/celerity/vars/main.yml
+++ b/roles/celerity/vars/main.yml
@@ -1,5 +1,10 @@
 ---
 
-firewall_rules_files: ['celerity']
+# Nudgis system username for the application portal, used as a JSON key in celerity config for nudgis API usage (see also nudgis_front_api_key) **shared with mediaserver and mediaworker roles**
+nudgis_front_system_user: "msuser"
+
+# Group firewall rules filename, see roles/shared/files/nftables/
+firewall_rules_files:
+  - celerity
 
 ...
diff --git a/roles/conf/defaults/main.yml b/roles/conf/defaults/main.yml
deleted file mode 100644
index 5c10c6169af967232993af024501e4495606bcd8..0000000000000000000000000000000000000000
--- a/roles/conf/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# nothing
-...
diff --git a/roles/conf/tasks/main.yml b/roles/conf/tasks/main.yml
deleted file mode 100644
index 188820bd303e2689b3dd702e56b92f8542d751e8..0000000000000000000000000000000000000000
--- a/roles/conf/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: proxy
-  when:
-    - proxy_http | d()
-    - proxy_https | d()
-  ansible.builtin.include_role:
-    name: proxy
-
-- name: install requirements
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ conf_req_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: generate root ssh key pair
-  register: conf_root
-  ansible.builtin.user:
-    name: root
-    generate_ssh_key: true
-    ssh_key_type: ed25519
-    ssh_key_file: .ssh/id_ed25519
-...
diff --git a/roles/conf/vars/main.yml b/roles/conf/vars/main.yml
deleted file mode 100644
index 2fdd2bbbfe11963935b29ca6cf53bd51a6f78d7a..0000000000000000000000000000000000000000
--- a/roles/conf/vars/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-conf_req_packages:
-  - ca-certificates
-  - openssh-client
-...
diff --git a/roles/fail2ban/README.md b/roles/fail2ban/README.md
index 6411ae42a91b5ffceda30d391fe6d3151d440e85..db79f87963a72299e8e67143d3153ac91d08fc2d 100644
--- a/roles/fail2ban/README.md
+++ b/roles/fail2ban/README.md
@@ -1,27 +1,12 @@
 # Fail2ban
 ## Description
 
-Used by the "base" metagroup to provide and configure ban capabilities for various services
+The fail2ban group handles the installation and configuration of fail2ban.
 
 ## Role Variables
 
 Available variables are listed below, along with the descriptions and the default values.
 
-`fail2ban_ignoreip`: IP addresses exceptions (no monitored by fail2ban) (Optional)
-```
-fail2ban_ignoreip: "127.0.0.1/8 ::1"
-```
-
-`fail2ban_maxretry`: Number of acceptable failures before banning an IP (Optional)
-```
-fail2ban_maxretry: "5"
-```
-
-`fail2ban_bantime`: Duration of bans (Optional)
-```
-fail2ban_bantime: "10m"
-```
-
 `fail2ban_email_from`: Email sender of the fail2ban reports (Optional)
 ```
 fail2ban_email_from: "root@localhost"
@@ -31,8 +16,3 @@ fail2ban_email_from: "root@localhost"
 ```
 fail2ban_email_to: "noreply@example.com"
 ```
-
-`fail2ban_action`: Define the default action to do when a ban occurs ("action_mwl" to send whois and logs via email or "action_" for default) (Optional)
-```
-fail2ban_action: "action_mwl"
-```
diff --git a/roles/fail2ban/defaults/main.yml b/roles/fail2ban/defaults/main.yml
index f444c35aa3c31d044d8a76675091ffa9f29bec9c..263fbd0a16a6efb67ffb54c7b58a503bf80f7899 100644
--- a/roles/fail2ban/defaults/main.yml
+++ b/roles/fail2ban/defaults/main.yml
@@ -1,8 +1,9 @@
 ---
-fail2ban_ignoreip: "127.0.0.1/8 ::1"
-fail2ban_maxretry: "5"
-fail2ban_bantime: "10m"
+
+# Email sender of the fail2ban reports
 fail2ban_email_from: "root@localhost"
+
+# Email recipient of the fail2ban reports
 fail2ban_email_to: "noreply@example.com"
-fail2ban_action: "action_mwl"
+
 ...
diff --git a/roles/fail2ban/tasks/base.yml b/roles/fail2ban/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..63913f65c89e4996eddd03f7de289d855e94e3db
--- /dev/null
+++ b/roles/fail2ban/tasks/base.yml
@@ -0,0 +1,26 @@
+---
+
+- name: create fail2ban path-overrides
+  notify: restart fail2ban
+  ansible.builtin.copy:
+    dest: /etc/fail2ban/paths-overrides.local
+    mode: "644"
+    force: false
+    content: |
+      [DEFAULT]
+
+- name: configure sshd jail backend
+  notify: restart fail2ban
+  ansible.builtin.lineinfile:
+    path: /etc/fail2ban/paths-overrides.local
+    regexp: '^sshd_backend'
+    insertafter: '[DEFAULT]'
+    line: 'sshd_backend = systemd'
+
+- name: service
+  ansible.builtin.systemd:
+    name: fail2ban
+    enabled: true
+    state: started
+
+...
diff --git a/roles/fail2ban/tasks/configure.yml b/roles/fail2ban/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b770c46711f7cd875fed43a235e557f548181354
--- /dev/null
+++ b/roles/fail2ban/tasks/configure.yml
@@ -0,0 +1,9 @@
+---
+
+- name: CONFIGURE FAIL2BAN
+  ansible.builtin.include_tasks: "configure/f2b-configure.yml"
+  when:
+    - fail2ban_email_from is defined
+    - fail2ban_email_to is defined
+
+...
diff --git a/roles/fail2ban/tasks/configure/f2b-configure.yml b/roles/fail2ban/tasks/configure/f2b-configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..31b372240424309ef470f7b9f90a59fdce440cdf
--- /dev/null
+++ b/roles/fail2ban/tasks/configure/f2b-configure.yml
@@ -0,0 +1,10 @@
+---
+
+- name: jail defaults
+  notify: restart fail2ban
+  ansible.builtin.template:
+    src: jail.local.j2
+    dest: /etc/fail2ban/jail.local
+    mode: "644"
+
+...
diff --git a/roles/fail2ban/tasks/install.yml b/roles/fail2ban/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c04fb735b05ca4ddb97df8a1cef7924d74592d39
--- /dev/null
+++ b/roles/fail2ban/tasks/install.yml
@@ -0,0 +1,13 @@
+---
+
+- name: packages
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ fail2ban_packages }}"
+    state: present
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/fail2ban/tasks/main.yml b/roles/fail2ban/tasks/main.yml
index f6f94368e439bc55f78a25d189ee34ab99c70b47..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/fail2ban/tasks/main.yml
+++ b/roles/fail2ban/tasks/main.yml
@@ -1,43 +1,33 @@
 ---
 
-- name: packages
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ fail2ban_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: create fail2ban path-overrides
-  notify: restart fail2ban
-  ansible.builtin.copy:
-    dest: /etc/fail2ban/paths-overrides.local
-    mode: "644"
-    force: false
-    content: |
-      [DEFAULT]
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
-- name: configure sshd jail backend
-  notify: restart fail2ban
-  ansible.builtin.lineinfile:
-    path: /etc/fail2ban/paths-overrides.local
-    regexp: '^sshd_backend'
-    insertafter: '[DEFAULT]'
-    line: 'sshd_backend = systemd'
-
-- name: jail defaults
-  notify: restart fail2ban
-  ansible.builtin.template:
-    src: jail.local.j2
-    dest: /etc/fail2ban/jail.local
-    mode: "644"
-
-- name: service
-  ansible.builtin.systemd:
-    name: fail2ban
-    enabled: true
-    state: started
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/fail2ban/vars/main.yml b/roles/fail2ban/vars/main.yml
index 16534e5cf7aa6d4a74165e5464b1345591e8a516..219a875217e927cc8aa5b2ea892cd90c33bc8ffd 100644
--- a/roles/fail2ban/vars/main.yml
+++ b/roles/fail2ban/vars/main.yml
@@ -1,5 +1,20 @@
 ---
+
+# Packages required for the group. python3-systemd is used to read systemd journal
 fail2ban_packages:
   - fail2ban
-  - python3-systemd  # used to read systemd journal
+  - python3-systemd
+
+# IP addresses exceptions (not monitored by fail2ban)
+fail2ban_ignoreip: "127.0.0.1/8 ::1"
+
+# Number of acceptable failures before banning an IP
+fail2ban_maxretry: "5"
+
+# Duration of bans
+fail2ban_bantime: "10m"
+
+# Define the default action to do when a ban occurs ("action_mwl" to send whois and logs via email or "action_" for default)
+fail2ban_action: "action_mwl"
+
 ...
diff --git a/roles/haproxy/README.md b/roles/haproxy/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5cf78aebbfc1a190550d544efcb379ebb120361e
--- /dev/null
+++ b/roles/haproxy/README.md
@@ -0,0 +1,4 @@
+# Haproxy
+## Description
+
+Install and configure the HAProxy reverse-proxy/loadbalancer in front of PostgreSQL
diff --git a/roles/haproxy/tasks/base.yml b/roles/haproxy/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5af3d299f20a78b6b4d8f4baccfcc97add77cf7a
--- /dev/null
+++ b/roles/haproxy/tasks/base.yml
@@ -0,0 +1,13 @@
+---
+
+- name: configure
+  notify: reload haproxy
+  ansible.builtin.template:
+    src: haproxy.cfg.j2
+    dest: /etc/haproxy/haproxy.cfg
+    backup: true
+    mode: "644"
+
+- ansible.builtin.meta: flush_handlers # noqa name[missing]
+
+...
diff --git a/roles/haproxy/tasks/install.yml b/roles/haproxy/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..01543ad29c92018a7b3572ceb8d1fad0b35eba85
--- /dev/null
+++ b/roles/haproxy/tasks/install.yml
@@ -0,0 +1,12 @@
+---
+
+- name: install packages
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ hap_packages }}"
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
index 571ae67790cf2e25c85057d023b4e4fc49e74f8f..5489c96f5fdafb340fd2c1d566cb768977d6e6f9 100644
--- a/roles/haproxy/tasks/main.yml
+++ b/roles/haproxy/tasks/main.yml
@@ -1,22 +1,23 @@
 ---
 
-- name: install packages
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ hap_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: configure
-  notify: reload haproxy
-  ansible.builtin.template:
-    src: haproxy.cfg.j2
-    dest: /etc/haproxy/haproxy.cfg
-    backup: true
-    mode: "644"
-
-- ansible.builtin.meta: flush_handlers # noqa name[missing]
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
 ...
diff --git a/roles/haproxy/vars/main.yml b/roles/haproxy/vars/main.yml
index 7988cb1be98bd84357f86426a60be612d844a567..777fb0de5c2018f3ec6b107af62b387eed09f958 100644
--- a/roles/haproxy/vars/main.yml
+++ b/roles/haproxy/vars/main.yml
@@ -1,6 +1,9 @@
 ---
+
+# Packages required for the group
 hap_packages:
   - haproxy
   - rsyslog
   - logrotate
+
 ...
diff --git a/roles/init/README.md b/roles/init/README.md
deleted file mode 100644
index e99190af009915f4951ff835219a8cbd6e168b11..0000000000000000000000000000000000000000
--- a/roles/init/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Init
-## Description
-
-Used by the "base" metagroup to provide basic utilities and initialise a mandatory proxy if necessary
diff --git a/roles/init/vars/main.yml b/roles/init/vars/main.yml
deleted file mode 100644
index d54a05b9f4b817197b57553c67d988e7096824bd..0000000000000000000000000000000000000000
--- a/roles/init/vars/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-init_packages:
-  - apt-utils
-  - gnupg
-  - ssh-client
-  - sudo
-...
diff --git a/roles/letsencrypt/README.md b/roles/letsencrypt/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..13a130edb9e2d5272bbbdac8491d9b9e5b11be95
--- /dev/null
+++ b/roles/letsencrypt/README.md
@@ -0,0 +1,23 @@
+# Letsencrypt
+## Description
+
+Install and configure Let's Encrypt tools to generate and maintain Let's Encrypt SSL certificates for the webdomains
+
+## Role Variables
+
+Available variables are listed below, along with the descriptions and the default values.
+
+`letsencrypt_domains`: List the domains to configure with a Let's Encrypt certificate. If an empty list is provided, every domain found in nginx is configured. (Optional)
+```
+letsencrypt_domains: []
+```
+
+`letsencrypt_email`: Email of the Let's Encrypt SSL certificates administrator(s)
+```
+letsencrypt_email: "admin@example.com"
+```
+
+`letsencrypt_webroot`: Default Let's Encrypt web root folder for challenges publication (Optional)
+```
+letsencrypt_webroot: "/var/www/letsencrypt"
+```
diff --git a/roles/letsencrypt/defaults/main.yml b/roles/letsencrypt/defaults/main.yml
index 344c36ec6552e835256ea26015bebcee9283b158..0c0c93a64029a5c4d55a1b9c5f40037077a90410 100644
--- a/roles/letsencrypt/defaults/main.yml
+++ b/roles/letsencrypt/defaults/main.yml
@@ -1,7 +1,12 @@
 ---
 
+# List the domains to configure with a Let's Encrypt certificate. If an empty list is provided, every domain found in nginx is configured.
 letsencrypt_domains: []
-letsencrypt_webroot: "/var/www/letsencrypt"
+
+# Email of the Let's Encrypt SSL certificates administrator(s)
 letsencrypt_email: "admin@example.com"
 
+# Default Let's Encrypt web root folder for challenges publication
+letsencrypt_webroot: "/var/www/letsencrypt"
+
 ...
diff --git a/roles/letsencrypt/tasks/configure.yml b/roles/letsencrypt/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b1d8e29a06d6a0f6ef68abde6289b963719f7c73
--- /dev/null
+++ b/roles/letsencrypt/tasks/configure.yml
@@ -0,0 +1,10 @@
+---
+
+- name: CONFIGURE LETSENCRYPT
+  ansible.builtin.include_tasks: "configure/letsencrypt-configure.yml"
+  when:
+    - letsencrypt_domains is defined
+    - letsencrypt_email is defined
+    - letsencrypt_webroot is defined
+
+...
diff --git a/roles/letsencrypt/tasks/configure/letsencrypt-configure.yml b/roles/letsencrypt/tasks/configure/letsencrypt-configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2cc4c3b4777fd6190e8c73d4013caec4d9df697e
--- /dev/null
+++ b/roles/letsencrypt/tasks/configure/letsencrypt-configure.yml
@@ -0,0 +1,127 @@
+---
+
+- name: get all server_name values
+  when: letsencrypt_domains == []
+  changed_when: false
+  register: letsencryt_nginx_output
+  ansible.builtin.shell:
+    executable: /bin/bash
+    cmd: >
+      set -o pipefail;
+      nginx -T 2>&1 | grep -v localhost | grep -P '^\s+server_name\s+.*;$' | sed -r 's/\s+server_name\s+(.*);/\1/' | uniq
+
+- name: save result as list
+  when: letsencrypt_domains == []
+  ansible.builtin.set_fact:
+    letsencrypt_domains: "{{ letsencryt_nginx_output.stdout.split() }}"
+
+- name: save domains list in a file
+  register: letsencrypt_save_list
+  ansible.builtin.copy:
+    dest: /etc/letsencrypt/domains.txt
+    content: |
+      {% for domain in letsencrypt_domains %}
+      {{ domain }}
+      {% endfor %}
+    mode: "644"
+
+- name: create webroot directory
+  ansible.builtin.file:
+    path: "{{ letsencrypt_webroot }}"
+    state: directory
+    mode: "755"
+
+- name: create pre hook directory
+  ansible.builtin.file:
+    path: /etc/letsencrypt/renewal-hooks/pre
+    state: directory
+    mode: "755"
+
+- name: create pre hook script
+  ansible.builtin.copy:
+    dest: /etc/letsencrypt/renewal-hooks/pre/mkdir
+    mode: "0755"
+    content: |
+      #!/usr/bin/env bash
+      CERTBOT_DOCROOT=/var/www/letsencrypt
+      mkdir -p "$CERTBOT_DOCROOT"
+      chmod 755 "$CERTBOT_DOCROOT"
+
+- name: create deploy hook directory
+  ansible.builtin.file:
+    path: /etc/letsencrypt/renewal-hooks/deploy
+    state: directory
+    mode: "755"
+
+- name: create deploy hook script
+  ansible.builtin.copy:
+    dest: /etc/letsencrypt/renewal-hooks/deploy/nginx
+    mode: "0755"
+    content: |
+      #!/usr/bin/env bash
+      systemctl reload nginx
+
+- name: test generate certificates
+  when:
+    - letsencrypt_domains != []
+    - letsencrypt_save_list is changed
+  register: letsencrypt_dry_run
+  ignore_errors: true
+  changed_when: false
+  ansible.builtin.command:
+    cmd: >
+      certbot certonly
+        --dry-run
+        -n --agree-tos -m {{ letsencrypt_email }}
+        --webroot -w {{ letsencrypt_webroot }}
+        --expand
+        -d {{ letsencrypt_domains | join(',') }}
+
+- name: remove domains list file in case of failure
+  when: letsencrypt_dry_run is failed
+  ansible.builtin.file:
+    path: "{{ letsencrypt_save_list.dest }}"
+    state: absent
+
+- name: exit in case of failure
+  when: letsencrypt_dry_run is failed
+  ansible.builtin.fail:
+
+- name: generate certificates
+  notify: restart nginx
+  when:
+    - letsencrypt_domains != []
+    - letsencrypt_save_list is changed
+    - letsencrypt_dry_run is succeeded
+  ansible.builtin.command:
+    cmd: >
+      certbot certonly
+        -n --agree-tos -m {{ letsencrypt_email }}
+        --webroot -w {{ letsencrypt_webroot }}
+        --expand
+        -d {{ letsencrypt_domains | join(',') }}
+    creates: "/etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/privkey.pem"
+
+- name: update nginx certificate configuration
+  when:
+    - letsencrypt_domains != []
+    - letsencrypt_save_list is changed
+    - letsencrypt_dry_run is succeeded
+  notify: restart nginx
+  ansible.builtin.lineinfile:
+    path: /etc/nginx/conf.d/ssl_certificate.conf
+    regexp: 'ssl_certificate\s+([\w/\-\_\.]+);'
+    line: ssl_certificate /etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/fullchain.pem;
+
+- name: update nginx certificate key configuration
+  when:
+    - letsencrypt_domains != []
+    - letsencrypt_save_list is changed
+    - letsencrypt_dry_run is succeeded
+  notify: restart nginx
+  ansible.builtin.lineinfile:
+    path: /etc/nginx/conf.d/ssl_certificate.conf
+    regexp: 'ssl_certificate_key\s+([\w/\-\_\.]+);'
+    line: ssl_certificate_key /etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/privkey.pem;
+
+...
diff --git a/roles/letsencrypt/tasks/install.yml b/roles/letsencrypt/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d7d96d4480c865f7047634606c8c397bb41d73eb
--- /dev/null
+++ b/roles/letsencrypt/tasks/install.yml
@@ -0,0 +1,9 @@
+---
+
+- name: install certbot
+  ansible.builtin.package:
+    force_apt_get: true
+    install_recommends: false
+    name: certbot
+
+...
diff --git a/roles/letsencrypt/tasks/main.yml b/roles/letsencrypt/tasks/main.yml
index 711ebf43558323503a3a4a8afb2777a2492be882..4ca9b1ce59bf99a101c275a40afab2c0fc69e94a 100644
--- a/roles/letsencrypt/tasks/main.yml
+++ b/roles/letsencrypt/tasks/main.yml
@@ -1,133 +1,23 @@
 ---
 
-- name: install certbot
-  ansible.builtin.package:
-    force_apt_get: true
-    install_recommends: false
-    name: certbot
-
-- name: get all server_name values
-  when: letsencrypt_domains == []
-  changed_when: false
-  register: letsencryt_nginx_output
-  ansible.builtin.shell:
-    executable: /bin/bash
-    cmd: >
-      set -o pipefail;
-      nginx -T 2>&1 | grep -v localhost | grep -P '^\s+server_name\s+.*;$' | sed -r 's/\s+server_name\s+(.*);/\1/' | uniq
-
-- name: save result as list
-  when: letsencrypt_domains == []
-  ansible.builtin.set_fact:
-    letsencrypt_domains: "{{ letsencryt_nginx_output.stdout.split() }}"
-
-- name: save domains list in a file
-  register: letsencrypt_save_list
-  ansible.builtin.copy:
-    dest: /etc/letsencrypt/domains.txt
-    content: |
-      {% for domain in letsencrypt_domains %}
-      {{ domain }}
-      {% endfor %}
-    mode: "644"
-
-- name: create webroot directory
-  ansible.builtin.file:
-    path: "{{ letsencrypt_webroot }}"
-    state: directory
-    mode: "755"
-
-- name: create pre hook directory
-  ansible.builtin.file:
-    path: /etc/letsencrypt/renewal-hooks/pre
-    state: directory
-    mode: "755"
-
-- name: create pre hook script
-  ansible.builtin.copy:
-    dest: /etc/letsencrypt/renewal-hooks/pre/mkdir
-    mode: "0755"
-    content: |
-      #!/usr/bin/env bash
-      CERTBOT_DOCROOT=/var/www/letsencrypt
-      mkdir -p "$CERTBOT_DOCROOT"
-      chmod 755 "$CERTBOT_DOCROOT"
-
-- name: create deploy hook directory
-  ansible.builtin.file:
-    path: /etc/letsencrypt/renewal-hooks/deploy
-    state: directory
-    mode: "755"
-
-- name: create deploy hook script
-  ansible.builtin.copy:
-    dest: /etc/letsencrypt/renewal-hooks/deploy/nginx
-    mode: "0755"
-    content: |
-      #!/usr/bin/env bash
-      systemctl reload nginx
-
-- name: test generate certificates
-  when:
-    - letsencrypt_domains != []
-    - letsencrypt_save_list is changed
-  register: letsencrypt_dry_run
-  ignore_errors: true
-  changed_when: false
-  ansible.builtin.command:
-    cmd: >
-      certbot certonly
-        --dry-run
-        -n --agree-tos -m {{ letsencrypt_email }}
-        --webroot -w {{ letsencrypt_webroot }}
-        --expand
-        -d {{ letsencrypt_domains | join(',') }}
-
-- name: remove domains list file in case of failure
-  when: letsencrypt_dry_run is failed
-  ansible.builtin.file:
-    path: "{{ letsencrypt_save_list.dest }}"
-    state: absent
-
-- name: exit in case of failure
-  when: letsencrypt_dry_run is failed
-  ansible.builtin.fail:
-
-- name: generate certificates
-  notify: restart nginx
-  when:
-    - letsencrypt_domains != []
-    - letsencrypt_save_list is changed
-    - letsencrypt_dry_run is succeeded
-  ansible.builtin.command:
-    cmd: >
-      certbot certonly
-        -n --agree-tos -m {{ letsencrypt_email }}
-        --webroot -w {{ letsencrypt_webroot }}
-        --expand
-        -d {{ letsencrypt_domains | join(',') }}
-    creates: "/etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/privkey.pem"
-
-- name: update nginx certificate configuration
-  when:
-    - letsencrypt_domains != []
-    - letsencrypt_save_list is changed
-    - letsencrypt_dry_run is succeeded
-  notify: restart nginx
-  ansible.builtin.lineinfile:
-    path: /etc/nginx/conf.d/ssl_certificate.conf
-    regexp: 'ssl_certificate\s+([\w/\-\_\.]+);'
-    line: ssl_certificate /etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/fullchain.pem;
-
-- name: update nginx certificate key configuration
-  when:
-    - letsencrypt_domains != []
-    - letsencrypt_save_list is changed
-    - letsencrypt_dry_run is succeeded
-  notify: restart nginx
-  ansible.builtin.lineinfile:
-    path: /etc/nginx/conf.d/ssl_certificate.conf
-    regexp: 'ssl_certificate_key\s+([\w/\-\_\.]+);'
-    line: ssl_certificate_key /etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/privkey.pem;
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
+
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/live/defaults/main.yml b/roles/live/defaults/main.yml
index 4ba88a44293618714d3f82599ddf935adfd06019..4a7c8350bfad28a290160024f7cfa99016636b9e 100644
--- a/roles/live/defaults/main.yml
+++ b/roles/live/defaults/main.yml
@@ -1,5 +1,12 @@
 ---
-live_ha: false
+
+# Define if the deployment is an HA architecture (i.e. live domain is not handled by the nudgis frontend server)
+live_ha: false
+
+# Domain name of the live cluster (only if distinct live server(s) from MediaServer and live_ha variable is set to True)
 live_domain: "live.example.com"
+
+# Size of the tmpfs storing the live chunks (unit g or m and only if distinct live server(s) from MediaServer)
 live_tmpfs_size: "2048m"
+
 ...
diff --git a/roles/live/tasks/base.yml b/roles/live/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..014d6e3ed29fd1de8d0bf91123f5f5544ec547f0
--- /dev/null
+++ b/roles/live/tasks/base.yml
@@ -0,0 +1,17 @@
+---
+
+# not working with a tmpfs (mode=777, user=group=root)
+# - name: Changing the rights on the TMPFS directory
+#   notify: restart nginx
+#   ansible.builtin.file:
+#     path: /var/tmp/nginx-rtmp
+#     owner: nginx
+#     group: root
+#     mode: "0700"
+
+- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+...
diff --git a/roles/live/tasks/configure.yml b/roles/live/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..40f8073162876b6946b135600f69680cdd2171d3
--- /dev/null
+++ b/roles/live/tasks/configure.yml
@@ -0,0 +1,8 @@
+---
+
+- name: SETUP LIVE TMPFS PARTITION
+  ansible.builtin.include_tasks: "configure/tmpfs.yml"
+  when:
+    - live_tmpfs_size is defined
+
+...
diff --git a/roles/live/tasks/configure/tmpfs.yml b/roles/live/tasks/configure/tmpfs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..de1a08c68e00d7319cf5fd448f3a4879137d0c71
--- /dev/null
+++ b/roles/live/tasks/configure/tmpfs.yml
@@ -0,0 +1,12 @@
+---
+
+- name: TMPFS creation to store the live chunks
+  notify: restart nginx
+  ansible.posix.mount:
+    path: /var/tmp/nginx-rtmp
+    src: tmpfs
+    fstype: tmpfs
+    opts: defaults,size={{ live_tmpfs_size }}
+    state: mounted
+
+...
diff --git a/roles/live/tasks/install.yml b/roles/live/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..95fa504816b7be8e6d8d9ae4f737b179ef7fa309
--- /dev/null
+++ b/roles/live/tasks/install.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Live packages installation
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ live_packages }}"
+    state: present
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/live/tasks/main.yml b/roles/live/tasks/main.yml
index e0968abad6f041335add7d1ea51df377a7d34a5d..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/live/tasks/main.yml
+++ b/roles/live/tasks/main.yml
@@ -1,36 +1,33 @@
 ---
 
-- name: Live packages installation
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ live_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: TMPFS creation to store the live chunks
-  notify: restart nginx
-  ansible.posix.mount:
-    path: /var/tmp/nginx-rtmp
-    src: tmpfs
-    fstype: tmpfs
-    opts: defaults,size={{ live_tmpfs_size }}
-    state: mounted
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
-# not working with a tmpfs (mode=777, user=group=root)
-# - name: Changing the rights on the TMPFS directory
-#   notify: restart nginx
-#   ansible.builtin.file:
-#     path: /var/tmp/nginx-rtmp
-#     owner: nginx
-#     group: root
-#     mode: "0700"
-
-- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
-
-- name: flush handlers
-  ansible.builtin.meta: flush_handlers
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/live/vars/main.yml b/roles/live/vars/main.yml
index a5b2a9e37fd2271f811cbf26c5b5cb490c28f98b..6dcaded94d453b9d1c0fd09b3483f722778cffc2 100644
--- a/roles/live/vars/main.yml
+++ b/roles/live/vars/main.yml
@@ -1,8 +1,12 @@
 ---
 
+# Packages required for the group
 live_packages:
   - ubicast-live
 
-firewall_rules_files: ['http', 'rtmp']
+# Group firewall rules filename, see roles/shared/files/nftables/
+firewall_rules_files:
+  - http
+  - rtmp
 
 ...
diff --git a/roles/lxc/README.md b/roles/lxc/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7d041342f41fc6ed77b595a1f268f61cfb83fc80
--- /dev/null
+++ b/roles/lxc/README.md
@@ -0,0 +1,13 @@
+# Lxc
+## Description
+
+Used by netcapture for LXC installation/configuration
+
+## Role Variables
+
+Available variables are listed below, along with the descriptions and the default values.
+
+`lxc_network_type`: LXC network type
+```
+lxc_network_type: "masquerade_bridge"
+```
diff --git a/roles/lxc/defaults/main.yml b/roles/lxc/defaults/main.yml
index 4c7f5fdb72e2ee4fb9ead5c54e022411f9f8360e..b4a9e33c6c1da212ddda14fe79a05cf82e36a971 100644
--- a/roles/lxc/defaults/main.yml
+++ b/roles/lxc/defaults/main.yml
@@ -1,8 +1,6 @@
 ---
 
-# lxc_network_type possible value:
-#  - masquerade_bridge => independent private bridge
-#  - host_bridge => host shared network bridge
-lxc_network_type: masquerade_bridge
+# LXC network type
+lxc_network_type: "masquerade_bridge"
 
 ...
diff --git a/roles/lxc/tasks/base.yml b/roles/lxc/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b873969f02522b38204758c6a73813b868ee56c7
--- /dev/null
+++ b/roles/lxc/tasks/base.yml
@@ -0,0 +1,47 @@
+---
+
+- name: Host bridge configuration
+  when: lxc_network_type == 'host_bridge'
+  block:
+    - name: Masquerade bridge configuration disabling
+      notify: restart lxc-net
+      ansible.builtin.copy:
+        src: lxc-net.host_bridge
+        dest: /etc/default/lxc-net
+        mode: "644"
+
+    - name: Ask confirmation
+      ansible.builtin.pause:
+        prompt: |
+          -------------------------------------------------------------------------------------------
+          ! WARNING !
+          Host bridge configuration must be done manually, and named: br0
+          Documentation (section host device as bridge): https://wiki.debian.org/LXC/SimpleBridge
+          Continue (yes/no)
+          -------------------------------------------------------------------------------------------
+      register: confirm_continue
+      no_log: true
+
+    - name: check param is null or invalid
+      ansible.builtin.fail:
+        msg: 'Installation aborted'
+      when: not ((confirm_continue.user_input | bool) or (confirm_continue.user_input | length == 0))
+
+- name: Masquerade bridge configuration
+  when: lxc_network_type == 'masquerade_bridge'
+  block:
+    - name: Container network configuration
+      notify: restart lxc-net
+      ansible.builtin.copy:
+        src: lxc-net.masquerade_bridge
+        dest: /etc/default/lxc-net
+        mode: "644"
+
+- name: Default container configuration
+  notify: restart lxc
+  ansible.builtin.template:
+    src: lxc-default.j2
+    dest: /etc/lxc/default.conf
+    mode: "644"
+
+...
diff --git a/roles/lxc/tasks/install.yml b/roles/lxc/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1b685aac0a44bf48f20226060df4b9d7efe91d48
--- /dev/null
+++ b/roles/lxc/tasks/install.yml
@@ -0,0 +1,15 @@
+---
+
+- name: LXC packages installation
+  ansible.builtin.apt:
+    force_apt_get: true
+    name:
+      - lxc
+      - lxcfs
+      - bridge-utils
+    state: present
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/lxc/tasks/main.yml b/roles/lxc/tasks/main.yml
index c758b5e4795a01574a1a73d070c26461cc3ce4a9..5489c96f5fdafb340fd2c1d566cb768977d6e6f9 100644
--- a/roles/lxc/tasks/main.yml
+++ b/roles/lxc/tasks/main.yml
@@ -1,59 +1,23 @@
 ---
 
-- name: LXC packages installation
-  ansible.builtin.apt:
-    force_apt_get: true
-    name:
-      - lxc
-      - lxcfs
-      - bridge-utils
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: Host bridge configuration
-  when: lxc_network_type == 'host_bridge'
-  block:
-    - name: Masquerade bridge configuration disabling
-      notify: restart lxc-net
-      ansible.builtin.copy:
-        src: lxc-net.host_bridge
-        dest: /etc/default/lxc-net
-        mode: "644"
-
-    - name: Ask confirmation
-      ansible.builtin.pause:
-        prompt: |
-          -------------------------------------------------------------------------------------------
-          ! WARNING !
-          Host bridge configuration must be done manually, and named: br0
-          Documentation (section host device as bridge): https://wiki.debian.org/LXC/SimpleBridge
-          Continue (yes/no)
-          -------------------------------------------------------------------------------------------
-      register: confirm_continue
-      no_log: true
-
-    - name: check parm is null or invalid
-      ansible.builtin.fail:
-        msg: 'Installation aborted'
-      when: not ((confirm_continue.user_input | bool) or (confirm_continue.user_input | length == 0))
-
-- name: Masquerade bridge configuration
-  when: lxc_network_type == 'masquerade_bridge'
-  block:
-    - name: Container network configuration
-      notify: restart lxc-net
-      ansible.builtin.copy:
-        src: lxc-net.masquerade_bridge
-        dest: /etc/default/lxc-net
-        mode: "644"
-
-- name: Default container configuration
-  notify: restart lxc
-  ansible.builtin.template:
-    src: lxc-default.j2
-    dest: /etc/lxc/default.conf
-    mode: "644"
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
 ...
diff --git a/roles/mediacache/README.md b/roles/mediacache/README.md
index 35954ea17aaf0ce5541bd9be586fb91d0628fc48..f38c77601ab7a34d792d8a2e3f3135ceab6cb3ea 100644
--- a/roles/mediacache/README.md
+++ b/roles/mediacache/README.md
@@ -1,7 +1,7 @@
 # Mediacache
 ## Description
 
-The cache group is used to configure all hosts that will server as a proxy cache of live and/or vod.
+The Nudgis cache group is used to configure all hosts that will serve as a proxy cache of live and/or vod.
 
 ## Role Variables
 
diff --git a/roles/mediacache/defaults/main.yml b/roles/mediacache/defaults/main.yml
index 09db9281787571d5cadb9e91b7f4ea926773bd60..403f1d7ae1281df941e2a4722c52cc48ad592697 100644
--- a/roles/mediacache/defaults/main.yml
+++ b/roles/mediacache/defaults/main.yml
@@ -1,15 +1,24 @@
 ---
+
+# URL of the Nudgis Cache vhost
 cache_domain: "cache.example.com"
+
+# URL of Nudgis Front cluster
 nudgis_front_domain: "nudgis.example.com"
-live_domain: "nudgis.example.com"
 
-# MediaCache data folder - for VOD
+# URL of the Nudgis Live cluster
+live_domain: "live.example.com"
+
+# Path of the folder to cache the VOD service data
 cache_vod_folder: "/var/cache/nginx/mediacache-vod"
-# MediaCache size in Gb - for VOD
+
+# Max size allowed for the VOD service data (in GB)
 cache_vod_size: "1"
 
-# MediaCache data folder - for live
+# Path of the folder to cache the Live service data
 cache_live_folder: "/var/cache/nginx/mediacache-live"
-# MediaCache size in Gb - for live
+
+# Max size allowed for the Live service data (in GB)
 cache_live_size: "1"
+
 ...
diff --git a/roles/mediacache/tasks/base.yml b/roles/mediacache/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0c9a935818c471a9cd231c7ad87d3eed75fc3b67
--- /dev/null
+++ b/roles/mediacache/tasks/base.yml
@@ -0,0 +1,22 @@
+---
+
+- name: Ensure /etc/munin/plugin-conf.d/ directory exists
+  ansible.builtin.file:
+    dest: /etc/munin/plugin-conf.d
+    state: directory
+    mode: "750"
+
+- name: Configure mediacache nginx monitoring plugin
+  ansible.builtin.copy:
+    content: |
+      [nginx_ubicast_multi]
+      env.path_filters *.ts *.m3u8
+    dest: /etc/munin/plugin-conf.d/nginx_ubicast_multi
+    mode: "644"
+
+- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+...
diff --git a/roles/mediacache/tasks/configure.yml b/roles/mediacache/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ea4b7ba0ef7c368c5758dff33b78f365e81f0fb9
--- /dev/null
+++ b/roles/mediacache/tasks/configure.yml
@@ -0,0 +1,35 @@
+---
+
+- name: CONFIGURE LOCALHOST NAME RESOLUTION
+  ansible.builtin.include_tasks: "configure/local-resolution.yml"
+  when:
+    - cache_domain is defined
+
+- name: CONFIGURE VOD FOLDER
+  ansible.builtin.include_tasks: "configure/vod.yml"
+  when:
+    - cache_vod_folder is defined
+    - cache_vod_size is defined
+
+- name: CONFIGURE LIVE FOLDER
+  ansible.builtin.include_tasks: "configure/live.yml"
+  when:
+    - cache_live_folder is defined
+    - cache_live_size is defined
+
+- name: CONFIGURE NUDGIS CACHE VHOST
+  ansible.builtin.include_tasks: "configure/nginx-vhost.yml"
+  when:
+    - cache_domain is defined
+
+- name: CONFIGURE VOD UPSTREAM DOMAIN
+  ansible.builtin.include_tasks: "configure/nginx-vod-upstream.yml"
+  when:
+    - nudgis_front_domain is defined
+
+- name: CONFIGURE LIVE UPSTREAM DOMAIN
+  ansible.builtin.include_tasks: "configure/nginx-live-upstream.yml"
+  when:
+    - live_domain is defined
+
+...
diff --git a/roles/mediacache/tasks/configure/live.yml b/roles/mediacache/tasks/configure/live.yml
new file mode 100644
index 0000000000000000000000000000000000000000..36b4ae3e592db7274da25fe14ef047ccd5c0de08
--- /dev/null
+++ b/roles/mediacache/tasks/configure/live.yml
@@ -0,0 +1,26 @@
+---
+
+- name: create mediacache live data directory
+  ansible.builtin.file:
+    dest: "{{ cache_live_folder }}"
+    state: directory
+    owner: nginx
+    group: root
+    mode: "0700"
+  when: live_domain is defined
+
+- name: fill the mediacache zones file - Live folder
+  notify: restart nginx
+  ansible.builtin.replace:
+    path: /etc/mediacache/nginx-zones.conf
+    regexp: /var/cache/nginx/mediacache-live
+    replace: "{{ cache_live_folder }}"
+
+- name: fill the mediacache zones file - Live folder size
+  notify: restart nginx
+  ansible.builtin.replace:
+    path: /etc/mediacache/nginx-zones.conf
+    regexp: (?P<key>keys_zone=mediacache-live.*max_size=).*(?P<unit>g)
+    replace: \g<key>{{ cache_live_size }}\g<unit>
+
+...
diff --git a/roles/mediacache/tasks/configure/local-resolution.yml b/roles/mediacache/tasks/configure/local-resolution.yml
new file mode 100644
index 0000000000000000000000000000000000000000..021c46a111291fd4464972599962c98e4e6b958f
--- /dev/null
+++ b/roles/mediacache/tasks/configure/local-resolution.yml
@@ -0,0 +1,9 @@
+---
+
+- name: resolve domain name to localhost
+  ansible.builtin.lineinfile:
+    path: /etc/hosts
+    line: 127.0.1.1 {{ cache_domain }}
+    backup: true
+
+...
diff --git a/roles/mediacache/tasks/configure/nginx-live-upstream.yml b/roles/mediacache/tasks/configure/nginx-live-upstream.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e0ce241264a7f33c9e0b9ee09df18fd030293568
--- /dev/null
+++ b/roles/mediacache/tasks/configure/nginx-live-upstream.yml
@@ -0,0 +1,10 @@
+---
+
+- name: fill the nginx Live proxypass
+  notify: restart nginx
+  ansible.builtin.replace:
+    path: /etc/mediacache/nginx-proxy-live.conf
+    regexp: ^(proxy_pass)\s+.*(;)$
+    replace: \1 https://{{ live_domain }}\2
+
+...
diff --git a/roles/mediacache/tasks/configure/nginx-vhost.yml b/roles/mediacache/tasks/configure/nginx-vhost.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2e5f1424afab128cc6d8a5e85bbf3099372a6751
--- /dev/null
+++ b/roles/mediacache/tasks/configure/nginx-vhost.yml
@@ -0,0 +1,10 @@
+---
+
+- name: fill the vhost file
+  notify: restart nginx
+  ansible.builtin.replace:
+    path: /etc/nginx/sites-available/mediacache.conf
+    regexp: ^(\s+server_name)\s+.*(;)$
+    replace: \1 {{ cache_domain }}\2
+
+...
diff --git a/roles/mediacache/tasks/configure/nginx-vod-upstream.yml b/roles/mediacache/tasks/configure/nginx-vod-upstream.yml
new file mode 100644
index 0000000000000000000000000000000000000000..17bed7d3fa86d9affdfb07dfa7cf60150389e2e4
--- /dev/null
+++ b/roles/mediacache/tasks/configure/nginx-vod-upstream.yml
@@ -0,0 +1,10 @@
+---
+
+- name: fill the nginx VOD proxypass
+  notify: restart nginx
+  ansible.builtin.replace:
+    path: /etc/mediacache/nginx-proxy-mediaserver.conf
+    regexp: ^(proxy_pass)\s+.*(;)$
+    replace: \1 https://{{ nudgis_front_domain }}\2
+
+...
diff --git a/roles/mediacache/tasks/configure/vod.yml b/roles/mediacache/tasks/configure/vod.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5ce6f9db7556d5a74134868b8590eac04aef75fe
--- /dev/null
+++ b/roles/mediacache/tasks/configure/vod.yml
@@ -0,0 +1,25 @@
+---
+
+- name: create mediacache VOD data directory
+  ansible.builtin.file:
+    dest: "{{ cache_vod_folder }}"
+    state: directory
+    owner: nginx
+    group: root
+    mode: "0700"
+
+- name: fill the mediacache zones file - VOD folder
+  notify: restart nginx
+  ansible.builtin.replace:
+    path: /etc/mediacache/nginx-zones.conf
+    regexp: /var/cache/nginx/mediacache-vod
+    replace: "{{ cache_vod_folder }}"
+
+- name: fill the mediacache zones file - VOD folder size
+  notify: restart nginx
+  ansible.builtin.replace:
+    path: /etc/mediacache/nginx-zones.conf
+    regexp: (?P<key>keys_zone=mediacache-vod.*max_size=).*(?P<unit>g)
+    replace: \g<key>{{ cache_vod_size }}\g<unit>
+
+...
diff --git a/roles/mediacache/tasks/install.yml b/roles/mediacache/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e78f0f298d5661b2074754592e5f810d150ffb02
--- /dev/null
+++ b/roles/mediacache/tasks/install.yml
@@ -0,0 +1,13 @@
+---
+
+- name: MediaCache packages installation
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ cache_packages }}"
+    state: present
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/mediacache/tasks/main.yml b/roles/mediacache/tasks/main.yml
index bc44e591266a802537da5254e1359cf4462b28a4..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/mediacache/tasks/main.yml
+++ b/roles/mediacache/tasks/main.yml
@@ -1,106 +1,33 @@
 ---
 
-- name: MediaCache packages installation
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ cache_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: resolve domain name to localhost
-  ansible.builtin.lineinfile:
-    path: /etc/hosts
-    line: 127.0.1.1 {{ cache_domain }}
-    backup: true
-
-- name: create mediacache VOD data directory
-  ansible.builtin.file:
-    dest: "{{ cache_vod_folder }}"
-    state: directory
-    owner: nginx
-    group: root
-    mode: "0700"
-
-- name: create mediacache live data directory
-  ansible.builtin.file:
-    dest: "{{ cache_live_folder }}"
-    state: directory
-    owner: nginx
-    group: root
-    mode: "0700"
-  when: live_domain is defined
-
-- name: fill the vhost file
-  notify: restart nginx
-  ansible.builtin.replace:
-    path: /etc/nginx/sites-available/mediacache.conf
-    regexp: ^(\s+server_name)\s+.*(;)$
-    replace: \1 {{ cache_domain }}\2
-
-- name: fill the mediacache zones file - VOD folder
-  notify: restart nginx
-  ansible.builtin.replace:
-    path: /etc/mediacache/nginx-zones.conf
-    regexp: /var/cache/nginx/mediacache-vod
-    replace: "{{ cache_vod_folder }}"
-
-- name: fill the mediacache zones file - Live folder
-  notify: restart nginx
-  ansible.builtin.replace:
-    path: /etc/mediacache/nginx-zones.conf
-    regexp: /var/cache/nginx/mediacache-live
-    replace: "{{ cache_live_folder }}"
-
-- name: fill the mediacache zones file - VOD folder size
-  notify: restart nginx
-  ansible.builtin.replace:
-    path: /etc/mediacache/nginx-zones.conf
-    regexp: (?P<key>keys_zone=mediacache-vod.*max_size=).*(?P<unit>g)
-    replace: \g<key>{{ cache_vod_size }}\g<unit>
-
-- name: fill the mediacache zones file - Live folder size
-  notify: restart nginx
-  ansible.builtin.replace:
-    path: /etc/mediacache/nginx-zones.conf
-    regexp: (?P<key>keys_zone=mediacache-live.*max_size=).*(?P<unit>g)
-    replace: \g<key>{{ cache_live_size }}\g<unit>
-
-- name: fill the nginx VOD proxypass
-  notify: restart nginx
-  ansible.builtin.replace:
-    path: /etc/mediacache/nginx-proxy-mediaserver.conf
-    regexp: ^(proxy_pass)\s+.*(;)$
-    replace: \1 https://{{ nudgis_front_domain }}\2
-
-- name: fill the nginx Live proxypass
-  notify: restart nginx
-  ansible.builtin.replace:
-    path: /etc/mediacache/nginx-proxy-live.conf
-    regexp: ^(proxy_pass)\s+.*(;)$
-    replace: \1 https://{{ live_domain }}\2
-
-# MONITORING
-
-- name: Ensure /etc/munin/plugin-conf.d/ directory exist
-  ansible.builtin.file:
-    dest: /etc/munin/plugin-conf.d
-    state: directory
-    mode: "750"
-
-- name: Configure mediacache nginx monitoring plugin
-  ansible.builtin.copy:
-    content: |
-      [nginx_ubicast_multi]
-      env.path_filters *.ts *.m3u8
-    dest: /etc/munin/plugin-conf.d/nginx_ubicast_multi
-    mode: "644"
-
-- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
-
-- name: flush handlers
-  ansible.builtin.meta: flush_handlers
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
+
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
+
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/mediacache/vars/main.yml b/roles/mediacache/vars/main.yml
index 1a8e8ab7102887ff51ea25a3880971720474c576..11a58f22f093e5a790e859e885b98f7785138f90 100644
--- a/roles/mediacache/vars/main.yml
+++ b/roles/mediacache/vars/main.yml
@@ -1,8 +1,11 @@
 ---
 
+# Packages required for the group
 cache_packages:
   - ubicast-mediacache
 
-firewall_rules_files: ['http']
+# Group firewall rules filename, see roles/shared/files/nftables/
+firewall_rules_files:
+  - http
 
 ...
diff --git a/roles/mediaimport/README.md b/roles/mediaimport/README.md
index 5e75b5077dc0c05b336da08675e6660673e0e237..93cc3fcef0b99daa564a971ffdb3ba3ffc1ff6c3 100644
--- a/roles/mediaimport/README.md
+++ b/roles/mediaimport/README.md
@@ -1,4 +1,4 @@
 # Mediaimport
 ## Description
 
-The cache group is used to configure all hosts that allow video to be added to the Nudgis platform via an upload (sftp, ftp(s)).
+The Nudgis import group is used to configure all hosts that allow video to be added to the Nudgis platform via an upload (sftp, ftp(s)).
diff --git a/roles/mediaimport/tasks/base.yml b/roles/mediaimport/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..860060499f3956c362ab97e5e0383d926cd52a10
--- /dev/null
+++ b/roles/mediaimport/tasks/base.yml
@@ -0,0 +1,60 @@
+---
+
+- name: enable password login for ssh
+  notify: restart sshd
+  ansible.builtin.replace:
+    dest: /etc/ssh/sshd_config
+    regexp: ^PasswordAuthentication no
+    replace: "#PasswordAuthentication yes"
+
+# mediaimport service failed without any account configured
+- name: disable mediaimport service
+  ansible.builtin.systemd:
+    name: mediaimport
+    enabled: false
+
+- name: enable mediaimport-cleanup timer
+  ansible.builtin.systemd:
+    name: mediaimport-cleanup.timer
+    enabled: true
+
+- name: configure mediaimport admin emails
+  when:
+    - import_email_to | d(false)
+  notify: restart mediaimport
+  ansible.builtin.template:
+    src: mediaimport.json.j2
+    dest: /etc/mediaimport/mediaimport.json
+    backup: true
+    mode: "0600"
+
+# FAIL2BAN
+
+- name: deploy fail2ban pure-ftpd jail
+  notify: restart fail2ban
+  ansible.builtin.copy:
+    dest: /etc/fail2ban/jail.d/pure-ftpd.conf
+    mode: "0644"
+    content: |
+      [pure-ftpd]
+      enabled = true
+
+- name: configure pure-ftpd jail backend
+  notify: restart fail2ban
+  ansible.builtin.lineinfile:
+    path: /etc/fail2ban/paths-overrides.local
+    regexp: '^pureftpd_backend'
+    insertafter: '[DEFAULT]'
+    line: 'pureftpd_backend = systemd'
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+# FIREWALL
+
+- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+...
diff --git a/roles/mediaimport/tasks/install.yml b/roles/mediaimport/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6fed0540c8c8f428a12a3772031cc0b792914dda
--- /dev/null
+++ b/roles/mediaimport/tasks/install.yml
@@ -0,0 +1,9 @@
+---
+
+- name: install packages
+  ansible.builtin.package:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ import_packages }}"
+
+...
diff --git a/roles/mediaimport/tasks/main.yml b/roles/mediaimport/tasks/main.yml
index 73b40a33458e23ae0684c2122a22ac069eed1168..5489c96f5fdafb340fd2c1d566cb768977d6e6f9 100644
--- a/roles/mediaimport/tasks/main.yml
+++ b/roles/mediaimport/tasks/main.yml
@@ -1,66 +1,23 @@
 ---
 
-- name: install packages
-  ansible.builtin.package:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ import_packages }}"
-
-## MEDIAIMPORT
-
-- name: enable password login for ssh
-  notify: restart sshd
-  ansible.builtin.replace:
-    dest: /etc/ssh/sshd_config
-    regexp: ^PasswordAuthentication no
-    replace: "#PasswordAuthentication yes"
-
-# mediaimport service failed without any account configured
-- name: disable mediaimport service
-  ansible.builtin.systemd:
-    name: mediaimport
-    enabled: false
-
-- name: enable mediaimport-cleanup timer
-  ansible.builtin.systemd:
-    name: mediaimport-cleanup.timer
-    enabled: true
-
-- name: configure mediaimport admin emails
-  when:
-    - import_email_to | d(false)
-  notify: restart mediaimport
-  ansible.builtin.template:
-    src: mediaimport.json.j2
-    dest: /etc/mediaimport/mediaimport.json
-    backup: true
-    mode: "0600"
-
-# FAIL2BAN
-
-- name: deploy fail2ban pure-ftpd jail
-  notify: restart fail2ban
-  ansible.builtin.copy:
-    dest: /etc/fail2ban/jail.d/pure-ftpd.conf
-    mode: "0644"
-    content: |
-      [pure-ftpd]
-      enabled = true
-
-- name: configure pure-ftpd jail backend
-  notify: restart fail2ban
-  ansible.builtin.lineinfile:
-    path: /etc/fail2ban/paths-overrides.local
-    regexp: '^pureftpd_backend'
-    insertafter: '[DEFAULT]'
-    line: 'pureftpd_backend = systemd'
-
-- name: flush handlers
-  ansible.builtin.meta: flush_handlers
-
-- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
-
-- name: flush handlers
-  ansible.builtin.meta: flush_handlers
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
+
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
 ...
diff --git a/roles/mediaimport/vars/main.yml b/roles/mediaimport/vars/main.yml
index 06300591112f8b0a361268d3b55101f4986e567c..b3150b63dca13fe26034349f45dc8c5ef27e7edc 100644
--- a/roles/mediaimport/vars/main.yml
+++ b/roles/mediaimport/vars/main.yml
@@ -1,9 +1,12 @@
 ---
 
+# Packages required for the group. ssl-cert is used for optional FTPS support
 import_packages:
-  - ssl-cert # for optionnal FTPS support (the mediaimport postinst will will use the snakeoil certificate for pure-ftpd)
+  - ssl-cert
   - ubicast-mediaimport
 
-firewall_rules_files: ['ftp']
+# Group firewall rules filename, see roles/shared/files/nftables/
+firewall_rules_files:
+  - ftp
 
 ...
diff --git a/roles/mediaserver/README.md b/roles/mediaserver/README.md
index d9bf219c25771a35729a7b17e7f14a7192670c4f..177b15aa38573ace36f35756333b56bb7c8c0a1c 100644
--- a/roles/mediaserver/README.md
+++ b/roles/mediaserver/README.md
@@ -14,11 +14,6 @@ Available variables are listed below, along with the descriptions and the defaul
 nudgis_front_email_from: "noreply@{{ nudgis_front_domain }}"
 ```
 
-`nudgis_front_system_user`: Defines the default deployed Nudgis portal short name (and linked unix user) (Optional)
-```
-nudgis_front_system_user: "msuser"
-```
-
 `nudgis_front_domain`: Defines the default deployed Nudgis portal domain (will be the URL of the portal when adding the HTTP(S) prefix)
 ```
 nudgis_front_domain: "nudgis.example.com"
@@ -68,13 +63,3 @@ nudgis_front_database_port: "5432"
 ```
 nudgis_front_database_password: "changeit"
 ```
-
-`nudgis_front_instances_dir`: Directory to store the Nudgis portals data (Optional)
-```
-nudgis_front_instances_dir: "/data"
-```
-
-`nudgis_front_lock_dir`: Directory to store the Nudgis lock files. This directory should be shared between the Nudgis Fronts in HA cases. (Optional)
-```
-nudgis_front_lock_dir: "/data"
-```
diff --git a/roles/mediaserver/defaults/main.yml b/roles/mediaserver/defaults/main.yml
index 82258a1d85f3a8e05be0e25387d42c227cda91d9..f5e32412064eecc8455a61da5f3c040e973aa7f3 100644
--- a/roles/mediaserver/defaults/main.yml
+++ b/roles/mediaserver/defaults/main.yml
@@ -1,21 +1,36 @@
 ---
 
-nudgis_front_domain: "nudgis.example.com"
+# Defines the address for the Nudgis Front emails sender
 nudgis_front_email_from: "noreply@{{ nudgis_front_domain }}"
-nudgis_front_system_user: "msuser"
+
+# Defines the default deployed Nudgis portal domain (will be the URL of the portal when adding the HTTP(S) prefix)
+nudgis_front_domain: "nudgis.example.com"
+
+# Defines the default deployed Nudgis portal linked mirismanager domain (correspond to the URL of the mirismanager portal when adding the HTTP(S) prefix)
 manager_domain: "manager.example.com"
+
+# Defines the default deployed Nudgis portal master API key
 nudgis_front_api_key: "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX"
+
+# Defines the default deployed Nudgis portal "ubicast" user password
 nudgis_front_user_ubicast_password: "changeit"
+
+# Defines the default deployed Nudgis portal "admin" user password
 nudgis_front_user_admin_password: "changeit"
 
+# Domain used to reach the celerity server
+celerity_server_domain: "celerity.example.com"
+
+# Key used to encrypt communications to and from celerity server
+celerity_signing_key: "changeit"
+
+# Domain to reach PostgreSQL database
 nudgis_front_database_domain: "database.nudgis.example.com"
+
+# Port to reach PostgreSQL database
 nudgis_front_database_port: "5432"
-nudgis_front_database_password: "changeit"
-nudgis_front_instances_dir: "/data"
-nudgis_front_lock_dir: "/data"
 
-celerity_signing_key: "changeit"
-celerity_server_domain: "celerity.example.com"
+# Password to connect to PostgreSQL database with superuser rights
+nudgis_front_database_password: "changeit"
 
-real_ip_from: "" # default for OVH LB is 10.108.0.0/14
 ...
diff --git a/roles/mediaserver/handlers/main.yml b/roles/mediaserver/handlers/main.yml
index 8f614849884b004743cd0e37a2171cb42699e783..3789fc1433e5123aaed47cd14bb61298093a5dd1 100644
--- a/roles/mediaserver/handlers/main.yml
+++ b/roles/mediaserver/handlers/main.yml
@@ -15,6 +15,7 @@
     name: systemd-sysusers
     state: restarted
 
+- import_tasks: ../../shared/handlers/munin-node.yml  # noqa: name[missing]
 - import_tasks: ../../shared/handlers/nftables.yml  # noqa: name[missing]
 
 ...
diff --git a/roles/mediaserver/tasks/base.yml b/roles/mediaserver/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3ebc6a483197604c4bde2ea661ed08d196172b6e
--- /dev/null
+++ b/roles/mediaserver/tasks/base.yml
@@ -0,0 +1,27 @@
+---
+
+- name: enable and start the clamav-freshclam service
+  ansible.builtin.service:
+    name: clamav-freshclam
+    enabled: true
+    state: started
+
+- name: Stat Nudgis Front main configuration file
+  ansible.builtin.stat:
+    path: "/etc/mediaserver/install.ini"
+  register: nudgis_config
+
+- name: Populate Nudgis Front base configuration with example values
+  when: nudgis_config.stat.size == 0
+  ansible.builtin.copy:
+    src: "/etc/mediaserver/install.example.ini"
+    dest: "/etc/mediaserver/install.ini"
+    mode: "preserve"
+    remote_src: true
+
+- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+...
diff --git a/roles/mediaserver/tasks/configure.yml b/roles/mediaserver/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1656dfb3c81bf8fba07175cec0f5ed205eb0d75f
--- /dev/null
+++ b/roles/mediaserver/tasks/configure.yml
@@ -0,0 +1,43 @@
+---
+
+- name: CONFIGURE DATABASE
+  ansible.builtin.include_tasks: "configure/database.yml"
+  when:
+    - nudgis_front_database_domain is defined
+    - nudgis_front_database_port is defined
+    - nudgis_front_database_password is defined
+
+- name: CONFIGURE INSTANCES DIRECTORY
+  ansible.builtin.include_tasks: "configure/data-dirs.yml"
+
+- name: CONFIGURE MSCONTROLLER LOCKS DIRECTORY
+  ansible.builtin.include_tasks: "configure/locks-dirs.yml"
+
+- name: CONFIGURE NUDGIS EMAIL SENDER ADDRESS
+  ansible.builtin.include_tasks: "configure/email.yml"
+  when:
+    - nudgis_front_email_from is defined
+
+- name: CONFIGURE CELERITY SERVER VARIABLES
+  ansible.builtin.include_tasks: "configure/celerity.yml"
+  when:
+    - celerity_server_domain is defined
+    - celerity_signing_key is defined
+
+- name: CREATE FIRST INSTANCE
+  ansible.builtin.include_tasks: "configure/instance.yml"
+  when:
+    - nudgis_front_domain is defined
+    - nudgis_front_api_key is defined
+    - nudgis_front_user_ubicast_password is defined
+    - nudgis_front_user_admin_password is defined
+    - manager_domain is defined
+
+# Should only be played in High Availability cases
+- name: HA CONFIGURATIONS SYNCHRONIZATION
+  ansible.builtin.include_tasks: "configure/synchronize.yml"
+  when:
+    - "'mediaserver' in groups"
+    - groups['mediaserver'] | length > 1
+
+...
diff --git a/roles/mediaserver/tasks/configure/celerity.yml b/roles/mediaserver/tasks/configure/celerity.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e590be18d4df325f2561bbb41f87b5f74f0424df
--- /dev/null
+++ b/roles/mediaserver/tasks/configure/celerity.yml
@@ -0,0 +1,27 @@
+---
+
+- name: Update the MS configuration with the celerity server IP
+  ansible.builtin.lineinfile:
+    path: /etc/mediaserver/msconf.py
+    regexp: "^CELERITY_SERVER_URL = "
+    line: CELERITY_SERVER_URL = 'https://{{ celerity_server_domain }}:6200'
+    create: true
+    owner: root
+    group: root
+    # 644 as all the instances must reach this file
+    # The instances cannot be in a common group as of now => https://redmine.ubicast.net/issues/33046
+    mode: "0644"
+
+- name: Update the MS configuration with the celerity server secret
+  ansible.builtin.lineinfile:
+    path: /etc/mediaserver/msconf.py
+    regexp: "^CELERITY_SIGNING_KEY = "
+    line: CELERITY_SIGNING_KEY = '{{ celerity_signing_key }}'
+    create: true
+    owner: root
+    group: root
+    # 644 as all the instances must reach this file
+    # The instances cannot be in a common group as of now => https://redmine.ubicast.net/issues/33046
+    mode: "0644"
+
+...
diff --git a/roles/mediaserver/tasks/configure/data-dirs.yml b/roles/mediaserver/tasks/configure/data-dirs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4a9af6b45086fcd6bab9e33a5a27131689528f4f
--- /dev/null
+++ b/roles/mediaserver/tasks/configure/data-dirs.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Configure instances directories
+  ansible.builtin.replace:
+    path: /etc/mediaserver/install.ini
+    regexp: '^(\s*)#?\s*{{ item.name }}\s*=.*$'
+    replace: '\1{{ item.name }} = {{ item.value }}'
+  loop:
+    - { name: 'DATA_NEW_DIRS', value: '{{ nudgis_front_instances_dir }}' }
+
+# /!\ Does not alter the already created instances /!\
+
+...
diff --git a/roles/mediaserver/tasks/configure/database.yml b/roles/mediaserver/tasks/configure/database.yml
new file mode 100644
index 0000000000000000000000000000000000000000..dba208599d9b3f029cbec64a61e26ffae72f5673
--- /dev/null
+++ b/roles/mediaserver/tasks/configure/database.yml
@@ -0,0 +1,15 @@
+---
+
+- name: Configure database
+  ansible.builtin.replace:
+    path: /etc/mediaserver/install.ini
+    regexp: '^(\s*)#?\s*{{ item.name }}\s*=.*$'
+    replace: '\1{{ item.name }} = {{ item.value }}'
+  loop:
+    - { name: 'DB_HOST',     value: '{{ nudgis_front_database_domain }}' }   # noqa: yaml[commas]
+    - { name: 'DB_PORT',     value: '{{ nudgis_front_database_port }}' }     # noqa: yaml[commas]
+    - { name: 'DB_ROOT_PWD', value: '{{ nudgis_front_database_password }}' }
+
+# /!\ Does not alter the already created instances /!\
+
+...
diff --git a/roles/mediaserver/tasks/configure/email.yml b/roles/mediaserver/tasks/configure/email.yml
new file mode 100644
index 0000000000000000000000000000000000000000..57ced556c960e8906ea5d2bd529a9fcb22a61b75
--- /dev/null
+++ b/roles/mediaserver/tasks/configure/email.yml
@@ -0,0 +1,19 @@
+---
+
+- name: configure email sender address
+  notify:
+    - restart mediaserver
+  ansible.builtin.lineinfile:
+    path: /etc/mediaserver/msconf.py
+    backup: true
+    create: true
+    owner: root
+    group: root
+    # 644 as all the instances must reach this file
+    # The instances cannot be in a common group as of now => https://redmine.ubicast.net/issues/33046
+    mode: "0644"
+    regexp: ^#? ?DEFAULT_FROM_EMAIL.*
+    line: DEFAULT_FROM_EMAIL = '{{ nudgis_front_email_from }}'
+    validate: python3 -m py_compile %s
+
+...
diff --git a/roles/mediaserver/tasks/configure/instance.yml b/roles/mediaserver/tasks/configure/instance.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f84329b1547e384f9ec84d8281fb44f5ab0706f6
--- /dev/null
+++ b/roles/mediaserver/tasks/configure/instance.yml
@@ -0,0 +1,20 @@
+---
+
+- name: create instance
+  notify:
+    - Setup munin-node plugins link
+    - Restart munin-node service
+  ansible.builtin.command:
+    cmd: >
+      mscontroller.py add -u "{{ nudgis_front_system_user }}" -t '{
+        "id": "1_{{ nudgis_front_system_user }}",
+        "domain": "{{ nudgis_front_domain }}",
+        "api_key": "{{ nudgis_front_api_key }}",
+        "superuser_pwd": "{{ nudgis_front_user_ubicast_password }}",
+        "admin_pwd": "{{ nudgis_front_user_admin_password }}",
+        "skyreach_url": "{{ manager_domain }}"
+      }'
+    creates: "/etc/nginx/sites-available/mediaserver-{{ nudgis_front_system_user }}.conf"
+  throttle: 1
+
+...
diff --git a/roles/mediaserver/tasks/configure/locks-dirs.yml b/roles/mediaserver/tasks/configure/locks-dirs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..66c58ddc868e97140f9379892995f984282195d4
--- /dev/null
+++ b/roles/mediaserver/tasks/configure/locks-dirs.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Configure mscontroller script locks directory
+  ansible.builtin.replace:
+    path: /etc/mediaserver/install.ini
+    regexp: '^(\s*)#?\s*{{ item.name }}\s*=.*$'
+    replace: '\1{{ item.name }} = {{ item.value }}'
+  loop:
+    - { name: 'MSCONTROLLER_LOCKS_DIR', value: '{{ nudgis_front_lock_dir }}' }
+
+# /!\ Existing locks won't be altered /!\
+
+...
diff --git a/roles/mediaserver/tasks/configure/synchronize.yml b/roles/mediaserver/tasks/configure/synchronize.yml
new file mode 100644
index 0000000000000000000000000000000000000000..887d928d69f1a455910489888fb55b9f96aedd4a
--- /dev/null
+++ b/roles/mediaserver/tasks/configure/synchronize.yml
@@ -0,0 +1,67 @@
+---
+
+- name: fetch ssh public key
+  register: root_ssh_pubkey
+  ansible.builtin.slurp:
+    path: /root/.ssh/id_ed25519.pub
+  tags: always
+
+- name: register ssh public key as an ansible fact
+  ansible.builtin.set_fact:
+    pubkey: "{{ root_ssh_pubkey['content'] | b64decode }}"
+  tags: always
+
+- name: share ssh public key between cluster members
+  loop: "{{ groups['mediaserver'] }}"
+  ansible.posix.authorized_key:
+    user: root
+    key: "{{ hostvars[item]['pubkey'] }}"
+  tags: always
+
+- name: synchronize configuration between servers # noqa command-instead-of-module
+  # Cannot use the ansible synchronization module, because there is no way to set a destination IP instead of the destination ansible hostname
+  # noqa command-instead-of-module = warn to use the synchronization module instead of rsync in the command module
+  when:
+    - inventory_hostname != groups['mediaserver'][0]
+  loop:
+    - /etc/mediaserver
+    - /etc/nginx
+    - /etc/sysusers.d
+    - /var/www
+  ansible.builtin.command: |
+    rsync \
+      -avh \
+      -e "ssh -o StrictHostKeyChecking=no" \
+      --delete \
+      "{{ item }}/" \
+      "root@{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}:{{ item }}/"
+  notify:
+    - restart systemd-sysusers
+    - restart nginx
+    - restart mediaserver
+  delegate_to: "{{ groups['mediaserver'][0] }}"
+  changed_when: false
+  tags: mediaserver-synchronize
+
+- name: synchronize letsencrypt configuration between servers # noqa command-instead-of-module
+  # Cannot use the ansible synchronization module, because there is no way to set a destination IP instead of the destination ansible hostname
+  # noqa command-instead-of-module = warn to use the synchronization module instead of rsync in the command module
+  when:
+    - inventory_hostname != groups['mediaserver'][0]
+    - letsencrypt_enabled | d(false)
+  loop:
+    - /etc/letsencrypt
+  ansible.builtin.command: |
+    rsync \
+      -avh \
+      -e "ssh -o StrictHostKeyChecking=no" \
+      --delete \
+      "{{ item }}/" \
+      "root@{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}:{{ item }}/"
+  notify:
+    - restart nginx
+  delegate_to: "{{ groups['mediaserver'][0] }}"
+  changed_when: false
+  tags: mediaserver-synchronize
+
+...
diff --git a/roles/mediaserver/tasks/install.yml b/roles/mediaserver/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..52dfec49e085fba0e11b48dc4af87e7a77e1cfb3
--- /dev/null
+++ b/roles/mediaserver/tasks/install.yml
@@ -0,0 +1,12 @@
+---
+
+- name: mediaserver install
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ nudgis_front_packages }}"
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/mediaserver/tasks/main.yml b/roles/mediaserver/tasks/main.yml
index d3e4195b0eee610fdcb544647f88923f853eeaec..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/mediaserver/tasks/main.yml
+++ b/roles/mediaserver/tasks/main.yml
@@ -1,198 +1,33 @@
 ---
 
-- name: ensure /etc/mediaserver directory exist
-  ansible.builtin.file:
-    path: /etc/mediaserver
-    state: directory
-    mode: "755"  # use mediaserver package post-install rights...
-- name: copy mediaserver install.ini file from sample
-  ansible.builtin.copy:
-    src: install.example.ini
-    dest: /etc/mediaserver/install.ini
-    force: false
-    mode: "640"  # use mediaserver package post-install rights...
-
-- name: prepare mediaserver variables in install.ini file
-  ansible.builtin.replace:
-    path: /etc/mediaserver/install.ini
-    regexp: '^(\s*)#?\s*{{ item.name }}\s*=.*$'
-    replace: '\1{{ item.name }} = {{ item.value }}'
-  loop:
-    - { name: 'DB_HOST',                value: '{{ nudgis_front_database_domain }}' }   # noqa: yaml[commas]
-    - { name: 'DB_PORT',                value: '{{ nudgis_front_database_port }}' }     # noqa: yaml[commas]
-    - { name: 'DB_ROOT_PWD',            value: '{{ nudgis_front_database_password }}' } # noqa: yaml[commas]
-    - { name: 'DATA_NEW_DIRS',          value: '{{ nudgis_front_instances_dir }}' }     # noqa: yaml[commas]
-    - { name: 'MSCONTROLLER_LOCKS_DIR', value: '{{ nudgis_front_lock_dir }}' }     # noqa: yaml[commas]
-  when: item.value != "" and item.value != '""'
-
-- name: mediaserver install
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ nudgis_front_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: fetch ssh public key
-  register: root_ssh_pubkey
-  ansible.builtin.slurp:
-    path: /root/.ssh/id_ed25519.pub
-  tags: always
-
-- name: register ssh public key as an ansible fact
-  ansible.builtin.set_fact:
-    pubkey: "{{ root_ssh_pubkey['content'] | b64decode }}"
-  tags: always
-
-- name: share ssh public key between cluster members
-  loop: "{{ groups['mediaserver'] }}"
-  ansible.posix.authorized_key:
-    user: root
-    key: "{{ hostvars[item]['pubkey'] }}"
-  tags: always
-
-- name: Update the MS configuration with the celerity server IP
-  ansible.builtin.lineinfile:
-    path: /etc/mediaserver/msconf.py
-    regexp: "^CELERITY_SERVER_URL = "
-    line: CELERITY_SERVER_URL = 'https://{{ celerity_server_domain }}:6200'
-    create: true
-    owner: root
-    group: root
-    # 644 as all the instances must reach this file
-    # The instances cannot be in a common group as of now => https://redmine.ubicast.net/issues/33046
-    mode: "0644"
-
-- name: Update the MS configuration with the celerity server secret
-  ansible.builtin.lineinfile:
-    path: /etc/mediaserver/msconf.py
-    regexp: "^CELERITY_SIGNING_KEY = "
-    line: CELERITY_SIGNING_KEY = '{{ celerity_signing_key }}'
-    create: true
-    owner: root
-    group: root
-    # 644 as all the instances must reach this file
-    # The instances cannot be in a common group as of now => https://redmine.ubicast.net/issues/33046
-    mode: "0644"
-
-- name: create instance
-  ansible.builtin.command:
-    cmd: >
-      mscontroller.py add -u "{{ nudgis_front_system_user }}" -t '{
-        "id": "1_{{ nudgis_front_system_user }}",
-        "domain": "{{ nudgis_front_domain }}",
-        "api_key": "{{ nudgis_front_api_key }}",
-        "superuser_pwd": "{{ nudgis_front_user_ubicast_password }}",
-        "admin_pwd": "{{ nudgis_front_user_admin_password }}",
-        "skyreach_url": "{{ manager_domain }}"
-      }'
-    creates: "/etc/nginx/sites-available/mediaserver-{{ nudgis_front_system_user }}.conf"
-  throttle: 1
-
-- name: synchronize configuration between servers # noqa command-instead-of-module
-  # Cannot use the ansible synchronization module, cause there is no way to set a destination IP intead of the destination ansible hostname
-  # noqa command-instead-of-module = warn to use the synchronization module instead of rsync in the command module
-  when:
-    - groups['mediaserver'] | length > 1
-    - inventory_hostname != groups['mediaserver'][0]
-  loop:
-    - /etc/mediaserver
-    - /etc/nginx
-    - /etc/sysusers.d
-    - /var/www
-  ansible.builtin.command: |
-    rsync \
-      -avh \
-      -e "ssh -o StrictHostKeyChecking=no" \
-      --delete \
-      "{{ item }}/" \
-      "root@{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}:{{ item }}/"
-  notify:
-    - restart systemd-sysusers
-    - restart nginx
-    - restart mediaserver
-  delegate_to: "{{ groups['mediaserver'][0] }}"
-  changed_when: false
-  tags: mediaserver-synchronize
-
-- name: synchronize letsencrypt configuration between servers # noqa command-instead-of-module
-  # Cannot use the ansible synchronization module, cause there is no way to set a destination IP intead of the destination ansible hostname
-  # noqa command-instead-of-module = warn to use the synchronization module instead of rsync in the command module
-  when:
-    - groups['mediaserver'] | length > 1
-    - inventory_hostname != groups['mediaserver'][0]
-    - letsencrypt_enabled | d(false)
-  loop:
-    - /etc/letsencrypt
-  ansible.builtin.command: |
-    rsync \
-      -avh \
-      -e "ssh -o StrictHostKeyChecking=no" \
-      --delete \
-      "{{ item }}/" \
-      "root@{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}:{{ item }}/"
-  notify:
-    - restart nginx
-  delegate_to: "{{ groups['mediaserver'][0] }}"
-  changed_when: false
-  tags: mediaserver-synchronize
-
-- name: configure email sender address
-  notify:
-    - restart mediaserver
-  ansible.builtin.lineinfile:
-    path: /etc/mediaserver/msconf.py
-    backup: true
-    create: true
-    owner: root
-    group: root
-    # 644 as all the instances must reach this file
-    # The instances cannot be in a common group as of now => https://redmine.ubicast.net/issues/33046
-    mode: "0644"
-    regexp: ^#? ?DEFAULT_FROM_EMAIL.*
-    line: DEFAULT_FROM_EMAIL = '{{ nudgis_front_email_from }}'
-    validate: python3 -m py_compile %s
-
-- name: configure domain name in nginx conf
-  notify: restart nginx
-  ansible.builtin.replace:
-    path: /etc/nginx/sites-available/mediaserver-{{ nudgis_front_system_user }}.conf
-    regexp: ^(\s*server_name).*;$
-    replace: \1 {{ nudgis_front_domain }};
-    backup: true
-
-- name: configure domain name in database
-  ansible.builtin.shell:
-    cmd: |
-      python3 /usr/lib/python3/dist-packages/mediaserver/scripts/mssiteconfig.py {{ nudgis_front_system_user }} site_url=https://{{ nudgis_front_domain }} ;
-      mscontroller.py restart -u {{ nudgis_front_system_user }} ;
-      touch /etc/mediaserver/.{{ nudgis_front_domain }}.mssiteconfig.log ;
-    creates: /etc/mediaserver/.{{ nudgis_front_domain }}.mssiteconfig.log
-
-- name: reset service resources
-  ansible.builtin.shell:
-    cmd: |
-      python3 /usr/lib/python3/dist-packages/mediaserver/scripts/reset_service_resources.py {{ nudgis_front_system_user }} local ;
-      mscontroller.py restart -u {{ nudgis_front_system_user }} ;
-      touch /etc/mediaserver/.{{ nudgis_front_domain }}.reset_service_resources.log ;
-    creates: /etc/mediaserver/.{{ nudgis_front_domain }}.reset_service_resources.log
-
-- name: ensure clamav-freshclam service is enabled and running
-  ansible.builtin.service:
-    name: clamav-freshclam
-    enabled: true
-    state: started
-
-- name: ensure mediaserver is running
-  ansible.builtin.service:
-    name: mediaserver
-    enabled: true
-    state: started
-
-- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
-
-- name: flush handlers
-  ansible.builtin.meta: flush_handlers
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
+
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
+
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/mediaserver/vars/main.yml b/roles/mediaserver/vars/main.yml
index 6fbdcc35b6ed6a82713b1d837aef34c8fc427012..82140a526e22570b6f32ba1a3c932a7de6788ddd 100644
--- a/roles/mediaserver/vars/main.yml
+++ b/roles/mediaserver/vars/main.yml
@@ -1,5 +1,15 @@
 ---
 
+# Defines the default deployed Nudgis portal short name (and linked unix user) **shared with celerity and mediaworker roles**
+nudgis_front_system_user: "msuser"
+
+# Directory to store the Nudgis portals data
+nudgis_front_instances_dir: "/data"
+
+# Directory to store the Nudgis lock files. This directory should be shared between the Nudgis Fronts in HA cases.
+nudgis_front_lock_dir: "/data"
+
+# Packages required for the group. rsync is used for HA only (copy of instance data)
 nudgis_front_packages:
   - postgresql-client
   - cron
@@ -7,8 +17,10 @@ nudgis_front_packages:
   - nginx
   - postfix
   - ubicast-mediaserver
-  - rsync  # used for HA only (copy of instance data)
+  - rsync
 
-firewall_rules_files: ['http']
+# Group firewall rules filename, see roles/shared/files/nftables/
+firewall_rules_files:
+  - http
 
 ...
diff --git a/roles/mediavault/defaults/main.yml b/roles/mediavault/defaults/main.yml
index 50c0074cdf5b369d6bd03023c97b11a7dfd050b7..8da91245d3138cbd94d3affc01c56d8392e54439 100644
--- a/roles/mediavault/defaults/main.yml
+++ b/roles/mediavault/defaults/main.yml
@@ -1,5 +1,12 @@
 ---
-vault_email_enabled: true
+
+# Boolean to activate the mail notifications
+vault_email_enabled: true
+
+# From fields for email sending (as defined in RFC2822)
 vault_email_from: "{{ ansible_fqdn }} <backup@{{ ansible_fqdn }}>"
+
+# Destination address for the Nudgis Vault emails
 vault_email_to: "noreply@example.com"
+
 ...
diff --git a/roles/mediavault/tasks/base.yml b/roles/mediavault/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c8d2137d7fbd17e5e248f05a5a41496d63e21320
--- /dev/null
+++ b/roles/mediavault/tasks/base.yml
@@ -0,0 +1,10 @@
+---
+
+- name: generate ssh keys pair
+  ansible.builtin.user:
+    name: root
+    generate_ssh_key: true
+    ssh_key_type: ed25519
+    ssh_key_file: .ssh/id_ed25519
+
+...
diff --git a/roles/mediavault/tasks/configure.yml b/roles/mediavault/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0af1a9957715c8ae551fdc4c1a9e1289ee6a13bd
--- /dev/null
+++ b/roles/mediavault/tasks/configure.yml
@@ -0,0 +1,10 @@
+---
+
+- name: CONFIGURE MAILER
+  ansible.builtin.include_tasks: "configure/mailer.yml"
+  when:
+    - vault_email_enabled is defined
+    - vault_email_from is defined
+    - vault_email_to is defined
+
+...
diff --git a/roles/mediavault/tasks/mailer.yml b/roles/mediavault/tasks/configure/mailer.yml
similarity index 100%
rename from roles/mediavault/tasks/mailer.yml
rename to roles/mediavault/tasks/configure/mailer.yml
diff --git a/roles/mediavault/tasks/install.yml b/roles/mediavault/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ca843559f06b7334fee691078dc7185dc7590a87
--- /dev/null
+++ b/roles/mediavault/tasks/install.yml
@@ -0,0 +1,10 @@
+---
+
+- name: install packages
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ vault_packages }}"
+    state: present
+
+...
diff --git a/roles/mediavault/tasks/main.yml b/roles/mediavault/tasks/main.yml
index b3c5e51d80ccaa7327f45a7bf2f48bb1636211af..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/mediavault/tasks/main.yml
+++ b/roles/mediavault/tasks/main.yml
@@ -1,23 +1,33 @@
 ---
 
-- name: install packages
-  ansible.builtin.package:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ vault_packages }}"
-    state: present
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: generate ssh keys pair
-  ansible.builtin.user:
-    name: root
-    generate_ssh_key: true
-    ssh_key_type: ed25519
-    ssh_key_file: .ssh/id_ed25519
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
-# MAILER
-- name: MAILER
-  ansible.builtin.include_tasks: mailer.yml
-
-- ansible.builtin.meta: flush_handlers # noqa name[missing]
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/mediavault/vars/main.yml b/roles/mediavault/vars/main.yml
index 85792145b76a94e7e7108f0f6b68784371c58739..7235605a981c56c740e7a643fcae208401bbb8f6 100644
--- a/roles/mediavault/vars/main.yml
+++ b/roles/mediavault/vars/main.yml
@@ -1,10 +1,16 @@
 ---
 
+# Packages required for the group
 vault_packages:
   - ubicast-mediavault
 
+# Systemd email service name. The service is triggered on service backup failure
 vault_email_service_name: "status-email-admin"
+
+# Systemd email service path.
 vault_email_service_path: "/etc/systemd/system/{{ vault_email_service_name }}@.service"
+
+# Systemd email script path executed by systemd email service.
 vault_email_script_path: "/usr/local/sbin/systemd-mailer"
 
 ...
diff --git a/roles/mediaworker/README.md b/roles/mediaworker/README.md
index 3f205eb4741848575fbe07c8ff4721f3e5451d24..77e922032e8c4f6ac4de683949d00292c8d327fb 100644
--- a/roles/mediaworker/README.md
+++ b/roles/mediaworker/README.md
@@ -14,11 +14,6 @@ Available variables are listed below, along with the descriptions and the defaul
 nudgis_front_domain: "nudgis.example.com"
 ```
 
-`nudgis_front_system_user`: Nudgis system username for the application portal, used as a JSON key in celerity config for nudgis API usage (see also nudgis_front_api_key) (Optional)
-```
-nudgis_front_system_user: "msuser"
-```
-
 `nudgis_front_api_key`: Nudgis API key, used to communicate with the nudgis portal
 ```
 nudgis_front_api_key: "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX"
diff --git a/roles/mediaworker/defaults/main.yml b/roles/mediaworker/defaults/main.yml
index 27e5f6f691c48ed410ade32ab9f55839644ed594..cee9e39ee679a2a28468b14778d1a0dd904187a0 100644
--- a/roles/mediaworker/defaults/main.yml
+++ b/roles/mediaworker/defaults/main.yml
@@ -1,8 +1,15 @@
 ---
-celerity_signing_key: "changeit"
-celerity_server_domain: "celerity.example.com"
 
+# Domain of the default Nudgis "msuser" portal used to populate the /etc/celerity/config.py file
 nudgis_front_domain: "nudgis.example.com"
+
+# Nudgis API key, used to communicate with the nudgis portal
 nudgis_front_api_key: "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX"
-nudgis_front_system_user: "msuser"
+
+# IP or domain on which the celerity server service can be joined
+celerity_server_domain: "celerity.example.com"
+
+# Secret key shared between celerity server and workers for communications (should be the same everywhere for communication)
+celerity_signing_key: "changeit"
+
 ...
diff --git a/roles/mediaworker/handlers/main.yml b/roles/mediaworker/handlers/main.yml
index 8b10bf7f7a1a79f73fe08402fe76727fcc32ce4e..41c27f7ea08c5d4bb8dd25d6680cd02a9d477ad5 100644
--- a/roles/mediaworker/handlers/main.yml
+++ b/roles/mediaworker/handlers/main.yml
@@ -1,11 +1,8 @@
 ---
-- name: restart celerity-server
-  ansible.builtin.service:
-    name: celerity-server
-    state: restarted
 
 - name: restart celerity-workers
   ansible.builtin.service:
     name: celerity-workers
     state: restarted
+
 ...
diff --git a/roles/mediaworker/tasks/configure.yml b/roles/mediaworker/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..30c1837f22af65fa275208b8f7da4a5141ad960d
--- /dev/null
+++ b/roles/mediaworker/tasks/configure.yml
@@ -0,0 +1,12 @@
+---
+
+- name: CONFIGURE CELERITY
+  ansible.builtin.include_tasks: "configure/celerity-conf.yml"
+  when:
+    - nudgis_front_system_user is defined
+    - nudgis_front_api_key is defined
+    - nudgis_front_domain is defined
+    - celerity_server_domain is defined
+    - celerity_signing_key is defined
+
+...
diff --git a/roles/celerity/tasks/celerity_base_config.yml b/roles/mediaworker/tasks/configure/celerity-conf.yml
similarity index 83%
rename from roles/celerity/tasks/celerity_base_config.yml
rename to roles/mediaworker/tasks/configure/celerity-conf.yml
index fde98bae52859161bb826e0b2dfaf1f77519a587..166fc8029a53761fefc41906724f7d02b30a03c8 100644
--- a/roles/celerity/tasks/celerity_base_config.yml
+++ b/roles/mediaworker/tasks/configure/celerity-conf.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: copy celerity example configuration
-  notify: "restart {{ _celerity_service }}"
+  notify: "restart celerity-workers"
   ansible.builtin.copy:
     remote_src: true
     src: /etc/celerity/config.example.py
@@ -10,7 +10,7 @@
     force: false
 
 - name: celerity base configuration
-  notify: "restart {{ _celerity_service }}"
+  notify: "restart celerity-workers"
   ansible.builtin.replace:
     path: /etc/celerity/config.py
     regexp: '^(\s*)#?{{ item.name }}\s*=.*$'
@@ -21,7 +21,7 @@
   when: item.value != "" and item.value != '""'
 
 - name: celerity add nudgis portal
-  notify: "restart {{ _celerity_service }}"
+  notify: "restart celerity-workers"
   ansible.builtin.command:
     cmd: >
       celerity-configurator add-portal
@@ -31,9 +31,9 @@
   register: celerity_add_portal
   changed_when: celerity_add_portal.stdout != 'The configuration file is already up to date.'
 
-- name: "ensure {{ _celerity_service }} is running"
+- name: "ensure celerity-workers is running"
   ansible.builtin.service:
-    name: "{{ _celerity_service }}"
+    name: "celerity-workers"
     enabled: true
     state: started
 
diff --git a/roles/mediaworker/tasks/install.yml b/roles/mediaworker/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..26b2230da2076cc00b933d27c75800e986b97654
--- /dev/null
+++ b/roles/mediaworker/tasks/install.yml
@@ -0,0 +1,12 @@
+---
+
+- name: install celerity worker
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ worker_packages }}"
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/mediaworker/tasks/main.yml b/roles/mediaworker/tasks/main.yml
index b7ca14f390033e94bd045e827dba0a93e33aadf3..4ca9b1ce59bf99a101c275a40afab2c0fc69e94a 100644
--- a/roles/mediaworker/tasks/main.yml
+++ b/roles/mediaworker/tasks/main.yml
@@ -1,20 +1,23 @@
 ---
 
-- name: install celerity worker
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ worker_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: configure celerity
-  ansible.builtin.include_tasks: celerity_base_config.yml
-  vars:
-    _celerity_service: "celerity-workers"
-
-- name: flush handlers
-  ansible.builtin.meta: flush_handlers
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/mediaworker/vars/main.yml b/roles/mediaworker/vars/main.yml
index 30be75a2ade2e8a0799f2bfffd84560d61450819..7ad097abb83f3fb13bf0d615eb4373469c99918a 100644
--- a/roles/mediaworker/vars/main.yml
+++ b/roles/mediaworker/vars/main.yml
@@ -1,6 +1,10 @@
 ---
 
+# Packages required for the group
 worker_packages:
   - ubicast-celerity-workers
 
+# Nudgis system username for the application portal, used as a JSON key in celerity config for nudgis API usage (see also nudgis_front_api_key) **shared with mediaserver and celerity roles**
+nudgis_front_system_user: "msuser"
+
 ...
diff --git a/roles/mirismanager/README.md b/roles/mirismanager/README.md
index 3baff07a104d89b5cd0bce642bead10aa0546aa9..d2f01c2fd00b0ab60724ca48d0a8b3f85678c4c8 100644
--- a/roles/mirismanager/README.md
+++ b/roles/mirismanager/README.md
@@ -1,7 +1,7 @@
 # Mirismanager
 ## Description
 
-The mediaserver group is used to configure all hosts with the UbiCast platform to control and manage video recorders.  
+The mirismanager group is used to configure all hosts with the UbiCast platform to control and manage video recorders.  
  * In a standard Ubicast case, the host is the same as mediaserver
  * In a HA Ubicast case, it is usually a dedicated server
 
@@ -19,9 +19,9 @@ manager_domain: "manager.example.com"
 manager_email_from: "noreply@{{ manager_domain }}"
 ```
 
-`http_proxy`: Mandatory proxy to use in apt-cacher-ng (Optional)
+`proxy_http`: Mandatory proxy to use in apt-cacher-ng (Optional)
 ```
-http_proxy: ""
+proxy_http: ""
 ```
 
 `manager_database_domain`: Domain to reach PostgreSQL database
diff --git a/roles/mirismanager/defaults/main.yml b/roles/mirismanager/defaults/main.yml
index 0f9b91673ef12b46cf4dfe4493e39af7070c50c5..cc09307502ecbcf7cdceac6ae85f374ddb0dd5a5 100644
--- a/roles/mirismanager/defaults/main.yml
+++ b/roles/mirismanager/defaults/main.yml
@@ -1,12 +1,27 @@
 ---
+
+# Defines the default domain for the Nudgis Manager
 manager_domain: "manager.example.com"
+
+# Defines the default address for the Nudgis Manager emails sender
 manager_email_from: "noreply@{{ manager_domain }}"
+
+# Mandatory proxy to use in apt-cacher-ng
 proxy_http: ""
 
+# Domain to reach PostgreSQL database
 manager_database_domain: "database.manager.example.com"
+
+# Port to reach PostgreSQL database
 manager_database_port: "5432"
+
+# Password to connect to PostgreSQL database with superuser rights
 manager_database_password: "changeit"
 
+# Application ubicast user password
 manager_user_ubicast_password: "changeit"
+
+# Application admin user password
 manager_user_admin_password: "changeit"
+
 ...
diff --git a/roles/mirismanager/tasks/base.yml b/roles/mirismanager/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..80d1d9cbf846117e39ad4c68b14d0d883977d308
--- /dev/null
+++ b/roles/mirismanager/tasks/base.yml
@@ -0,0 +1,21 @@
+---
+
+- name: Stat Nudgis Manager main configuration file
+  ansible.builtin.stat:
+    path: "/etc/skyreach/install.ini"
+  register: manager_config
+
+- name: Populate Nudgis Manager base configuration with example values
+  when: not manager_config.stat.exists or manager_config.stat.size == 0
+  ansible.builtin.copy:
+    src: "/etc/skyreach/install.example.ini"
+    dest: "/etc/skyreach/install.ini"
+    mode: "preserve"
+    remote_src: true
+
+- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+...
diff --git a/roles/mirismanager/tasks/configure.yml b/roles/mirismanager/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..03790074e22af8b29e1b9bfd6ad28d8c2c4c3143
--- /dev/null
+++ b/roles/mirismanager/tasks/configure.yml
@@ -0,0 +1,32 @@
+---
+
+- name: CONFIGURE DATABASE
+  ansible.builtin.include_tasks: "configure/database.yml"
+  when:
+    - manager_database_domain is defined
+    - manager_database_port is defined
+    - manager_database_password is defined
+
+- name: CONFIGURE DOMAIN
+  ansible.builtin.include_tasks: "configure/domain.yml"
+  when:
+    - manager_domain is defined
+
+- name: CONFIGURE ADMIN USERS
+  ansible.builtin.include_tasks: "configure/users.yml"
+  when:
+    - manager_user_ubicast_password is defined
+    - manager_user_admin_password is defined
+
+- name: CONFIGURE NUDGIS MANAGER EMAIL SENDER ADDRESS
+  ansible.builtin.include_tasks: "configure/email.yml"
+  when:
+    - manager_email_from is defined
+
+- name: CONFIGURE APT CACHER MANDATORY PROXY
+  ansible.builtin.include_tasks: "configure/apt-cacher-proxy.yml"
+  when:
+    - proxy_http is defined
+    - proxy_http != ""
+
+...
diff --git a/roles/mirismanager/tasks/configure/apt-cacher-proxy.yml b/roles/mirismanager/tasks/configure/apt-cacher-proxy.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b0bc4b4cd83dc3339f3dbd1eeef2d23454dee516
--- /dev/null
+++ b/roles/mirismanager/tasks/configure/apt-cacher-proxy.yml
@@ -0,0 +1,17 @@
+---
+
+- name: check apt cacher ng config exists
+  register: manager_apt_cacher_conf
+  ansible.builtin.stat:
+    path: /etc/apt-cacher-ng/acng.conf
+
+- name: configure apt-cacher-ng
+  when:
+    - manager_apt_cacher_conf.stat.exists
+  notify: restart apt-cacher-ng
+  ansible.builtin.lineinfile:
+    path: /etc/apt-cacher-ng/acng.conf
+    regexp: "^Proxy: .*"
+    line: "Proxy: {{ proxy_http }}"
+
+...
diff --git a/roles/mirismanager/tasks/configure/database.yml b/roles/mirismanager/tasks/configure/database.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0c2bd02c29f178fed0f07536a0ca041b35373d49
--- /dev/null
+++ b/roles/mirismanager/tasks/configure/database.yml
@@ -0,0 +1,15 @@
+---
+
+- name: Configure database
+  ansible.builtin.replace:
+    path: /etc/skyreach/install.ini
+    regexp: '^(\s*)#?\s*{{ item.name }}\s*=.*$'
+    replace: '\1{{ item.name }} = {{ item.value }}'
+  loop:
+    - { name: 'DB_HOST',     value: '{{ manager_database_domain }}' }   # noqa: yaml[commas]
+    - { name: 'DB_PORT',     value: '{{ manager_database_port }}' }     # noqa: yaml[commas]
+    - { name: 'DB_ROOT_PWD', value: '{{ manager_database_password }}' }
+
+# /!\ Does not alter the already created instances /!\
+
+...
diff --git a/roles/mirismanager/tasks/configure/domain.yml b/roles/mirismanager/tasks/configure/domain.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a1d1dd3ab8df893783badcc4d907ea90c75bd42d
--- /dev/null
+++ b/roles/mirismanager/tasks/configure/domain.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Configure domain
+  ansible.builtin.replace:
+    path: /etc/skyreach/install.ini
+    regexp: '^(\s*)#?\s*{{ item.name }}\s*=.*$'
+    replace: '\1{{ item.name }} = {{ item.value }}'
+  loop:
+    - { name: 'DOMAIN', value: '{{ manager_domain }}' }  # noqa: yaml[commas]
+
+# /!\ Does not alter the already created instances /!\
+
+...
diff --git a/roles/mirismanager/tasks/configure/email.yml b/roles/mirismanager/tasks/configure/email.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7fefbc2f7de01f933c64e8a215a372d4cbda886c
--- /dev/null
+++ b/roles/mirismanager/tasks/configure/email.yml
@@ -0,0 +1,11 @@
+---
+
+- name: Configure email sender address
+  notify: restart skyreach
+  ansible.builtin.lineinfile:
+    path: /home/skyreach/skyreach_data/private/settings_override.py
+    regexp: ^#? ?DEFAULT_FROM_EMAIL.*
+    line: DEFAULT_FROM_EMAIL = '{{ manager_email_from }}'
+    backup: true
+
+...
diff --git a/roles/mirismanager/tasks/configure/users.yml b/roles/mirismanager/tasks/configure/users.yml
new file mode 100644
index 0000000000000000000000000000000000000000..713415bfddeb48191d7a4aa68932cc11b261290b
--- /dev/null
+++ b/roles/mirismanager/tasks/configure/users.yml
@@ -0,0 +1,14 @@
+---
+
+- name: Configure application users
+  ansible.builtin.replace:
+    path: /etc/skyreach/install.ini
+    regexp: '^(\s*)#?\s*{{ item.name }}\s*=.*$'
+    replace: '\1{{ item.name }} = {{ item.value }}'
+  loop:
+    - { name: 'SUPERUSER_PWD', value: '{{ manager_user_ubicast_password }}' } # noqa: yaml[commas]
+    - { name: 'ADMIN_PWD',     value: '{{ manager_user_admin_password }}' }   # noqa: yaml[commas]
+
+# /!\ Does not alter the already created instances /!\
+
+...
diff --git a/roles/mirismanager/tasks/install.yml b/roles/mirismanager/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e57d308cc9812ae87a0fb1c7f9891c713781bccd
--- /dev/null
+++ b/roles/mirismanager/tasks/install.yml
@@ -0,0 +1,21 @@
+---
+
+- name: Install mirismanager dependencies
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ dependencies_packages }}"
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+- name: Install mirismanager
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ manager_packages }}"
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/mirismanager/tasks/main.yml b/roles/mirismanager/tasks/main.yml
index 20ffd8bfa55887415651facf6b554c86dbc319df..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/mirismanager/tasks/main.yml
+++ b/roles/mirismanager/tasks/main.yml
@@ -1,98 +1,33 @@
 ---
 
-- name: mirismanager dependencies install
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ dependencies_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: start postgresql
-  ansible.builtin.systemd:
-    name: postgresql
-    state: started
-
-- name: ensure /etc/skyreach directory exist
-  ansible.builtin.file:
-    path: /etc/skyreach
-    state: directory
-    mode: "750"  # use skyreach postinstall rights...
-# TOFIX: There is a problem with postgres password (mediaserver and mirismanager)
-- name: replace default database password with empty one
-  when: manager_database_password == "changeit"
-  ansible.builtin.set_fact:
-    manager_database_password: ""
-
-- name: copy mirismanager install.ini file from sample
-  ansible.builtin.copy:
-    src: install.example.ini
-    dest: /etc/skyreach/install.ini
-    force: false
-    mode: "640"  # use skyreach package post-install rights...
-
-- name: prepare mirismanager variables in install.ini file
-  ansible.builtin.replace:
-    path: /etc/skyreach/install.ini
-    regexp: '^(\s*)#?\s*{{ item.name }}\s*=.*$'
-    replace: '\1{{ item.name }} = {{ item.value }}'
-  loop:
-    - { name: 'DB_HOST',       value: '{{ manager_database_domain }}' }       # noqa: yaml[commas]
-    - { name: 'DB_PORT',       value: '{{ manager_database_port }}' }         # noqa: yaml[commas]
-    - { name: 'DB_ROOT_PWD',   value: '{{ manager_database_password }}' }     # noqa: yaml[commas]
-    - { name: 'DOMAIN'     ,   value: '{{ manager_domain }}' }                # noqa: yaml[commas]
-    - { name: 'SUPERUSER_PWD', value: '{{ manager_user_ubicast_password }}' } # noqa: yaml[commas]
-    - { name: 'ADMIN_PWD',     value: '{{ manager_user_admin_password }}' }   # noqa: yaml[commas]
-  when: item.value != "" and item.value != '""'
-
-- name: mirismanager install
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ manager_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: configure email sender address in settings
-  notify: restart skyreach
-  ansible.builtin.lineinfile:
-    path: /home/skyreach/skyreach_data/private/settings_override.py
-    regexp: ^#? ?DEFAULT_FROM_EMAIL.*
-    line: DEFAULT_FROM_EMAIL = '{{ manager_email_from }}'
-    backup: true
-
-- name: ensure skyreach is running
-  ansible.builtin.service:
-    name: skyreach
-    enabled: true
-    state: started
-
-- name: check apt cacher ng config exists
-  register: manager_apt_cacher_conf
-  ansible.builtin.stat:
-    path: /etc/apt-cacher-ng/acng.conf
-
-- name: configure apt-cacher-ng
-  when:
-    - manager_apt_cacher_conf.stat.exists
-    - proxy_http | d(false)
-  notify: restart apt-cacher-ng
-  ansible.builtin.lineinfile:
-    path: /etc/apt-cacher-ng/acng.conf
-    regexp: "^Proxy: .*"
-    line: "Proxy: {{ proxy_http }}"
-
-- name: ensure apt-cacher-ng is running
-  ansible.builtin.service:
-    name: apt-cacher-ng
-    enabled: true
-    state: started
-
-- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
-
-- name: flush handlers
-  ansible.builtin.meta: flush_handlers
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
+
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
+
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/mirismanager/vars/main.yml b/roles/mirismanager/vars/main.yml
index c805e7c07b1ff1e9b4b4b57c088c930b75d94261..e917782acf9f230b4a3658f828c019192fba066e 100644
--- a/roles/mirismanager/vars/main.yml
+++ b/roles/mirismanager/vars/main.yml
@@ -1,6 +1,6 @@
 ---
 
-# ubicast-skyreach-runtime packages: todo: manage the database in inventory (/!\ can be idenpendent of the MS database)
+# Dependency packages required for the group (from the ubicast-skyreach-runtime package)
 dependencies_packages:
   - apt-cacher-ng
   - cron
@@ -8,9 +8,13 @@ dependencies_packages:
   - postfix
   - postgresql
 
+# Packages required for the group
 manager_packages:
   - ubicast-skyreach
 
-firewall_rules_files: ['http', 'apt-cacher']
+# Group firewall rules filename, see roles/shared/files/nftables/
+firewall_rules_files:
+  - http
+  - apt-cacher
 
 ...
diff --git a/roles/monitor/msmonitor/defaults/main.yml b/roles/monitor/msmonitor/defaults/main.yml
index 71373ae9fad9c5b94d559f68bdf77956857f51f2..c1060c49606da6b6f2946ca801f9ff7428ded726 100644
--- a/roles/monitor/msmonitor/defaults/main.yml
+++ b/roles/monitor/msmonitor/defaults/main.yml
@@ -1,5 +1,12 @@
 ---
+
+# Defines the default domain for monitor
 monitor_domain: "monitor.example.com"
+
+# Password for the webmonitor ubicast user
 monitor_user_ubicast_password: "changeit"
+
+# Password for the webmonitor admin user
 monitor_user_admin_password: "changeit"
+
 ...
diff --git a/roles/monitor/msmonitor/tasks/base.yml b/roles/monitor/msmonitor/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6e73ba886bb40c405e2892fcd3d4b0c4f3a9ed6e
--- /dev/null
+++ b/roles/monitor/msmonitor/tasks/base.yml
@@ -0,0 +1,18 @@
+---
+
+- name: Stat Webmonitor main configuration file
+  ansible.builtin.stat:
+    path: "/etc/webmonitor/install.ini"
+  register: monitor_config
+
+- name: Populate Webmonitor base configuration with example values
+  when: not monitor_config.stat.exists or monitor_config.stat.size == 0
+  ansible.builtin.copy:
+    src: "/etc/webmonitor/install.example.ini"
+    dest: "/etc/webmonitor/install.ini"
+    mode: "preserve"
+    remote_src: true
+
+- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+
+...
diff --git a/roles/monitor/msmonitor/tasks/configure.yml b/roles/monitor/msmonitor/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8174e74e59297a1f154b28a3f28ff5f71ea26c1a
--- /dev/null
+++ b/roles/monitor/msmonitor/tasks/configure.yml
@@ -0,0 +1,14 @@
+---
+
+- name: CONFIGURE DOMAIN
+  ansible.builtin.include_tasks: "configure/domain.yml"
+  when:
+    - monitor_domain is defined
+
+- name: CONFIGURE ADMIN USERS
+  ansible.builtin.include_tasks: "configure/users.yml"
+  when:
+    - monitor_user_admin_password is defined
+    - monitor_user_ubicast_password is defined
+
+...
diff --git a/roles/monitor/msmonitor/tasks/configure/domain.yml b/roles/monitor/msmonitor/tasks/configure/domain.yml
new file mode 100644
index 0000000000000000000000000000000000000000..05fc3ec3741eae732dbe9fb1df7b6e0011ea382e
--- /dev/null
+++ b/roles/monitor/msmonitor/tasks/configure/domain.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Configure domain
+  ansible.builtin.replace:
+    path: /etc/webmonitor/install.ini
+    regexp: '^(\s*)#?\s*{{ item.name }}\s*=.*$'
+    replace: '\1{{ item.name }} = {{ item.value }}'
+  loop:
+    - { name: 'DOMAIN',        value: '{{ monitor_domain }}' }                # noqa: yaml[commas]
+
+# /!\ Does not alter the already created instances /!\
+
+...
diff --git a/roles/monitor/msmonitor/tasks/configure/users.yml b/roles/monitor/msmonitor/tasks/configure/users.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ffff544e3aec573417936b93afef621903986551
--- /dev/null
+++ b/roles/monitor/msmonitor/tasks/configure/users.yml
@@ -0,0 +1,14 @@
+---
+
+- name: Configure application users
+  ansible.builtin.replace:
+    path: /etc/webmonitor/install.ini
+    regexp: '^(\s*)#?\s*{{ item.name }}\s*=.*$'
+    replace: '\1{{ item.name }} = {{ item.value }}'
+  loop:
+    - { name: 'ADMIN_PWD',     value: '{{ monitor_user_admin_password }}' }   # noqa: yaml[commas]
+    - { name: 'SUPERUSER_PWD', value: '{{ monitor_user_ubicast_password }}' } # noqa: yaml[commas]
+
+# /!\ Does not alter the already created instances /!\
+
+...
diff --git a/roles/monitor/msmonitor/tasks/install.yml b/roles/monitor/msmonitor/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..81795287237f12498b8cc94961a3fdb5851bc74c
--- /dev/null
+++ b/roles/monitor/msmonitor/tasks/install.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Install ubicast msmonitor
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    state: present
+    name: "{{ msmonitor_packages }}"
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/monitor/msmonitor/tasks/main.yml b/roles/monitor/msmonitor/tasks/main.yml
index 524055bc1bc4159559b01db292c340ab00ae252c..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/monitor/msmonitor/tasks/main.yml
+++ b/roles/monitor/msmonitor/tasks/main.yml
@@ -1,44 +1,33 @@
 ---
 
-- name: ensure /etc/webmonitor directory exist
-  ansible.builtin.file:
-    path: /etc/webmonitor
-    state: directory
-    mode: "750"  # use webmonitor package post-install rights...
-- name: deploy monitor install.ini file
-  ansible.builtin.copy:
-    src: install.example.ini
-    dest: /etc/webmonitor/install.ini
-    force: false
-    mode: "640"  # use webmonitor package post-install rights...
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: prepare webmonitor variables in install.ini file
-  ansible.builtin.replace:
-    path: /etc/webmonitor/install.ini
-    regexp: '^(\s*)#?\s*{{ item.name }}\s*=.*$'
-    replace: '\1{{ item.name }} = {{ item.value }}'
-  loop:
-    - { name: 'DOMAIN',        value: '{{ monitor_domain }}' }                # noqa: yaml[commas]
-    - { name: 'ADMIN_PWD',     value: '{{ monitor_user_admin_password }}' }   # noqa: yaml[commas]
-    - { name: 'SUPERUSER_PWD', value: '{{ monitor_user_ubicast_password }}' } # noqa: yaml[commas]
-  when: item.value != "" and item.value != '""'
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
-- name: install ubicast msmonitor
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    state: present
-    name: "{{ msmonitor_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: ensure msmonitor is running
-  ansible.builtin.service:
-    name: webmonitor
-    enabled: true
-    state: started
-
-- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/monitor/msmonitor/vars/main.yml b/roles/monitor/msmonitor/vars/main.yml
index 29f30c5df5457adf5169ba043bac48a51efc5e88..9e677903bf98f805d343c8559bb1e0577ce2d883 100644
--- a/roles/monitor/msmonitor/vars/main.yml
+++ b/roles/monitor/msmonitor/vars/main.yml
@@ -1,9 +1,12 @@
 ---
 
+# Packages required for the group
 msmonitor_packages:
   - ubicast-webmonitor
   - ubicast-webmonitor-runtime
 
-firewall_rules_files: ['http']
+# Group firewall rules filename, see roles/shared/files/nftables/
+firewall_rules_files:
+  - http
 
 ...
diff --git a/roles/monitor/munin_node/README.md b/roles/monitor/munin_node/README.md
index f45c4c4793af35e81d406b1b5f7a7eb9fc25f289..118b0be5ae3db4d56a362d1a43ccdef780910375 100644
--- a/roles/monitor/munin_node/README.md
+++ b/roles/monitor/munin_node/README.md
@@ -14,3 +14,8 @@ Available variables are listed below, along with the descriptions and the defaul
 ```
 ubicast_customer_name: "example"
 ```
+
+`munin_server_ip`: IP of the munin server to authorize in munin_node (Optional)
+```
+munin_server_ip: ""
+```
diff --git a/roles/monitor/munin_node/defaults/main.yml b/roles/monitor/munin_node/defaults/main.yml
index e8e946e783efd5911df75ff93247a1641af1ae13..742da3116e5ca191f81e8675cb08c7687fd41a5d 100644
--- a/roles/monitor/munin_node/defaults/main.yml
+++ b/roles/monitor/munin_node/defaults/main.yml
@@ -1,3 +1,9 @@
 ---
+
+# Short name of the customer, with no spaces. This name is used in munin to group the graphs under the same name.
 ubicast_customer_name: "example"
+
+# IP of the munin server to authorize in munin_node
+munin_server_ip: ""
+
 ...
diff --git a/roles/monitor/munin_node/handlers/main.yml b/roles/monitor/munin_node/handlers/main.yml
index 5cfe8c57853b94abbd6e15a210c61f589c112117..c57c008ad4979a2c9caceed733170a4c4944f85d 100644
--- a/roles/monitor/munin_node/handlers/main.yml
+++ b/roles/monitor/munin_node/handlers/main.yml
@@ -1,11 +1,6 @@
 ---
 
-- name: restart munin-node service
-  ansible.builtin.service:
-    name: munin-node
-    state: restarted
-  listen: restart munin-node
-
+- import_tasks: ../../shared/handlers/munin-node.yml  # noqa: name[missing]
 - import_tasks: ../../shared/handlers/nftables.yml  # noqa: name[missing]
 
 ...
diff --git a/roles/monitor/munin_node/tasks/base.yml b/roles/monitor/munin_node/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fb96aaff0d855ecacec4c0a0699a29ed0e7f81c9
--- /dev/null
+++ b/roles/monitor/munin_node/tasks/base.yml
@@ -0,0 +1,5 @@
+---
+
+- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+
+...
diff --git a/roles/monitor/munin_node/tasks/configure.yml b/roles/monitor/munin_node/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8c4ea21f1f6fb50c836c5d9f905fc5b67d39d716
--- /dev/null
+++ b/roles/monitor/munin_node/tasks/configure.yml
@@ -0,0 +1,9 @@
+---
+
+- name: CONFIGURE MUNIN-NODE
+  ansible.builtin.include_tasks: "configure/main-configuration.yml"
+  when:
+    - ubicast_customer_name is defined
+    - munin_server_ip is defined
+
+...
diff --git a/roles/monitor/munin_node/tasks/configure/main-configuration.yml b/roles/monitor/munin_node/tasks/configure/main-configuration.yml
new file mode 100644
index 0000000000000000000000000000000000000000..35b0b0ca4cbab0468badce221da75d67a4d9835a
--- /dev/null
+++ b/roles/monitor/munin_node/tasks/configure/main-configuration.yml
@@ -0,0 +1,12 @@
+---
+
+- name: Copy munin-node configuration
+  notify:
+    - Setup munin-node plugins link
+    - Restart munin-node service
+  ansible.builtin.template:
+    src: munin_node.conf.j2
+    dest: /etc/munin/munin-node.conf
+    mode: "644"
+
+...
diff --git a/roles/monitor/munin_node/tasks/install.yml b/roles/monitor/munin_node/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..00f19ccba2882fbe5f7dbd0b3b361308e374216b
--- /dev/null
+++ b/roles/monitor/munin_node/tasks/install.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Install required packages for munin-node
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    state: present
+    name: "{{ munin_node_packages }}"
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/monitor/munin_node/tasks/main.yml b/roles/monitor/munin_node/tasks/main.yml
index 34949350ca9953c6f5a684789a8b2dc00e5c491f..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/monitor/munin_node/tasks/main.yml
+++ b/roles/monitor/munin_node/tasks/main.yml
@@ -1,30 +1,33 @@
 ---
 
-- name: install required packages for munin-node
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    state: present
-    name: "{{ munin_node_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: copy munin_node configuration
-  notify: restart munin-node
-  ansible.builtin.template:
-    src: munin_node.conf.j2
-    dest: /etc/munin/munin-node.conf
-    mode: "644"
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
-- name: setup munin-node plugins link
-  notify: restart munin-node
-  ansible.builtin.shell:
-    munin_node-configure --shell --remove-also 2>&1 || true | sh -x  # noqa risky-shell-pipe
-  # sh -x print executed cmd to stderr
-  register: munin_plugin_linked
-  changed_when: munin_plugin_linked.stderr | length > 0
-
-- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/monitor/munin_node/templates/munin_node.conf.j2 b/roles/monitor/munin_node/templates/munin_node.conf.j2
index 21db0fe735c877b4857481ca57dae20c7b66c924..8d2aeb2355bbad2dca72f7391ca70be14831854f 100644
--- a/roles/monitor/munin_node/templates/munin_node.conf.j2
+++ b/roles/monitor/munin_node/templates/munin_node.conf.j2
@@ -19,9 +19,11 @@ ignore_file \.dpkg-(tmp|new|old|dist)$
 ignore_file \.rpm(save|new)$
 ignore_file \.pod$
 
-host_name {{ inventory_hostname }}.{{ ubicast_customer_name }}
+host_name {{ munin_node_hostname }}.{{ ubicast_customer_name }}
 
-{% if groups['munin_server'] is defined %}
+{% if munin_server_ip %}
+allow ^{{ munin_server_ip | replace('.', '\.') }}$
+{% elif groups['munin_server'] is defined %}
 {% for host in groups['munin_server'] %}
 allow ^{{ hostvars[host]['ansible_facts']['default_ipv4']['address'] | replace('.','\.') }}$
 {% endfor %}
diff --git a/roles/monitor/munin_node/vars/main.yml b/roles/monitor/munin_node/vars/main.yml
index ea26a1a8a2e44a7229232e9a14d99842eabea636..4df399c0b4bf70161c9f5200094f0b3033fc841e 100644
--- a/roles/monitor/munin_node/vars/main.yml
+++ b/roles/monitor/munin_node/vars/main.yml
@@ -1,12 +1,21 @@
 ---
 
+# Group firewall rules filename, see roles/shared/files/nftables/
+firewall_rules_files:
+  - munin-node
+
+# Munin node hostname of the server
+munin_node_hostname: "{{ inventory_hostname }}"
+
+# Path of the munin-node log file
+munin_node_logfile: "/var/log/munin/munin_node.log"
+
+# Packages required for the group
 munin_node_packages:
   - munin-node
   - ubicast-munin
 
-munin_node_logfile: /var/log/munin/munin_node.log
-munin_node_pidfile: /var/run/munin/munin_node.pid
-
-firewall_rules_files: ['munin-node']
+# Path of the munin-node PID file
+munin_node_pidfile: "/var/run/munin/munin_node.pid"
 
 ...
diff --git a/roles/monitor/munin_server/README.md b/roles/monitor/munin_server/README.md
index d91b3645ea37cae84f011c23df0b2ce90cdcd2bb..ef6ed29a48ee3230c8105e8cba205cd39e1f7b1d 100644
--- a/roles/monitor/munin_server/README.md
+++ b/roles/monitor/munin_server/README.md
@@ -14,3 +14,8 @@ Available variables are listed below, along with the descriptions and the defaul
 ```
 ubicast_customer_name: "example"
 ```
+
+`munin_nodes`: List of munin nodes with their names and IPs (each element of the list is a dictionary with a "name" and an "ip" key) (Optional)
+```
+munin_nodes: []
+```
diff --git a/roles/monitor/munin_server/defaults/main.yml b/roles/monitor/munin_server/defaults/main.yml
index e8e946e783efd5911df75ff93247a1641af1ae13..7021226c64ce372160c2ed194cf113842e669ebf 100644
--- a/roles/monitor/munin_server/defaults/main.yml
+++ b/roles/monitor/munin_server/defaults/main.yml
@@ -1,3 +1,9 @@
 ---
+
+# Short name of the customer, with no spaces. This name is used in munin to group the graphs under the same name.
 ubicast_customer_name: "example"
+
+# List of munin nodes with their names and IPs (each element of the list is a dictionary with a "name" and an "ip" key)
+munin_nodes: []
+
 ...
diff --git a/roles/monitor/munin_server/tasks/base.yml b/roles/monitor/munin_server/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d305cf71f2c01776bd010899bd63f0378e43480d
--- /dev/null
+++ b/roles/monitor/munin_server/tasks/base.yml
@@ -0,0 +1,12 @@
+---
+
+# Force munin_server restart to avoid default localdomain graph creation after remove
+- name: Flush handlers
+  ansible.builtin.meta: flush_handlers
+
+- name: remove default localdomain files
+  ansible.builtin.file:
+    path: /var/cache/munin/www/localdomain
+    state: absent
+
+...
diff --git a/roles/monitor/munin_server/tasks/configure.yml b/roles/monitor/munin_server/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2bcca3086f4b6add056245e93a0d409f71e751f2
--- /dev/null
+++ b/roles/monitor/munin_server/tasks/configure.yml
@@ -0,0 +1,8 @@
+---
+
+- name: CONFIGURE MUNIN-SERVER
+  ansible.builtin.include_tasks: "configure/main-configuration.yml"
+  when:
+    - ubicast_customer_name is defined
+
+...
diff --git a/roles/monitor/munin_server/tasks/configure/main-configuration.yml b/roles/monitor/munin_server/tasks/configure/main-configuration.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8631b009f874daf69eeb6916233f294efa1547e5
--- /dev/null
+++ b/roles/monitor/munin_server/tasks/configure/main-configuration.yml
@@ -0,0 +1,26 @@
+---
+
+- name: Copy munin-server configuration
+  notify: restart munin-server
+  ansible.builtin.template:
+    src: munin.conf.j2
+    dest: /etc/munin/munin.conf
+    mode: "644"
+
+# Force munin_server restart to avoid default localdomain graph creation after remove
+- name: Flush handlers
+  ansible.builtin.meta: flush_handlers
+
+# Remove default localdomain graph created after installation and before configuration
+- name: Check the existence of the localdomain folder
+  ansible.builtin.stat:
+    path: /var/cache/munin/www/localdomain
+  register: localdomain_dir
+
+- name: Remove default localdomain directory and files
+  ansible.builtin.command:
+    cmd: /bin/rm -r /var/cache/munin/www/localdomain
+    removes: /var/cache/munin/www/localdomain
+  when: localdomain_dir.stat.exists
+
+...
diff --git a/roles/monitor/munin_server/tasks/install.yml b/roles/monitor/munin_server/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b156f4c58e930fcb9edaaf22fc78c145d328bbfc
--- /dev/null
+++ b/roles/monitor/munin_server/tasks/install.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Install required packages for munin server
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    state: present
+    name: "{{ munin_server_packages }}"
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/monitor/munin_server/tasks/main.yml b/roles/monitor/munin_server/tasks/main.yml
index 371bbd1a4b79a35a24dd128255b2ebed29a7b388..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/monitor/munin_server/tasks/main.yml
+++ b/roles/monitor/munin_server/tasks/main.yml
@@ -1,29 +1,33 @@
 ---
 
-- name: install required packages for munin server
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    state: present
-    name: "{{ munin_server_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: copy munin_server configuration
-  notify: restart munin-server
-  ansible.builtin.template:
-    src: munin.conf.j2
-    dest: /etc/munin/munin.conf
-    mode: "644"
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
-# Force munin_server restart to avoid default localdomain graph creation after remove
-- name: flush handlers
-  ansible.builtin.meta: flush_handlers
-
-- name: remove default localdomain files
-  ansible.builtin.file:
-    path: /var/cache/munin/www/localdomain
-    state: absent
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/monitor/munin_server/templates/munin.conf.j2 b/roles/monitor/munin_server/templates/munin.conf.j2
index fb9659707262961f4aa3cd0b50ac08480015d416..a7d7dcce4aa82e4e8559141d722102dadafe1d21 100644
--- a/roles/monitor/munin_server/templates/munin.conf.j2
+++ b/roles/monitor/munin_server/templates/munin.conf.j2
@@ -4,7 +4,14 @@
 # (Exactly one) directory to include all files from.
 includedir /etc/munin/munin-conf.d
 
-{% if groups['munin_node'] is defined %}
+{% if munin_nodes %}
+{% for munin_node in munin_nodes %}
+[{{ munin_node.name }}.{{ ubicast_customer_name }}]
+    address {{ munin_node.ip }}
+    use_node_name yes
+
+{% endfor %}
+{% elif groups['munin_node'] is defined %}
 {% for host in groups['munin_node'] %}
 [{{ host }}.{{ ubicast_customer_name }}]
     address {{hostvars[host]['ansible_facts']['default_ipv4']['address']}}
diff --git a/roles/monitor/munin_server/vars/main.yml b/roles/monitor/munin_server/vars/main.yml
index 2b2d0be891482bc11e3fc506f1fbc6959f92cc9f..6c971875045f3c50288890353e08e2b4f9fb92cd 100644
--- a/roles/monitor/munin_server/vars/main.yml
+++ b/roles/monitor/munin_server/vars/main.yml
@@ -1,4 +1,7 @@
 ---
+
+# Packages required for the group
 munin_server_packages:
   - munin
+
 ...
diff --git a/roles/netcapture/README.md b/roles/netcapture/README.md
index 6b1b86d531bcd35285941868111165177fa13e0a..e220c492d330c02905e909463fafed2761f6fb29 100644
--- a/roles/netcapture/README.md
+++ b/roles/netcapture/README.md
@@ -1,7 +1,7 @@
 # Netcapture
 ## Description
 
-The netcapture group is used to configure the server which will host UbiCast virtual recorder instances.
+The netcapture group is used to configure the server which will host UbiCast virtual recorder instances.
 
 ## Role Variables
 
@@ -17,27 +17,7 @@ netcapture_miris_user_pwd: "changeme"
 netcapture_mm_url: "mirismanager.example.com"
 ```
 
-`netcapture_pkg_folder`: Folder used to store the packages (Optional)
-```
-netcapture_pkg_folder: "/data/netcapture/packages"
-```
-
-`netcapture_conf_folder`: Folder used to store the configurations (Optional)
-```
-netcapture_conf_folder: "/etc/miris/conf"
-```
-
-`netcapture_media_folder`: Folder used to store the medias (Optional)
-```
-netcapture_media_folder: "/data/netcapture/media"
-```
-
 `netcapture_mm_ssl`: Activates the SSL verification when calling the Nudgis Manager (Optional)
 ```
 netcapture_mm_ssl: True
 ```
-
-`netcapture_miris_auth`: Activates the authentication for the deployed netcapture miris API (Optional)
-```
-netcapture_miris_auth: True
-```
diff --git a/roles/netcapture/defaults/main.yml b/roles/netcapture/defaults/main.yml
index 1e2e031ce863629812af7ecbed0343b428a48e64..4ad3190fe9d7fa268d02851c2322c0fa6e8018d5 100644
--- a/roles/netcapture/defaults/main.yml
+++ b/roles/netcapture/defaults/main.yml
@@ -1,9 +1,12 @@
 ---
-netcapture_mm_url: "mirismanager.example.com"
-netcapture_mm_ssl: true
-netcapture_conf_folder: "/etc/miris/conf"
-netcapture_media_folder: "/data/netcapture/media"
-netcapture_pkg_folder: "/data/netcapture/packages"
+
+# Password of the deployed netcapture miris API
 netcapture_miris_user_pwd: "changeme"
-netcapture_miris_auth: true
+
+# URL of the mirismanager to use for packages
+netcapture_mm_url: "mirismanager.example.com"
+
+# Activates the SSL verification when calling the Nudgis Manager
+netcapture_mm_ssl: true
+
 ...
diff --git a/roles/netcapture/tasks/configure.yml b/roles/netcapture/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..81bf3a24cb27f8c75312b9e1d95a25bb405abee7
--- /dev/null
+++ b/roles/netcapture/tasks/configure.yml
@@ -0,0 +1,14 @@
+---
+
+- name: MAIN NETCAPTURE CONFIGURATION FILE
+  ansible.builtin.include_tasks: "configure/main-configure.yml"
+  when:
+    - netcapture_mm_url is defined
+    - netcapture_mm_ssl is defined
+
+- name: MIRIS API CONFIGURATION FILE
+  ansible.builtin.include_tasks: "configure/miris-configure.yml"
+  when:
+    - netcapture_miris_user_pwd is defined
+
+...
diff --git a/roles/netcapture/tasks/configure/main-configure.yml b/roles/netcapture/tasks/configure/main-configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e042eb39d8cf431a3165f8d37da634a388b2a9fe
--- /dev/null
+++ b/roles/netcapture/tasks/configure/main-configure.yml
@@ -0,0 +1,29 @@
+---
+
+- name: create netcapture config
+  ansible.builtin.template:
+    src: netcapture.json.j2
+    dest: /etc/miris/netcapture.json
+    mode: u=rw,g=r,o=r
+
+- name: netcapture config dir
+  ansible.builtin.file:
+    path: "{{ netcapture_conf_folder }}"
+    group: video
+    mode: u=rwX,g=rwX,o=r
+    state: directory
+
+- name: netcapture media dir
+  ansible.builtin.file:
+    path: "{{ netcapture_media_folder }}"
+    group: video
+    mode: u=rwX,g=rwX,o=rx
+    state: directory
+
+- name: netcapture package dir
+  ansible.builtin.file:
+    path: "{{ netcapture_pkg_folder }}"
+    mode: u=rwX,g=rwX,o=rx
+    state: directory
+
+...
diff --git a/roles/netcapture/tasks/configure/miris-configure.yml b/roles/netcapture/tasks/configure/miris-configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..11ef62791ab7997af59fb8d41691130d422f99f4
--- /dev/null
+++ b/roles/netcapture/tasks/configure/miris-configure.yml
@@ -0,0 +1,15 @@
+---
+
+- name: check if miris api config exists
+  ansible.builtin.stat:
+    path: "{{ netcapture_conf_folder }}/api.json"
+  register: miris_config_file
+
+- name: create netcapture miris api config
+  ansible.builtin.template:
+    src: miris-api.json.j2
+    dest: "{{ netcapture_conf_folder }}/api.json"
+    mode: u=rw,g=r,o=r
+  when: not miris_config_file.stat.exists
+
+...
diff --git a/roles/netcapture/tasks/install.yml b/roles/netcapture/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6eefafc63dd25b44bc2d6cd266d7e0646060be69
--- /dev/null
+++ b/roles/netcapture/tasks/install.yml
@@ -0,0 +1,12 @@
+---
+
+- name: netcapture install
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: ubicast-netcapture
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/netcapture/tasks/main.yml b/roles/netcapture/tasks/main.yml
index ccd365aa4c4549f8db96006f874ed86aacd1222a..4ca9b1ce59bf99a101c275a40afab2c0fc69e94a 100644
--- a/roles/netcapture/tasks/main.yml
+++ b/roles/netcapture/tasks/main.yml
@@ -1,53 +1,23 @@
 ---
 
-- name: netcapture install
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: ubicast-netcapture
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: create netcapture config
-  ansible.builtin.template:
-    src: netcapture.json.j2
-    dest: /etc/miris/netcapture.json
-    mode: u=rw,g=r,o=r
-
-- name: check if miris api config exists
-  ansible.builtin.stat:
-    path: "{{ netcapture_conf_folder }}/api.json"
-  register: miris_config_file
-
-- name: create netcapture miris api config
-  ansible.builtin.template:
-    src: miris-api.json.j2
-    dest: "{{ netcapture_conf_folder }}/api.json"
-    mode: u=rw,g=r,o=r
-  when: not miris_config_file.stat.exists
-
-- name: netcapture config dir
-  ansible.builtin.file:
-    path: "{{ netcapture_conf_folder }}"
-    group: video
-    mode: u=rwX,g=rwX,o=r
-    state: directory
-
-- name: netcapture media dir
-  ansible.builtin.file:
-    path: "{{ netcapture_media_folder }}"
-    group: video
-    mode: u=rwX,g=rwX,o=rx
-    state: directory
-
-- name: netcapture package dir
-  ansible.builtin.file:
-    path: "{{ netcapture_pkg_folder }}"
-    mode: u=rwX,g=rwX,o=rx
-    state: directory
-
-- name: flush handlers
-  ansible.builtin.meta: flush_handlers
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
+
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/netcapture/vars/main.yml b/roles/netcapture/vars/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..11c1cf67c8e391f49f6db23665e61a650093739c
--- /dev/null
+++ b/roles/netcapture/vars/main.yml
@@ -0,0 +1,15 @@
+---
+
+# Folder used to store the packages
+netcapture_pkg_folder: "/data/netcapture/packages"
+
+# Folder used to store the configurations
+netcapture_conf_folder: "/etc/miris/conf"
+
+# Folder used to store the media files
+netcapture_media_folder: "/data/netcapture/media"
+
+# Activates the authentication for the deployed netcapture miris API
+netcapture_miris_auth: true
+
+...
diff --git a/roles/nftables/README.md b/roles/nftables/README.md
index 01e37593be4048ddc1ea7c2c9e262e3bf4d3cf96..8632dd85f1657644a2df3f82c6612caeba1fbdc4 100644
--- a/roles/nftables/README.md
+++ b/roles/nftables/README.md
@@ -1,5 +1,4 @@
-# Firewall
+# Nftables
 ## Description
 
-The firewall group handles the installation and configuration of nftables. Using this group also triggers the deployment of the UbiCast applications firewall rules.  
- To install and activate the firewall, you should only have to declare it as a meta group in the inventory with the target hosts/groups as childrens.
+Install and configure the nftables firewall
diff --git a/roles/nftables/vars/main.yml b/roles/nftables/vars/main.yml
index 78e855cf0ecbffbf585e16f2ae42520021b1bc0f..9fb65ce4073771d62de3350243566bf99bb52347 100644
--- a/roles/nftables/vars/main.yml
+++ b/roles/nftables/vars/main.yml
@@ -1,6 +1,7 @@
 ---
 
+# Packages required for the group
 nftables_packages:
-  - "nftables"
+  - nftables
 
 ...
diff --git a/roles/nginx/README.md b/roles/nginx/README.md
index dc25b471746f79c36769d16e7650b8ce89357538..cc07005973f3157f3840a05d8c198b0559bebd38 100644
--- a/roles/nginx/README.md
+++ b/roles/nginx/README.md
@@ -1,20 +1,4 @@
 # Nginx
+## Description
 
-## Role Variables
-
-Available variables are listed below, along with the descriptions and the default values.
-
-`nginx_ssl_certificate`: Path of the SSL certificate for nginx configuration (Optional)
-```
-nginx_ssl_certificate: "/etc/ssl/certs/ssl-cert-snakeoil.pem"
-```
-
-`nginx_ssl_certificate_key`: Path of the SSL key for nginx configuration (Optional)
-```
-nginx_ssl_certificate_key: "/etc/ssl/private/ssl-cert-snakeoil.key"
-```
-
-`nginx_real_ip_from`: IPv4 address of the reverse-proxy or loadbalancer above the server(s) (Optional)
-```
-nginx_real_ip_from: ""
-```
+Used by roles that need the nginx webserver; handles nginx installation and configuration
diff --git a/roles/nginx/defaults/main.yml b/roles/nginx/defaults/main.yml
deleted file mode 100644
index 6901a16c2ac6c09e0b128902b2befccf05399fc9..0000000000000000000000000000000000000000
--- a/roles/nginx/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-nginx_ssl_certificate: /etc/ssl/certs/ssl-cert-snakeoil.pem
-nginx_ssl_certificate_key: /etc/ssl/private/ssl-cert-snakeoil.key
-nginx_real_ip_from: ''
-...
diff --git a/roles/nginx/tasks/base.yml b/roles/nginx/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a8d6adab1d831214277beb4d5c270aa195ace4b3
--- /dev/null
+++ b/roles/nginx/tasks/base.yml
@@ -0,0 +1,27 @@
+---
+
+- name: nginx remove default vhost
+  notify: restart nginx
+  loop:
+    - /etc/nginx/sites-enabled/default
+    - /etc/nginx/sites-enabled/default.conf
+  ansible.builtin.file:
+    path: "{{ item }}"
+    state: absent
+
+# deploy self-signed (snakeoil certificates)
+- name: nginx update ssl certificate conf
+  notify: restart nginx
+  ansible.builtin.lineinfile:
+    path: /etc/nginx/conf.d/ssl_certificate.conf
+    regexp: ssl_certificate\s+([\w/\-\_\.]+);
+    line: ssl_certificate {{ nginx_ssl_certificate }};
+
+- name: nginx update ssl certificate key conf
+  notify: restart nginx
+  ansible.builtin.lineinfile:
+    path: /etc/nginx/conf.d/ssl_certificate.conf
+    regexp: ssl_certificate_key\s+([\w/\-\_\.]+);
+    line: ssl_certificate_key {{ nginx_ssl_certificate_key }};
+
+...
diff --git a/roles/nginx/tasks/install.yml b/roles/nginx/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f0125cb767bd270506fba9d0b92ef29e8e6d2b85
--- /dev/null
+++ b/roles/nginx/tasks/install.yml
@@ -0,0 +1,13 @@
+---
+
+- name: nginx install
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ nginx_packages }}"
+    state: present
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/nginx/tasks/main.yml b/roles/nginx/tasks/main.yml
index ae07f894b67f26a08d883c2274cad4c6e7675a57..5489c96f5fdafb340fd2c1d566cb768977d6e6f9 100644
--- a/roles/nginx/tasks/main.yml
+++ b/roles/nginx/tasks/main.yml
@@ -1,71 +1,23 @@
 ---
 
-- name: nginx install
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ nginx_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: nginx remove default vhost
-  notify: restart nginx
-  loop:
-    - /etc/nginx/sites-enabled/default
-    - /etc/nginx/sites-enabled/default.conf
-  ansible.builtin.file:
-    path: "{{ item }}"
-    state: absent
-
-# NOTE: /etc/nginx/conf.d/ssl.conf does not exist after current nginx package installation
-# - name: nginx check old ssl conf exists
-#   register: nginx_old_ssl_conf
-#   ansible.builtin.stat:
-#     path: /etc/nginx/conf.d/ssl.conf
-#
-# - name: nginx migrate old ssl certificate conf
-#   when: nginx_old_ssl_conf.stat.exists
-#   notify: restart nginx
-#   loop:
-#     - grep ssl_certificate /etc/nginx/conf.d/ssl.conf > /etc/nginx/conf.d/ssl_certificate.conf
-#     - mv /etc/nginx/conf.d/ssl.conf /etc/nginx/conf.d/ssl.conf.old
-#   ansible.builtin.command:
-#     cmd: "{{ item }}"
-
-- name: nginx check ssl cert conf exists
-  register: nginx_ssl_cert_conf
-  ansible.builtin.stat:
-    path: /etc/nginx/conf.d/ssl_certificate.conf
-
-- name: nginx update ssl certificate conf
-  when:
-    - nginx_ssl_cert_conf.stat.exists
-    - nginx_ssl_certificate != "/etc/ssl/certs/ssl-cert-snakeoil.pem"
-  notify: restart nginx
-  ansible.builtin.lineinfile:
-    path: /etc/nginx/conf.d/ssl_certificate.conf
-    regexp: ssl_certificate\s+([\w/\-\_\.]+);
-    line: ssl_certificate {{ nginx_ssl_certificate }};
-
-- name: nginx update ssl certificate key conf
-  when:
-    - nginx_ssl_cert_conf.stat.exists
-    - nginx_ssl_certificate_key != "/etc/ssl/private/ssl-cert-snakeoil.key"
-  notify: restart nginx
-  ansible.builtin.lineinfile:
-    path: /etc/nginx/conf.d/ssl_certificate.conf
-    regexp: ssl_certificate_key\s+([\w/\-\_\.]+);
-    line: ssl_certificate_key {{ nginx_ssl_certificate_key }};
-
-- name: add realip configuration (for LoadBalancer in HA configuration)
-  notify: restart nginx
-  when:
-    - nginx_real_ip_from | length > 0
-  ansible.builtin.template:
-    src: realip.conf.j2
-    dest: /etc/nginx/conf.d/realip.conf
-    mode: "644"
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
+
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
 ...
diff --git a/roles/nginx/vars/main.yml b/roles/nginx/vars/main.yml
index 98f55a38c7b1bc2dc5e22a15cbe7d67caee24904..8324cb9b3dacacdf611e711e537d0c3d7d3b4895 100644
--- a/roles/nginx/vars/main.yml
+++ b/roles/nginx/vars/main.yml
@@ -1,6 +1,16 @@
 ---
+
+# Packages required for the group. ssl-cert is used for Debian SSL snakeoil certificates
 nginx_packages:
   - nginx
   - uwsgi
   - uwsgi-plugin-python3
+  - ssl-cert
+
+# Path of the SSL certificate for nginx configuration
+nginx_ssl_certificate: "/etc/ssl/certs/ssl-cert-snakeoil.pem"
+
+# Path of the SSL key for nginx configuration
+nginx_ssl_certificate_key: "/etc/ssl/private/ssl-cert-snakeoil.key"
+
 ...
diff --git a/roles/postfix/defaults/main.yml b/roles/postfix/defaults/main.yml
index 2eea927cc93830d932844b9d3b9f42baa658acec..17140f92d6bad7db299bcfc42133171bd2ad7e2c 100644
--- a/roles/postfix/defaults/main.yml
+++ b/roles/postfix/defaults/main.yml
@@ -1,8 +1,21 @@
 ---
+
+# Define the specified email address for the unix root account (in /etc/aliases)
+postfix_admin: "admin@example.com"
+
+# Default sender domain, used to complete both postfix configuration and the /etc/mailname content
 postfix_mailname: "domain.example.com"
+
+# Email address used by postfix to send emails
 postfix_email_from: "noreply@{{ postfix_mailname }}"
+
+# SMTP relay host
 postfix_relay_host: ""
-postfix_relay_user: ""
+
+# Password of the SMTP SASL account
 postfix_relay_pass: ""
-postfix_admin: "admin@example.com"
+
+# User of the SMTP SASL account
+postfix_relay_user: ""
+
 ...
diff --git a/roles/postfix/handlers/main.yml b/roles/postfix/handlers/main.yml
index ad545a82dbe4721bb95fa6f68af77918fb5c2479..1100eb60dee9bcd12f28735956c3b616226c2a8f 100644
--- a/roles/postfix/handlers/main.yml
+++ b/roles/postfix/handlers/main.yml
@@ -2,22 +2,18 @@
 
 - name: postmap sasl
   ansible.builtin.command: postmap hash:/etc/postfix/sasl-passwords
-  # we want to generate db file at every execution
   changed_when: false
 
 - name: postmap generic
   ansible.builtin.command: postmap hash:/etc/postfix/generic
-  # we want to generate db file at every execution
   changed_when: false
 
 - name: postmap virtual
   ansible.builtin.command: postmap hash:/etc/postfix/virtual
-  # we want to generate db file at every execution
   changed_when: false
 
-- name: newaliases
+- name: run newaliases
   ansible.builtin.command: newaliases
-  # we want to generate aliases at every execution
   changed_when: false
 
 - name: restart postfix
diff --git a/roles/postfix/tasks/configure.yml b/roles/postfix/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c52cd37d3be66d71ba34dffdc30ec25b9165722a
--- /dev/null
+++ b/roles/postfix/tasks/configure.yml
@@ -0,0 +1,38 @@
+---
+
+- name: Populate postfix main configuration
+  ansible.builtin.include_tasks: "configure/postfix_main.yml"
+  when:
+    - postfix_mailname is defined
+    - postfix_relay_host is defined
+    - postfix_relay_user is defined
+
+- name: Populate /etc/mailname file
+  ansible.builtin.include_tasks: "configure/mailname.yml"
+  when:
+    - postfix_mailname is defined
+
+- name: Populate /etc/aliases file
+  ansible.builtin.include_tasks: "configure/aliases.yml"
+  when:
+    - postfix_admin is defined
+
+- name: Populate postfix virtual aliases
+  ansible.builtin.include_tasks: "configure/postfix_virtual.yml"
+  when:
+    - postfix_mailname is defined
+
+- name: Populate postfix generic
+  ansible.builtin.include_tasks: "configure/postfix_generic.yml"
+  when:
+    - postfix_mailname is defined
+    - postfix_email_from is defined
+
+- name: Populate postfix authentication file
+  ansible.builtin.include_tasks: "configure/postfix_authentication.yml"
+  when:
+    - postfix_relay_host is defined
+    - postfix_relay_user is defined
+    - postfix_relay_pass is defined
+
+...
diff --git a/roles/postfix/tasks/configure/aliases.yml b/roles/postfix/tasks/configure/aliases.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f96fe13cf0ab057694cd00583b82513616b77532
--- /dev/null
+++ b/roles/postfix/tasks/configure/aliases.yml
@@ -0,0 +1,11 @@
+---
+
+- name: aliases configuration
+  notify: run newaliases
+  ansible.builtin.template:
+    backup: true
+    src: aliases.j2
+    dest: /etc/aliases
+    mode: "644"
+
+...
diff --git a/roles/postfix/tasks/configure/mailname.yml b/roles/postfix/tasks/configure/mailname.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3306141e40edb8d7dfe7f7ff1d492a24b10c30cd
--- /dev/null
+++ b/roles/postfix/tasks/configure/mailname.yml
@@ -0,0 +1,11 @@
+---
+
+- name: mailname configuration
+  notify: restart postfix
+  ansible.builtin.template:
+    backup: true
+    src: mailname.j2
+    dest: /etc/mailname
+    mode: "644"
+
+...
diff --git a/roles/postfix/tasks/configure/postfix_authentication.yml b/roles/postfix/tasks/configure/postfix_authentication.yml
new file mode 100644
index 0000000000000000000000000000000000000000..80850c13a79061271d83e6686174276eaec319ec
--- /dev/null
+++ b/roles/postfix/tasks/configure/postfix_authentication.yml
@@ -0,0 +1,11 @@
+---
+
+- name: postfix sasl passwords
+  notify: postmap sasl
+  ansible.builtin.template:
+    backup: true
+    src: sasl-passwords.j2
+    dest: /etc/postfix/sasl-passwords
+    mode: "644"
+
+...
diff --git a/roles/postfix/tasks/configure/postfix_generic.yml b/roles/postfix/tasks/configure/postfix_generic.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1fd0e84e75baaf3d635afe9b1703ecaa8f15e8ba
--- /dev/null
+++ b/roles/postfix/tasks/configure/postfix_generic.yml
@@ -0,0 +1,15 @@
+---
+
+- name: gather hostname
+  ansible.builtin.setup:
+    filter: ansible_hostname
+
+- name: postfix generic
+  notify: postmap generic
+  ansible.builtin.template:
+    backup: true
+    src: generic.j2
+    dest: /etc/postfix/generic
+    mode: "644"
+
+...
diff --git a/roles/postfix/tasks/configure/postfix_main.yml b/roles/postfix/tasks/configure/postfix_main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a600f78877cf1db0cbe43ebb1ba1ce64dceb8528
--- /dev/null
+++ b/roles/postfix/tasks/configure/postfix_main.yml
@@ -0,0 +1,15 @@
+---
+
+- name: gather hostname
+  ansible.builtin.setup:
+    filter: ansible_hostname
+
+- name: postfix main config
+  notify: restart postfix
+  ansible.builtin.template:
+    backup: true
+    src: main.cf.j2
+    dest: /etc/postfix/main.cf
+    mode: "644"
+
+...
diff --git a/roles/postfix/tasks/configure/postfix_virtual.yml b/roles/postfix/tasks/configure/postfix_virtual.yml
new file mode 100644
index 0000000000000000000000000000000000000000..688522c93787839653e56bd5e911cfbf47925575
--- /dev/null
+++ b/roles/postfix/tasks/configure/postfix_virtual.yml
@@ -0,0 +1,11 @@
+---
+
+- name: postfix virtual aliases
+  notify: postmap virtual
+  ansible.builtin.template:
+    backup: true
+    src: virtual.j2
+    dest: /etc/postfix/virtual
+    mode: "644"
+
+...
diff --git a/roles/postfix/tasks/install.yml b/roles/postfix/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9fabafc664e32db8f7f5b964344dab4800477726
--- /dev/null
+++ b/roles/postfix/tasks/install.yml
@@ -0,0 +1,28 @@
+---
+
+- name: remove postfix conflicting packages
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name:
+      - exim4
+      - exim4-base
+      - exim4-config
+      - exim4-daemon-light
+    state: absent
+    purge: true
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+- name: install postfix
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ postfix_packages }}"
+    state: present
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/postfix/tasks/main.yml b/roles/postfix/tasks/main.yml
index a9fc2da1403d6ee3c3195a38d91d5718a54b272b..4ca9b1ce59bf99a101c275a40afab2c0fc69e94a 100644
--- a/roles/postfix/tasks/main.yml
+++ b/roles/postfix/tasks/main.yml
@@ -1,96 +1,23 @@
 ---
 
-- name: create postfix dir
-  ansible.builtin.file:
-    path: /etc/postfix
-    state: directory
-    mode: "755"
-
-- name: postfix main config
-  notify: restart postfix
-  ansible.builtin.template:
-    backup: true
-    src: main.cf.j2
-    dest: /etc/postfix/main.cf
-    mode: "644"
-
-- name: postfix mailname
-  notify: restart postfix
-  ansible.builtin.copy:
-    backup: true
-    dest: /etc/mailname
-    content: "{{ postfix_mailname }}"
-    mode: "644"
-
-- name: postfix local aliases
-  notify:
-    - newaliases
-    - restart postfix
-  ansible.builtin.copy:
-    backup: true
-    dest: /etc/aliases
-    mode: "644"
-    content: |
-      devnull: /dev/null
-      clamav: root
-      root: {{ postfix_admin }}
-
-- name: postfix virtual aliases
-  notify:
-    - postmap virtual
-    - restart postfix
-  ansible.builtin.copy:
-    backup: true
-    dest: /etc/postfix/virtual
-    mode: "644"
-    content: |
-      postmaster@{{ postfix_mailname }} root
-      bounces@{{ postfix_mailname }} root
-      noreply@{{ postfix_mailname }} devnull
-
-- name: postfix generic aliases, sender rewriting
-  notify:
-    - postmap generic
-    - restart postfix
-  ansible.builtin.copy:
-    backup: true
-    dest: /etc/postfix/generic
-    mode: "644"
-    content: |
-      root@localhost {{ postfix_email_from }}
-      root@{{ postfix_mailname }} {{ postfix_email_from }}
-      root@{{ ansible_hostname }} {{ postfix_email_from }}
-      @{{ postfix_mailname }} {{ postfix_email_from }}
-      @{{ ansible_hostname }} {{ postfix_email_from }}
-
-- name: postfix authentication
-  when:
-    - postfix_relay_host | d(false)
-    - postfix_relay_user | d(false)
-    - postfix_relay_pass | d(false)
-  notify:
-    - postmap sasl
-    - restart postfix
-  ansible.builtin.copy:
-    backup: true
-    dest: /etc/postfix/sasl-passwords
-    mode: "644"
-    content: "{{ postfix_relay_host }} {{ postfix_relay_user }}:{{ postfix_relay_pass }}"
-
-- name: install postfix
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ postfix_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: ensure postfix is running
-  ansible.builtin.service:
-    name: postfix
-    enabled: true
-    state: started
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
+
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/postfix/templates/aliases.j2 b/roles/postfix/templates/aliases.j2
new file mode 100644
index 0000000000000000000000000000000000000000..64c08014ffe930fcc76ef328ce5afac19aeebdeb
--- /dev/null
+++ b/roles/postfix/templates/aliases.j2
@@ -0,0 +1,3 @@
+devnull: /dev/null
+clamav: root
+root: {{ postfix_admin }}
diff --git a/roles/postfix/templates/generic.j2 b/roles/postfix/templates/generic.j2
new file mode 100644
index 0000000000000000000000000000000000000000..cd494c652a6495d70e7f838bcade5dd520339844
--- /dev/null
+++ b/roles/postfix/templates/generic.j2
@@ -0,0 +1,5 @@
+root@localhost {{ postfix_email_from }}
+root@{{ postfix_mailname }} {{ postfix_email_from }}
+root@{{ ansible_hostname }} {{ postfix_email_from }}
+@{{ postfix_mailname }} {{ postfix_email_from }}
+@{{ ansible_hostname }} {{ postfix_email_from }}
diff --git a/roles/postfix/templates/mailname.j2 b/roles/postfix/templates/mailname.j2
new file mode 100644
index 0000000000000000000000000000000000000000..422f887bd470508d9d44c5137ccd5a76c9100ce7
--- /dev/null
+++ b/roles/postfix/templates/mailname.j2
@@ -0,0 +1 @@
+{{ postfix_mailname }}
diff --git a/roles/postfix/templates/sasl-passwords.j2 b/roles/postfix/templates/sasl-passwords.j2
new file mode 100644
index 0000000000000000000000000000000000000000..a0883b30bea20b8ed56cd24fc2cae0122105d93c
--- /dev/null
+++ b/roles/postfix/templates/sasl-passwords.j2
@@ -0,0 +1 @@
+{{ postfix_relay_host }} {{ postfix_relay_user }}:{{ postfix_relay_pass }}
diff --git a/roles/postfix/templates/virtual.j2 b/roles/postfix/templates/virtual.j2
new file mode 100644
index 0000000000000000000000000000000000000000..e811d9f6b0b94a98607ef67e2bd0f9493fb06b14
--- /dev/null
+++ b/roles/postfix/templates/virtual.j2
@@ -0,0 +1,3 @@
+postmaster@{{ postfix_mailname }} root
+bounces@{{ postfix_mailname }} root
+noreply@{{ postfix_mailname }} devnull
diff --git a/roles/postfix/vars/main.yml b/roles/postfix/vars/main.yml
index e78b6b719ce37cb01bd070fd92c22cef81b80477..be218ae74bb0bc241c75861739becd72edc20179 100644
--- a/roles/postfix/vars/main.yml
+++ b/roles/postfix/vars/main.yml
@@ -1,5 +1,8 @@
 ---
+
+# Packages required for the group
 postfix_packages:
   - postfix
   - bsd-mailx
+
 ...
diff --git a/roles/postgres-ha/README.md b/roles/postgres-ha/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..617b962c0da9318c6d10f48ebbf6e6e00f41d87b
--- /dev/null
+++ b/roles/postgres-ha/README.md
@@ -0,0 +1,13 @@
+# Postgres-ha
+## Description
+
+Overrides the postgres role with high availability elements
+
+## Role Variables
+
+Available variables are listed below, along with the descriptions and the default values.
+
+`repmgr_password`: [HA only] Password of the repmgr DB user (Optional)
+```
+repmgr_password: ""
+```
diff --git a/roles/postgres-ha/defaults/main.yml b/roles/postgres-ha/defaults/main.yml
index 25b077d3263b7590773f13c5f69a015b62e5ae73..7a525979b237e50be8cf243989a58adb4d02054d 100644
--- a/roles/postgres-ha/defaults/main.yml
+++ b/roles/postgres-ha/defaults/main.yml
@@ -1,9 +1,6 @@
 ---
-repmgr_database: repmgr
-repmgr_user: repmgr
-repmgr_password:
-repmgr_roles: LOGIN,REPLICATION,SUPERUSER
-repmgr_timeout: 5
-repmgr_repha_port: 8543
-repmgr_conninfo: host={{ ansible_default_ipv4.address }} dbname={{ repmgr_database }} user={{ repmgr_user }} connect_timeout={{ repmgr_timeout }}
+
+# [HA only] Password of the repmgr DB user
+repmgr_password: ""
+
 ...
diff --git a/roles/postgres-ha/tasks/base.yml b/roles/postgres-ha/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6a88968d5ff72c79544f754b6ee763434ed873bc
--- /dev/null
+++ b/roles/postgres-ha/tasks/base.yml
@@ -0,0 +1,18 @@
+---
+
+- name: BASE CONFIGURE REPMGR
+  ansible.builtin.include_tasks: "base/repmgr.yml"
+
+- name: BASE CONFIGURE SWAPPINESS
+  ansible.builtin.include_tasks: "base/swappiness.yml"
+
+- name: BASE CONFIGURE REPHACHECK
+  ansible.builtin.include_tasks: "base/rephacheck.yml"
+
+- name: BASE CONFIGURE FIREWALL RULES
+  ansible.builtin.import_tasks: ../../shared/tasks/firewall_rules_files.yml
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+...
diff --git a/roles/postgres-ha/tasks/base/rephacheck.yml b/roles/postgres-ha/tasks/base/rephacheck.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7a54dad0f7ad240c7d4bc148fed76c74b40aa653
--- /dev/null
+++ b/roles/postgres-ha/tasks/base/rephacheck.yml
@@ -0,0 +1,63 @@
+---
+
+- name: install rephacheck
+  ansible.builtin.template:
+    src: rephacheck.py.j2
+    dest: /usr/bin/rephacheck
+    mode: "0755"
+
+- name: register variables needed by rephacheck as facts
+  ansible.builtin.set_fact:
+    repmgr_node_name: "{{ repmgr_node_name }}"
+    repmgr_node_id: "{{ repmgr_node_id }}"
+
+- name: configure rephacheck
+  ansible.builtin.template:
+    src: rephacheck.conf.j2
+    dest: /etc/postgresql/{{ repmgr_database_version }}/{{ repmgr_database_cluster }}/rephacheck.conf
+    owner: postgres
+    group: postgres
+    mode: "0644"
+
+- name: configure rephacheck socket
+  notify:
+    - reload systemd
+    - restart rephacheck
+  ansible.builtin.copy:
+    dest: /etc/systemd/system/rephacheck.socket
+    mode: "644"
+    content: |
+      [Unit]
+      Description=RepHACheck socket
+
+      [Socket]
+      ListenStream={{ repmgr_repha_port }}
+      Accept=yes
+
+      [Install]
+      WantedBy=sockets.target
+
+- name: configure rephacheck service
+  notify:
+    - reload systemd
+    - restart rephacheck
+  ansible.builtin.copy:
+    dest: /etc/systemd/system/rephacheck@.service
+    mode: "644"
+    content: |
+      [Unit]
+      Description=RepHACheck - Health check for PostgreSQL cluster managed by repmgr
+
+      [Service]
+      ExecStart=-/usr/bin/rephacheck
+      StandardInput=socket
+      User=postgres
+      Group=postgres
+
+- name: enable and start rephacheck
+  ansible.builtin.service:
+    name: rephacheck.socket
+    state: started
+    enabled: true
+
+...
diff --git a/roles/postgres-ha/tasks/base/repmgr.yml b/roles/postgres-ha/tasks/base/repmgr.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5902c272247d81ea1b61a6aea299800048684c84
--- /dev/null
+++ b/roles/postgres-ha/tasks/base/repmgr.yml
@@ -0,0 +1,37 @@
+---
+
+- name: configure repmgr
+  notify: restart repmgrd
+  ansible.builtin.template:
+    src: repmgr.conf.j2
+    dest: "{{ repmgr_conf_file }}"
+    owner: postgres
+    group: postgres
+    mode: "644"
+
+- name: configure debian default
+  notify: restart repmgrd
+  loop:
+    - key: REPMGRD_ENABLED
+      value: "yes"
+    - key: REPMGRD_CONF
+      value: "{{ repmgr_conf_file }}"
+  ansible.builtin.replace:
+    path: /etc/default/repmgrd
+    regexp: ^#?{{ item.key }}=.*$
+    replace: "{{ item.key }}={{ item.value }}"
+
+- name: configure sudo
+  ansible.builtin.copy:
+    dest: /etc/sudoers.d/postgres
+    validate: visudo -cf %s
+    mode: "440"
+    content: |
+      Defaults:postgres !requiretty
+      postgres ALL=NOPASSWD: \
+        /bin/systemctl start postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}, \
+        /bin/systemctl stop postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}, \
+        /bin/systemctl restart postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}, \
+        /bin/systemctl reload postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}
+
+...
diff --git a/roles/postgres-ha/tasks/base/swappiness.yml b/roles/postgres-ha/tasks/base/swappiness.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c163eda5f860cee8259f6b939d768a54d52258fc
--- /dev/null
+++ b/roles/postgres-ha/tasks/base/swappiness.yml
@@ -0,0 +1,21 @@
+---
+
+- name: configure swappiness
+  ansible.builtin.copy:
+    dest: /etc/sysctl.d/40-swappiness.conf
+    owner: "root"
+    group: "root"
+    mode: "644"
+    content: |
+      # UbiCast Swappiness settings configuration for PostgreSQL on Linux
+
+      # Parameter that controls the relative weight given to swapping out of runtime memory,
+      # as opposed to dropping pages from the system page cache.
+      # A low value causes the kernel to prefer to evict pages from the page cache while
+      # a higher value causes the kernel to prefer to swap out "cold" memory pages.
+      # Swappiness can be set to a value from 0 to 200. Default is 60.
+
+      # See RM#38809 and https://dali.bo/j1_html#configuration-du-swap
+      vm.swappiness = 10
+
+...
diff --git a/roles/postgres-ha/tasks/configure.yml b/roles/postgres-ha/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0a10554bfab73bc4a600d4c77fc9962ec1c58c8e
--- /dev/null
+++ b/roles/postgres-ha/tasks/configure.yml
@@ -0,0 +1,21 @@
+---
+
+- name: CONFIGURE POSTGRESQL-HA
+  ansible.builtin.include_tasks: "configure/postgres-role.yml"
+
+- name: CONFIGURE SSH BETWEEN SERVERS
+  ansible.builtin.include_tasks: "configure/ssh-key.yml"
+
+- name: CONFIGURE POSTGRESQL PRIMARY NODE
+  ansible.builtin.include_tasks: "configure/register-primary.yml"
+
+- name: CONFIGURE POSTGRESQL STANDBY NODE
+  ansible.builtin.include_tasks: "configure/register-standby.yml"
+
+- name: CONFIGURE POSTGRESQL WITNESS NODE
+  ansible.builtin.include_tasks: "configure/register-witness.yml"
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+...
diff --git a/roles/postgres-ha/tasks/configure/postgres-role.yml b/roles/postgres-ha/tasks/configure/postgres-role.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0f74513bdf3a15647e63e0807ca4bfb6097b82b3
--- /dev/null
+++ b/roles/postgres-ha/tasks/configure/postgres-role.yml
@@ -0,0 +1,48 @@
+---
+
+- name: postgresql
+  vars:
+    database_host_authentification:
+      - type: local
+        method: peer
+      - type: hostssl
+        address: 127.0.0.1/32
+      - type: hostssl
+        address: ::1/128
+      - type: hostssl
+        address: 0.0.0.0/0
+      - type: hostssl
+        address: ::/0
+      - type: local
+        database: replication
+        method: peer
+      - type: hostssl
+        database: replication
+        address: 127.0.0.1/32
+      - type: hostssl
+        database: replication
+        address: ::1/128
+      - type: hostssl
+        database: replication
+        address: 0.0.0.0/0
+      - type: hostssl
+        database: replication
+        address: ::/0
+    database_conf:
+      - name: main
+        content: |
+          listen_addresses = '*'
+      - name: modules
+        content: |
+          shared_preload_libraries = 'repmgr'
+    database_users:
+      - name: "{{ repmgr_user }}"
+        password: "{{ repmgr_password }}"
+        roles: "{{ repmgr_roles }}"
+    database_databases:
+      - name: "{{ repmgr_database }}"
+        owner: "{{ repmgr_user }}"
+  ansible.builtin.include_role:
+    name: postgres
+
+...
diff --git a/roles/postgres-ha/tasks/configure/register-primary.yml b/roles/postgres-ha/tasks/configure/register-primary.yml
new file mode 100644
index 0000000000000000000000000000000000000000..88e4a499df7988c7b6404296a671e3949b643ec4
--- /dev/null
+++ b/roles/postgres-ha/tasks/configure/register-primary.yml
@@ -0,0 +1,28 @@
+---
+
+- name: setup primary
+  when: >
+    (database_role is defined and database_role == "primary") or
+    (database_role is undefined and inventory_hostname == groups['postgres'][0]) or
+    (database_role is defined and database_role == "" and inventory_hostname == groups['postgres'][0])
+  block:
+    - name: check if primary already joined
+      become: true
+      become_user: postgres
+      register: repmgr_check_primary
+      community.general.postgresql_query:
+        db: repmgr
+        query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
+
+    - name: register primary
+      become: true
+      become_user: postgres
+      changed_when: true
+      when: repmgr_check_primary.query_result | length == 0
+      notify: restart repmgrd
+      ansible.builtin.command:
+        cmd: repmgr --config-file={{ repmgr_conf_file }} primary register
+
+- ansible.builtin.meta: flush_handlers # noqa name[missing]
+
+...
diff --git a/roles/postgres-ha/tasks/configure/register-standby.yml b/roles/postgres-ha/tasks/configure/register-standby.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1c1a7ec03b673be5ea2c148daf187a2c508592e3
--- /dev/null
+++ b/roles/postgres-ha/tasks/configure/register-standby.yml
@@ -0,0 +1,87 @@
+---
+
+- name: setup standby
+  when: >
+    (database_role is defined and database_role == "standby") or
+    (database_role is undefined and inventory_hostname == groups['postgres'][1]) or
+    (database_role is defined and database_role == "" and inventory_hostname == groups['postgres'][1])
+  block:
+    - name: check if standby already joined
+      become: true
+      become_user: postgres
+      register: repmgr_check_standby
+      community.general.postgresql_query:
+        db: repmgr
+        query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
+
+    - name: stop postgresql service
+      when: repmgr_check_standby.query_result | length == 0
+      ansible.builtin.systemd:
+        name: postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}
+        state: stopped
+
+    - name: remove existing pgdata
+      when: repmgr_check_standby.query_result | length == 0
+      ansible.builtin.command:
+        cmd: mv -vf {{ repmgr_database_data_dir }} {{ repmgr_database_data_dir }}.save
+        removes: "{{ repmgr_database_data_dir }}"
+
+    - name: clone from primary to standby
+      become: true
+      become_user: postgres
+      changed_when: true
+      when: repmgr_check_standby.query_result | length == 0
+      ignore_errors: true
+      register: repmgr_clone_standby
+      ansible.builtin.shell:
+        cmd: |
+          repmgr \
+            --config-file={{ repmgr_conf_file }} \
+            --force \
+            --dbname={{ repmgr_database }} \
+            --host={{ repmgr_primary_node }} \
+            --port=5432 \
+            --username={{ repmgr_user }} \
+            --pgdata={{ repmgr_database_data_dir }} \
+            standby clone --fast-checkpoint
+
+    - name: remove pgdata backup
+      when: repmgr_clone_standby is succeeded
+      ansible.builtin.file:
+        path: "{{ repmgr_database_data_dir }}.save"
+        state: absent
+
+    - name: remove failed clone pgdata
+      when: repmgr_clone_standby is failed
+      ansible.builtin.file:
+        path: "{{ repmgr_database_data_dir }}"
+        state: absent
+
+    - name: restore pgdata backup
+      when: repmgr_clone_standby is failed
+      ansible.builtin.command:
+        cmd: mv -vf {{ repmgr_database_data_dir }}.save {{ repmgr_database_data_dir }}
+        removes: "{{ repmgr_database_data_dir }}.save"
+
+    - name: start postgresql service
+      ansible.builtin.systemd:
+        name: postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}
+        state: started
+
+    - name: standby clone failed
+      when: repmgr_clone_standby is failed
+      ansible.builtin.fail:
+        msg: "{{ repmgr_clone_standby.stderr }}"
+
+    - name: register standby
+      become: true
+      become_user: postgres
+      changed_when: true
+      when: repmgr_check_standby.query_result | length == 0
+      notify: restart repmgrd
+      ansible.builtin.command:
+        cmd: repmgr --config-file={{ repmgr_conf_file }} standby register
+
+- ansible.builtin.meta: flush_handlers # noqa name[missing]
+
+...
diff --git a/roles/postgres-ha/tasks/configure/register-witness.yml b/roles/postgres-ha/tasks/configure/register-witness.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c64039f287c3b05d5e60d094af86cb82510ffc20
--- /dev/null
+++ b/roles/postgres-ha/tasks/configure/register-witness.yml
@@ -0,0 +1,28 @@
+---
+
+- name: setup witness
+  when: >
+    (database_role is defined and database_role == "witness") or
+    (database_role is undefined and inventory_hostname == groups['postgres'][2]) or
+    (database_role is defined and database_role == "" and inventory_hostname == groups['postgres'][2])
+  block:
+    - name: check if witness already joined
+      become: true
+      become_user: postgres
+      register: repmgr_check_witness
+      community.general.postgresql_query:
+        db: repmgr
+        query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
+
+    - name: register witness
+      become: true
+      become_user: postgres
+      changed_when: true
+      when: repmgr_check_witness.query_result | length == 0
+      notify: restart repmgrd
+      ansible.builtin.command:
+        cmd: repmgr --config-file={{ repmgr_conf_file }} --host={{ repmgr_primary_node }} witness register
+
+- ansible.builtin.meta: flush_handlers # noqa name[missing]
+
+...
diff --git a/roles/postgres-ha/tasks/configure/ssh-key.yml b/roles/postgres-ha/tasks/configure/ssh-key.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6236354b6942aaad7f14c0961722fd69ace5298d
--- /dev/null
+++ b/roles/postgres-ha/tasks/configure/ssh-key.yml
@@ -0,0 +1,36 @@
+---
+
+- name: ensure postgres account have a ssh keypair
+  ansible.builtin.user:
+    name: postgres
+    generate_ssh_key: true
+    ssh_key_type: ed25519
+    ssh_key_file: ~postgres/.ssh/id_ed25519
+
+- name: fetch postgres ssh public key
+  register: repmgr_postgres_ssh_pubkey
+  ansible.builtin.slurp:
+    path: ~postgres/.ssh/id_ed25519.pub
+
+- name: register postgres ssh public key as an ansible fact
+  ansible.builtin.set_fact:
+    pubkey: "{{ repmgr_postgres_ssh_pubkey['content'] | b64decode }}"
+
+- name: share postgres ssh public key between cluster members
+  loop: "{{ groups['postgres'] }}"
+  ansible.posix.authorized_key:
+    user: postgres
+    key: "{{ hostvars[item]['pubkey'] }}"
+
+- name: postgres ssh client configuration
+  ansible.builtin.copy:
+    dest: ~postgres/.ssh/config
+    owner: postgres
+    group: postgres
+    mode: "640"
+    content: |
+      IdentityFile ~/.ssh/id_ed25519
+      StrictHostKeyChecking no
+      UserKnownHostsFile /dev/null
+
+...
diff --git a/roles/postgres-ha/tasks/install.yml b/roles/postgres-ha/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..24cfbec887d76901cfce8a1c97dbddac0e6007ef
--- /dev/null
+++ b/roles/postgres-ha/tasks/install.yml
@@ -0,0 +1,12 @@
+---
+
+- name: install packages
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ repmgr_packages }}"
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/postgres-ha/tasks/main.yml b/roles/postgres-ha/tasks/main.yml
index 81bc005be20d04a937f6e3d5106841f157a46238..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/postgres-ha/tasks/main.yml
+++ b/roles/postgres-ha/tasks/main.yml
@@ -1,353 +1,33 @@
 ---
-# INSTALLATION
 
-- name: install packages
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ repmgr_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-# POSTGRESQL
-
-- name: postgresql
-  vars:
-    database_host_authentification:
-      - type: local
-        method: peer
-      - type: hostssl
-        address: 127.0.0.1/32
-      - type: hostssl
-        address: ::1/128
-      - type: hostssl
-        address: 0.0.0.0/0
-      - type: hostssl
-        address: ::/0
-      - type: local
-        database: replication
-        method: peer
-      - type: hostssl
-        database: replication
-        address: 127.0.0.1/32
-      - type: hostssl
-        database: replication
-        address: ::1/128
-      - type: hostssl
-        database: replication
-        address: 0.0.0.0/0
-      - type: hostssl
-        database: replication
-        address: ::/0
-    database_conf:
-      - name: main
-        content: |
-          listen_addresses = '*'
-      - name: modules
-        content: |
-          shared_preload_libraries = 'repmgr'
-    database_users:
-      - name: "{{ repmgr_user }}"
-        password: "{{ repmgr_password }}"
-        roles: "{{ repmgr_roles }}"
-    database_databases:
-      - name: "{{ repmgr_database }}"
-        owner: "{{ repmgr_user }}"
-  ansible.builtin.include_role:
-    name: postgres
-
-# CONFIGURATION
-
-- name: configure repmgr
-  notify: restart repmgrd
-  ansible.builtin.template:
-    src: repmgr.conf.j2
-    dest: "{{ repmgr_conf_file }}"
-    owner: postgres
-    group: postgres
-    mode: "644"
-
-- name: configure debian default
-  notify: restart repmgrd
-  loop:
-    - key: REPMGRD_ENABLED
-      value: "yes"
-    - key: REPMGRD_CONF
-      value: "{{ repmgr_conf_file }}"
-  ansible.builtin.replace:
-    path: /etc/default/repmgrd
-    regexp: ^#?{{ item.key }}=.*$
-    replace: "{{ item.key }}={{ item.value }}"
-
-- name: configure sudo
-  ansible.builtin.copy:
-    dest: /etc/sudoers.d/postgres
-    validate: visudo -cf %s
-    mode: "440"
-    content: |
-      Defaults:postgres !requiretty
-      postgres ALL=NOPASSWD: \
-        /bin/systemctl start postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}, \
-        /bin/systemctl stop postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}, \
-        /bin/systemctl restart postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}, \
-        /bin/systemctl reload postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}
-
-- name: configure swappiness
-  ansible.builtin.copy:
-    dest: /etc/sysctl.d/40-swappiness.conf
-    owner: "root"
-    group: "root"
-    mode: "644"
-    content: |
-      # UbiCast Swappiness settings configuration for PostgreSQL on Linux
-
-      # Parameter that controls the relative weight given to swapping out of runtime memory,
-      # as opposed to dropping pages from the system page cache.
-      # A low value causes the kernel to prefer to evict pages from the page cache while
-      # a higher value causes the kernel to prefer to swap out "cold" memory pages.
-      # Swappiness can be set to a value from 0 to 200. Default is 60.
-
-      # See RM#38809 and https://dali.bo/j1_html#configuration-du-swap
-      vm.swappiness = 10
-
-# SSH
-
-- name: ensure postgres account have a ssh keypair
-  ansible.builtin.user:
-    name: postgres
-    generate_ssh_key: true
-    ssh_key_type: ed25519
-    ssh_key_file: ~postgres/.ssh/id_ed25519
-
-- name: fetch postgres ssh public key
-  register: repmgr_postgres_ssh_pubkey
-  ansible.builtin.slurp:
-    path: ~postgres/.ssh/id_ed25519.pub
-
-- name: register postgres ssh public key as an ansible fact
-  ansible.builtin.set_fact:
-    pubkey: "{{ repmgr_postgres_ssh_pubkey['content'] | b64decode }}"
-
-- name: share postgres ssh public key between cluster members
-  loop: "{{ groups['postgres'] }}"
-  ansible.posix.authorized_key:
-    user: postgres
-    key: "{{ hostvars[item]['pubkey'] }}"
-
-- name: postgres ssh client configuration
-  ansible.builtin.copy:
-    dest: ~postgres/.ssh/config
-    owner: postgres
-    group: postgres
-    mode: "640"
-    content: |
-      IdentityFile ~/.ssh/id_ed25519
-      StrictHostKeyChecking no
-      UserKnownHostsFile /dev/null
-
-# REGISTER PRIMARY
-
-- name: setup primary
-  when: >
-    (database_role is defined and database_role == "primary") or
-    (database_role is undefined and inventory_hostname == groups['postgres'][0]) or
-    (database_role is defined and database_role == "" and inventory_hostname == groups['postgres'][0])
-  block:
-    - name: check if primary already joined
-      become: true
-      become_user: postgres
-      register: repmgr_check_primary
-      community.general.postgresql_query:
-        db: repmgr
-        query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
-
-    - name: register primary
-      become: true
-      become_user: postgres
-      changed_when: true
-      when: repmgr_check_primary.query_result | length == 0
-      notify: restart repmgrd
-      ansible.builtin.command:
-        cmd: repmgr --config-file={{ repmgr_conf_file }} primary register
-
-- ansible.builtin.meta: flush_handlers # noqa name[missing]
-
-# REGISTER STANDBY
-
-- name: setup standby
-  when: >
-    (database_role is defined and database_role == "standby") or
-    (database_role is undefined and inventory_hostname == groups['postgres'][1]) or
-    (database_role is defined and database_role == "" and inventory_hostname == groups['postgres'][1])
-  block:
-    - name: check if standby already joined
-      become: true
-      become_user: postgres
-      register: repmgr_check_standby
-      community.general.postgresql_query:
-        db: repmgr
-        query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
-
-    - name: stop postgresql service
-      when: repmgr_check_standby.query_result | length == 0
-      ansible.builtin.systemd:
-        name: postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}
-        state: stopped
-
-    - name: remove existing pgdata
-      when: repmgr_check_standby.query_result | length == 0
-      ansible.builtin.command:
-        cmd: mv -vf {{ repmgr_database_data_dir }} {{ repmgr_database_data_dir }}.save
-        removes: "{{ repmgr_database_data_dir }}"
-
-    - name: clone from primary to standby
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
       become: true
-      become_user: postgres
-      changed_when: true
-      when: repmgr_check_standby.query_result | length == 0
-      ignore_errors: true
-      register: repmgr_clone_standby
-      ansible.builtin.shell:
-        cmd: |
-          repmgr \
-            --config-file={{ repmgr_conf_file }} \
-            --force \
-            --dbname={{ repmgr_database }} \
-            --host={{ repmgr_primary_node }} \
-            --port=5432 \
-            --username={{ repmgr_user }} \
-            --pgdata={{ repmgr_database_data_dir }} \
-            standby clone --fast-checkpoint
-
-    - name: remove pgdata backup
-      when: repmgr_clone_standby is succeeded
-      ansible.builtin.file:
-        path: "{{ repmgr_database_data_dir }}.save"
-        state: absent
-
-    - name: remove failed clone pgdata
-      when: repmgr_clone_standby is failed
-      ansible.builtin.file:
-        path: "{{ repmgr_database_data_dir }}"
-        state: absent
-
-    - name: restore pgdata backup
-      when: repmgr_clone_standby is failed
-      ansible.builtin.command:
-        cmd: mv -vf {{ repmgr_database_data_dir }}.save {{ repmgr_database_data_dir }}
-        removes: "{{ repmgr_database_data_dir }}.save"
-
-    - name: start postgresql service
-      ansible.builtin.systemd:
-        name: postgresql@{{ repmgr_database_version }}-{{ repmgr_database_cluster }}
-        state: started
-
-    - name: standby clone failed
-      when: repmgr_clone_standby is failed
-      ansible.builtin.fail:
-        msg: "{{ repmgr_clone_standby.stderr }}"
-
-    - name: register standby
-      become: true
-      become_user: postgres
-      changed_when: true
-      when: repmgr_check_standby.query_result | length == 0
-      notify: restart repmgrd
-      ansible.builtin.command:
-        cmd: repmgr --config-file={{ repmgr_conf_file }} standby register
-
-- ansible.builtin.meta: flush_handlers # noqa name[missing]
-
-# REGISTER WITNESS
-
-- name: setup witness
-  when: >
-    (database_role is defined and database_role == "witness") or
-    (database_role is undefined and inventory_hostname == groups['postgres'][2]) or
-    (database_role is defined and database_role == "" and inventory_hostname == groups['postgres'][2])
-  block:
-    - name: check if witness already joined
+      tags:
+        - install
+  tags:
+    - always
+
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
       become: true
-      become_user: postgres
-      register: repmgr_check_witness
-      community.general.postgresql_query:
-        db: repmgr
-        query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
-
-    - name: register witness
+      tags:
+        - base
+  tags:
+    - always
+
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
       become: true
-      become_user: postgres
-      changed_when: true
-      when: repmgr_check_witness.query_result | length == 0
-      notify: restart repmgrd
-      ansible.builtin.command:
-        cmd: repmgr --config-file={{ repmgr_conf_file }} --host={{ repmgr_primary_node }} witness register
-
-- ansible.builtin.meta: flush_handlers # noqa name[missing]
-
-# REPHACHECK
-
-- name: install rephacheck
-  ansible.builtin.template:
-    src: rephacheck.py.j2
-    dest: /usr/bin/rephacheck
-    mode: "0755"
-
-- name: register variables needed by rephacheck as facts
-  ansible.builtin.set_fact:
-    repmgr_node_name: "{{ repmgr_node_name }}"
-    repmgr_node_id: "{{ repmgr_node_id }}"
-
-- name: configure rephacheck
-  ansible.builtin.template:
-    src: rephacheck.conf.j2
-    dest: /etc/postgresql/{{ repmgr_database_version }}/{{ repmgr_database_cluster }}/rephacheck.conf
-    owner: postgres
-    group: postgres
-    mode: "0644"
-
-- name: configure rephacheck socket
-  notify:
-    - reload systemd
-    - restart rephacheck
-  ansible.builtin.copy:
-    dest: /etc/systemd/system/rephacheck.socket
-    mode: "644"
-    content: |
-      [Unit]
-      Description=RepHACheck socket
-
-      [Socket]
-      ListenStream={{ repmgr_repha_port }}
-      Accept=yes
-
-      [Install]
-      WantedBy=sockets.target
-
-- name: configure rephacheck service
-  notify:
-    - reload systemd
-    - restart rephacheck
-  ansible.builtin.copy:
-    dest: /etc/systemd/system/rephacheck@.service
-    mode: "644"
-    content: |
-      [Unit]
-      Description=RepHACheck - Health check for PostgreSQL cluster managed by repmgr
-
-      [Service]
-      ExecStart=-/usr/bin/rephacheck
-      StandardInput=socket
-      User=postgres
-      Group=postgres
-
-- name: enable and start rephacheck
-  ansible.builtin.service:
-    name: rephacheck.socket
-    state: started
-    enabled: true
-
-- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/postgres-ha/vars/main.yml b/roles/postgres-ha/vars/main.yml
index a9ccf705a19ad7b262e9d243f05b7e32ea3795e9..6dadc0ad4419124e72e76aa8012a530d001e04f8 100644
--- a/roles/postgres-ha/vars/main.yml
+++ b/roles/postgres-ha/vars/main.yml
@@ -1,23 +1,56 @@
 ---
 
-repmgr_packages:
-  - repmgr
-  # rephacheck:
-  - python3
-  - python3-psycopg2
-  - python3-toml
+# [HA only] Define the database role on this host. Possible values: primary, standby or witness (if unset or empty, the first server of the "postgres" group becomes primary, the second standby and the third witness automatically)
+database_role: ""
 
-repmgr_database_version: "{{ database_version | default('15') }}"
-repmgr_database_cluster: "{{ database_cluster | default('main') }}"
-repmgr_database_data_dir: /var/lib/postgresql/{{ repmgr_database_version }}/{{ repmgr_database_cluster }}
-repmgr_conf_file: /etc/postgresql/{{ repmgr_database_version }}/{{ repmgr_database_cluster }}/repmgr.conf
+# [HA only] Conninfo parameter populated in the repmgr configuration file
+repmgr_conninfo: "host={{ ansible_default_ipv4.address }} dbname={{ repmgr_database }} user={{ repmgr_user }} connect_timeout={{ repmgr_timeout }}"
 
+# [HA only] Name of the repmgr database
+repmgr_database: "repmgr"
+
+# [HA only] Listening port for rephacheck
+repmgr_repha_port: 8543
+
+# [HA only] List of roles for the repmgr user PostgreSQL pg_hba configuration
+repmgr_roles: "LOGIN,REPLICATION,SUPERUSER"
+
+# [HA only] Timeout value for the repmgr connections
+repmgr_timeout: 5
+
+# [HA only] Username of the repmgr DB user
+repmgr_user: "repmgr"
+
+# [HA only] IP address of the cluster primary node
 repmgr_primary_node: "{{ hostvars[groups['postgres'][0]]['ansible_default_ipv4']['address'] }}"
+
+# [HA only] Cluster node index (0, 1, ...)
 repmgr_node_id: "{{ (groups['postgres'].index(inventory_hostname)) + 1 | int }}"
+
+# [HA only] Cluster node name
 repmgr_node_name: "{{ ansible_hostname }}"
 
-repmgr_conninfo: host={{ ansible_default_ipv4.address }} dbname={{ repmgr_database }} user={{ repmgr_user }} connect_timeout={{ repmgr_timeout }}
+# [HA only] PostgreSQL database version
+repmgr_database_version: "{{ database_version | default('15') }}"
+
+# [HA only] PostgreSQL database cluster name
+repmgr_database_cluster: "{{ database_cluster | default('main') }}"
+
+# [HA only] PostgreSQL database cluster path
+repmgr_database_data_dir: "/var/lib/postgresql/{{ repmgr_database_version }}/{{ repmgr_database_cluster }}"
+
+# [HA only] Repmgr cluster configuration file
+repmgr_conf_file: "/etc/postgresql/{{ repmgr_database_version }}/{{ repmgr_database_cluster }}/repmgr.conf"
+
+# [HA only] Packages required for the group. python3-* packages are used for rephacheck
+repmgr_packages:
+  - repmgr
+  - python3
+  - python3-psycopg2
+  - python3-toml
 
-firewall_rules_files: ['rephacheck']
+# [HA only] Group firewall rules filename, see roles/shared/files/nftables/
+firewall_rules_files:
+  - rephacheck
 
 ...
diff --git a/roles/postgres/README.md b/roles/postgres/README.md
index c7895f604f65b9083b2c8002d945b3dae36c70d7..1863d6d68466f3135b833d78f5a46b40118fbe91 100644
--- a/roles/postgres/README.md
+++ b/roles/postgres/README.md
@@ -9,84 +9,7 @@ The postgres group is used to configure the host with a postgresql database.
 
 Available variables are listed below, along with the descriptions and the default values.
 
-`database_host_authentification`: PostgreSQL user/host connection file (Optional)
-```
-database_host_authentification:
-  - method: peer
-    type: local
-  - address: 127.0.0.1/32
-    type: hostssl
-  - address: ::1/128
-    type: hostssl
-  - database: replication
-    method: peer
-    type: local
-  - address: 127.0.0.1/32
-    database: replication
-    type: hostssl
-  - address: ::1/128
-    database: replication
-    type: hostssl
-```
-
-`database_conf`: PostgreSQL configuration key/values (Optional)
-```
-database_conf:
-  - content: ''
-    name: main
-```
-
-`database_databases`: Dictionnary of extra databases to deploy (with `name` and `owner` keys) (Optional)
-```
-database_databases: []
-```
-
 `database_password`: Password for the postgres admin account
 ```
 database_password: "changeit"
 ```
-
-`database_users`: Dictionnary of extra PostgreSQL users to deploy (with `name`, `password`, `db`, `priv` and `roles` keys) (Optional)
-```
-database_users: []
-```
-
-`database_role`: [HA only] Define database role on this host. Possible values: primary, standby or witness (Optional)
-```
-database_role: ""
-```
-
-`repmgr_conninfo`: [HA only] Conninfo parameter populated in the repmgr configuration file (Optional)
-```
-repmgr_conninfo: "host={{ ansible_default_ipv4.address }} dbname={{ repmgr_database }} user={{ repmgr_user }} connect_timeout={{ repmgr_timeout }}"
-```
-
-`repmgr_database`: [HA only] Name of the repmgr database (Optional)
-```
-repmgr_database: "repmgr"
-```
-
-`repmgr_password`: [HA only] Password of the repmgr DB user (Optional)
-```
-repmgr_password: ""
-```
-
-`repmgr_repha_port`: [HA only] Listening port for rephacheck (Optional)
-```
-repmgr_repha_port: 8543
-```
-
-`repmgr_roles`: [HA only] List of roles for the repmgr user PostgreSQL pg_hba configuration (Optional)
-```
-repmgr_roles: "LOGIN,REPLICATION,SUPERUSER"
-```
-
-`repmgr_timeout`: [HA only] Timeout value for the repmgr connections (Optional)
-```
-repmgr_timeout: 5
-```
-
-`repmgr_user`: [HA only] Username of the repmgr DB user (Optional)
-```
-repmgr_user: "repmgr"
-```
diff --git a/roles/postgres/defaults/main.yml b/roles/postgres/defaults/main.yml
index b047120bc0228aaffcdeeda0c3993b8c6c7dbd2b..6ab81de446a5c5eeb56a09e701ecef8f91892574 100644
--- a/roles/postgres/defaults/main.yml
+++ b/roles/postgres/defaults/main.yml
@@ -1,26 +1,6 @@
 ---
-database_password: "changeit"
-database_users: []
-database_databases: []
 
-database_conf:
-  - name: main
-    content:
+# Password for the postgres admin account
+database_password: "changeit"
 
-database_host_authentification:
-  - type: local
-    method: peer
-  - type: hostssl
-    address: 127.0.0.1/32
-  - type: hostssl
-    address: ::1/128
-  - type: local
-    database: replication
-    method: peer
-  - type: hostssl
-    database: replication
-    address: 127.0.0.1/32
-  - type: hostssl
-    database: replication
-    address: ::1/128
 ...
diff --git a/roles/postgres/tasks/base.yml b/roles/postgres/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d4b789a09c62777afb42dedc32e769a08e8701a2
--- /dev/null
+++ b/roles/postgres/tasks/base.yml
@@ -0,0 +1,11 @@
+---
+
+- name: BASE CONFIGURE POSTGRESQL LOGROTATE
+  ansible.builtin.include_tasks: "base/logrotate.yml"
+
+- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+...
diff --git a/roles/postgres/tasks/base/logrotate.yml b/roles/postgres/tasks/base/logrotate.yml
new file mode 100644
index 0000000000000000000000000000000000000000..10d7be3deaff96cfe88c43765704057afdad8057
--- /dev/null
+++ b/roles/postgres/tasks/base/logrotate.yml
@@ -0,0 +1,12 @@
+---
+
+- name: update logrotate config
+  ansible.builtin.copy:
+    src: logrotate-postgresql
+    dest: /etc/logrotate.d/postgresql-common
+    owner: root
+    group: root
+    backup: false
+    mode: "644"
+
+...
diff --git a/roles/postgres/tasks/configure.yml b/roles/postgres/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5d2cf0f1ada0c9258f2d5ee90a701e7f14d4c381
--- /dev/null
+++ b/roles/postgres/tasks/configure.yml
@@ -0,0 +1,24 @@
+---
+
+# Configuration files must stay here even without variables
+# This role can be called from postgres-ha (with configure tag) with custom variables
+
+- name: CONFIGURE POSTGRESQL CUSTOM SETTINGS
+  ansible.builtin.include_tasks: "configure/custom-settings.yml"
+
+- name: CONFIGURE POSTGRESQL EXTERNAL AUTHENTICATIONS
+  ansible.builtin.include_tasks: "configure/authentication.yml"
+
+- name: CONFIGURE POSTGRESQL INTERNAL USERS
+  ansible.builtin.include_tasks: "configure/users.yml"
+  when:
+    - database_password is defined
+
+- name: CREATE POSTGRESQL DATABASES
+  ansible.builtin.include_tasks: "configure/databases.yml"
+
+# Flush to restart postgresql in time for HA deployment
+- name: Flush handlers
+  meta: flush_handlers
+
+...
diff --git a/roles/postgres/tasks/configure/authentication.yml b/roles/postgres/tasks/configure/authentication.yml
new file mode 100644
index 0000000000000000000000000000000000000000..39fab0029e9af3662ec0939364d255857fda3738
--- /dev/null
+++ b/roles/postgres/tasks/configure/authentication.yml
@@ -0,0 +1,13 @@
+---
+
+- name: configure authentication
+  notify: restart postgresql
+  ansible.builtin.template:
+    src: pg_hba.conf.j2
+    dest: "{{ database_conf_dir }}/pg_hba.conf"
+    owner: postgres
+    group: postgres
+    mode: "0640"
+    backup: true
+
+...
diff --git a/roles/postgres/tasks/configure/custom-settings.yml b/roles/postgres/tasks/configure/custom-settings.yml
new file mode 100644
index 0000000000000000000000000000000000000000..19dafc34064c083a0d3e45ceb514c8fc11b54116
--- /dev/null
+++ b/roles/postgres/tasks/configure/custom-settings.yml
@@ -0,0 +1,15 @@
+---
+
+- name: configure custom settings
+  notify: restart postgresql
+  when: item.content | d(false)
+  ansible.builtin.copy:
+    dest: "{{ database_conf_dir }}/conf.d/{{ item.name }}.conf"
+    owner: postgres
+    group: postgres
+    backup: true
+    content: "{{ item.content }}"
+    mode: "644"
+  loop: "{{ database_conf }}"
+
+...
diff --git a/roles/postgres/tasks/configure/databases.yml b/roles/postgres/tasks/configure/databases.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f5424435156109399981c460e34bfe45e8292c4a
--- /dev/null
+++ b/roles/postgres/tasks/configure/databases.yml
@@ -0,0 +1,11 @@
+---
+
+- name: create databases
+  become: true
+  become_user: postgres
+  loop: "{{ database_databases }}"
+  community.general.postgresql_db:
+    name: "{{ item.name }}"
+    owner: "{{ item.owner | d(omit) }}"
+
+...
diff --git a/roles/postgres/tasks/configure/users.yml b/roles/postgres/tasks/configure/users.yml
new file mode 100644
index 0000000000000000000000000000000000000000..42dbcc01311cd0f825d0760308acc04b6b1c93f9
--- /dev/null
+++ b/roles/postgres/tasks/configure/users.yml
@@ -0,0 +1,34 @@
+---
+
+- name: set superuser password
+  become: true
+  become_user: postgres
+  no_log: true
+  community.general.postgresql_user:
+    name: postgres
+    password: "{{ database_password | d(omit) }}"
+
+- name: manage users
+  become: true
+  become_user: postgres
+  no_log: true
+  loop: "{{ database_users }}"
+  community.general.postgresql_user:
+    name: "{{ item.name }}"
+    password: "{{ item.password | d(omit) }}"
+    db: "{{ item.db | d(omit) }}"
+    priv: "{{ item.priv | d(omit) }}"
+    role_attr_flags: "{{ item.roles | d(omit) }}"
+
+- name: set .pgpass to allow passwordless connection
+  loop: "{{ query('nested', ['root', 'postgres'], database_users) }}"
+  ansible.builtin.blockinfile:
+    path: ~{{ item.0 }}/.pgpass
+    block: "*:*:*:{{ item.1.name }}:{{ item.1.password }}"
+    marker: "# {mark} {{ item.1.name }}"
+    create: true
+    owner: "{{ item.0 }}"
+    group: "{{ item.0 }}"
+    mode: "0600"
+
+...
diff --git a/roles/postgres/tasks/install.yml b/roles/postgres/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f43a8cd47931284ab9ce4b22e644697f10717227
--- /dev/null
+++ b/roles/postgres/tasks/install.yml
@@ -0,0 +1,12 @@
+---
+
+- name: install packages
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ database_packages }}"
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/postgres/tasks/main.yml b/roles/postgres/tasks/main.yml
index f94f838d4638186597979b4a546fba6361ecc28d..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/postgres/tasks/main.yml
+++ b/roles/postgres/tasks/main.yml
@@ -1,114 +1,33 @@
 ---
 
-- name: install packages
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ database_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-# CONFIGURATION
-
-- name: update logrotate config
-  ansible.builtin.copy:
-    src: logrotate-postgresql
-    dest: /etc/logrotate.d/postgresql-common
-    owner: root
-    group: root
-    backup: false
-    mode: "644"
-
-- name: ensure conf directory exists
-  ansible.builtin.file:
-    path: "{{ database_conf_dir }}/conf.d"
-    owner: postgres
-    group: postgres
-    state: directory
-    mode: "755"
-
-- name: ensure conf directory is included
-  ansible.builtin.replace:
-    path: "{{ database_conf_dir }}/postgresql.conf"
-    backup: true
-    regexp: ^#?include_dir = '[A-Za-z\.]+'(\s+.*)$
-    replace: include_dir = 'conf.d'\1
-
-- name: configure custom settings
-  notify: restart postgresql
-  loop: "{{ database_conf }}"
-  when: item.content | d(false)
-  ansible.builtin.copy:
-    dest: "{{ database_conf_dir }}/conf.d/{{ item.name }}.conf"
-    owner: postgres
-    group: postgres
-    backup: true
-    content: "{{ item.content }}"
-    mode: "644"
-
-- name: configure authentication
-  notify: restart postgresql
-  ansible.builtin.template:
-    src: pg_hba.conf.j2
-    dest: "{{ database_conf_dir }}/pg_hba.conf"
-    owner: postgres
-    group: postgres
-    mode: "0640"
-    backup: true
-
-- name: ensure service is enabled and running
-  ansible.builtin.systemd:
-    name: postgresql@{{ database_version }}-{{ database_cluster }}
-    enabled: true
-    state: started
-
-# USERS
-
-- name: set superuser password
-  become: true
-  become_user: postgres
-  no_log: true
-  community.general.postgresql_user:
-    name: postgres
-    password: "{{ database_password | d(omit) }}"
-
-- name: manage users
-  become: true
-  become_user: postgres
-  no_log: true
-  loop: "{{ database_users }}"
-  community.general.postgresql_user:
-    name: "{{ item.name }}"
-    password: "{{ item.password | d(omit) }}"
-    db: "{{ item.db | d(omit) }}"
-    priv: "{{ item.priv | d(omit) }}"
-    role_attr_flags: "{{ item.roles | d(omit) }}"
-
-- name: set .pgpass to allow passwordless connection
-  loop: "{{ query('nested', ['root', 'postgres'], database_users) }}"
-  ansible.builtin.blockinfile:
-    path: ~{{ item.0 }}/.pgpass
-    block: "*:*:*:{{ item.1.name }}:{{ item.1.password }}"
-    marker: "# {mark} {{ item.1.name }}"
-    create: true
-    owner: "{{ item.0 }}"
-    group: "{{ item.0 }}"
-    mode: "0600"
-
-# DATABASES
-
-- name: create databases
-  become: true
-  become_user: postgres
-  loop: "{{ database_databases }}"
-  community.general.postgresql_db:
-    name: "{{ item.name }}"
-    owner: "{{ item.owner | d(omit) }}"
-
-- import_tasks: ../../shared/tasks/firewall_rules_files.yml  # noqa: name[missing]
-
-- name: flush handlers
-  ansible.builtin.meta: flush_handlers
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
+
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
+
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/postgres/vars/main.yml b/roles/postgres/vars/main.yml
index 8a7e782fc85c1f9bea15acee4f007c0d98b3c12a..b292e3bad66cad7c216b7f026b405caf5efebf79 100644
--- a/roles/postgres/vars/main.yml
+++ b/roles/postgres/vars/main.yml
@@ -1,14 +1,51 @@
 ---
 
+# Packages required for the group. python3-psycopg2 is used by ansible
 database_packages:
   - acl
   - postgresql
-  - python3-psycopg2 # required by ansible
+  - python3-psycopg2
 
-database_version: 15
-database_cluster: main
-database_conf_dir: /etc/postgresql/{{ database_version }}/{{ database_cluster }}
+# PostgreSQL database version
+database_version: "15"
 
-firewall_rules_files: ['postgres']
+# PostgreSQL database cluster name
+database_cluster: "main"
+
+# PostgreSQL database cluster path
+database_conf_dir: "/etc/postgresql/{{ database_version }}/{{ database_cluster }}"
+
+# PostgreSQL user/host connection file
+database_host_authentification:
+  - method: peer
+    type: local
+  - address: 127.0.0.1/32
+    type: hostssl
+  - address: ::1/128
+    type: hostssl
+  - database: replication
+    method: peer
+    type: local
+  - address: 127.0.0.1/32
+    database: replication
+    type: hostssl
+  - address: ::1/128
+    database: replication
+    type: hostssl
+
+# PostgreSQL configuration key/values
+database_conf:
+  - content: ''
+    name: main
+
+# List of extra databases to deploy (dicts with `name` and `owner` keys)
+database_databases: []
+
+# List of extra PostgreSQL users to deploy (dicts with `name`, `password`, `db`, `priv` and `roles` keys)
+database_users: []
+
+# Group firewall rules filename, see roles/shared/files/nftables/
+firewall_rules_files:
+  - postgres
 
 ...
diff --git a/roles/proxy/README.md b/roles/proxy/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f3bed31994b4c7618c34b50c76cd0f7f7895be65
--- /dev/null
+++ b/roles/proxy/README.md
@@ -0,0 +1,29 @@
+# Proxy
+## Description
+
+Install and configure a mandatory proxy for several applications
+
+## Role Variables
+
+Available variables are listed below, along with the descriptions and the default values.
+
+`proxy_http`: Proxy URL for HTTP calls (complete URL with protocol) (Optional)
+```
+proxy_http: ""
+```
+
+`proxy_https`: Proxy URL for HTTPS calls (complete URL with protocol) (Optional)
+```
+proxy_https: ""
+```
+
+`proxy_exclude`: List of URLs that do not use the proxy (Optional)
+```
+proxy_exclude:
+  - localhost
+  - 127.0.0.1
+  - ::1
+  - nudgis.example.com
+  - manager.example.com
+  - monitor.example.com
+```
diff --git a/roles/proxy/defaults/main.yml b/roles/proxy/defaults/main.yml
index a6c7f8e11039d889fadf7b21db8a7314cae8f835..ab71ca12a88bb6b76016cbe0cdfe1612c18ada59 100644
--- a/roles/proxy/defaults/main.yml
+++ b/roles/proxy/defaults/main.yml
@@ -1,13 +1,18 @@
 ---
 
+# Proxy URL for HTTP calls (complete URL with protocol)
 proxy_http: ""
+
+# Proxy URL for HTTPS calls (complete URL with protocol)
 proxy_https: ""
+
+# List of URLs that do not use the proxy
 proxy_exclude:
   - localhost
   - 127.0.0.1
   - ::1
   - nudgis.example.com
-  - mirismanager.example.com
+  - manager.example.com
   - monitor.example.com
 
 ...
diff --git a/roles/proxy/tasks/configure.yml b/roles/proxy/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1c36181a439a2c02ef5003f90e87f4013f5a3051
--- /dev/null
+++ b/roles/proxy/tasks/configure.yml
@@ -0,0 +1,10 @@
+---
+
+- name: CONFIGURE MANDATORY PROXY
+  ansible.builtin.include_tasks: "configure/proxy-configure.yml"
+  when:
+    - proxy_http is defined
+    - proxy_https is defined
+    - proxy_exclude is defined
+
+...
diff --git a/roles/proxy/tasks/configure/proxy-configure.yml b/roles/proxy/tasks/configure/proxy-configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4564c3e643c931d1bb4d8c01aa32df040f6736f0
--- /dev/null
+++ b/roles/proxy/tasks/configure/proxy-configure.yml
@@ -0,0 +1,54 @@
+---
+
+- name: Gather the package facts
+  ansible.builtin.package_facts:
+    manager: auto
+
+- name: environment
+  ansible.builtin.blockinfile:
+    path: /etc/environment
+    create: true
+    marker_begin: BEGIN PROXY
+    marker_end: END PROXY
+    mode: "644"
+    block: |
+      http_proxy={{ proxy_http }}
+      HTTP_PROXY={{ proxy_http }}
+      https_proxy={{ proxy_https }}
+      HTTPS_PROXY={{ proxy_https }}
+      no_proxy={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
+      NO_PROXY={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
+
+- name: apt
+  ansible.builtin.copy:
+    dest: /etc/apt/apt.conf.d/proxy
+    mode: "644"
+    content: |
+      Acquire::http::Proxy "{{ proxy_http }}";
+      Acquire::https::Proxy "{{ proxy_https }}";
+
+- name: wget
+  when: "'wget' in ansible_facts.packages"
+  ansible.builtin.copy:
+    dest: /etc/wgetrc
+    mode: "644"
+    content: |
+      use_proxy=yes
+      http_proxy={{ proxy_http }}
+      https_proxy={{ proxy_https }}
+      no_proxy={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
+
+- name: git
+  when: "'git' in ansible_facts.packages"
+  community.general.git_config:
+    name: "{{ item.name }}"
+    scope: global
+    value: "{{ item.value }}"
+    state: present
+  loop:
+    - name: http.proxy
+      value: "{{ proxy_http }}"
+    - name: https.proxy
+      value: "{{ proxy_https }}"
+
+...
diff --git a/roles/proxy/tasks/main.yml b/roles/proxy/tasks/main.yml
index 79d7549043f18ba8815609b2361fa78985fc68bd..ed7b95b464b45e6978ac2fab24921337540abc67 100644
--- a/roles/proxy/tasks/main.yml
+++ b/roles/proxy/tasks/main.yml
@@ -1,62 +1,13 @@
 ---
 
-- name: if proxy settings are set
-  when:
-    - proxy_http | d(false)
-    - proxy_https | d(false)
-  block:
-    - name: environment
-      ansible.builtin.blockinfile:
-        path: /etc/environment
-        create: true
-        marker_begin: BEGIN PROXY
-        marker_end: END PROXY
-        mode: "644"
-        block: |
-          http_proxy={{ proxy_http }}
-          HTTP_PROXY={{ proxy_http }}
-          https_proxy={{ proxy_https }}
-          HTTPS_PROXY={{ proxy_https }}
-          no_proxy={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
-          NO_PROXY={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
-
-    - name: apt
-      ansible.builtin.copy:
-        dest: /etc/apt/apt.conf.d/proxy
-        mode: "644"
-        content: |
-          Acquire::http::Proxy "{{ proxy_http }}";
-          Acquire::https::Proxy "{{ proxy_https }}";
-
-    - name: wget
-      ansible.builtin.copy:
-        dest: /etc/wgetrc
-        mode: "644"
-        content: |
-          use_proxy=yes
-          http_proxy={{ proxy_http }}
-          https_proxy={{ proxy_https }}
-          no_proxy={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
-
-    - name: install git
-      ansible.builtin.apt:
-        force_apt_get: true
-        install_recommends: false
-        name: git
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: git
-      loop:
-        - name: http.proxy
-          value: "{{ proxy_http }}"
-        - name: https.proxy
-          value: "{{ proxy_https }}"
-      community.general.git_config:
-        name: "{{ item.name }}"
-        scope: global
-        value: "{{ item.value }}"
-        state: present
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/shared/handlers/munin-node.yml b/roles/shared/handlers/munin-node.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3a71cbb8efcc92679684d76ea3c1059a61bd9ad5
--- /dev/null
+++ b/roles/shared/handlers/munin-node.yml
@@ -0,0 +1,15 @@
+---
+
+- name: Setup munin-node plugins link
+  ansible.builtin.shell:
+    munin-node-configure --shell --remove-also 2>&1 | sh -x  # noqa risky-shell-pipe
+  # sh -x prints the executed commands to stderr
+  register: munin_plugin_linked
+  changed_when: munin_plugin_linked.stderr | length > 0
+
+- name: Restart munin-node service
+  ansible.builtin.service:
+    name: munin-node
+    state: restarted
+
+...
diff --git a/roles/shared/tasks/celerity_base_config.yml b/roles/shared/tasks/celerity_base_config.yml
deleted file mode 100644
index fde98bae52859161bb826e0b2dfaf1f77519a587..0000000000000000000000000000000000000000
--- a/roles/shared/tasks/celerity_base_config.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-
-- name: copy celerity example configuration
-  notify: "restart {{ _celerity_service }}"
-  ansible.builtin.copy:
-    remote_src: true
-    src: /etc/celerity/config.example.py
-    dest: /etc/celerity/config.py
-    mode: preserve
-    force: false
-
-- name: celerity base configuration
-  notify: "restart {{ _celerity_service }}"
-  ansible.builtin.replace:
-    path: /etc/celerity/config.py
-    regexp: '^(\s*)#?{{ item.name }}\s*=.*$'
-    replace: '\1{{ item.name }} = {{ item.value }}'
-  loop:
-    - { name: 'SERVER_URL',  value: '"https://{{ celerity_server_domain }}:6200"' } # noqa: yaml[commas]
-    - { name: 'SIGNING_KEY', value: '"{{ celerity_signing_key }}"' }
-  when: item.value != "" and item.value != '""'
-
-- name: celerity add nudgis portal
-  notify: "restart {{ _celerity_service }}"
-  ansible.builtin.command:
-    cmd: >
-      celerity-configurator add-portal
-        'ms_id=1_{{ nudgis_front_system_user }}'
-        'url=https://{{ nudgis_front_domain }}'
-        'api_key={{ nudgis_front_api_key }}'
-  register: celerity_add_portal
-  changed_when: celerity_add_portal.stdout != 'The configuration file is already up to date.'
-
-- name: "ensure {{ _celerity_service }} is running"
-  ansible.builtin.service:
-    name: "{{ _celerity_service }}"
-    enabled: true
-    state: started
-
-...
diff --git a/roles/sysconfig/README.md b/roles/sysconfig/README.md
index 9214652c4b8e301fe9640edcbaa27f9bc8e991e8..d517e97d15d340fc344d4546dd7e6a98140df878 100644
--- a/roles/sysconfig/README.md
+++ b/roles/sysconfig/README.md
@@ -1,7 +1,7 @@
 # Sysconfig
 ## Description
 
-Used by the "base" metagroup to provide core system utilities and their configuration
+The sysconfig group handles the installation and configuration of system components.
 
 ## Role Variables
 
@@ -12,11 +12,6 @@ Available variables are listed below, along with the descriptions and the defaul
 offline_mode: False
 ```
 
-`repos_release`: Debian distribution short name (example: "bullseye") (Optional)
-```
-repos_release: "{{ ansible_distribution_release }}"
-```
-
 `repos_debian_prefix`: Prefix of the debian repositories, with the protocol (example: "http://"). Used when the apt-cacher-ng of the Nudgis Manager proxifies the debian repositories. (Optional)
 ```
 repos_debian_prefix: "http://"
diff --git a/roles/sysconfig/defaults/main.yml b/roles/sysconfig/defaults/main.yml
index c75c20c193dede3f791592508b26bcecd7189ffe..feda680433ebf2f5dc6ce1a27af39aae9744467f 100644
--- a/roles/sysconfig/defaults/main.yml
+++ b/roles/sysconfig/defaults/main.yml
@@ -1,14 +1,34 @@
 ---
 
-offline_mode: false
+# Do not configure any repository and use local repository
+offline_mode: false
 
-repos_release: "{{ ansible_distribution_release }}"
-repos_ubicast_packages_domain: "manager.example.com"
+# Prefix of the debian repositories, with the protocol (example: "http://"). Used when the apt-cacher-ng of the Nudgis Manager proxifies the debian repositories.
+repos_debian_prefix: "http://"
+
+# Value for the system locale
+init_locale: "en_GB.UTF-8"
+
+# List of NTP servers to use on the systems
+ntp_servers:
+  - 0.debian.pool.ntp.org
+  - 1.debian.pool.ntp.org
+  - 2.debian.pool.ntp.org
+  - 3.debian.pool.ntp.org
+
+# Token used in the UbiCast debian repository URL
 repos_ubicast_packages_token: "XXXX-XXXX-XXXX-XXXX-XXXX"
 
-init_locale: 'en_GB.UTF-8'
+# Domain of the UbiCast debian packages repository URL
+repos_ubicast_packages_domain: "manager.example.com"
+
+# Timezone to set on the servers (`timedatectl list-timezones` for the complete list)
 init_timezone: "Europe/Paris"
 
-ntp_servers: "0.pool.ntp.org,1.pool.ntp.org,2.pool.ntp.org,3.pool.ntp.org"
+# Domain to use for the Debian repositories
+repos_debian_packages_domain: "deb.debian.org"
+
+# Domain to use for the Debian security repositories
+repos_debian_security_packages_domain: "security.debian.org"
 
 ...
diff --git a/roles/sysconfig/handlers/main.yml b/roles/sysconfig/handlers/main.yml
index fdd91e45e7e7bd9ad76b881ed1622b753e8450f5..482ca72a658268ee6f080a0e3962692cc76745d6 100644
--- a/roles/sysconfig/handlers/main.yml
+++ b/roles/sysconfig/handlers/main.yml
@@ -1,26 +1,9 @@
 ---
-- name: update cache
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    update_cache: true
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: systemd daemon reload
-  ansible.builtin.systemd:
-    daemon_reload: true
 
-- name: update locale
-  ansible.builtin.command: locale-gen
-  # we want to execute locale generation at every execution
-  changed_when: false
-
-- name: restart cron
-  ansible.builtin.service:
-    name: cron
-    state: restarted
+# - name: restart cron
+#   ansible.builtin.service:
+#     name: cron
+#     state: restarted
 
 - name: restart sshd
   ansible.builtin.service:
@@ -37,12 +20,17 @@
     name: systemd-timesyncd
     state: restarted
 
-- name: update cache
+- name: trigger dpkg-reconfigure tzdata
+  # https://unix.stackexchange.com/questions/451709/timedatectl-set-timezone-doesnt-update-etc-timezone
+  # timedatectl does not update the /etc/timezone file...
+  ansible.builtin.command:
+    cmd: dpkg-reconfigure --frontend noninteractive tzdata
+
+- name: trigger apt update
   ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
     update_cache: true
   register: apt_status
   retries: 60
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
 ...
diff --git a/roles/sysconfig/tasks/base.yml b/roles/sysconfig/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0b1dff7c219499c8477cca7dbc0a2768f33daf51
--- /dev/null
+++ b/roles/sysconfig/tasks/base.yml
@@ -0,0 +1,37 @@
+---
+
+- name: DEBIAN APT REPOSITORIES
+  ansible.builtin.include_tasks: "base/apt_debian.yml"
+  when:
+    - offline_mode is defined and not offline_mode
+    - repos_debian_prefix is defined
+    - repos_debian_packages_domain is defined
+    - repos_debian_security_packages_domain is defined
+
+- name: UBICAST APT REPOSITORIES
+  ansible.builtin.include_tasks: "base/apt_ubicast.yml"
+  when:
+    - offline_mode is defined and not offline_mode
+    - repos_ubicast_packages_token is defined
+    - repos_ubicast_packages_domain is defined
+
+# Flush to force `apt update` with the new repositories
+- name: Flush handlers
+  meta: flush_handlers
+
+- name: BASE CONFIGURE SSH CLIENT
+  ansible.builtin.include_tasks: "base/ssh-client.yml"
+
+- name: BASE CONFIGURE UNATTENDED-UPGRADES
+  ansible.builtin.include_tasks: "base/unattended_upgrades.yml"
+
+- name: BASE CONFIGURE SSH SERVER
+  ansible.builtin.include_tasks: "base/ssh-server.yml"
+
+- name: BASE CONFIGURE PROMPT
+  ansible.builtin.include_tasks: "base/prompt.yml"
+
+- name: BASE CONFIGURE LOGS
+  ansible.builtin.include_tasks: "base/logs.yml"
+
+...
diff --git a/roles/sysconfig/tasks/base/apt_debian.yml b/roles/sysconfig/tasks/base/apt_debian.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9dde8847a96a7a3ef9afee405a2d679fe0eab518
--- /dev/null
+++ b/roles/sysconfig/tasks/base/apt_debian.yml
@@ -0,0 +1,31 @@
+---
+
+- name: "Add Debian packages repository"
+  when: not offline_mode
+  notify: trigger apt update
+  ansible.builtin.deb822_repository:
+    name: "debian"
+    types: "deb"
+    uris: "{{ repos_debian_prefix }}{{ repos_debian_packages_domain }}/debian"
+    suites:
+      - "{{ repos_release }}"
+      - "{{ repos_release }}-updates"
+    components: "main"
+
+- name: "Add Debian security repository"
+  when: not offline_mode
+  notify: trigger apt update
+  ansible.builtin.deb822_repository:
+    name: "debian-security"
+    types: "deb"
+    uris: "{{ repos_debian_prefix }}{{ repos_debian_packages_domain }}/debian-security"
+    suites: "{{ repos_release }}-security"
+    components: "main"
+
+# The file is emptied rather than removed because unattended-upgrades recreates it if absent
+- name: "Empty non-deb822 base repository file"
+  ansible.builtin.copy:
+    content: ""
+    dest: "/etc/apt/sources.list"
+
+...
diff --git a/roles/sysconfig/tasks/base/apt_ubicast.yml b/roles/sysconfig/tasks/base/apt_ubicast.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4bd02050496e21c8c71f935491aa33ed9d1cf513
--- /dev/null
+++ b/roles/sysconfig/tasks/base/apt_ubicast.yml
@@ -0,0 +1,41 @@
+---
+
+- name: "Prepare the {{ sysconfig_repos_keyrings_dir }} directory"
+  ansible.builtin.file:
+    path: "{{ sysconfig_repos_keyrings_dir }}"
+    state: directory
+    mode: "755"
+
+- name: "Add UbiCast repository public key"
+  ansible.builtin.get_url:
+    url: "https://{{ repos_ubicast_packages_domain }}/media/public.gpg"
+    dest: "{{ sysconfig_repos_keyrings_dir }}/{{ repos_ubicast_packages_domain }}.asc"
+    mode: "644"
+
+- name: "Add UbiCast solutions repository"
+  notify: trigger apt update
+  ansible.builtin.deb822_repository:
+    name: "ubicast"
+    types: "deb"
+    uris: "https://{{ repos_ubicast_packages_domain }}"
+    suites: "packaging/apt/{{ repos_ubicast_packages_token }}/{{ repos_release }}/"
+    signed_by: "{{ sysconfig_repos_keyrings_dir }}/{{ repos_ubicast_packages_domain }}.asc"
+
+- name: "Add UbiCast security repository"
+  notify: trigger apt update
+  ansible.builtin.deb822_repository:
+    name: "ubicast-security"
+    types: "deb"
+    uris: "https://{{ repos_ubicast_packages_domain }}"
+    suites: "packaging/apt/ubicast-security-updates/{{ repos_release }}/"
+    signed_by: "{{ sysconfig_repos_keyrings_dir }}/{{ repos_ubicast_packages_domain }}.asc"
+
+- name: allow automatic updates for ubicast security repo
+  ansible.builtin.lineinfile:
+    path: /etc/apt/apt.conf.d/50unattended-upgrades
+    insertafter: ^Unattended-Upgrade::Origins-Pattern {$
+    line: '        "origin=UbiCast,label=UbiCast-Security";'
+    backup: true
+  notify: restart unattended-upgrades
+
+...
diff --git a/roles/sysconfig/tasks/logs.yml b/roles/sysconfig/tasks/base/logs.yml
similarity index 100%
rename from roles/sysconfig/tasks/logs.yml
rename to roles/sysconfig/tasks/base/logs.yml
diff --git a/roles/sysconfig/tasks/base/prompt.yml b/roles/sysconfig/tasks/base/prompt.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1b0015f51baf61c266d1fb6af50f1728e034cf64
--- /dev/null
+++ b/roles/sysconfig/tasks/base/prompt.yml
@@ -0,0 +1,14 @@
+---
+
+- name: set issue file
+  ansible.builtin.copy:
+    content: |
+      \e{lightblue} © UBICAST\e{reset}
+
+      IP Address:
+      \e{bold}\4\e{reset}
+
+    dest: /etc/issue
+    mode: "644"
+
+...
diff --git a/roles/sysconfig/tasks/base/ssh-client.yml b/roles/sysconfig/tasks/base/ssh-client.yml
new file mode 100644
index 0000000000000000000000000000000000000000..857d539fa0d943f9a403e1356224651887223534
--- /dev/null
+++ b/roles/sysconfig/tasks/base/ssh-client.yml
@@ -0,0 +1,11 @@
+---
+
+- name: generate root ssh key pair
+  register: conf_root
+  ansible.builtin.user:
+    name: root
+    generate_ssh_key: true
+    ssh_key_type: ed25519
+    ssh_key_file: .ssh/id_ed25519
+
+...
diff --git a/roles/sysconfig/tasks/base/ssh-server.yml b/roles/sysconfig/tasks/base/ssh-server.yml
new file mode 100644
index 0000000000000000000000000000000000000000..66467befedd4489ce3a8ac669bd0e469f5ce349e
--- /dev/null
+++ b/roles/sysconfig/tasks/base/ssh-server.yml
@@ -0,0 +1,29 @@
+---
+
+- name: verify root user ssh authorized key file
+  ansible.builtin.stat:
+    path: /root/.ssh/authorized_keys
+  register: auth
+
+- name: fail if the root ssh authorized key is missing or empty
+  ansible.builtin.fail:
+    msg: "Error: root user does not have any ssh key configured !\n\
+          Cannot configure PermitRootLogin to without-password"
+  when: not auth.stat.exists or auth.stat.size == 0
+
+- name: enable root login via ssh with key only
+  ansible.builtin.replace:
+    dest: /etc/ssh/sshd_config
+    regexp: ^#?PermitRootLogin.*
+    replace: PermitRootLogin prohibit-password
+  notify: restart sshd
+
+- name: remove disabled root login
+  ansible.builtin.replace:
+    dest: /root/.ssh/authorized_keys
+    regexp: ^no-port-forwarding,(.+) ssh-
+    replace: ssh-
+    mode: "600"
+  failed_when: false
+
+...
diff --git a/roles/sysconfig/tasks/base/unattended_upgrades.yml b/roles/sysconfig/tasks/base/unattended_upgrades.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c8cab7db76bd8cab359fad2c18d3b14d4478e44c
--- /dev/null
+++ b/roles/sysconfig/tasks/base/unattended_upgrades.yml
@@ -0,0 +1,18 @@
+---
+
+- name: enable unattended upgrades
+  ansible.builtin.copy:
+    dest: /etc/apt/apt.conf.d/20auto-upgrades
+    content: |
+      APT::Periodic::Update-Package-Lists "1";
+      APT::Periodic::Unattended-Upgrade "1";
+    mode: "644"
+
+- name: remove old kernel with unattended-upgrades
+  ansible.builtin.replace:
+    dest: /etc/apt/apt.conf.d/50unattended-upgrades
+    regexp: ^//Unattended-Upgrade::Remove-Unused-Kernel-Packages.*$
+    replace: Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";
+  notify: restart unattended-upgrades
+
+...
diff --git a/roles/sysconfig/tasks/configure.yml b/roles/sysconfig/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d7f47fa7ec6978d61380a5dbd0e38226be08d0cb
--- /dev/null
+++ b/roles/sysconfig/tasks/configure.yml
@@ -0,0 +1,25 @@
+---
+
+- name: CONFIGURE PROXY
+  ansible.builtin.include_tasks: "configure/proxy.yml"
+
+- name: SYSTEM LOCALES
+  ansible.builtin.include_tasks: "configure/locale.yml"
+  when:
+    - init_locale is defined
+
+- name: SYSTEM TIMEZONE
+  ansible.builtin.include_tasks: "configure/timezone.yml"
+  when:
+    - init_timezone is defined
+
+- name: NTP
+  ansible.builtin.include_tasks: "configure/ntp.yml"
+  when:
+    - ntp_servers is defined
+
+# Flush to run pending handlers (timezone/NTP service restarts) before continuing
+- name: Flush handlers
+  meta: flush_handlers
+
+...
diff --git a/roles/sysconfig/tasks/configure/locale.yml b/roles/sysconfig/tasks/configure/locale.yml
new file mode 100644
index 0000000000000000000000000000000000000000..003f321f76a309b7030e52ca56148878ecfeeb59
--- /dev/null
+++ b/roles/sysconfig/tasks/configure/locale.yml
@@ -0,0 +1,14 @@
+---
+
+- name: set default locale
+  ansible.builtin.template:
+    src: locale.j2
+    dest: /etc/default/locale
+    mode: "644"
+
+# Manages locales by editing /etc/locale.gen and invoking locale-gen
+- name: generate current locales
+  community.general.locale_gen:
+    name: "{{ init_locale }}"
+
+...
diff --git a/roles/sysconfig/tasks/ntp.yml b/roles/sysconfig/tasks/configure/ntp.yml
similarity index 100%
rename from roles/sysconfig/tasks/ntp.yml
rename to roles/sysconfig/tasks/configure/ntp.yml
diff --git a/roles/sysconfig/tasks/configure/proxy.yml b/roles/sysconfig/tasks/configure/proxy.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8059dc1d52f6eaf23e9d2b08679d2dce29653688
--- /dev/null
+++ b/roles/sysconfig/tasks/configure/proxy.yml
@@ -0,0 +1,11 @@
+---
+
+- name: proxy
+  when:
+    - proxy_http | d()
+    - proxy_https | d()
+  ansible.builtin.include_role:
+    name: proxy
+    allow_duplicates: true
+
+...
diff --git a/roles/sysconfig/tasks/configure/timezone.yml b/roles/sysconfig/tasks/configure/timezone.yml
new file mode 100644
index 0000000000000000000000000000000000000000..096901ba1f627c9cb270a01d39618439d55d8edc
--- /dev/null
+++ b/roles/sysconfig/tasks/configure/timezone.yml
@@ -0,0 +1,11 @@
+---
+
+- name: set timezone
+  # in order to execute cron task at new set timezone
+  # TODO: fix when there is no cron service
+  # notify: restart cron
+  notify: trigger dpkg-reconfigure tzdata
+  community.general.timezone:
+    name: "{{ init_timezone }}"
+
+...
diff --git a/roles/sysconfig/tasks/install.yml b/roles/sysconfig/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1b06657fd8bfda54a4774c3405c9076d099ff852
--- /dev/null
+++ b/roles/sysconfig/tasks/install.yml
@@ -0,0 +1,18 @@
+---
+
+- name: install system utilities
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ item }}"
+    state: present
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+  loop:
+    - "{{ init_packages }}"
+    - "{{ sysconfig_packages }}"
+    - "{{ sysconfig_repos_packages }}"
+    - "{{ locale_packages }}"
+
+...
diff --git a/roles/sysconfig/tasks/locale.yml b/roles/sysconfig/tasks/locale.yml
deleted file mode 100644
index 2edcf1bd690995bf5964c752f13417b7a4a687ee..0000000000000000000000000000000000000000
--- a/roles/sysconfig/tasks/locale.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-
-- name: install locale packages
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ locale_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: generate locale
-  when: init_locale != 'C.UTF-8'
-  community.general.locale_gen:
-    name: "{{ init_locale }}"
-
-- name: set locale
-  notify: update locale
-  ansible.builtin.copy:
-    dest: /etc/default/locale
-    mode: "644"
-    content: |
-      LANG={{ init_locale }}
-      LANGUAGE={{ init_locale }}
-      LC_ALL={{ init_locale }}
-
-- name: set locale.gen
-  notify: update locale
-  ansible.builtin.lineinfile:
-    path: /etc/locale.gen
-    regexp: ^(?:# )?({{ init_locale }}.*)$
-    backrefs: true
-    line: \1
-
-- name: set timezone (part.1)
-  # in order to execute cron task at new set timezone
-  # TODO: fix when there is no cron service
-  # notify: restart cron
-  community.general.timezone:
-    name: "{{ init_timezone }}"
-
-- name: set timezone (part.2)
-  # https://unix.stackexchange.com/questions/451709/timedatectl-set-timezone-doesnt-update-etc-timezone
-  # timedatectl do not update the /etc/timzone file...
-  ansible.builtin.command:
-    cmd: dpkg-reconfigure --frontend noninteractive tzdata
-  changed_when: false
-
-...
diff --git a/roles/sysconfig/tasks/main.yml b/roles/sysconfig/tasks/main.yml
index 928c2cfcd9c26cbfb2c4ef9361c95319f08b90e2..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/sysconfig/tasks/main.yml
+++ b/roles/sysconfig/tasks/main.yml
@@ -1,146 +1,33 @@
 ---
 
-- name: REPOS
-  ansible.builtin.include_tasks: repos.yml
-
-# Upgrade already installed packages to latest version and clean system
-
-- name: apt update
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    update_cache: true
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-  changed_when: false
-
-- name: apt dist upgrade
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    upgrade: dist
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-  changed_when: false
-
-- name: apt clean and autoremove
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    autoclean: true
-    autoremove: true
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-# Install new packages and remove conflicts
-
-- name: install system utilities
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ sysconfig_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: remove conflicting packages
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name:
-      - exim4
-      - exim4-base
-      - exim4-config
-      - exim4-daemon-light
-    state: absent
-    purge: true
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-# Enable automatic security upgrades
-
-- name: install unattended-upgrades
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: unattended-upgrades
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: enable unattended upgrades
-  ansible.builtin.copy:
-    dest: /etc/apt/apt.conf.d/20auto-upgrades
-    content: |
-      APT::Periodic::Update-Package-Lists "1";
-      APT::Periodic::Unattended-Upgrade "1";
-    mode: "644"
-
-- name: remove old kernel with unattended-upgrades
-  ansible.builtin.replace:
-    dest: /etc/apt/apt.conf.d/50unattended-upgrades
-    regexp: ^//Unattended-Upgrade::Remove-Unused-Kernel-Packages.*$
-    replace: Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";
-  notify: restart unattended-upgrades
-
-- name: allow automatic updates for ubicast security repo
-  ansible.builtin.lineinfile:
-    path: /etc/apt/apt.conf.d/50unattended-upgrades
-    insertafter: ^Unattended-Upgrade::Origins-Pattern {$
-    line: '        "origin=UbiCast,label=UbiCast-Security";'
-    backup: true
-  notify: restart unattended-upgrades
-
-- name: verify root user ssh authorized key file
-  ansible.builtin.stat:
-    path: /root/.ssh/authorized_keys
-  register: auth
-
-- name: fail if the root ssh authorized key is missing or empty
-  ansible.builtin.fail:
-    msg: "Error: root user does not have any ssh key configured !\n\
-          Cannot configure PermitRootLogin to without-password"
-  when: not auth.stat.exists or auth.stat.size == 0
-
-- name: enable root login via ssh with key only
-  ansible.builtin.replace:
-    dest: /etc/ssh/sshd_config
-    regexp: ^#?PermitRootLogin.*
-    replace: PermitRootLogin prohibit-password
-  notify: restart sshd
-
-- name: remove disabled root login
-  ansible.builtin.replace:
-    dest: /root/.ssh/authorized_keys
-    regexp: ^no-port-forwarding,(.+) ssh-
-    replace: ssh-
-    mode: "600"
-  failed_when: false
-
-- name: set issue file
-  ansible.builtin.copy:
-    content: |
-      \e{lightblue} © UBICAST\e{reset}
-
-      IP Address:
-      \e{bold}\4\e{reset}
-
-    dest: /etc/issue
-    mode: "644"
-
-- name: LOGS
-  ansible.builtin.include_tasks: logs.yml
-
-- name: LOCALE
-  ansible.builtin.include_tasks: locale.yml
-
-- name: NTP
-  ansible.builtin.include_tasks: ntp.yml
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
+
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
+
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/sysconfig/tasks/repos.yml b/roles/sysconfig/tasks/repos.yml
deleted file mode 100644
index 5ad425446ccab65e73c47250b25a8914471964eb..0000000000000000000000000000000000000000
--- a/roles/sysconfig/tasks/repos.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-
-- name: configure UbiCast repositories
-  when:
-    - not offline_mode | d(false)
-    - ansible_distribution == 'Debian'
-  block:
-    - name: "Install repos packages"
-      ansible.builtin.apt:
-        force_apt_get: true
-        install_recommends: false
-        name: "{{ sysconfig_repos_packages }}"
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: "Ensure {{ sysconfig_repos_keyrings_dir }} directory exist"
-      ansible.builtin.file:
-        path: "{{ sysconfig_repos_keyrings_dir }}"
-        state: directory
-        mode: "755"
-
-    - name: "Add UbiCast repository public key"
-      ansible.builtin.get_url:
-        url: "https://{{ repos_ubicast_packages_domain }}/media/public.gpg"
-        dest: "{{ sysconfig_repos_keyrings_dir }}/{{ repos_ubicast_packages_domain }}.asc"
-        mode: "644"
-
-    - name: "Add UbiCast repository public key"
-      ansible.builtin.get_url:
-        url: "https://{{ repos_ubicast_packages_domain }}/media/public.gpg"
-        dest: "{{ sysconfig_repos_keyrings_dir }}/{{ repos_ubicast_packages_domain }}.asc"
-        mode: "644"
-
-    - name: "Add UbiCast security repository"
-      ansible.builtin.deb822_repository:
-        name: "ubicast-security"
-        types: "deb"
-        uris: "https://{{ repos_ubicast_packages_domain }}"
-        suites: "packaging/apt/ubicast-security-updates/{{ repos_release }}/"
-        signed_by: "{{ sysconfig_repos_keyrings_dir }}/{{ repos_ubicast_packages_domain }}.asc"
-
-    - name: "Add UbiCast solutions repository"
-      when:
-        - repos_ubicast_packages_token | d(false)
-      ansible.builtin.deb822_repository:
-        name: "ubicast"
-        types: "deb"
-        uris: "https://{{ repos_ubicast_packages_domain }}"
-        suites: "packaging/apt/{{ repos_ubicast_packages_token }}/{{ repos_release }}/"
-        signed_by: "{{ sysconfig_repos_keyrings_dir }}/{{ repos_ubicast_packages_domain }}.asc"
-
-...
diff --git a/roles/sysconfig/templates/locale.j2 b/roles/sysconfig/templates/locale.j2
new file mode 100644
index 0000000000000000000000000000000000000000..9395aea7ee0280858d2fa6fa8e66aee3c711a2ca
--- /dev/null
+++ b/roles/sysconfig/templates/locale.j2
@@ -0,0 +1,4 @@
+# See https://unix.stackexchange.com/questions/149111/what-should-i-set-my-locale-to-and-what-are-the-implications-of-doing-so
+LANG={{ init_locale }}
+LC_COLLATE=C
+LC_NUMERIC=C
diff --git a/roles/sysconfig/templates/ntp.conf.j2 b/roles/sysconfig/templates/ntp.conf.j2
deleted file mode 100644
index c5cd679acff542448b55a9592826b945552cd9ee..0000000000000000000000000000000000000000
--- a/roles/sysconfig/templates/ntp.conf.j2
+++ /dev/null
@@ -1,27 +0,0 @@
-# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
-
-driftfile /var/lib/ntp/ntp.drift
-
-# Leap seconds definition provided by tzdata
-leapfile /usr/share/zoneinfo/leap-seconds.list
-
-# Specify one or more NTP servers.
-{% if ntp_servers is string %}
-{% set ntp_list = ntp_servers.split(',') %}
-{% else %}
-{% set ntp_list = ntp_servers %}
-{% endif %}
-{% for server in ntp_list %}
-pool {{ server }} iburst
-{% endfor %}
-
-# By default, exchange time with everybody, but don't allow configuration.
-restrict -4 default kod notrap nomodify nopeer noquery limited
-restrict -6 default kod notrap nomodify nopeer noquery limited
-
-# Local users may interrogate the ntp server more closely.
-restrict 127.0.0.1
-restrict ::1
-
-# Needed for adding pool entries
-restrict source notrap nomodify noquery
diff --git a/roles/sysconfig/vars/main.yml b/roles/sysconfig/vars/main.yml
index 8ecbba9223998a99b811aa1cdd0a0a49f590ab0f..a7ef7f1eeacbad7221e8f51f058fd8fde1520e0f 100644
--- a/roles/sysconfig/vars/main.yml
+++ b/roles/sysconfig/vars/main.yml
@@ -1,9 +1,19 @@
 ---
+
+# Base packages required before any other configuration step
+init_packages:
+  - openssh-client
+  - ca-certificates
+  - apt-utils
+  - gnupg
+  - sudo
+
+# System utility packages installed by the sysconfig role
 sysconfig_packages:
   - openssh-server
   - bash-completion
+  - unattended-upgrades
   - man
-  - sudo
   - vim
   - ifupdown
   - lm-sensors
@@ -12,12 +22,19 @@ sysconfig_packages:
   - host
   - htop
 
+# Debian distribution short name (example: "bullseye")
+repos_release: "{{ ansible_distribution_release }}"
+
+# Packages required for locale and timezone configuration
 locale_packages:
   - locales
   - tzdata
 
+# Packages required to manage APT repositories (deb822 format)
 sysconfig_repos_packages:
   - python3-debian
 
-sysconfig_repos_keyrings_dir: '/usr/local/share/keyrings'
+# Repository keyring path
+sysconfig_repos_keyrings_dir: "/usr/local/share/keyrings"
+
 ...
diff --git a/roles/sysuser/defaults/main.yml b/roles/sysuser/defaults/main.yml
index bdfb1e2146353286e16ce2b841a1cd074759dedb..202fa54f35698bd5dafc234505ee8624b0b734db 100644
--- a/roles/sysuser/defaults/main.yml
+++ b/roles/sysuser/defaults/main.yml
@@ -1,5 +1,12 @@
 ---
-offline_mode: false
+
+# When true, skip installing the UbiCast SSH access packages (no ubicast ssh public key is configured)
+offline_mode: false
+
+# Password for the system user ubicast
 sysuser_ubicast_password: "changeit"
+
+# Password for the system user admin
 sysuser_admin_password: "changeit"
+
 ...
diff --git a/roles/sysuser/files/.bashrc b/roles/sysuser/files/bashrc
similarity index 100%
rename from roles/sysuser/files/.bashrc
rename to roles/sysuser/files/bashrc
diff --git a/roles/sysuser/files/.vimrc b/roles/sysuser/files/vimrc
similarity index 100%
rename from roles/sysuser/files/.vimrc
rename to roles/sysuser/files/vimrc
diff --git a/roles/sysuser/handlers/main.yml b/roles/sysuser/handlers/main.yml
deleted file mode 100644
index cbc6f332ac43dd2b59fe05cc1eadd3472a872924..0000000000000000000000000000000000000000
--- a/roles/sysuser/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: restart sshd
-  ansible.builtin.service:
-    name: sshd
-    state: restarted
-
-...
diff --git a/roles/sysuser/tasks/base.yml b/roles/sysuser/tasks/base.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b9dcdf0cc17b2420954c00a12787d7401ef7e625
--- /dev/null
+++ b/roles/sysuser/tasks/base.yml
@@ -0,0 +1,11 @@
+---
+
+- name: CONFIGURE SUDOERS
+  ansible.builtin.include_tasks: "base/sudoers.yml"
+
+- name: SETUP ROOT DOTFILES
+  ansible.builtin.include_tasks: "common/dotfiles.yml"
+  vars:
+    user: "root"
+
+...
diff --git a/roles/sysuser/tasks/base/sudoers.yml b/roles/sysuser/tasks/base/sudoers.yml
new file mode 100644
index 0000000000000000000000000000000000000000..564d9548bc5d187c4208a424f7b110a9cf94d91f
--- /dev/null
+++ b/roles/sysuser/tasks/base/sudoers.yml
@@ -0,0 +1,11 @@
+---
+
+- name: sudoers without password
+  ansible.builtin.copy:
+    dest: /etc/sudoers.d/nopasswd
+    validate: visudo -cf %s
+    mode: "440"
+    content: |
+      %sudo ALL=(ALL) NOPASSWD: ALL
+
+...
diff --git a/roles/sysuser/tasks/common/dotfiles.yml b/roles/sysuser/tasks/common/dotfiles.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c21230784a10a594d699604a5626cbef395ef98c
--- /dev/null
+++ b/roles/sysuser/tasks/common/dotfiles.yml
@@ -0,0 +1,15 @@
+---
+
+- name: "({{ user }}) copy .bashrc"
+  ansible.builtin.copy:
+    src: bashrc
+    dest: ~{{ user }}/.bashrc
+    mode: "644"
+
+- name: "({{ user }}) copy .vimrc"
+  ansible.builtin.copy:
+    src: vimrc
+    dest: ~{{ user }}/.vimrc
+    mode: "644"
+
+...
diff --git a/roles/sysuser/tasks/configure.yml b/roles/sysuser/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..63f3f42502565fea478d75de196234d33add3d23
--- /dev/null
+++ b/roles/sysuser/tasks/configure.yml
@@ -0,0 +1,17 @@
+---
+
+- name: CREATE ADMIN USER
+  ansible.builtin.include_tasks: "configure/create_user.yml"
+  vars:
+    user: "admin"
+  when:
+    - sysuser_admin_password is defined
+
+- name: CREATE UBICAST USER
+  ansible.builtin.include_tasks: "configure/create_user.yml"
+  vars:
+    user: "ubicast"
+  when:
+    - sysuser_ubicast_password is defined
+
+...
diff --git a/roles/sysuser/tasks/configure/create_user.yml b/roles/sysuser/tasks/configure/create_user.yml
new file mode 100644
index 0000000000000000000000000000000000000000..66d1e5a4382bca1cc356fa1896be4e4f2d7f3cc3
--- /dev/null
+++ b/roles/sysuser/tasks/configure/create_user.yml
@@ -0,0 +1,30 @@
+---
+
+- name: "({{ user }}) create user group"
+  ansible.builtin.group:
+    name: "{{ user }}"
+    state: present
+
+- name: "({{ user }}) create user"
+  ansible.builtin.user:
+    name: "{{ user }}"
+    group: "{{ user }}"
+    shell: /bin/bash
+    generate_ssh_key: true
+    ssh_key_type: ed25519
+    ssh_key_file: .ssh/id_ed25519
+    append: true
+    groups:
+      - sudo
+    state: present
+
+- name: "({{ user }}) set password"
+  ansible.builtin.user:
+    name: "{{ user }}"
+    password: "{{ lookup('vars', 'sysuser_' + user + '_password') | password_hash('sha512', 'ubicastsalt') }}"
+    update_password: always
+
+- name: "SETUP {{ user | upper }} DOTFILES"
+  ansible.builtin.include_tasks: "../common/dotfiles.yml"
+
+...
diff --git a/roles/sysuser/tasks/copy_dotfiles.yml b/roles/sysuser/tasks/copy_dotfiles.yml
deleted file mode 100644
index 6c153ae1af527868c435095d45cb25fd0c353f6f..0000000000000000000000000000000000000000
--- a/roles/sysuser/tasks/copy_dotfiles.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: "({{ item }}) copy .bashrc"
-  ansible.builtin.copy:
-    src: .bashrc
-    dest: ~{{ item }}/.bashrc
-    mode: "644"
-
-- name: "({{ item }}) copy .vimrc"
-  ansible.builtin.copy:
-    src: .vimrc
-    dest: ~{{ item }}/.vimrc
-    mode: "644"
-...
diff --git a/roles/sysuser/tasks/create_user.yml b/roles/sysuser/tasks/create_user.yml
deleted file mode 100644
index d3dabe8422b41d66b533313a7a8bd5eb423a61ca..0000000000000000000000000000000000000000
--- a/roles/sysuser/tasks/create_user.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-
-- name: "({{ item }}) create user group"
-  ansible.builtin.group:
-    name: "{{ item }}"
-    state: present
-
-- name: "({{ item }}) create user"
-  ansible.builtin.user:
-    name: "{{ item }}"
-    group: "{{ item }}"
-    shell: /bin/bash
-    generate_ssh_key: true
-    ssh_key_type: ed25519
-    ssh_key_file: .ssh/id_ed25519
-    append: true
-    groups:
-      - sudo
-    state: present
-
-- name: "({{ item }}) set password"
-  ansible.builtin.user:
-    name: "{{ item }}"
-    password: "{{ lookup('vars', 'sysuser_' + item + '_password') | password_hash('sha512', 'ubicastsalt') }}"
-    update_password: always
-
-...
diff --git a/roles/sysuser/tasks/install.yml b/roles/sysuser/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e5733c6dc77dd975d38e07879c6094784b4a7a83
--- /dev/null
+++ b/roles/sysuser/tasks/install.yml
@@ -0,0 +1,15 @@
+---
+
+- name: install ubicast ssh access
+  when: not offline_mode | d(false)
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    update_cache: true
+    name: "{{ users_packages }}"
+    state: present
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/sysuser/tasks/main.yml b/roles/sysuser/tasks/main.yml
index 190d1d60533b70965ff4e2302442b28676f999d4..21ffa13e2e09ad71a2c7b4debadf373c8dab60cf 100644
--- a/roles/sysuser/tasks/main.yml
+++ b/roles/sysuser/tasks/main.yml
@@ -1,35 +1,33 @@
 ---
 
-- name: create users
-  ansible.builtin.include_tasks: create_user.yml
-  loop:
-    - ubicast
-    - admin
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: copy dotfiles
-  ansible.builtin.include_tasks: copy_dotfiles.yml
-  loop:
-    - ubicast
-    - admin
-    - root
+- name: BASE CONFIGURATION
+  ansible.builtin.include_tasks:
+    file: "base.yml"
+    apply:
+      become: true
+      tags:
+        - base
+  tags:
+    - always
 
-- name: sudoers without password
-  ansible.builtin.copy:
-    dest: /etc/sudoers.d/nopasswd
-    validate: visudo -cf %s
-    mode: "440"
-    content: |
-      %sudo ALL=(ALL) NOPASSWD: ALL
-
-- name: install ubicast ssh access
-  when: not offline_mode | d(false)
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ users_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/sysuser/vars/main.yml b/roles/sysuser/vars/main.yml
index b12bf3b90e2609b45888c1f11b895eda80f16ca0..2d2d9f887df9f82a08206c28cc2101d6d885a559 100644
--- a/roles/sysuser/vars/main.yml
+++ b/roles/sysuser/vars/main.yml
@@ -1,5 +1,8 @@
 ---
+
+# UbiCast SSH access management packages
 users_packages:
   - ubicast-ssh-access-manager
   - ubicast-ssh-access-support
+
 ...
diff --git a/roles/tester/README.md b/roles/tester/README.md
index 9af238a818fc6da4fca332442f85c121c010935f..a8cb9a1a3befb098ff5bd4100a7511cff87c6983 100644
--- a/roles/tester/README.md
+++ b/roles/tester/README.md
@@ -10,11 +10,6 @@ The tester group is used to configure all hosts with the UbiCast tester solution
 
 Available variables are listed below, along with the descriptions and the default values.
 
-`tester_system_name`: Name of the system in the reports (Optional)
-```
-tester_system_name: "{{ inventory_hostname }}"
-```
-
 `repos_ubicast_packages_token`: UbiCast repository token used to make API call to mirismanager.ubicast.net to retrieve system informations
 ```
 repos_ubicast_packages_token: "XXXX-XXXX-XXXX-XXXX-XXXX"
diff --git a/roles/tester/defaults/main.yml b/roles/tester/defaults/main.yml
index be6859759f4c1b3fff0d10e8854e311bdc85eb08..8da3b43cbcd3f248ee280ea8c3132a21e50c0de1 100644
--- a/roles/tester/defaults/main.yml
+++ b/roles/tester/defaults/main.yml
@@ -1,7 +1,10 @@
 ---
 
-# Name of the system in the reports
-tester_system_name: "{{ inventory_hostname }}"
+# UbiCast repository token used to make API calls to mirismanager.ubicast.net to retrieve system information
+repos_ubicast_packages_token: "XXXX-XXXX-XXXX-XXXX-XXXX"
+
+# UbiCast admin receiver of the email report for premium customers
+tester_email_admin: "sysadmin+premium@ubicast.eu"
 
 # Sender of the email report
 tester_email_from: "ubicast.tester"
@@ -9,15 +12,7 @@ tester_email_from: "ubicast.tester"
 # Reciever of the email report
 tester_email_to: "example@example.com"
 
-# UbiCast admin reciever of the email report for premiums
-tester_email_admin: "sysadmin+premium@ubicast.eu"
-
 # List of tests to ignore when executing the ubicast-tester
 tester_tests_ignored: []
-#   - ntp.sh
-#   - email.sh
-
-# UbiCast repository token used to make API call to mirismanager.ubicast.net to retrieve system informations
-repos_ubicast_packages_token: "XXXX-XXXX-XXXX-XXXX-XXXX"
 
 ...
diff --git a/roles/tester/tasks/configure.yml b/roles/tester/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ece8ffbf439f13a7c0d014b4e76f7db39db951ac
--- /dev/null
+++ b/roles/tester/tasks/configure.yml
@@ -0,0 +1,12 @@
+---
+
+- name: CONFIGURE UBICAST-TESTER
+  ansible.builtin.include_tasks: "configure/tester-configure.yml"
+  when:
+    - repos_ubicast_packages_token is defined
+    - tester_email_admin is defined
+    - tester_email_from is defined
+    - tester_email_to is defined
+    - tester_tests_ignored is defined
+
+...
diff --git a/roles/tester/tasks/configure/tester-configure.yml b/roles/tester/tasks/configure/tester-configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..77d2a1fbf5f174f800658d5990dff37916cdb993
--- /dev/null
+++ b/roles/tester/tasks/configure/tester-configure.yml
@@ -0,0 +1,16 @@
+---
+
+- name: tester configuration
+  ansible.builtin.replace:
+    path: /etc/ubicast-tester/config.yml
+    regexp: '^(\s*)#?{{ item.name }}:(\s*).*$'
+    replace: '\1{{ item.name }}:\2{{ item.value }}'
+  loop:
+    - { name: 'name',    value: '"{{ tester_system_name }}"' }           # noqa: yaml[commas]
+    - { name: 'token',   value: '"{{ repos_ubicast_packages_token }}"' } # noqa: yaml[commas]
+    - { name: 'from',    value: '"{{ tester_email_from }}"' }            # noqa: yaml[commas]
+    - { name: 'to',      value: '"{{ tester_email_to }}"' }              # noqa: yaml[commas]
+    - { name: 'admin',   value: '"{{ tester_email_admin }}"' }           # noqa: yaml[commas]
+    - { name: 'ignored', value: '{{ tester_tests_ignored }}' }
+
+...
diff --git a/roles/tester/tasks/install.yml b/roles/tester/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0e07b005c3710c96d4d2f754f687f6a4040eb53c
--- /dev/null
+++ b/roles/tester/tasks/install.yml
@@ -0,0 +1,13 @@
+---
+
+- name: install tester packages
+  ansible.builtin.apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ tester_packages }}"
+    state: present
+  register: apt_status
+  retries: 60
+  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+
+...
diff --git a/roles/tester/tasks/main.yml b/roles/tester/tasks/main.yml
index c60ed3e624ee045f70a11222cfbb3c8f92c2f395..4ca9b1ce59bf99a101c275a40afab2c0fc69e94a 100644
--- a/roles/tester/tasks/main.yml
+++ b/roles/tester/tasks/main.yml
@@ -1,26 +1,23 @@
 ---
 
-- name: install tester packages
-  ansible.builtin.apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ tester_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
+- name: INSTALL
+  ansible.builtin.include_tasks:
+    file: "install.yml"
+    apply:
+      become: true
+      tags:
+        - install
+  tags:
+    - always
 
-- name: tester configuration
-  ansible.builtin.replace:
-    path: /etc/ubicast-tester/config.yml
-    regexp: '^(\s*)#?{{ item.name }}:(\s*).*$'
-    replace: '\1{{ item.name }}:\2{{ item.value }}'
-  loop:
-    - { name: 'name',    value: '"{{ tester_system_name }}"' }           # noqa: yaml[commas]
-    - { name: 'token',   value: '"{{ repos_ubicast_packages_token }}"' } # noqa: yaml[commas]
-    - { name: 'from',    value: '"{{ tester_email_from }}"' }            # noqa: yaml[commas]
-    - { name: 'to',      value: '"{{ tester_email_to }}"' }              # noqa: yaml[commas]
-    - { name: 'admin',   value: '"{{ tester_email_admin }}"' }           # noqa: yaml[commas]
-    - { name: 'ignored', value: '{{ tester_tests_ignored }}' }
+- name: CONFIGURE
+  ansible.builtin.include_tasks:
+    file: "configure.yml"
+    apply:
+      become: true
+      tags:
+        - configure
+  tags:
+    - always
 
 ...
diff --git a/roles/tester/vars/main.yml b/roles/tester/vars/main.yml
index 52d39b1b42cac7f7acbebf538cf8b93a1f50e5d6..22fa7345081b4c79b81a7f1169237651b9424c5a 100644
--- a/roles/tester/vars/main.yml
+++ b/roles/tester/vars/main.yml
@@ -1,7 +1,13 @@
 ---
+
+# UbiCast tester packages installed by the tester role
 tester_packages:
   - ubicast-tester
   - ubicast-tester-nudgis
   - ubicast-tester-system
   - ubicast-tester-deployment
+
+# Name of the system in the reports
+tester_system_name: "{{ inventory_hostname }}"
+
 ...