diff --git a/.lint/ansible-apt-block-check.sh b/.lint/ansible-apt-block-check.sh
index 11c930f8a666c91d686e94c0cfa616b4f9b41913..2a2c78007f37fd2e83b8c876ceb0aa7b832fb8a8 100755
--- a/.lint/ansible-apt-block-check.sh
+++ b/.lint/ansible-apt-block-check.sh
@@ -1,14 +1,15 @@
 #!/usr/bin/env bash
 
 # config
 exclude_pattern=()
 exclude_pattern+=('^./roles/elastic.elasticsearch')
 exclude_pattern+=('^./.venv')
 
-apt_regex='^[^#]*apt:'
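+# match both the short (apt:) and fully-qualified (ansible.builtin.apt:) module names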
+apt_regex='^[^#]*(ansible\.builtin\.)?apt:'
 until_regex='^[^#]*until: apt_status is success'
 
-# * * * 
+# * * *
 
 # go to repository root dir
 cd "$(readlink -f "$(dirname "${0}")")"/..
@@ -24,14 +25,15 @@ errors_count=0
 for f in "${yml_files[@]}"; do
 
     # count apt block
-    apt_block_count=$(grep -c "${apt_regex}" "${f}")
+    apt_block_count=$(grep -cE "${apt_regex}" "${f}")
 
     # test if file contains an apt block
     if (( apt_block_count > 0 )); then
-    
+
         # get apt block, count apt: and until:
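+        # RS='' switches awk to paragraph mode: each blank-line-separated task block is one record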
         apt_blocks="$(awk -v RS='' "/${apt_regex}/" "${f}")"
-        apt_nb="$(echo "${apt_blocks}" | grep -c "${apt_regex}")"
+        apt_nb="$(echo "${apt_blocks}" | grep -cE "${apt_regex}")"
         until_nb="$(echo "${apt_blocks}" | grep -c "${until_regex}")"
 
         # test if apt: and until: count differ
@@ -51,4 +53,3 @@ if (( errors_count != 0 )); then
 else
     exit 0
 fi
-
diff --git a/playbooks/bench.yml b/playbooks/bench.yml
index d653897e9b10be6c2e60bcb55edd9b6923ecb36f..02602c622dd117de0d2de258fbad498e97dd45dd 100755
--- a/playbooks/bench.yml
+++ b/playbooks/bench.yml
@@ -5,7 +5,7 @@
   hosts: bench_server
   pre_tasks:
     - name: "Fail is benchmark server is not unique"
-      fail:
+      ansible.builtin.fail:
         msg: "Benchmark server must be unique"
       when: groups['bench_server'] | length > 1
   tags: bench_server
@@ -16,7 +16,7 @@
       tags:
         - never
         - prepare-bench
-      service:
+      ansible.builtin.service:
         name: bench-server
         state: restarted
 
@@ -30,7 +30,7 @@
       tags:
         - never
         - prepare-bench
-      service:
+      ansible.builtin.service:
         name: bench-worker
         state: restarted
 
diff --git a/playbooks/celerity.yml b/playbooks/celerity.yml
index fcf3728b3c7a946ed2c931f350a0843613ef7f2d..5094f5b899f2bb0de32ccf05de5d82896d626f22 100755
--- a/playbooks/celerity.yml
+++ b/playbooks/celerity.yml
@@ -9,11 +9,11 @@
   post_tasks:
     - name: configure network
       when: network_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: network
     - name: configure proxy
       when: proxy_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: proxy
 
 ...
diff --git a/playbooks/live/deploy-standalone.yml b/playbooks/live/deploy-standalone.yml
index 440ebd3173de47635f8dd0b214020b4ec4d10a9c..4c7803530bd767ed23814f20b5257f0d0749564c 100644
--- a/playbooks/live/deploy-standalone.yml
+++ b/playbooks/live/deploy-standalone.yml
@@ -30,7 +30,7 @@
         ferm_input_rules: "{{ server_ferm_input_rules }}"
         ferm_output_rules: "{{ server_ferm_output_rules }}"
         ferm_global_settings: "{{ server_ferm_global_settings }}"
-      include_role:
+      ansible.builtin.include_role:
         name: ferm-configure
 
 - import_playbook: deploy-minimal.yml
diff --git a/playbooks/live/functions/create-live-app.yml b/playbooks/live/functions/create-live-app.yml
index 053a4a608cf698430300e7d19734671555c81531..97722c5aa5ca7af8f559cc8a4038724b129b327d 100644
--- a/playbooks/live/functions/create-live-app.yml
+++ b/playbooks/live/functions/create-live-app.yml
@@ -6,23 +6,23 @@
   tags: live
   tasks:
     - name: Check the existence of the live configuration
-      stat:
+      ansible.builtin.stat:
         path: /etc/nginx/rtmp.d/{{ live_app_name }}.conf
       register: live_conf_live
 
     - name: Getting the live configuration content
-      shell: grep -oP '^application \K[A-Za-z0-9]+' /etc/nginx/rtmp.d/{{ live_app_name }}.conf
+      ansible.builtin.shell: grep -oP '^application \K[A-Za-z0-9]+' /etc/nginx/rtmp.d/{{ live_app_name }}.conf
       when: live_conf_live.stat.exists
       register: live_conf_secret
       changed_when: false
 
     - name: Extracting the application secret
-      set_fact:
+      ansible.builtin.set_fact:
         live_secret: "{{ live_conf_secret.stdout }}"
       when: live_conf_live.stat.exists
 
     - name: Declaring the application secret
-      set_fact:
+      ansible.builtin.set_fact:
         live_secret: ""
       when: not live_conf_live.stat.exists
 
@@ -32,23 +32,23 @@
   tags: live
   tasks:
     - name: Check the existence of the live configuration
-      stat:
+      ansible.builtin.stat:
         path: /home/{{ live_app_name }}/msinstance/conf/lives.json
       register: ms_conf_live
 
     - name: Retrieve the live configuration
-      slurp:
+      ansible.builtin.slurp:
         src: /home/{{ live_app_name }}/msinstance/conf/lives.json
       register: ms_live_config
       when: ms_conf_live.stat.exists
 
     - name: Extracting the application secret
-      set_fact:
+      ansible.builtin.set_fact:
         live_secret: "{{ ms_live_config.content|b64decode|from_json | json_query('RTMP_APP') }}"
       when: ms_conf_live.stat.exists
 
     - name: Declaring the application secret
-      set_fact:
+      ansible.builtin.set_fact:
         live_secret: ""
       when: not ms_conf_live.stat.exists
 
@@ -58,13 +58,13 @@
   tags: live
   tasks:
     - name: Retrieving the first live host's configured app secret as reference
-      set_fact:
+      ansible.builtin.set_fact:
         base_live_secret: "{{ hostvars[groups['live'][0]].live_secret }}"
         app_secret_diff: false
       when: hostvars[groups['live'][0]].live_secret | length > 0
 
     - name: Comparing the app secrets from MS and live servers with the reference
-      set_fact:
+      ansible.builtin.set_fact:
         app_secret_diff: true
       when: base_live_secret is defined
             and hostvars[item].live_secret != base_live_secret
@@ -73,7 +73,7 @@
         - "{{ groups['mediaserver'] }}"
 
     - name: Generating an application secret on localhost with /dev/urandom
-      shell: >
+      ansible.builtin.shell: >
         set -o pipefail && \
         cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 30 | head -n 1
       register: secret
@@ -84,7 +84,7 @@
             or app_secret_diff
 
     - name: Deciding the application secret to use
-      set_fact:
+      ansible.builtin.set_fact:
         live_app_secret: "{{ secret.stdout | d(base_live_secret) }}"
 
 - name: Live server(s) - "{{ live_app_name }}" live application configuration
@@ -93,19 +93,19 @@
   tags: live
   tasks:
     - name: Check the existence of the RTMP app
-      command: ubicast-livectl get {{ live_app_name }} {{ hostvars['localhost'].live_app_secret }}
+      ansible.builtin.command: ubicast-livectl get {{ live_app_name }} {{ hostvars['localhost'].live_app_secret }}
       register: app_status
       changed_when: false
       failed_when: false
 
     - name: (Re)create the RTMP app configuration
       notify: Reload nginx
-      command: ubicast-livectl add {{ live_app_name }} {{ hostvars['localhost'].live_app_secret }}
+      ansible.builtin.command: ubicast-livectl add {{ live_app_name }} {{ hostvars['localhost'].live_app_secret }}
       when: app_status.rc == 1
 
     - name: Prepare the nginx RTMP temporary directory
       notify: Reload nginx
-      file:
+      ansible.builtin.file:
         path: /var/tmp/nginx-rtmp/{{ live_app_name }}
         owner: nginx
         group: root
@@ -114,7 +114,7 @@
 
     - name: Create the nginx RTMP web directory symlink
       notify: Reload nginx
-      file:
+      ansible.builtin.file:
         src: /var/tmp/nginx-rtmp/{{ live_app_name }}
         dest: /var/www/{{ live_app_name }}/streaming-rtmp
         state: link
@@ -123,7 +123,7 @@
 
   handlers:
     - name: Reload nginx
-      systemd:
+      ansible.builtin.systemd:
         name: nginx
         state: reloaded
 
@@ -133,54 +133,54 @@
   tags: live
   tasks:
     - name: Getting the current lives configuration
-      slurp:
+      ansible.builtin.slurp:
         src: /home/{{ live_app_name }}/msinstance/conf/lives.json
       register: lives_config
       when: ms_conf_live.stat.exists
 
     # The "W10K" string is decoded to an empty json file => "[]"
     - name: Store the lives configuration in a variable
-      set_fact:
+      ansible.builtin.set_fact:
         lives_config: "{{ lives_config.content | default('W10K') | b64decode | from_json }}"
 
     - name: Set the live application secret in lives configuration
       vars:
         rtmp_app_line:
           RTMP_APP: "{{ hostvars['localhost'].live_app_secret }}"
-      set_fact:
+      ansible.builtin.set_fact:
         lives_config: "{{ lives_config | combine(rtmp_app_line) }}"
 
     - name: Set the RTMP_NAME in lives configuration
       vars:
         rtmp_name_line:
           RTMP_NAME: "{{ live_app_name }}"
-      set_fact:
+      ansible.builtin.set_fact:
         lives_config: "{{ lives_config | combine(rtmp_name_line) }}"
 
     - name: Set the RTMP_HLS_PLAYBACK_URL in lives configuration
       vars:
         rtmp_hls_line:
           RTMP_HLS_PLAYBACK_URL: "{{ rtmp_hls_url }}"
-      set_fact:
+      ansible.builtin.set_fact:
         lives_config: "{{ lives_config | combine(rtmp_hls_line) }}"
 
     - name: Set the RTMP_PLAYBACK_URL in lives configuration
       vars:
         rtmp_playback_line:
           RTMP_PLAYBACK_URL: null
-      set_fact:
+      ansible.builtin.set_fact:
         lives_config: "{{ lives_config | combine(rtmp_playback_line) }}"
 
     - name: Set the RTMP_PUBLISH_URL in lives configuration
       vars:
         rtmp_publish_line:
           RTMP_PUBLISH_URL: "{{ rtmp_pub_url }}"
-      set_fact:
+      ansible.builtin.set_fact:
         lives_config: "{{ lives_config | combine(rtmp_publish_line) }}"
 
     - name: Update mediaserver lives configuration
       notify: Restart mediaserver
-      copy:
+      ansible.builtin.copy:
         content: "{{ lives_config | to_nice_json }}"
         dest: "/home/{{ live_app_name }}/msinstance/conf/lives.json"
         owner: "{{ live_app_name }}"
@@ -189,7 +189,7 @@
 
   handlers:
     - name: Restart mediaserver
-      systemd:
+      ansible.builtin.systemd:
         name: mediaserver
         state: restarted
 
diff --git a/playbooks/live/subplays/ha-case.yml b/playbooks/live/subplays/ha-case.yml
index b2fc625a6b691771066615d210f413380bba1a1a..1ee19ff95e8ab2ed16d663968d9e3b0e8eb70aad 100644
--- a/playbooks/live/subplays/ha-case.yml
+++ b/playbooks/live/subplays/ha-case.yml
@@ -6,28 +6,28 @@
   gather_facts: false
   tasks:
     - name: resolve domain name to localhost
-      lineinfile:
+      ansible.builtin.lineinfile:
         path: /etc/hosts
         line: '127.0.1.1 {{ live_domain }}'
         backup: true
 
     - name: fill the vhost file
       notify: Restart nginx
-      replace:
+      ansible.builtin.replace:
         path: /etc/nginx/sites-available/live-rtmp.conf
         regexp: '^(\s+server_name)\s+.*(;)$'
         replace: '\1 {{ live_domain }}\2'
 
     - name: Activating the live vhost configuration
       notify: Restart nginx
-      file:
+      ansible.builtin.file:
         src: /etc/nginx/sites-available/live-rtmp.conf
         dest: /etc/nginx/sites-enabled/live-rtmp.conf
         state: link
 
   handlers:
     - name: Restart nginx
-      systemd:
+      ansible.builtin.systemd:
         name: nginx
         state: restarted
 
@@ -43,12 +43,12 @@
   gather_facts: false
   tasks:
     - name: Check the existence of the rtmp configuration folder
-      stat:
+      ansible.builtin.stat:
         path: /etc/nginx/rtmp.d
       register: rtmp_conf_dir
 
     - name: Remove unused MediaServer(s) rtmp configurations
-      shell: /bin/rm -f /etc/nginx/rtmp.d/*
+      ansible.builtin.shell: /bin/rm -f /etc/nginx/rtmp.d/*
       args:
         warn: false
       when: rtmp_conf_dir.stat.exists
diff --git a/playbooks/mediacache.yml b/playbooks/mediacache.yml
index 827d0a1dc58dd4efc8f0059bee84540936a25fb2..045b1c3817365b1204321988ea21ed90a5c354ab 100755
--- a/playbooks/mediacache.yml
+++ b/playbooks/mediacache.yml
@@ -9,15 +9,15 @@
   post_tasks:
     - name: deploy letsencrypt certificate
       when: letsencrypt_enabled | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: letsencrypt
     - name: configure network
       when: network_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: network
     - name: configure proxy
       when: proxy_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: proxy
 
 ...
diff --git a/playbooks/mediacache/deploy-minimal.yml b/playbooks/mediacache/deploy-minimal.yml
index 770b39b9d24afe1c4d1c246aefd7b9c336bad88f..7caadf517a962b3fd3108ca1a560d6924a2b04a1 100644
--- a/playbooks/mediacache/deploy-minimal.yml
+++ b/playbooks/mediacache/deploy-minimal.yml
@@ -7,12 +7,12 @@
     - mediacache
   tasks:
     - name: Getting the IP to trust for securelink
-      set_fact:
+      ansible.builtin.set_fact:
         securelink_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
 
     - name: authorize mediacache on mediaserver
       notify: restart nginx on mediaservers
-      lineinfile:
+      ansible.builtin.lineinfile:
         path: /etc/nginx/conf.d/mediaserver-securelink.conf
         line: "{{'\t'}}{{ securelink_ip }} 1;"  # noqa: no-tabs
         insertafter: '^geo'
@@ -22,7 +22,7 @@
 
   handlers:
     - name: restart nginx on mediaservers
-      systemd:
+      ansible.builtin.systemd:
         name: nginx
         state: restarted
       delegate_to: "{{ item }}"
diff --git a/playbooks/mediacache/deploy-standalone.yml b/playbooks/mediacache/deploy-standalone.yml
index 0190c3f0d48ec590383e4144e89c547fec8daf8f..9efd3e7456630f83f20196175c1146dc06a43f80 100644
--- a/playbooks/mediacache/deploy-standalone.yml
+++ b/playbooks/mediacache/deploy-standalone.yml
@@ -30,7 +30,7 @@
         ferm_input_rules: "{{ server_ferm_input_rules }}"
         ferm_output_rules: "{{ server_ferm_output_rules }}"
         ferm_global_settings: "{{ server_ferm_global_settings }}"
-      include_role:
+      ansible.builtin.include_role:
         name: ferm-configure
 
 - import_playbook: deploy-minimal.yml
diff --git a/playbooks/mediaimport.yml b/playbooks/mediaimport.yml
index 3040caabe2563bdcd0415bb4a1363a4403379882..84363dfd7b8db8caa964c97726cce1b21cfd6aeb 100755
--- a/playbooks/mediaimport.yml
+++ b/playbooks/mediaimport.yml
@@ -9,11 +9,11 @@
   post_tasks:
     - name: configure network
       when: network_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: network
     - name: configure proxy
       when: proxy_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: proxy
 
 ...
diff --git a/playbooks/mediaserver.yml b/playbooks/mediaserver.yml
index fd1f3711f975319b7902444a4494a2476c716241..f6c798bc60cf7c0952f5e5a02ae4b96cdbc2b2cb 100755
--- a/playbooks/mediaserver.yml
+++ b/playbooks/mediaserver.yml
@@ -9,15 +9,15 @@
   post_tasks:
     - name: deploy letsencrypt certificate
       when: letsencrypt_enabled | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: letsencrypt
     - name: configure network
       when: network_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: network
     - name: configure proxy
       when: proxy_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: proxy
 
 ...
diff --git a/playbooks/mediavault/deploy.yml b/playbooks/mediavault/deploy.yml
index 87d9f753b034472e270580ec55c53fd0ebd17df6..a18f2914af1f11dd96719881fc86580c23ef0ae9 100755
--- a/playbooks/mediavault/deploy.yml
+++ b/playbooks/mediavault/deploy.yml
@@ -9,11 +9,11 @@
   post_tasks:
     - name: configure network
       when: network_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: network
     - name: configure proxy
       when: proxy_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: proxy
 
 ...
diff --git a/playbooks/mediavault/ressources/add_backup_task.yml b/playbooks/mediavault/ressources/add_backup_task.yml
index d20110e6fc11ec6efd8a780bf7e15ef6cecaa99d..eebe94c78a62e6eb4f2d50cf89cd9915d2c791e7 100644
--- a/playbooks/mediavault/ressources/add_backup_task.yml
+++ b/playbooks/mediavault/ressources/add_backup_task.yml
@@ -5,7 +5,7 @@
   register: backup_marker
 
 - name: create {{ item.name }} backup
-  shell: mediavaultctl add --backup-name "{{ item.name }}" --source-folder "{{ item.source  }}" --dest-folder "{{ item.dest }}"
+  ansible.builtin.shell: mediavaultctl add --backup-name "{{ item.name }}" --source-folder "{{ item.source }}" --dest-folder "{{ item.dest }}"
   when: not backup_marker.stat.exists
 
 ...
diff --git a/playbooks/mediaworker.yml b/playbooks/mediaworker.yml
index f77e59e90a8ced08f5d5f28b3fd61655f41fbba5..0922e95a2019918fc17712b6f4f17908363ca721 100755
--- a/playbooks/mediaworker.yml
+++ b/playbooks/mediaworker.yml
@@ -9,11 +9,11 @@
   post_tasks:
     - name: configure network
       when: network_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: network
     - name: configure proxy
       when: proxy_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: proxy
 
 ...
diff --git a/playbooks/mirismanager.yml b/playbooks/mirismanager.yml
index 8abe72e7cb846028990d2d8b48f63f65f032f4bb..0cab6e08f7e6e3a219b394787e1b1ab9c8a65dba 100755
--- a/playbooks/mirismanager.yml
+++ b/playbooks/mirismanager.yml
@@ -9,15 +9,15 @@
   post_tasks:
     - name: deploy letsencrypt certificate
       when: letsencrypt_enabled | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: letsencrypt
     - name: configure network
       when: network_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: network
     - name: configure proxy
       when: proxy_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: proxy
 
 ...
diff --git a/playbooks/munin/munin-server.yml b/playbooks/munin/munin-server.yml
index cebc054e0c78d283740c0f641564206936f9a4b7..9408dcb017789e6a1e500596a462c9a167a2ea3a 100644
--- a/playbooks/munin/munin-server.yml
+++ b/playbooks/munin/munin-server.yml
@@ -6,7 +6,7 @@
   tags: munin
   pre_tasks:
     - name: gather munin_node group facts
-      setup:
+      ansible.builtin.setup:
       delegate_to: "{{ item }}"
       delegate_facts: true
       with_items: "{{ groups['munin_node'] }}"
diff --git a/playbooks/netcapture.yml b/playbooks/netcapture.yml
index 8515d5dc9cac5d3c8aa9c9cddf92e5fe7f1d307a..29c756d942a9fc6c270d6d80d134881b385fab08 100755
--- a/playbooks/netcapture.yml
+++ b/playbooks/netcapture.yml
@@ -9,11 +9,11 @@
   post_tasks:
     - name: configure network
       when: network_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: network
     - name: configure proxy
       when: proxy_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: proxy
 
 ...
diff --git a/playbooks/netcapture/deploy-standalone.yml b/playbooks/netcapture/deploy-standalone.yml
index d9fef2af601d634c16c42a573d203f1ef410b623..f9b55731471ecaae307b03b2b9b39f9a49c031f9 100644
--- a/playbooks/netcapture/deploy-standalone.yml
+++ b/playbooks/netcapture/deploy-standalone.yml
@@ -22,7 +22,7 @@
         ferm_input_rules: "{{ server_ferm_input_rules }}"
         ferm_output_rules: "{{ server_ferm_output_rules }}"
         ferm_global_settings: "{{ server_ferm_global_settings }}"
-      include_role:
+      ansible.builtin.include_role:
         name: ferm-configure
 
 - import_playbook: deploy-minimal.yml
diff --git a/playbooks/postgres-ha.yml b/playbooks/postgres-ha.yml
index 4920cb661aafd83188c312417fdc6e8c2d9d1dd5..b7fc0a0ab4066aba886ff713e25be3f4665c0b69 100755
--- a/playbooks/postgres-ha.yml
+++ b/playbooks/postgres-ha.yml
@@ -6,15 +6,15 @@
   tags: postgres
   pre_tasks:
     - name: check that repmgr_node_id is set
-      assert:
+      ansible.builtin.assert:
         that: repmgr_node_id != ""
         quiet: true
     - name: check that repmgr_primary_node is set
-      assert:
+      ansible.builtin.assert:
         that: repmgr_primary_node != ""
         quiet: true
     - name: install psycopg2
-      apt:
+      ansible.builtin.apt:
         force_apt_get: true
         install_recommends: false
         name: python3-psycopg2
@@ -29,7 +29,7 @@
   tags: ['postgres', 'mediaserver']
   pre_tasks:
     - name: check that haproxy is configured
-      assert:
+      ansible.builtin.assert:
         that: hap_config_listen != ""
         quiet: true
   roles:
diff --git a/playbooks/postgres-maintenance/fenced_to_standby.yml b/playbooks/postgres-maintenance/fenced_to_standby.yml
index af00c7ee1c4af6ec99abdad263eb4eb3a4308b4e..2bb1400a4c01a41261f58e55859be9162b0d836b 100644
--- a/playbooks/postgres-maintenance/fenced_to_standby.yml
+++ b/playbooks/postgres-maintenance/fenced_to_standby.yml
@@ -4,23 +4,23 @@
   hosts: postgres_fenced
   tasks:
     - name: fail if node status is not fenced
-      fail:
+      ansible.builtin.fail:
         msg: "Current status {{ rephacheck['stdout'] }} must be fenced."
       when: rephacheck['stdout'] != "fenced"
 
     - name: stop postgresql
-      systemd:
+      ansible.builtin.systemd:
         name: postgresql
         state: stopped
 
     - name: delete postgresql data directory
-      file:
+      ansible.builtin.file:
         path: /var/lib/postgresql/11/main/
         state: absent
         force: true
 
     - name: copy data from primary
-      command: >
+      ansible.builtin.command: >
         repmgr -f /etc/postgresql/11/main/repmgr.conf
           --force --verbose
           standby clone
@@ -33,13 +33,13 @@
       changed_when: false
 
     - name: start postgresql
-      systemd:
+      ansible.builtin.systemd:
         name: postgresql
         state: started
       when: copy_from_primary is succeeded
 
     - name: register node as standby
-      command: "repmgr -f /etc/postgresql/11/main/repmgr.conf --force --verbose standby register"
+      ansible.builtin.command: "repmgr -f /etc/postgresql/11/main/repmgr.conf --force --verbose standby register"
       become: true
       become_user: postgres
       when: copy_from_primary is succeeded
diff --git a/playbooks/postgres-maintenance/rephacheck_status.yml b/playbooks/postgres-maintenance/rephacheck_status.yml
index 75019cdc6d9110ca001fc0aa21e1f469e546398d..4984ead9f2595631429d5e9543934611956c3000 100644
--- a/playbooks/postgres-maintenance/rephacheck_status.yml
+++ b/playbooks/postgres-maintenance/rephacheck_status.yml
@@ -4,12 +4,12 @@
   hosts: postgres_primary:postgres_standby:postgres_fenced
   tasks:
     - name: get cluster state
-      command: "rephacheck"
+      ansible.builtin.command: "rephacheck"
       register: rephacheck
       changed_when: false
 
     - name: show status for each node
-      debug:
+      ansible.builtin.debug:
         msg: "Current node {{ ansible_hostname }} status {{ rephacheck['stdout'] }}"
       when: rephacheck['stdout'] | length > 0
 
diff --git a/playbooks/postgres-maintenance/restart_repmgrd.yml b/playbooks/postgres-maintenance/restart_repmgrd.yml
index 7a01b80478e6e385a91fe47c9b4c70de505dd916..68d0da2db2ab6dd61e21132171e3da117f07ae6c 100644
--- a/playbooks/postgres-maintenance/restart_repmgrd.yml
+++ b/playbooks/postgres-maintenance/restart_repmgrd.yml
@@ -4,13 +4,13 @@
   hosts: postgres
   tasks:
     - name: kill repmgrd
-      command: "pkill repmgrd"
+      ansible.builtin.command: "pkill repmgrd"
       # TOFIX: implement a proper verification
       changed_when: false
       failed_when: false
 
     - name: restart repmgrd
-      systemd:
+      ansible.builtin.systemd:
         name: repmgrd
         state: restarted
 
diff --git a/playbooks/postgres-maintenance/standby_to_primary.yml b/playbooks/postgres-maintenance/standby_to_primary.yml
index bfce1c64b61e4bf28131f2a5dcb4907c2b35e3b0..b073f824f9c1d4ac52f28a7ba4d920ae9267c72f 100644
--- a/playbooks/postgres-maintenance/standby_to_primary.yml
+++ b/playbooks/postgres-maintenance/standby_to_primary.yml
@@ -4,17 +4,17 @@
   hosts: postgres_standby
   tasks:
     - name: fail if node status is not standby
-      fail:
+      ansible.builtin.fail:
         msg: "Current status {{ rephacheck['stdout'] }} must be standby."
       when: rephacheck['stdout'] != "standby"
     - name: check if node is currently in standby
-      command: "repmgr standby switchover -f /etc/postgresql/11/main/repmgr.conf --siblings-follow --dry-run"
+      ansible.builtin.command: "repmgr standby switchover -f /etc/postgresql/11/main/repmgr.conf --siblings-follow --dry-run"
       become: true
       become_user: postgres
       when: rephacheck['stdout'] == "standby"
       register: standby_dry_run
     - name: switch standby node to primary
-      command: "repmgr standby switchover -f /etc/postgresql/11/main/repmgr.conf --siblings-follow"
+      ansible.builtin.command: "repmgr standby switchover -f /etc/postgresql/11/main/repmgr.conf --siblings-follow"
       become: true
       become_user: postgres
       when:
diff --git a/playbooks/postgres.yml b/playbooks/postgres.yml
index 11f8a6c109963aef43a5d0c268461f0bde94356c..3d873561d335bcbfa554b4cb01d708bffe8e6cdc 100755
--- a/playbooks/postgres.yml
+++ b/playbooks/postgres.yml
@@ -9,11 +9,11 @@
   post_tasks:
     - name: configure network
       when: network_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: network
     - name: configure proxy
       when: proxy_apply | d(false)
-      include_role:
+      ansible.builtin.include_role:
         name: proxy
 
 ...
diff --git a/playbooks/site.yml b/playbooks/site.yml
index 7943f047556fa37ee935cfe1c78925ab7afe39bf..9092d56176326c7f6bf58b29f5b724036073aa7c 100755
--- a/playbooks/site.yml
+++ b/playbooks/site.yml
@@ -9,7 +9,7 @@
     - name: ensure python3 is installed
       register: python_install
       changed_when: "'es_pyinstall' in python_install.stdout_lines"
-      raw: command -v python3 || echo es_pyinstall && apt update && apt install -y python3-minimal python3-apt iproute2
+      ansible.builtin.raw: command -v python3 || echo es_pyinstall && apt update && apt install -y python3-minimal python3-apt iproute2
       tags: always
 
 - import_playbook: "{{ 'postgres-ha' if groups['postgres']|d('') | length > 1 else 'postgres' }}.yml"
diff --git a/playbooks/tests/data-partition.yml b/playbooks/tests/data-partition.yml
index 49bf0c4130296c68f9498f167ffa49c0b75717be..cbd9cac0dc429a3e4f9148d43f2eb9f901d18870 100755
--- a/playbooks/tests/data-partition.yml
+++ b/playbooks/tests/data-partition.yml
@@ -7,7 +7,7 @@
   tasks:
 
     - name: verify /data partition existence
-      shell: findmnt /data
+      ansible.builtin.shell: findmnt /data
       register: data_exist
       failed_when: false
       changed_when: false
@@ -16,17 +16,17 @@
     - block:
 
         - name: get /data size
-          shell: df -BG /data --output=size | tail -n1 | grep -o '[0-9]*'
+          ansible.builtin.shell: df -BG /data --output=size | tail -n1 | grep -o '[0-9]*'
           register: data_size
           failed_when: false
           changed_when: false
 
         - name: print size
-          debug:
+          ansible.builtin.debug:
             msg: "/data size is {{ data_size.stdout }}G"
 
         - name: create a test directory in /data
-          file:
+          ansible.builtin.file:
             path: /data/test
             state: directory
             mode: '0755'
@@ -36,7 +36,7 @@
           changed_when: false
 
         - name: create a test file in /data
-          file:
+          ansible.builtin.file:
             state: touch
             path: /data/test/file
             mode: '0644'
@@ -54,13 +54,13 @@
     - block:
 
         - name: get /home size
-          shell: df -BG /home --output=size | tail -n1 | grep -o '[0-9]*'
+          ansible.builtin.shell: df -BG /home --output=size | tail -n1 | grep -o '[0-9]*'
           register: home_size
           failed_when: false
           changed_when: false
 
         - name: verify size
-          debug:
+          ansible.builtin.debug:
             msg: "/home size is too short ({{ home_size.stdout }}G < 200G)"
           when: home_size.stdout | int < 200
           ignore_errors: true
diff --git a/playbooks/tests/exec-tester.yml b/playbooks/tests/exec-tester.yml
index 1165646d4d72af900b82ed183cb95c6fef6b7651..8c22df2f3f4df02368e9b47026b74057ef7e6114 100755
--- a/playbooks/tests/exec-tester.yml
+++ b/playbooks/tests/exec-tester.yml
@@ -10,11 +10,11 @@
   tasks:
     - name: remove envsetup tester log
       when: tester_reset_log
-      file:
+      ansible.builtin.file:
         path: /root/envsetup/tests/logs/tester_pb.log
         state: absent
     - name: envsetup tester
-      shell:
+      ansible.builtin.shell:
         cmd: |
           set -o pipefail
           python3 /root/envsetup/tests/tester.py 2>&1 | tee /root/envsetup/tests/logs/tester_pb.log
diff --git a/playbooks/tests/firewall-rules.yml b/playbooks/tests/firewall-rules.yml
index c0990505e570c7fc3a017fd607d5cc2865e7f46d..9165a3586491dbc78d831a345539e3ff55d8d130 100755
--- a/playbooks/tests/firewall-rules.yml
+++ b/playbooks/tests/firewall-rules.yml
@@ -7,7 +7,7 @@
     - name: ensure python3 is installed
       register: python_install
       changed_when: "'es_pyinstall' in python_install.stdout_lines"
-      raw: command -v python3 || echo es_pyinstall && apt update && apt install -y python3-minimal python3-apt iproute2
+      ansible.builtin.raw: command -v python3 || echo es_pyinstall && apt update && apt install -y python3-minimal python3-apt iproute2
       tags: always
 
 - name: SET PORTS TO LISTEN
diff --git a/playbooks/tests/ressources/firewall/listen.yml b/playbooks/tests/ressources/firewall/listen.yml
index f1c97b6056dc5cc959c413f5daa11fab134d117e..997e084aada933bec6110a89ae047a05e627de0a 100644
--- a/playbooks/tests/ressources/firewall/listen.yml
+++ b/playbooks/tests/ressources/firewall/listen.yml
@@ -3,7 +3,7 @@
-- debug:
+- ansible.builtin.debug:
     msg: "On {{ outer_item.groupname }} server(s) put {{ outer_item.ports }} port(s) in listen mode"
 
-- shell: "nohup timeout 300 nc -lp {{ item }} >/dev/null 2>&1 &"
+- ansible.builtin.shell: "nohup timeout 300 nc -lp {{ item }} >/dev/null 2>&1 &"
   ignore_errors: true
   loop: "{{ outer_item.ports }}"
   changed_when: false
diff --git a/playbooks/upgrade.yml b/playbooks/upgrade.yml
index 8040b497504688d373c73654e518457feb15280b..4aac96cfbef0c0d069071ad82c46ebcccd231f38 100755
--- a/playbooks/upgrade.yml
+++ b/playbooks/upgrade.yml
@@ -7,7 +7,7 @@
 
     - name: apt-get dist-upgrade
       when: ansible_os_family == "Debian"
-      apt:
+      ansible.builtin.apt:
         force_apt_get: true
         install_recommends: false
         cache_valid_time: 3600
@@ -18,7 +18,7 @@
 
     - name: yum upgrade
       when: ansible_os_family == "RedHat"
-      yum:
+      ansible.builtin.yum:
         name: "*"
         state: latest
 
diff --git a/roles/bench-server/tasks/main.yml b/roles/bench-server/tasks/main.yml
index f2c0b828f287e8fb6ae4f47b215561a3bd546828..38923fdfc2bca0cebab29ade2c357bf280dcf76f 100644
--- a/roles/bench-server/tasks/main.yml
+++ b/roles/bench-server/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: install bench-server packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     update_cache: true
@@ -12,13 +12,13 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: ensure configuration directory exists
-  file:
+  ansible.builtin.file:
     path: /etc/mediaserver
     state: directory
     mode: '755'
 
 - name: benchmark configuration settings
-  copy:
+  ansible.builtin.copy:
     dest: /etc/mediaserver/bench-conf.json
     content: |
       {
@@ -33,22 +33,22 @@
     mode: '644'
 
 - name: reload systemd daemon
-  systemd:
+  ansible.builtin.systemd:
     daemon_reload: true
 
 - name: restart bench-server
-  systemd:
+  ansible.builtin.systemd:
     name: bench-server
     state: restarted
 
 - name: streaming configuration settings
-  template:
+  ansible.builtin.template:
     src: bench-streaming.conf.j2
     dest: /etc/mediaserver/bench-streaming.conf
     mode: '644'
 
 - name: clone ms-testing-suite repository
-  git:
+  ansible.builtin.git:
     repo: "{{ bench_stream_repo }}"
     version: stable
     dest: /usr/share/ms-testing-suite
@@ -56,7 +56,7 @@
     force: true
 
 - name: copy configuration for testing tools
-  copy:
+  ansible.builtin.copy:
     src: /etc/mediaserver/bench-streaming.conf
     dest: /usr/share/ms-testing-suite/config.json
     remote_src: true
@@ -66,7 +66,7 @@
   when:
     - not offline_mode | d(false)
     - not in_docker | d(false)
-  apt_key:
+  ansible.builtin.apt_key:
     url: https://download.docker.com/linux/debian/gpg
     state: present
 
@@ -74,7 +74,7 @@
   when:
     - not offline_mode | d(false)
     - not in_docker | d(false)
-  apt_repository:
+  ansible.builtin.apt_repository:
     repo: "deb https://download.docker.com/linux/debian buster stable"
     state: present
     update_cache: true
@@ -83,7 +83,7 @@
   when:
     - not offline_mode | d(false)
     - not in_docker | d(false)
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     update_cache: true
@@ -97,7 +97,7 @@
   when:
     - not offline_mode | d(false)
     - not in_docker | d(false)
-  command:
+  ansible.builtin.command:
     cmd: make build_docker_img
     chdir: /usr/share/ms-testing-suite
   run_once: true
diff --git a/roles/bench-worker/tasks/main.yml b/roles/bench-worker/tasks/main.yml
index 90f4b1b836445bd4180821218c5383556affb2c2..56423bd06ccd987fd4bfd8b711536e1cb95fb998 100644
--- a/roles/bench-worker/tasks/main.yml
+++ b/roles/bench-worker/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: install bench-worker packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     update_cache: true
@@ -12,13 +12,13 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: ensure configuration directory exists
-  file:
+  ansible.builtin.file:
     path: /etc/mediaserver
     state: directory
     mode: '755'
 
 - name: benchmark configuration settings
-  copy:
+  ansible.builtin.copy:
     dest: /etc/mediaserver/bench-conf.json
     content: |
       {
@@ -33,11 +33,11 @@
     mode: '644'
 
 - name: reload systemd daemon
-  systemd:
+  ansible.builtin.systemd:
     daemon_reload: true
 
 - name: restart bench-worker
-  systemd:
+  ansible.builtin.systemd:
     name: bench-worker
     state: restarted
 
diff --git a/roles/celerity/handlers/main.yml b/roles/celerity/handlers/main.yml
index f76e4aa3cc43c6982db20df8bfce0587d771fd57..fd32b76eb927e63c7f350cc4ff0825aaa0f99e54 100644
--- a/roles/celerity/handlers/main.yml
+++ b/roles/celerity/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: restart celerity-server
-  service:
+  ansible.builtin.service:
     name: celerity-server
     state: restarted
 
diff --git a/roles/celerity/tasks/main.yml b/roles/celerity/tasks/main.yml
index ba038899ccba301bc4bcaaee3e2f315106d57325..5d223ce619ad0faa354d927c3ceaa27f220dfe92 100644
--- a/roles/celerity/tasks/main.yml
+++ b/roles/celerity/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: celerity server install
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: celerity-server
@@ -11,13 +11,13 @@
 
 - name: config celerity server
   notify: restart celerity-server
-  template:
+  ansible.builtin.template:
     src: celerity-config.py.j2
     dest: /etc/celerity/config.py
     mode: '644'
 
 - name: ensure celerity server is running
-  service:
+  ansible.builtin.service:
     name: celerity-server
     enabled: true
     state: started
@@ -31,10 +31,10 @@
     ferm_input_rules: "{{ celerity_ferm_input_rules }}"
     ferm_output_rules: "{{ celerity_ferm_output_rules }}"
     ferm_global_settings: "{{ celerity_ferm_global_settings }}"
-  include_role:
+  ansible.builtin.include_role:
     name: ferm-configure
 
 - name: flush handlers
-  meta: flush_handlers
+  ansible.builtin.meta: flush_handlers
 
 ...
diff --git a/roles/conf/tasks/main.yml b/roles/conf/tasks/main.yml
index fcf8c4e5d1ada848970654dc300e73e48187fca4..2089992b53de4441dae2731ea692efd5731ade21 100644
--- a/roles/conf/tasks/main.yml
+++ b/roles/conf/tasks/main.yml
@@ -4,11 +4,11 @@
   when:
     - proxy_http | d()
     - proxy_https | d()
-  include_role:
+  ansible.builtin.include_role:
     name: proxy
 
 - name: install requirements
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ conf_req_packages }}"
@@ -18,7 +18,7 @@
 
 - name: install online requirements
   when: not offline_mode | d(false)
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ conf_req_packages_online }}"
@@ -28,14 +28,14 @@
 
 - name: generate root ssh key pair
   register: conf_root
-  user:
+  ansible.builtin.user:
     name: root
     generate_ssh_key: true
     ssh_key_type: ed25519
     ssh_key_file: .ssh/id_ed25519
 
 - name: create conf dir
-  file:
+  ansible.builtin.file:
     path: "{{ conf_dir }}"
     state: directory
     mode: "0700"
@@ -43,7 +43,7 @@
 - name: check if auto-generated-conf.sh exists
   check_mode: false
   register: check_auto_conf
-  stat:
+  ansible.builtin.stat:
     path: "{{ conf_dir }}/auto-generated-conf.sh"
 
 - name: download conf and update ssh public key with activation key
@@ -54,7 +54,7 @@
     - conf_dl_ak.status != 200
     - not check_auto_conf.stat.exists
     - not skyreach_system_key
-  uri:
+  ansible.builtin.uri:
     url: https://{{ conf_host }}/erp/credentials/envsetup-conf.sh
     method: POST
     body_format: form-urlencoded
@@ -73,7 +73,7 @@
   failed_when:
     - conf_dl_sk.status != 200
     - not check_auto_conf.stat.exists
-  uri:
+  ansible.builtin.uri:
     url: https://{{ conf_host }}/erp/credentials/envsetup-conf.sh
     method: POST
     body_format: form-urlencoded
@@ -88,7 +88,7 @@
     - "{{ conf_dl_ak }}"
     - "{{ conf_dl_sk }}"
   when: item is changed
-  copy:
+  ansible.builtin.copy:
     content: "{{ item.content }}"
     dest: "{{ conf_dir }}/auto-generated-conf.sh"
     force: true
@@ -98,13 +98,13 @@
 - name: check if auto-generated-conf.sh exists
   check_mode: false
   register: check_auto_conf
-  stat:
+  ansible.builtin.stat:
     path: "{{ conf_dir }}/auto-generated-conf.sh"
 
 - name: check if conf.sh exists
   check_mode: false
   register: check_local_conf
-  stat:
+  ansible.builtin.stat:
     path: "{{ conf_dir }}/conf.sh"
 
 - name: load generated conf
@@ -127,7 +127,7 @@
 
 - name: debug variables
   when: conf_debug
-  debug:
+  ansible.builtin.debug:
     var: ansible_facts
 
 ...
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index f2883ab5f11aef2ee3419aa1d30cf83683905d5e..5334f82e6aea6dc80f8094ab5354695c05d408e4 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: requirements install
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name:
@@ -17,14 +17,14 @@
 - name: add docker key
   when:
     - not offline_mode | d(false)
-  apt_key:
+  ansible.builtin.apt_key:
     url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg
     state: present
 
 - name: add docker debian repository
   when:
     - not offline_mode | d(false)
-  apt_repository:
+  ansible.builtin.apt_repository:
     repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release | lower }} stable
     state: present
     update_cache: true
@@ -32,7 +32,7 @@
 - name: install docker
   when:
     - not offline_mode | d(false)
-  apt:
+  ansible.builtin.apt:
     name: docker-ce
     state: latest
     update_cache: true
@@ -43,7 +43,7 @@
 - name: docker service
   when:
     - not offline_mode | d(false)
-  systemd:
+  ansible.builtin.systemd:
     name: docker
     enabled: true
     state: started
@@ -51,7 +51,7 @@
 - name: install requirements for docker python binding
   when:
     - not offline_mode | d(false)
-  apt:
+  ansible.builtin.apt:
     name: python3-docker
     state: latest
     update_cache: true
diff --git a/roles/elastic/handlers/main.yml b/roles/elastic/handlers/main.yml
index 65d7d70557485bf967156b1feea618560fcca49f..c40d88041f8e35a0ba692a8c582ec79654605d32 100644
--- a/roles/elastic/handlers/main.yml
+++ b/roles/elastic/handlers/main.yml
@@ -1,11 +1,11 @@
 ---
 - name: restart kibana
-  service:
+  ansible.builtin.service:
     name: kibana
     state: restarted
 
 - name: restart apm-server
-  service:
+  ansible.builtin.service:
     name: apm-server
     state: restarted
 
diff --git a/roles/elastic/tasks/main.yml b/roles/elastic/tasks/main.yml
index 3a18e025c97642216e0e798cd48cf634f3be0b4e..1a8c6ceeee8fadb0e61cb26b1fe1e5e07a164c50 100644
--- a/roles/elastic/tasks/main.yml
+++ b/roles/elastic/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: install kibana package
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: kibana
@@ -10,14 +10,14 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: deploy kibana configuration
-  template:
+  ansible.builtin.template:
     src: kibana.yml.j2
     dest: /etc/kibana/kibana.yml
     mode: '644'
   notify: restart kibana
 
 - name: install apm-server package
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: apm-server
@@ -27,7 +27,7 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: deploy apm-server configuration
-  template:
+  ansible.builtin.template:
     src: apm-server.yml.j2
     dest: /etc/apm-server/apm-server.yml
     mode: '644'
diff --git a/roles/fail2ban/handlers/main.yml b/roles/fail2ban/handlers/main.yml
index 83588db6e0e8b89317cd29fb2ae2a9767955f47d..191b0cc20b4f9a73bc30da5921c9a6c6f950adfd 100644
--- a/roles/fail2ban/handlers/main.yml
+++ b/roles/fail2ban/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: restart fail2ban
-  systemd:
+  ansible.builtin.systemd:
     name: fail2ban
     state: restarted
 
diff --git a/roles/fail2ban/tasks/main.yml b/roles/fail2ban/tasks/main.yml
index 45640e1795f44d762b8d83e2a6d542f71a66b275..f64a9161730e232e2191338a18a6c1df5ff7fa7f 100644
--- a/roles/fail2ban/tasks/main.yml
+++ b/roles/fail2ban/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ f2b_packages }}"
@@ -12,13 +12,13 @@
 
 - name: jail defaults
   notify: restart fail2ban
-  template:
+  ansible.builtin.template:
     src: jail.local.j2
     dest: /etc/fail2ban/jail.local
     mode: '644'
 
 - name: service
-  systemd:
+  ansible.builtin.systemd:
     name: fail2ban
     enabled: true
     state: started
diff --git a/roles/ferm-configure/handlers/main.yml b/roles/ferm-configure/handlers/main.yml
index dec631c0142f85c713dc89830dba3d0abee43e14..13856b0370876966bbcd8ac4a5625ff53e26bd9d 100644
--- a/roles/ferm-configure/handlers/main.yml
+++ b/roles/ferm-configure/handlers/main.yml
@@ -2,13 +2,13 @@
 
 - name: restart ferm
   when: ansible_facts.services['ferm.service'] is defined
-  systemd:
+  ansible.builtin.systemd:
     name: ferm
     state: restarted
 
 - name: restart fail2ban
   when: ansible_facts.services['fail2ban.service'] is defined
-  systemd:
+  ansible.builtin.systemd:
     name: fail2ban
     state: started
 ...
diff --git a/roles/ferm-configure/tasks/main.yml b/roles/ferm-configure/tasks/main.yml
index af6bdc134bb19f99ce5c73035608a315d527fb37..eb141341250c586cc1329a9cf297ce188e54a8cd 100644
--- a/roles/ferm-configure/tasks/main.yml
+++ b/roles/ferm-configure/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: populate service facts
-  service_facts:
+  ansible.builtin.service_facts:
 
 - name: directories
   loop:
@@ -9,7 +9,7 @@
     - /etc/ferm/input.d
     - /etc/ferm/output.d
     - /etc/ferm/forward.d
-  file:
+  ansible.builtin.file:
     path: "{{ item }}"
     state: directory
     mode: '755'
@@ -19,7 +19,7 @@
   notify:
     - restart ferm
     - restart fail2ban
-  copy:
+  ansible.builtin.copy:
     dest: /etc/ferm/ferm.d/{{ ferm_rules_filename }}.conf
     content: "{{ ferm_global_settings }}"
     mode: '644'
@@ -29,7 +29,7 @@
   notify:
     - restart ferm
     - restart fail2ban
-  template:
+  ansible.builtin.template:
     src: ferm_rules_input.conf.j2
     dest: /etc/ferm/input.d/{{ ferm_rules_filename }}.conf
     mode: '644'
@@ -39,7 +39,7 @@
   notify:
     - restart ferm
     - restart fail2ban
-  template:
+  ansible.builtin.template:
     src: ferm_rules_output.conf.j2
     dest: /etc/ferm/output.d/{{ ferm_rules_filename }}.conf
     mode: '644'
@@ -49,7 +49,7 @@
   notify:
     - restart ferm
     - restart fail2ban
-  template:
+  ansible.builtin.template:
     src: ferm_rules_forward.conf.j2
     dest: /etc/ferm/forward.d/{{ ferm_rules_filename }}.conf
     mode: '644'
diff --git a/roles/ferm-install/handlers/main.yml b/roles/ferm-install/handlers/main.yml
index c2f8c0cb26f95dae4bacb2b598273310abd7bc20..cd10766d5ab5821f8baeee93e742eadd9ef0d6ab 100644
--- a/roles/ferm-install/handlers/main.yml
+++ b/roles/ferm-install/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: restart ferm
-  systemd:
+  ansible.builtin.systemd:
     name: ferm
     state: restarted
 
diff --git a/roles/ferm-install/tasks/main.yml b/roles/ferm-install/tasks/main.yml
index a2deae1d0970cb9731e74c9534dc4b3da0ddbda6..22c8b2e43792e58c3c8c013c0cb9d976c8c14fda 100644
--- a/roles/ferm-install/tasks/main.yml
+++ b/roles/ferm-install/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ ferm_packages }}"
@@ -11,14 +11,14 @@
 
 - name: configuration
   notify: restart ferm
-  template:
+  ansible.builtin.template:
     src: ferm.conf.j2
     dest: /etc/ferm/ferm.conf
     backup: true
     mode: '644'
 
 - name: service
-  systemd:
+  ansible.builtin.systemd:
     name: ferm
     enabled: true
     masked: false
diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml
index 7e29375f6a5d93d91b74bdd14e2b3fc5e4f42f66..8c0e406861b2c586a396f64d18f3a4d00ca20f04 100644
--- a/roles/haproxy/handlers/main.yml
+++ b/roles/haproxy/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: reload haproxy
-  systemd:
+  ansible.builtin.systemd:
     name: haproxy
     state: reloaded
 
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
index 32c9c4e4fa0d1d39d232b8225075bfb30caff341..f610bcc37b2d1b8eb705899d8d52f0f635221cb6 100644
--- a/roles/haproxy/tasks/main.yml
+++ b/roles/haproxy/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: install packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ hap_packages }}"
@@ -11,12 +11,12 @@
 
 - name: configure
   notify: reload haproxy
-  template:
+  ansible.builtin.template:
     src: haproxy.cfg.j2
     dest: /etc/haproxy/haproxy.cfg
     backup: true
     mode: '644'
 
-- meta: flush_handlers  # noqa unnamed-task
+- ansible.builtin.meta: flush_handlers  # noqa unnamed-task
 
 ...
diff --git a/roles/init/tasks/main.yml b/roles/init/tasks/main.yml
index 8ed302525e5103c3dc6a8c5fa90adb5e697bed5d..5f4c3895a3216e64cf00118d6d4f4be7d79480c3 100644
--- a/roles/init/tasks/main.yml
+++ b/roles/init/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: install initial packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ init_packages }}"
@@ -11,7 +11,7 @@
 
 - name: configure proxy
   when: proxy_when is not defined or proxy_when != "end"
-  include_role:
+  ansible.builtin.include_role:
     name: proxy
     allow_duplicates: true
 
diff --git a/roles/letsencrypt/handlers/main.yml b/roles/letsencrypt/handlers/main.yml
index 38fab58a222d274df7c367ebbe7a1853926660cf..8a97cff137ee67aeb5dd79f2058fa75ba8a28f00 100644
--- a/roles/letsencrypt/handlers/main.yml
+++ b/roles/letsencrypt/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: restart nginx
-  service:
+  ansible.builtin.service:
     name: nginx
     state: restarted
 
diff --git a/roles/letsencrypt/tasks/main.yml b/roles/letsencrypt/tasks/main.yml
index 3bd8541962090a4d1be4f49a0cf3a52235c62b0e..97bb2fceb32e96971eb3403dbe106243f9399629 100644
--- a/roles/letsencrypt/tasks/main.yml
+++ b/roles/letsencrypt/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: install certbot
-  package:
+  ansible.builtin.package:
     force_apt_get: true
     install_recommends: false
     name: certbot
@@ -10,7 +10,7 @@
   when: letsencrypt_domains == []
   changed_when: false
   register: letsencryt_nginx_output
-  shell:
+  ansible.builtin.shell:
     executable: /bin/bash
     cmd: >
       set -o pipefail;
@@ -18,12 +18,12 @@
 
 - name: save result as list
   when: letsencrypt_domains == []
-  set_fact:
+  ansible.builtin.set_fact:
     letsencrypt_domains: "{{ letsencryt_nginx_output.stdout.split() }}"
 
 - name: save domains list in a file
   register: letsencrypt_save_list
-  copy:
+  ansible.builtin.copy:
     dest: /etc/letsencrypt/domains.txt
     content: |
       {% for domain in letsencrypt_domains %}
@@ -32,19 +32,19 @@
     mode: '644'
 
 - name: create webroot directory
-  file:
+  ansible.builtin.file:
     path: "{{ letsencrypt_webroot }}"
     state: directory
     mode: '755'
 
 - name: create pre hook directory
-  file:
+  ansible.builtin.file:
     path: /etc/letsencrypt/renewal-hooks/pre
     state: directory
     mode: '755'
 
 - name: create pre hook script
-  copy:
+  ansible.builtin.copy:
     dest: /etc/letsencrypt/renewal-hooks/pre/mkdir
     mode: 0755
     content: |
@@ -54,13 +54,13 @@
       chmod 755 "$CERTBOT_DOCROOT"
 
 - name: create deploy hook directory
-  file:
+  ansible.builtin.file:
     path: /etc/letsencrypt/renewal-hooks/deploy
     state: directory
     mode: '755'
 
 - name: create deploy hook script
-  copy:
+  ansible.builtin.copy:
     dest: /etc/letsencrypt/renewal-hooks/deploy/nginx
     mode: 0755
     content: |
@@ -74,7 +74,7 @@
     - letsencrypt_save_list is changed
   register: letsencrypt_dry_run
   ignore_errors: true
-  command:
+  ansible.builtin.command:
     cmd: >
       certbot certonly
         --dry-run
@@ -85,13 +85,13 @@
 
 - name: remove domains list file in case of failure
   when: letsencrypt_dry_run is failed
-  file:
+  ansible.builtin.file:
     path: "{{ letsencrypt_save_list.dest }}"
     state: absent
 
 - name: exit in case of failure
   when: letsencrypt_dry_run is failed
-  fail:
+  ansible.builtin.fail:
 
 - name: generate certificates
   notify: restart nginx
@@ -99,7 +99,7 @@
     - letsencrypt_domains != []
     - letsencrypt_save_list is changed
     - letsencrypt_dry_run is succeeded
-  command:
+  ansible.builtin.command:
     cmd: >
       certbot certonly
         {% if letsencrypt_testing %}--staging{% endif %}
@@ -114,7 +114,7 @@
     - letsencrypt_save_list is changed
     - letsencrypt_dry_run is succeeded
   notify: restart nginx
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/nginx/conf.d/ssl_certificate.conf
     regexp: 'ssl_certificate\s+([\w/\-\_\.]+);'
     line: 'ssl_certificate /etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/fullchain.pem;'
@@ -125,7 +125,7 @@
     - letsencrypt_save_list is changed
     - letsencrypt_dry_run is succeeded
   notify: restart nginx
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/nginx/conf.d/ssl_certificate.conf
     regexp: 'ssl_certificate_key\s+([\w/\-\_\.]+);'
     line: 'ssl_certificate_key /etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/privkey.pem;'
diff --git a/roles/live/handlers/main.yml b/roles/live/handlers/main.yml
index b7774856aa335af9eb5885e0efcd4e2093c9e167..b0abf18b3894fe1af147e0ca063f8e9e9a913d58 100644
--- a/roles/live/handlers/main.yml
+++ b/roles/live/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: restart nginx
-  systemd:
+  ansible.builtin.systemd:
     name: nginx
     state: restarted
 
diff --git a/roles/live/tasks/main.yml b/roles/live/tasks/main.yml
index 1f3c1a6f578e94ee717a1101eb91f28b4883ddfb..3d3c1d1c9591d597c6dcb3ff7c35f9f697e6f6b7 100644
--- a/roles/live/tasks/main.yml
+++ b/roles/live/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: Live packages installation
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ debian_packages }}"
@@ -21,7 +21,7 @@
 
 - name: Changing the permissions on the TMPFS directory
   notify: restart nginx
-  file:
+  ansible.builtin.file:
     path: /var/tmp/nginx-rtmp
     owner: nginx
     group: root
diff --git a/roles/lxc/handlers/main.yml b/roles/lxc/handlers/main.yml
index 527eef95797330f7384e75e0ede40a537ea79169..a237a6dbb4186d659b0a05546ba1bc5201a077ef 100644
--- a/roles/lxc/handlers/main.yml
+++ b/roles/lxc/handlers/main.yml
@@ -1,14 +1,14 @@
 ---
 
 - name: restart lxc
-  systemd:
+  ansible.builtin.systemd:
     name: lxc
     state: restarted
   changed_when: true
   notify: restart lxc-net
 
 - name: restart lxc-net
-  systemd:
+  ansible.builtin.systemd:
     name: lxc-net
     state: restarted
 
diff --git a/roles/lxc/tasks/main.yml b/roles/lxc/tasks/main.yml
index ed7d96a03af59fb4496d3c5631335bd3d7d93b0e..64e69613709438f10a58145d3e1427711b3d321b 100644
--- a/roles/lxc/tasks/main.yml
+++ b/roles/lxc/tasks/main.yml
@@ -3,7 +3,7 @@
 - name: Masquerade bridge configuration
   block:
     - name: Ask confirmation
-      pause:
+      ansible.builtin.pause:
         prompt: |
           -------------------------------------------------------------------------------------------
           ! WARNING !
@@ -16,13 +16,13 @@
       no_log: true
 
     - name: 'check param is null or invalid'
-      fail: msg='Installation aborted'
+      ansible.builtin.fail: msg='Installation aborted'
       when: not ((confirm_continue.user_input | bool)
             or (confirm_continue.user_input | length == 0))
   when: lxc_network_type == 'host_bridge'
 
 - name: LXC packages installation
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     name:
       - lxc
@@ -35,7 +35,7 @@
 
 - name: Default container configuration
   notify: restart lxc
-  template:
+  ansible.builtin.template:
     src: lxc-default.j2
     dest: /etc/lxc/default.conf
     mode: '644'
@@ -44,7 +44,7 @@
   block:
     - name: Container network configuration
       notify: restart lxc-net
-      template:
+      ansible.builtin.template:
         src: lxc-net.j2
         dest: /etc/default/lxc-net
         mode: '644'
diff --git a/roles/mediacache/handlers/main.yml b/roles/mediacache/handlers/main.yml
index b7774856aa335af9eb5885e0efcd4e2093c9e167..b0abf18b3894fe1af147e0ca063f8e9e9a913d58 100644
--- a/roles/mediacache/handlers/main.yml
+++ b/roles/mediacache/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: restart nginx
-  systemd:
+  ansible.builtin.systemd:
     name: nginx
     state: restarted
 
diff --git a/roles/mediacache/tasks/main.yml b/roles/mediacache/tasks/main.yml
index fc09d6b6b9b6b1ec5630569d2180bb1c5f6803b5..c4c7a0665ce5ca2883a82798d409557d82ba1a81 100644
--- a/roles/mediacache/tasks/main.yml
+++ b/roles/mediacache/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: MediaCache packages installation
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ debian_packages }}"
@@ -11,13 +11,13 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: resolve domain name to localhost
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/hosts
     line: '127.0.1.1 {{ mediacache_url }}'
     backup: true
 
 - name: create mediacache VOD data directory
-  file:
+  ansible.builtin.file:
     dest: '{{ role_mc_vod_folder }}'
     state: directory
     owner: nginx
@@ -25,7 +25,7 @@
     mode: '0700'
 
 - name: create mediacache live data directory
-  file:
+  ansible.builtin.file:
     dest: '{{ role_mc_live_folder }}'
     state: directory
     owner: nginx
@@ -35,49 +35,49 @@
 
 - name: fill the vhost file
   notify: restart nginx
-  replace:
+  ansible.builtin.replace:
     path: /etc/nginx/sites-available/mediacache.conf
     regexp: '^(\s+server_name)\s+.*(;)$'
     replace: '\1 {{ mediacache_url }}\2'
 
 - name: fill the mediacache zones file - VOD folder
   notify: restart nginx
-  replace:
+  ansible.builtin.replace:
     path: /etc/mediacache/nginx-zones.conf
     regexp: '/var/cache/nginx/mediacache-vod'
     replace: '{{ role_mc_vod_folder }}'
 
 - name: fill the mediacache zones file - Live folder
   notify: restart nginx
-  replace:
+  ansible.builtin.replace:
     path: /etc/mediacache/nginx-zones.conf
     regexp: '/var/cache/nginx/mediacache-live'
     replace: '{{ role_mc_live_folder }}'
 
 - name: fill the mediacache zones file - VOD folder size
   notify: restart nginx
-  replace:
+  ansible.builtin.replace:
     path: /etc/mediacache/nginx-zones.conf
     regexp: '(?P<key>keys_zone=mediacache-vod.*max_size=).*(?P<unit>g)'
     replace: '\g<key>{{ role_mc_vod_size }}\g<unit>'
 
 - name: fill the mediacache zones file - Live folder size
   notify: restart nginx
-  replace:
+  ansible.builtin.replace:
     path: /etc/mediacache/nginx-zones.conf
     regexp: '(?P<key>keys_zone=mediacache-live.*max_size=).*(?P<unit>g)'
     replace: '\g<key>{{ role_mc_live_size }}\g<unit>'
 
 - name: fill the nginx VOD proxypass
   notify: restart nginx
-  replace:
+  ansible.builtin.replace:
     path: /etc/mediacache/nginx-proxy-mediaserver.conf
     regexp: '^(proxy_pass)\s+.*(;)$'
     replace: '\1 https://{{ ms_url }}\2'
 
 - name: fill the nginx Live proxypass
   notify: restart nginx
-  replace:
+  ansible.builtin.replace:
     path: /etc/mediacache/nginx-proxy-live.conf
     regexp: '^(proxy_pass)\s+.*(;)$'
     replace: '\1 https://{{ live_url }}\2'
diff --git a/roles/mediaimport/handlers/main.yml b/roles/mediaimport/handlers/main.yml
index f432847852fa10ae85f5385c7087d51337ec5b7b..fa3120ff1e45824b4837588379d1d16ba753588f 100644
--- a/roles/mediaimport/handlers/main.yml
+++ b/roles/mediaimport/handlers/main.yml
@@ -1,31 +1,31 @@
 ---
 
 - name: reload systemd
-  systemd:
+  ansible.builtin.systemd:
     daemon_reload: true
 
 - name: restart pure-ftpd
-  systemd:
+  ansible.builtin.systemd:
     name: pure-ftpd
     state: restarted
 
 - name: restart mysecureshell
-  systemd:
+  ansible.builtin.systemd:
     name: mysecureshell
     state: restarted
 
 - name: restart mediaimport
-  systemd:
+  ansible.builtin.systemd:
     name: mediaimport
     state: restarted
 
 - name: restart fail2ban
-  systemd:
+  ansible.builtin.systemd:
     name: fail2ban
     state: restarted
 
 - name: sftp-verif
-  command:
+  ansible.builtin.command:
     cmd: timeout 30 sftp-verif
 
 ...
diff --git a/roles/mediaimport/tasks/main.yml b/roles/mediaimport/tasks/main.yml
index ba194030e8629edc1f1d74a0fcd7c4f0d5b0463e..01c83410b4b0b481e5bf6d9e94b3c0da94e6a6e5 100644
--- a/roles/mediaimport/tasks/main.yml
+++ b/roles/mediaimport/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: install packages
-  package:
+  ansible.builtin.package:
     force_apt_get: true
     install_recommends: false
     name: "{{ mediaimport_packages }}"
@@ -12,13 +12,13 @@
   loop:
     - /home/ftp/storage/incoming
     - /home/ftp/storage/watchfolder
-  file:
+  ansible.builtin.file:
     path: "{{ item }}"
     state: directory
     mode: '755'
 
 - name: deploy user management script
-  copy:
+  ansible.builtin.copy:
     src: files/mediaimport.py
     dest: /usr/local/bin/mediaimport
     mode: '755'
@@ -29,12 +29,12 @@
     - item.name | d(false)
     - item.passwd | d(false)
   no_log: true
-  command: mediaimport add --yes --user {{ item.name }} --passwd {{ item.passwd }}
+  ansible.builtin.command: mediaimport add --yes --user {{ item.name }} --passwd {{ item.passwd }}
   args:
     creates: /home/ftp/storage/incoming/{{ item.name }}
 
 - name: deploy on-upload script with setuid
-  copy:
+  ansible.builtin.copy:
     src: files/on-upload
     dest: /home/ftp/on-upload
     mode: 04755
@@ -42,7 +42,7 @@
 ## MYSECURESHELL
 
 - name: set the setuid bit on mysecureshell
-  file:
+  ansible.builtin.file:
     path: /usr/bin/mysecureshell
     mode: 04755
 
@@ -50,7 +50,7 @@
   notify:
     - restart mysecureshell
     - sftp-verif
-  template:
+  ansible.builtin.template:
     src: sftp_config.j2
     dest: /etc/ssh/sftp_config
     mode: '644'
@@ -59,7 +59,7 @@
 
 - name: set pure-ftpd default config
   notify: restart pure-ftpd
-  copy:
+  ansible.builtin.copy:
     dest: /etc/default/pure-ftpd-common
     mode: '644'
     content: |
@@ -72,7 +72,7 @@
 - name: configure pure-ftpd
   notify: restart pure-ftpd
   loop: "{{ mediaimport_pureftpd_config }}"
-  copy:
+  ansible.builtin.copy:
     dest: /etc/pure-ftpd/conf/{{ item.key }}
     content: "{{ item.value }}"
     mode: '644'
@@ -80,14 +80,14 @@
 ## PURE-FTPD CERTIFICATES
 
 - name: create certificate directory
-  file:
+  ansible.builtin.file:
     path: /etc/ssl/{{ ansible_fqdn }}
     state: directory
     mode: '755'
 
 - name: generate a private key
   register: mediaimport_privkey
-  openssl_privatekey:
+  community.crypto.openssl_privatekey:
     path: /etc/ssl/{{ ansible_fqdn }}/key.pem
     mode: '600'
 
@@ -113,7 +113,7 @@
 - name: concatenate key and certificate
   when: mediaimport_cert is changed  # noqa no-handler
   notify: restart pure-ftpd
-  shell: >
+  ansible.builtin.shell: >
     cat /etc/ssl/{{ ansible_fqdn }}/key.pem /etc/ssl/{{ ansible_fqdn }}/cert.pem > /etc/ssl/private/pure-ftpd.pem;
     chmod 600 /etc/ssl/private/pure-ftpd.pem;
 
@@ -127,7 +127,7 @@
 ## MEDIAIMPORT
 
 - name: setup cron job
-  copy:
+  ansible.builtin.copy:
     src: files/mediaimport
     dest: /etc/cron.d/mediaimport
     mode: '644'
@@ -137,14 +137,14 @@
     - mediaimport_ms_api_key | d(false)
     - mediaimport_ms_server_name | d(false)
   notify: restart mediaimport
-  template:
+  ansible.builtin.template:
     src: mediaimport.json.j2
     dest: /etc/mediaserver/mediaimport.json
     backup: true
     mode: 0640
 
 - name: enable mediaimport service
-  systemd:
+  ansible.builtin.systemd:
     name: mediaimport
     enabled: true
 
@@ -152,13 +152,13 @@
 
 - name: deploy fail2ban jail
   notify: restart fail2ban
-  template:
+  ansible.builtin.template:
     src: fail2ban_ftpd.conf.j2
     dest: /etc/fail2ban/jail.d/pure-ftpd.conf
     mode: 0644
 
 - name: flush handlers
-  meta: flush_handlers
+  ansible.builtin.meta: flush_handlers
 
 # FIREWALL
 
@@ -169,10 +169,10 @@
     ferm_input_rules: "{{ mediaimport_ferm_input_rules }}"
     ferm_output_rules: "{{ mediaimport_ferm_output_rules }}"
     ferm_global_settings: "{{ mediaimport_ferm_global_settings }}"
-  include_role:
+  ansible.builtin.include_role:
     name: ferm-configure
 
 - name: flush handlers
-  meta: flush_handlers
+  ansible.builtin.meta: flush_handlers
 
 ...
diff --git a/roles/mediaserver/handlers/main.yml b/roles/mediaserver/handlers/main.yml
index 8a9540afe0f79214c2124c72df626098da1fdc8e..7013e6027ecc094f6e42a6c5b309d3be380a8633 100644
--- a/roles/mediaserver/handlers/main.yml
+++ b/roles/mediaserver/handlers/main.yml
@@ -1,21 +1,21 @@
 ---
 
 - name: mscontroller restart
-  command:
+  ansible.builtin.command:
     cmd: mscontroller.py restart
 
 - name: restart nginx
-  systemd:
+  ansible.builtin.systemd:
     name: nginx
     state: restarted
 
 - name: restart mediaserver
-  systemd:
+  ansible.builtin.systemd:
     name: mediaserver
     state: restarted
 
 - name: restart systemd-sysusers
-  systemd:
+  ansible.builtin.systemd:
     name: systemd-sysusers
     state: restarted
 
diff --git a/roles/mediaserver/tasks/main.yml b/roles/mediaserver/tasks/main.yml
index 3cdbf67f8a0bd7c35ae6e3f4a499e50a3dec64eb..2472be46c9a4655acba584b602abf58c10fade20 100644
--- a/roles/mediaserver/tasks/main.yml
+++ b/roles/mediaserver/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: mediaserver install
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ server_packages }}"
@@ -11,18 +11,18 @@
 
 - name: fetch ssh public key
   register: root_ssh_pubkey
-  slurp:
+  ansible.builtin.slurp:
     path: /root/.ssh/id_ed25519.pub
   tags: always
 
 - name: register ssh public key as an ansible fact
-  set_fact:
+  ansible.builtin.set_fact:
     pubkey: "{{ root_ssh_pubkey['content'] | b64decode }}"
   tags: always
 
 - name: share ssh public key between cluster members
   loop: "{{ groups['mediaserver'] }}"
-  authorized_key:
+  ansible.posix.authorized_key:
     user: root
     key: "{{ hostvars[item]['pubkey'] }}"
   tags: always
@@ -30,13 +30,13 @@
 - name: resolve domain name to localhost
   notify: restart nginx
   loop: "{{ server_instances }}"
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/hosts
     line: '127.0.1.1 {{ item.ms_server_name }}'
     backup: true
 
 - name: Update the MS configuration with the celerity server IP
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/mediaserver/msconf.py
     regexp: '^CELERITY_SERVER_URL = '
     line: "CELERITY_SERVER_URL = 'https://{{ server_celerity_server_url }}:6200'"
@@ -48,7 +48,7 @@
     mode: '0644'
 
 - name: Update the MS configuration with the celerity server secret
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/mediaserver/msconf.py
     regexp: '^CELERITY_SIGNING_KEY = '
     line: "CELERITY_SIGNING_KEY = '{{ server_celerity_signing_key }}'"
@@ -69,7 +69,7 @@
     CM_SERVER_NAME: "{{ item.cm_server_name }}"
     MS_SUPERUSER_PWD: "{{ item.ms_superuser_pwd }}"
     MS_ADMIN_PWD: "{{ item.ms_admin_pwd }}"
-  command:
+  ansible.builtin.command:
     cmd: msinstaller.py {{ item.name }} --no-input
     creates: /etc/nginx/sites-available/mediaserver-{{ item.name }}.conf
 
@@ -85,7 +85,7 @@
     CM_SERVER_NAME: "{{ item.cm_server_name }}"
     MS_SUPERUSER_PWD: "{{ item.ms_superuser_pwd }}"
     MS_ADMIN_PWD: "{{ item.ms_admin_pwd }}"
-  command:
+  ansible.builtin.command:
     cmd: msinstaller.py {{ item.name }} --no-input
     creates: /etc/nginx/sites-available/mediaserver-{{ item.name }}.conf
   throttle: 1
@@ -102,7 +102,7 @@
     - /etc/celerity
     - /etc/sysusers.d
     - /var/www
-  command: |
+  ansible.builtin.command: |
     rsync \
       -avh \
       -e "ssh -o StrictHostKeyChecking=no" \
@@ -126,7 +126,7 @@
     - letsencrypt_enabled | d(false)
   loop:
     - /etc/letsencrypt
-  command: |
+  ansible.builtin.command: |
     rsync \
       -avh \
       -e "ssh -o StrictHostKeyChecking=no" \
@@ -141,7 +141,7 @@
 
 - name: configure email sender address
   notify: mscontroller restart
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/mediaserver/msconf.py
     backup: true
     create: true
@@ -157,7 +157,7 @@
 - name: configure domain name in nginx conf
   notify: restart nginx
   loop: "{{ server_instances }}"
-  replace:
+  ansible.builtin.replace:
     path: /etc/nginx/sites-available/mediaserver-{{ item.name }}.conf
     regexp: '^(\s*server_name).*;$'
     replace: '\1 {{ item.ms_server_name }};'
@@ -165,7 +165,7 @@
 
 - name: configure domain name in database
   loop: "{{ server_instances }}"
-  shell:
+  ansible.builtin.shell:
     cmd: |
       python3 /usr/lib/python3/dist-packages/mediaserver/scripts/mssiteconfig.py {{ item.name }} site_url=https://{{ item.ms_server_name }} ;
       mscontroller.py restart -u {{ item.name }} ;
@@ -174,7 +174,7 @@
 
 - name: reset service resources
   loop: "{{ server_instances }}"
-  shell:
+  ansible.builtin.shell:
     cmd: |
       python3 /usr/lib/python3/dist-packages/mediaserver/scripts/reset_service_resources.py {{ item.name }} local ;
       mscontroller.py restart -u {{ item.name }} ;
@@ -186,13 +186,13 @@
   when:
     - groups['mediaserver'] | length > 1
     - real_ip_from | length > 0
-  template:
+  ansible.builtin.template:
     src: realip.conf.j2
     dest: /etc/nginx/conf.d/realip.conf
     mode: '644'
 
 - name: ensure mediaserver is running
-  service:
+  ansible.builtin.service:
     name: mediaserver
     enabled: true
     state: started
@@ -206,10 +206,10 @@
     ferm_input_rules: "{{ server_ferm_input_rules }}"
     ferm_output_rules: "{{ server_ferm_output_rules }}"
     ferm_global_settings: "{{ server_ferm_global_settings }}"
-  include_role:
+  ansible.builtin.include_role:
     name: ferm-configure
 
 - name: flush handlers
-  meta: flush_handlers
+  ansible.builtin.meta: flush_handlers
 
 ...
diff --git a/roles/mediavault/tasks/mailer.yml b/roles/mediavault/tasks/mailer.yml
index 5da303fa87ffdc00c3dc4065ea709431183add8c..37ffc09ca4871eb2191a382d70cf6af22bb3e7e2 100644
--- a/roles/mediavault/tasks/mailer.yml
+++ b/roles/mediavault/tasks/mailer.yml
@@ -2,7 +2,7 @@
 
 - name: create mailer script
   when: mvt_mailer_enabled
-  template:
+  ansible.builtin.template:
     src: systemd-mailer-script.j2
     dest: "{{ mvt_mailer_script_path }}"
     mode: 0755
@@ -10,7 +10,7 @@
 - name: create mailer service
   when: mvt_mailer_enabled
   notify: systemd daemon reload
-  template:
+  ansible.builtin.template:
     src: systemd-mailer-service.j2
     dest: "{{ mvt_mailer_service_path }}"
     mode: '644'
diff --git a/roles/mediavault/tasks/main.yml b/roles/mediavault/tasks/main.yml
index b5dd457e6bbbee3127627dcff05d820651faf71c..76d6020b360082d5b37953dbefd0947a7f6f287f 100644
--- a/roles/mediavault/tasks/main.yml
+++ b/roles/mediavault/tasks/main.yml
@@ -1,14 +1,14 @@
 ---
 
 - name: install packages
-  package:
+  ansible.builtin.package:
     force_apt_get: true
     install_recommends: false
     name: "{{ mvt_packages }}"
     state: present
 
 - name: generate ssh key pair
-  user:
+  ansible.builtin.user:
     name: root
     generate_ssh_key: true
     ssh_key_type: ed25519
@@ -25,9 +25,9 @@
     ferm_input_rules: "{{ mvt_ferm_input_rules }}"
     ferm_output_rules: "{{ mvt_ferm_output_rules }}"
     ferm_global_settings: "{{ mvt_ferm_global_settings }}"
-  include_role:
+  ansible.builtin.include_role:
     name: ferm-configure
 
-- meta: flush_handlers  # noqa unnamed-task
+- ansible.builtin.meta: flush_handlers  # noqa unnamed-task
 
 ...
diff --git a/roles/mediaworker/handlers/main.yml b/roles/mediaworker/handlers/main.yml
index d06d284e8fea73f13623971095ed8b2c5d0aa07b..41c27f7ea08c5d4bb8dd25d6680cd02a9d477ad5 100644
--- a/roles/mediaworker/handlers/main.yml
+++ b/roles/mediaworker/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: restart celerity-workers
-  service:
+  ansible.builtin.service:
     name: celerity-workers
     state: restarted
 
diff --git a/roles/mediaworker/tasks/main.yml b/roles/mediaworker/tasks/main.yml
index 99459810992afca968546b236954a53c0f987ad8..3e0fcfc513d83a21a13f1b2dc0dbffcc327c1b9d 100644
--- a/roles/mediaworker/tasks/main.yml
+++ b/roles/mediaworker/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: install celerity worker
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: celerity-workers
@@ -11,13 +11,13 @@
 
 - name: config celerity worker
   notify: restart celerity-workers
-  template:
+  ansible.builtin.template:
     src: celerity-config.py.j2
     dest: /etc/celerity/config.py
     mode: '644'
 
 - name: ensure celerity worker is running
-  service:
+  ansible.builtin.service:
     name: celerity-workers
     enabled: true
     state: started
@@ -31,10 +31,10 @@
     ferm_input_rules: "{{ worker_ferm_input_rules }}"
     ferm_output_rules: "{{ worker_ferm_output_rules }}"
     ferm_global_settings: "{{ worker_ferm_global_settings }}"
-  include_role:
+  ansible.builtin.include_role:
     name: ferm-configure
 
 - name: flush handlers
-  meta: flush_handlers
+  ansible.builtin.meta: flush_handlers
 
 ...
diff --git a/roles/metricbeat/handlers/main.yml b/roles/metricbeat/handlers/main.yml
index 5d576b93bdc9bac8372926d851fff7227e5059b9..273514a5571eb631a32969e2b565d31ceef7ddee 100644
--- a/roles/metricbeat/handlers/main.yml
+++ b/roles/metricbeat/handlers/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: restart metricbeat
-  service:
+  ansible.builtin.service:
     name: metricbeat
     state: restarted
 
diff --git a/roles/metricbeat/tasks/main.yml b/roles/metricbeat/tasks/main.yml
index b4c46f45a1f08f56959dd0173ed972131ba14fa0..95d966913771b8e3723d8ec1cc352b1d3dba2fd3 100644
--- a/roles/metricbeat/tasks/main.yml
+++ b/roles/metricbeat/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: install apt-transport-https
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: apt-transport-https
@@ -10,16 +10,16 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: install elastic GPG key
-  apt_key:
+  ansible.builtin.apt_key:
     url: https://artifacts.elastic.co/GPG-KEY-elasticsearch
     state: present
 
 - name: install elastic repository
-  apt_repository:
+  ansible.builtin.apt_repository:
     repo: deb https://artifacts.elastic.co/packages/7.x/apt stable main
 
 - name: install metricbeat
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: metricbeat
@@ -29,18 +29,18 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: install metricbeat configuration
-  template:
+  ansible.builtin.template:
     src: metricbeat.yml.j2
     dest: /etc/metricbeat/metricbeat.yml
     mode: '644'
   notify: restart metricbeat
 
 - name: enable metricbeat dashboard
-  command: metricbeat setup
+  ansible.builtin.command: metricbeat setup
   when: inventory_hostname == groups['mediaserver'][0]
 
 - name: enable sql metricbeat configuration
-  template:
+  ansible.builtin.template:
     src: postgresql.yml.j2
     dest: /etc/metricbeat/modules.d/postgresql.yml
     mode: '644'
@@ -48,7 +48,7 @@
   notify: restart metricbeat
 
 - name: enable metricbeat client
-  systemd:
+  ansible.builtin.systemd:
     name: metricbeat
     enabled: true
     state: started
diff --git a/roles/mirismanager/handlers/main.yml b/roles/mirismanager/handlers/main.yml
index 9c36ad008fd812203199bbbc2446d02225dc6410..90192c9b7a29fbd47b74cb3f6522433505e13028 100644
--- a/roles/mirismanager/handlers/main.yml
+++ b/roles/mirismanager/handlers/main.yml
@@ -1,17 +1,17 @@
 ---
 
 - name: restart nginx
-  service:
+  ansible.builtin.service:
     name: nginx
     state: restarted
 
 - name: restart skyreach
-  service:
+  ansible.builtin.service:
     name: skyreach
     state: restarted
 
 - name: restart apt-cacher-ng
-  service:
+  ansible.builtin.service:
     name: apt-cacher-ng
     state: restarted
 
diff --git a/roles/mirismanager/tasks/main.yml b/roles/mirismanager/tasks/main.yml
index 949af4bf9091e338be56a6df5888e1b9850ee8d6..fc47293153bac63910e28604c32e8221d67e4edb 100644
--- a/roles/mirismanager/tasks/main.yml
+++ b/roles/mirismanager/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: mirismanager dependencies install
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ dependencies_packages }}"
@@ -10,12 +10,12 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: start postgresql
-  systemd:
+  ansible.builtin.systemd:
     name: postgresql
     state: started
 
 - name: mirismanager install
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ manager_packages }}"
@@ -25,7 +25,7 @@
 
 - name: configure domain name in nginx conf
   notify: restart nginx
-  replace:
+  ansible.builtin.replace:
     path: /etc/nginx/sites-available/skyreach.conf
     regexp: '^(\s*server_name).*;$'
     replace: '\1 {{ manager_hostname }};'
@@ -33,7 +33,7 @@
 
 - name: configure domain name in settings
   notify: restart skyreach
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /home/skyreach/skyreach_data/private/settings_override.py
     regexp: '^#? ?SITE_URL.*'
     line: "SITE_URL = 'https://{{ manager_hostname }}'"
@@ -41,7 +41,7 @@
 
 - name: configure site title in settings
   notify: restart skyreach
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /home/skyreach/skyreach_data/private/settings_override.py
     regexp: '^#? ?SITE_TITLE.*'
     line: "SITE_TITLE = '{{ manager_hostname }}'"
@@ -49,7 +49,7 @@
 
 - name: configure site name in settings
   notify: restart skyreach
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /home/skyreach/skyreach_data/private/settings_override.py
     regexp: '^#? ?SITE_NAME.*'
     line: "SITE_NAME = '{{ manager_hostname }}'"
@@ -57,7 +57,7 @@
 
 - name: configure email sender address in settings
   notify: restart skyreach
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /home/skyreach/skyreach_data/private/settings_override.py
     regexp: '^#? ?DEFAULT_FROM_EMAIL.*'
     line: "DEFAULT_FROM_EMAIL = '{{ manager_email_sender }}'"
@@ -65,20 +65,20 @@
 
 - name: resolve domain name to localhost ipv4
   notify: restart nginx
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/hosts
     line: '127.0.0.1 {{ manager_hostname }}'
     backup: true
 
 - name: ensure skyreach is running
-  service:
+  ansible.builtin.service:
     name: skyreach
     enabled: true
     state: started
 
 - name: check apt cacher ng config exists
   register: manager_apt_cacher_conf
-  stat:
+  ansible.builtin.stat:
     path: /etc/apt-cacher-ng/acng.conf
 
 - name: configure apt-cacher-ng
@@ -86,13 +86,13 @@
     - manager_apt_cacher_conf.stat.exists
     - manager_proxy_http | d(false)
   notify: restart apt-cacher-ng
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/apt-cacher-ng/acng.conf
     regexp: '^Proxy: .*'
     line: 'Proxy: {{ manager_proxy_http }}'
 
 - name: ensure apt-cacher-ng is running
-  service:
+  ansible.builtin.service:
     name: apt-cacher-ng
     enabled: true
     state: started
@@ -106,10 +106,10 @@
     ferm_input_rules: "{{ manager_ferm_input_rules }}"
     ferm_output_rules: "{{ manager_ferm_output_rules }}"
     ferm_global_settings: "{{ manager_ferm_global_settings }}"
-  include_role:
+  ansible.builtin.include_role:
     name: ferm-configure
 
 - name: flush handlers
-  meta: flush_handlers
+  ansible.builtin.meta: flush_handlers
 
 ...
diff --git a/roles/munin/msmonitor/handlers/main.yml b/roles/munin/msmonitor/handlers/main.yml
index b30d218a40fdc2985b9b3259731e0c7b0838c727..79ac1936cc627c8e403355aa0459de373f6f5cd8 100644
--- a/roles/munin/msmonitor/handlers/main.yml
+++ b/roles/munin/msmonitor/handlers/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: restart nginx
-  service:
+  ansible.builtin.service:
     name: nginx
     state: restarted
 ...
diff --git a/roles/munin/msmonitor/tasks/main.yml b/roles/munin/msmonitor/tasks/main.yml
index 084b9c21972b7733bd8b31c1d091698d2ad0e294..da807a33f45134ca044c8bdc4aff7f965cd078eb 100644
--- a/roles/munin/msmonitor/tasks/main.yml
+++ b/roles/munin/msmonitor/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: install ubicast msmonitor
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     state: latest
@@ -13,13 +13,13 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: set msmonitor account password
-  user:
+  ansible.builtin.user:
     name: msmonitor
     password: "{{ monitor_shell_pwd | password_hash('sha512', 'monitor') }}"
 
 - name: configure domain name in nginx
   notify: restart nginx
-  replace:
+  ansible.builtin.replace:
     path: /etc/nginx/sites-available/msmonitor.conf
     regexp: '^(\s*server_name).*;$'
     replace: '\1 {{ monitor_hostname }};'
@@ -27,19 +27,19 @@
 
 - name: resolve domain name to localhost ipv4
   notify: restart nginx
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/hosts
     line: '127.0.1.1 {{ monitor_hostname }}'
     backup: true
 
 - name: ensure msmonitor is running
-  service:
+  ansible.builtin.service:
     name: msmonitor
     enabled: true
     state: started
 
 - name: set directory permissions
-  file:
+  ansible.builtin.file:
     path: /home/msmonitor/msmonitor
     mode: 0755
     state: directory
@@ -53,7 +53,7 @@
     ferm_input_rules: "{{ monitor_ferm_input_rules }}"
     ferm_output_rules: "{{ monitor_ferm_output_rules }}"
     ferm_global_settings: "{{ monitor_ferm_global_settings }}"
-  include_role:
+  ansible.builtin.include_role:
     name: ferm-configure
 
 ...
diff --git a/roles/munin/munin-node/handlers/main.yml b/roles/munin/munin-node/handlers/main.yml
index e68afb7a82d5006c410efabf2da3d6b7f6735f57..04737382a43c73eee1fbf9d0305bfe2527bf1912 100644
--- a/roles/munin/munin-node/handlers/main.yml
+++ b/roles/munin/munin-node/handlers/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: restart munin-node
-  service:
+  ansible.builtin.service:
     name: munin-node
     state: restarted
 ...
diff --git a/roles/munin/munin-node/tasks/main.yml b/roles/munin/munin-node/tasks/main.yml
index 8a6bd6ef0146420a15c4e11b7d9164a73411cbf1..d001c1a560082ec98a7357d83cc634515f5ec3bc 100644
--- a/roles/munin/munin-node/tasks/main.yml
+++ b/roles/munin/munin-node/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: install required packages for munin-node
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     state: latest
@@ -14,14 +14,14 @@
 
 - name: copy munin-node configuration
   notify: restart munin-node
-  template:
+  ansible.builtin.template:
     src: munin-node.conf.j2
     dest: /etc/munin/munin-node.conf
     mode: '644'
 
 - name: setup munin-node plugins link
   notify: restart munin-node
-  shell:
+  ansible.builtin.shell:
     cmd: munin-node-configure --shell --remove-also 2>&1 | sh -x
   # sh -x prints the executed cmd to stderr
   register: munin_plugin_linked
diff --git a/roles/munin/munin-server/handlers/main.yml b/roles/munin/munin-server/handlers/main.yml
index f0bac579b41a63a6f72c0b9392d34ea5352f78b5..9a7279bde041ef9e7569289a7c47355478c0d9a4 100644
--- a/roles/munin/munin-server/handlers/main.yml
+++ b/roles/munin/munin-server/handlers/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: restart munin-server
-  service:
+  ansible.builtin.service:
     name: munin
     state: restarted
 ...
diff --git a/roles/munin/munin-server/tasks/main.yml b/roles/munin/munin-server/tasks/main.yml
index 6e0e716e391a6d0df8f4c416d27857b316ab33d3..521995326dc5cc989a1b32ab7da1209948761884 100644
--- a/roles/munin/munin-server/tasks/main.yml
+++ b/roles/munin/munin-server/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: "install required packages for munin-server"
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     state: latest
@@ -13,13 +13,13 @@
 
 - name: "copy munin-server configuration"
   notify: restart munin-server
-  template:
+  ansible.builtin.template:
     src: munin.conf.j2
     dest: /etc/munin/munin.conf
     mode: '644'
 
 - name: "remove default localdomain files"
-  file:
+  ansible.builtin.file:
     path: /var/cache/munin/www/localdomain
     state: absent
 
diff --git a/roles/netcapture/tasks/main.yml b/roles/netcapture/tasks/main.yml
index 34eafd90742bffed4ff46865e8dbbfc8ea3b03b2..0e797c9695c6ec3f3fc3dc063dbc7a3957eddfe6 100644
--- a/roles/netcapture/tasks/main.yml
+++ b/roles/netcapture/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: netcapture install
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: ubicast-netcapture
@@ -10,19 +10,19 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: netcapture config
-  template:
+  ansible.builtin.template:
     src: netcapture.json.j2
     dest: /etc/miris/netcapture.json
     mode: '644'
 
 - name: netcapture miris
-  template:
+  ansible.builtin.template:
     src: miris-api.json.j2
     dest: /etc/miris/conf/api.json
     mode: '644'
 
 - name: netcapture config dir
-  file:
+  ansible.builtin.file:
     path: "{{ netcapture_conf_folder }}"
     group: video
     mode: u=rwX,g=rwX,o=r
@@ -30,14 +30,14 @@
     state: directory
 
 - name: netcapture media dir
-  file:
+  ansible.builtin.file:
     path: "{{ netcapture_media_folder }}"
     group: video
     mode: u=rwX,g=rwX,o=rx
     state: directory
 
 - name: netcapture package dir
-  file:
+  ansible.builtin.file:
     path: "{{ netcapture_pkg_folder }}"
     mode: u=rwX,g=rwX,o=rx
     state: directory
diff --git a/roles/network/tasks/main.yml b/roles/network/tasks/main.yml
index 5ff2ef45a4042f1c789657639c8cf380e8222c38..a0dec2a1cbe042f0aed410a0e1de0c12c16e9ff9 100644
--- a/roles/network/tasks/main.yml
+++ b/roles/network/tasks/main.yml
@@ -10,7 +10,7 @@
   block:
 
     - name: packages
-      apt:
+      ansible.builtin.apt:
         force_apt_get: true
         install_recommends: false
         name: "{{ network_packages }}"
@@ -21,7 +21,7 @@
 
     - name: cleanup
       register: network_cleanup_interfaces
-      copy:
+      ansible.builtin.copy:
         dest: /etc/network/interfaces
         backup: true
         mode: '644'
@@ -37,7 +37,7 @@
 
     - name: service
       when: network_cleanup_interfaces is changed
-      systemd:
+      ansible.builtin.systemd:
         name: network-manager
         enabled: true
         state: restarted
diff --git a/roles/nginx/handlers/main.yml b/roles/nginx/handlers/main.yml
index b7774856aa335af9eb5885e0efcd4e2093c9e167..b0abf18b3894fe1af147e0ca063f8e9e9a913d58 100644
--- a/roles/nginx/handlers/main.yml
+++ b/roles/nginx/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: restart nginx
-  systemd:
+  ansible.builtin.systemd:
     name: nginx
     state: restarted
 
diff --git a/roles/nginx/tasks/main.yml b/roles/nginx/tasks/main.yml
index 117f3aba56c660cd250a9e2cd32bd98410b70525..fd7de4cab5e7f5ff639b64824e4481497cb7cc13 100644
--- a/roles/nginx/tasks/main.yml
+++ b/roles/nginx/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: nginx install
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ nginx_packages }}"
@@ -15,13 +15,13 @@
   loop:
     - /etc/nginx/sites-enabled/default
     - /etc/nginx/sites-enabled/default.conf
-  file:
+  ansible.builtin.file:
     path: "{{ item }}"
     state: absent
 
 - name: nginx check old ssl conf exists
   register: nginx_old_ssl_conf
-  stat:
+  ansible.builtin.stat:
     path: /etc/nginx/conf.d/ssl.conf
 
 - name: nginx migrate old ssl certificate conf
@@ -30,12 +30,12 @@
   loop:
     - grep ssl_certificate /etc/nginx/conf.d/ssl.conf > /etc/nginx/conf.d/ssl_certificate.conf
     - mv /etc/nginx/conf.d/ssl.conf /etc/nginx/conf.d/ssl.conf.old
-  command:
+  ansible.builtin.command:
     cmd: "{{ item }}"
 
 - name: nginx check ssl cert conf exists
   register: nginx_ssl_cert_conf
-  stat:
+  ansible.builtin.stat:
     path: /etc/nginx/conf.d/ssl_certificate.conf
 
 - name: nginx update ssl certificate conf
@@ -43,7 +43,7 @@
     - nginx_ssl_cert_conf.stat.exists
     - nginx_ssl_certificate != "/etc/ssl/certs/ssl-cert-snakeoil.pem"
   notify: restart nginx
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/nginx/conf.d/ssl_certificate.conf
     regexp: 'ssl_certificate\s+([\w/\-\_\.]+);'
     line: 'ssl_certificate {{ nginx_ssl_certificate }};'
@@ -53,7 +53,7 @@
     - nginx_ssl_cert_conf.stat.exists
     - nginx_ssl_certificate_key != "/etc/ssl/private/ssl-cert-snakeoil.key"
   notify: restart nginx
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/nginx/conf.d/ssl_certificate.conf
     regexp: 'ssl_certificate_key\s+([\w/\-\_\.]+);'
     line: 'ssl_certificate_key {{ nginx_ssl_certificate_key }};'
diff --git a/roles/postfix/handlers/main.yml b/roles/postfix/handlers/main.yml
index f55195130a602f6fc484528c5ed43295313c2e4b..ef16bd5ac889dd4f632385b76e10c30cfa7a6591 100644
--- a/roles/postfix/handlers/main.yml
+++ b/roles/postfix/handlers/main.yml
@@ -1,19 +1,19 @@
 ---
 
 - name: postmap sasl
-  command: postmap hash:/etc/postfix/sasl-passwords
+  ansible.builtin.command: postmap hash:/etc/postfix/sasl-passwords
 
 - name: postmap generic
-  command: postmap hash:/etc/postfix/generic
+  ansible.builtin.command: postmap hash:/etc/postfix/generic
 
 - name: postmap virtual
-  command: postmap hash:/etc/postfix/virtual
+  ansible.builtin.command: postmap hash:/etc/postfix/virtual
 
 - name: newaliases
-  command: newaliases
+  ansible.builtin.command: newaliases
 
 - name: restart postfix
-  service:
+  ansible.builtin.service:
     name: postfix
     state: restarted
 
diff --git a/roles/postfix/tasks/main.yml b/roles/postfix/tasks/main.yml
index 52205114f4a691bd529904753c3cb7b62c9f5bbc..a65ace761ffb32d5274239bba057f93bed234a6e 100644
--- a/roles/postfix/tasks/main.yml
+++ b/roles/postfix/tasks/main.yml
@@ -1,14 +1,14 @@
 ---
 
 - name: create postfix dir
-  file:
+  ansible.builtin.file:
     path: /etc/postfix
     state: directory
     mode: '755'
 
 - name: postfix main config
   notify: restart postfix
-  template:
+  ansible.builtin.template:
     backup: true
     src: main.cf.j2
     dest: /etc/postfix/main.cf
@@ -16,7 +16,7 @@
 
 - name: postfix mailname
   notify: restart postfix
-  copy:
+  ansible.builtin.copy:
     backup: true
     dest: /etc/mailname
     content: "{{ postfix_mailname }}"
@@ -26,7 +26,7 @@
   notify:
     - newaliases
     - restart postfix
-  copy:
+  ansible.builtin.copy:
     backup: true
     dest: /etc/aliases
     mode: '644'
@@ -39,7 +39,7 @@
   notify:
     - postmap virtual
     - restart postfix
-  copy:
+  ansible.builtin.copy:
     backup: true
     dest: /etc/postfix/virtual
     mode: '644'
@@ -52,7 +52,7 @@
   notify:
     - postmap generic
     - restart postfix
-  copy:
+  ansible.builtin.copy:
     backup: true
     dest: /etc/postfix/generic
     mode: '644'
@@ -71,14 +71,14 @@
   notify:
     - postmap sasl
     - restart postfix
-  copy:
+  ansible.builtin.copy:
     backup: true
     dest: /etc/postfix/sasl-passwords
     mode: '644'
     content: "{{ postfix_relay_host }} {{ postfix_relay_user }}:{{ postfix_relay_pass }}"
 
 - name: install postfix
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ postfix_packages }}"
@@ -88,7 +88,7 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: ensure postfix is running
-  service:
+  ansible.builtin.service:
     name: postfix
     enabled: true
     state: started
diff --git a/roles/postgres-ha/handlers/main.yml b/roles/postgres-ha/handlers/main.yml
index 6b60369e5e0af7c27e2559489ca84e47274eca17..8e43f565efc01e13ec4aca133934d3b3a39502a0 100644
--- a/roles/postgres-ha/handlers/main.yml
+++ b/roles/postgres-ha/handlers/main.yml
@@ -1,21 +1,21 @@
 ---
 
 - name: reload systemd
-  systemd:
+  ansible.builtin.systemd:
     daemon_reload: true
 
 - name: restart postgresql
-  systemd:
+  ansible.builtin.systemd:
     name: postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}
     state: restarted
 
 - name: restart repmgrd
-  systemd:
+  ansible.builtin.systemd:
     name: repmgrd
     state: restarted
 
 - name: restart rephacheck
-  systemd:
+  ansible.builtin.systemd:
     name: rephacheck.socket
     state: restarted
 
diff --git a/roles/postgres-ha/tasks/main.yml b/roles/postgres-ha/tasks/main.yml
index 88eb33a03025872f53847f17d23af7562322577c..b1bd12c6875e3722d94700c78b5601b0cecea08c 100644
--- a/roles/postgres-ha/tasks/main.yml
+++ b/roles/postgres-ha/tasks/main.yml
@@ -3,7 +3,7 @@
 # INSTALLATION
 
 - name: install packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ repmgr_packages }}"
@@ -61,14 +61,14 @@
         dport:
           - 5432
           - "{{ repmgr_repha_port }}"
-  include_role:
+  ansible.builtin.include_role:
     name: postgres
 
 # CONFIGURATION
 
 - name: configure repmgr
   notify: restart repmgrd
-  template:
+  ansible.builtin.template:
     src: repmgr.conf.j2
     dest: "{{ repmgr_config }}"
     owner: postgres
@@ -82,13 +82,13 @@
       value: 'yes'
     - key: REPMGRD_CONF
       value: "{{ repmgr_config }}"
-  replace:
+  ansible.builtin.replace:
     path: /etc/default/repmgrd
     regexp: '^#?{{ item.key }}=.*$'
     replace: '{{ item.key }}={{ item.value }}'
 
 - name: configure sudo
-  copy:
+  ansible.builtin.copy:
     dest: /etc/sudoers.d/postgres
     validate: visudo -cf %s
     mode: '440'
@@ -103,7 +103,7 @@
 # SSH
 
 - name: ensure the postgres account has an ssh keypair
-  user:
+  ansible.builtin.user:
     name: postgres
     generate_ssh_key: true
     ssh_key_type: ed25519
@@ -111,21 +111,21 @@
 
 - name: fetch postgres ssh public key
   register: repmgr_postgres_ssh_pubkey
-  slurp:
+  ansible.builtin.slurp:
     path: ~postgres/.ssh/id_ed25519.pub
 
 - name: register postgres ssh public key as an ansible fact
-  set_fact:
+  ansible.builtin.set_fact:
     pubkey: "{{ repmgr_postgres_ssh_pubkey['content'] | b64decode }}"
 
 - name: share postgres ssh public key between cluster members
   loop: "{{ groups['postgres'] }}"
-  authorized_key:
+  ansible.posix.authorized_key:
     user: postgres
     key: "{{ hostvars[item]['pubkey'] }}"
 
 - name: postgres ssh client configuration
-  copy:
+  ansible.builtin.copy:
     dest: ~postgres/.ssh/config
     owner: postgres
     group: postgres
@@ -145,7 +145,7 @@
       become: true
       become_user: postgres
       register: repmgr_check_primary
-      postgresql_query:
+      community.general.postgresql_query:
         db: repmgr
         query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
 
@@ -154,10 +154,10 @@
       become_user: postgres
       when: repmgr_check_primary.query_result | length == 0
       notify: restart repmgrd
-      command:
+      ansible.builtin.command:
         cmd: repmgr --config-file={{ repmgr_config }} primary register
 
-- meta: flush_handlers  # noqa unnamed-task
+- ansible.builtin.meta: flush_handlers  # noqa unnamed-task
 
 # REGISTER STANDBY
 
@@ -169,19 +169,19 @@
       become: true
       become_user: postgres
       register: repmgr_check_standby
-      postgresql_query:
+      community.general.postgresql_query:
         db: repmgr
         query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
 
     - name: stop postgresql service
       when: repmgr_check_standby.query_result | length == 0
-      systemd:
+      ansible.builtin.systemd:
         name: postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}
         state: stopped
 
     - name: remove existing pgdata
       when: repmgr_check_standby.query_result | length == 0
-      command:
+      ansible.builtin.command:
         cmd: mv -vf {{ repmgr_pg_data }} {{ repmgr_pg_data }}.save
         removes: "{{ repmgr_pg_data }}"
 
@@ -191,7 +191,7 @@
       when: repmgr_check_standby.query_result | length == 0
       ignore_errors: true
       register: repmgr_clone_standby
-      shell:
+      ansible.builtin.shell:
         cmd: |
           repmgr \
             --config-file={{ repmgr_config }} \
@@ -205,30 +205,30 @@
 
     - name: remove pgdata backup
       when: repmgr_clone_standby is succeeded
-      file:
+      ansible.builtin.file:
         path: "{{ repmgr_pg_data }}.save"
         state: absent
 
     - name: remove failed clone pgdata
       when: repmgr_clone_standby is failed
-      file:
+      ansible.builtin.file:
         path: "{{ repmgr_pg_data }}"
         state: absent
 
     - name: restore pgdata backup
       when: repmgr_clone_standby is failed
-      command:
+      ansible.builtin.command:
         cmd: mv -vf {{ repmgr_pg_data }}.save {{ repmgr_pg_data }}
         removes: "{{ repmgr_pg_data }}.save"
 
     - name: start postgresql service
-      systemd:
+      ansible.builtin.systemd:
         name: postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}
         state: started
 
     - name: standby clone failed
       when: repmgr_clone_standby is failed
-      fail:
+      ansible.builtin.fail:
         msg: "{{ repmgr_clone_standby.stderr }}"
 
     - name: register standby
@@ -236,10 +236,10 @@
       become_user: postgres
       when: repmgr_check_standby.query_result | length == 0
       notify: restart repmgrd
-      command:
+      ansible.builtin.command:
         cmd: repmgr --config-file={{ repmgr_config }} standby register
 
-- meta: flush_handlers  # noqa unnamed-task
+- ansible.builtin.meta: flush_handlers  # noqa unnamed-task
 
 # REGISTER WITNESS
 
@@ -251,7 +251,7 @@
       become: true
       become_user: postgres
       register: repmgr_check_witness
-      postgresql_query:
+      community.general.postgresql_query:
         db: repmgr
         query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
 
@@ -260,26 +260,26 @@
       become_user: postgres
       when: repmgr_check_witness.query_result | length == 0
       notify: restart repmgrd
-      command:
+      ansible.builtin.command:
         cmd: repmgr --config-file={{ repmgr_config }} --host={{ repmgr_primary_node }} witness register
 
-- meta: flush_handlers  # noqa unnamed-task
+- ansible.builtin.meta: flush_handlers  # noqa unnamed-task
 
 # REPHACHECK
 
 - name: install rephacheck
-  template:
+  ansible.builtin.template:
     src: rephacheck.py.j2
     dest: /usr/bin/rephacheck
     mode: 0755
 
 - name: register variables needed by rephacheck as facts
-  set_fact:
+  ansible.builtin.set_fact:
     repmgr_node_name: "{{ repmgr_node_name }}"
     repmgr_node_id: "{{ repmgr_node_id }}"
 
 - name: configure rephacheck
-  template:
+  ansible.builtin.template:
     src: rephacheck.conf.j2
     dest: /etc/postgresql/{{ repmgr_pg_version }}/{{ repmgr_pg_cluster }}/rephacheck.conf
     owner: postgres
@@ -290,7 +290,7 @@
   notify:
     - reload systemd
     - restart rephacheck
-  copy:
+  ansible.builtin.copy:
     dest: /etc/systemd/system/rephacheck.socket
     mode: '644'
     content: |
@@ -308,7 +308,7 @@
   notify:
     - reload systemd
     - restart rephacheck
-  copy:
+  ansible.builtin.copy:
     dest: /etc/systemd/system/rephacheck@.service
     mode: '644'
     content: |
@@ -322,7 +322,7 @@
       Group=postgres
 
 - name: enable and start rephacheck
-  service:
+  ansible.builtin.service:
     name: rephacheck.socket
     state: started
     enabled: true
@@ -334,7 +334,7 @@
     ferm_input_rules: "{{ pg_ferm_input_rules }}"
     ferm_output_rules: "{{ pg_ferm_output_rules }}"
     ferm_global_settings: "{{ pg_ferm_global_settings }}"
-  include_role:
+  ansible.builtin.include_role:
     name: ferm-configure
 
 ...
diff --git a/roles/postgres/handlers/main.yml b/roles/postgres/handlers/main.yml
index 2f1c67e4c548ddbe3b23e86c3b1673f62f700dc1..6a5616ec10107967826013510b384228909a6faa 100644
--- a/roles/postgres/handlers/main.yml
+++ b/roles/postgres/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: restart postgresql
-  systemd:
+  ansible.builtin.systemd:
     name: postgresql@{{ pg_version }}-{{ pg_cluster }}
     state: restarted
 
diff --git a/roles/postgres/tasks/main.yml b/roles/postgres/tasks/main.yml
index 89c68eca1eda2c9c919aad31601f9a9c6fdc253b..f30db26aeeed30984cc486f717340f8d0088abf1 100644
--- a/roles/postgres/tasks/main.yml
+++ b/roles/postgres/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: ansible postgresql requirements install
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: python3-psycopg2
@@ -10,7 +10,7 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: install packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ pg_packages }}"
@@ -21,7 +21,7 @@
 # CONFIGURATION
 
 - name: ensure conf directory exists
-  file:
+  ansible.builtin.file:
     path: "{{ pg_conf_dir }}/conf.d"
     owner: postgres
     group: postgres
@@ -29,14 +29,14 @@
     mode: '755'
 
 - name: ensure conf directory is included
-  replace:
+  ansible.builtin.replace:
     path: "{{ pg_conf_dir }}/postgresql.conf"
     backup: true
     regexp: "^#?include_dir = '[A-Za-z\\.]+'(\\s+.*)$"
     replace: "include_dir = 'conf.d'\\1"
 
 - name: change max connections value
-  replace:
+  ansible.builtin.replace:
     path: "{{ pg_conf_dir }}/postgresql.conf"
     backup: true
     regexp: "^#?max_connections = [0-9]+"
@@ -47,7 +47,7 @@
   notify: restart postgresql
   loop: "{{ pg_conf }}"
   when: item.content | d(false)
-  copy:
+  ansible.builtin.copy:
     dest: "{{ pg_conf_dir }}/conf.d/{{ item.name }}.conf"
     owner: postgres
     group: postgres
@@ -57,7 +57,7 @@
 
 - name: configure authentication
   notify: restart postgresql
-  template:
+  ansible.builtin.template:
     src: pg_hba.conf.j2
     dest: "{{ pg_conf_dir }}/pg_hba.conf"
     owner: postgres
@@ -66,7 +66,7 @@
     backup: true
 
 - name: ensure service is enabled and running
-  systemd:
+  ansible.builtin.systemd:
     name: postgresql@{{ pg_version }}-{{ pg_cluster }}
     enabled: true
     state: started
@@ -77,7 +77,7 @@
   become: true
   become_user: postgres
   no_log: true
-  postgresql_user:
+  community.general.postgresql_user:
     name: postgres
     password: "{{ pg_password | d(omit) }}"
 
@@ -86,7 +86,7 @@
   become_user: postgres
   no_log: true
   loop: "{{ pg_users }}"
-  postgresql_user:
+  community.general.postgresql_user:
     name: "{{ item.name }}"
     password: "{{ item.password | d(omit) }}"
     db: "{{ item.db | d(omit) }}"
@@ -95,7 +95,7 @@
 
 - name: set .pgpass to allow passwordless connection
   loop: "{{ query('nested', ['root', 'postgres'], pg_users) }}"
-  blockinfile:
+  ansible.builtin.blockinfile:
     path: "~{{ item.0 }}/.pgpass"
     block: "*:*:*:{{ item.1.name }}:{{ item.1.password }}"
     marker: "# {mark} {{ item.1.name }}"
@@ -123,10 +123,10 @@
     ferm_input_rules: "{{ pg_ferm_input_rules }}"
     ferm_output_rules: "{{ pg_ferm_output_rules }}"
     ferm_global_settings: "{{ pg_ferm_global_settings }}"
-  include_role:
+  ansible.builtin.include_role:
     name: ferm-configure
 
 - name: flush handlers
-  meta: flush_handlers
+  ansible.builtin.meta: flush_handlers
 
 ...
diff --git a/roles/proxy/tasks/main.yml b/roles/proxy/tasks/main.yml
index 5545622aa9106e0f7dcd7e2df975a6db27dc101f..5b8f081210d5c517ed87c723b02cb944bfb7e08f 100644
--- a/roles/proxy/tasks/main.yml
+++ b/roles/proxy/tasks/main.yml
@@ -7,7 +7,7 @@
   block:
 
     - name: environment
-      blockinfile:
+      ansible.builtin.blockinfile:
         path: /etc/environment
         create: true
         marker_begin: BEGIN PROXY
@@ -22,7 +22,7 @@
           NO_PROXY={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
 
     - name: apt
-      copy:
+      ansible.builtin.copy:
         dest: /etc/apt/apt.conf.d/proxy
         mode: '644'
         content: |
@@ -30,7 +30,7 @@
           Acquire::https::Proxy "{{ proxy_https }}";
 
     - name: wget
-      copy:
+      ansible.builtin.copy:
         dest: /etc/wgetrc
         mode: '644'
         content: |
@@ -40,7 +40,7 @@
           no_proxy={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
 
     - name: install git
-      apt:
+      ansible.builtin.apt:
         force_apt_get: true
         install_recommends: false
         name: git
diff --git a/roles/sysconfig/handlers/main.yml b/roles/sysconfig/handlers/main.yml
index ecd373946c21cab71eb3f2fe6daf2283136dcda4..d58394d537aeef45574d6603654ab78b8d4bf69a 100644
--- a/roles/sysconfig/handlers/main.yml
+++ b/roles/sysconfig/handlers/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: update cache
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     update_cache: true
@@ -9,34 +9,34 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: systemd daemon reload
-  systemd:
+  ansible.builtin.systemd:
     daemon_reload: true
 
 - name: update locale
-  command: locale-gen
+  ansible.builtin.command: locale-gen
 
 - name: restart cron
-  service:
+  ansible.builtin.service:
     name: cron
     state: restarted
 
 - name: restart sshd
-  service:
+  ansible.builtin.service:
     name: sshd
     state: restarted
 
 - name: restart unattended-upgrades
-  service:
+  ansible.builtin.service:
     name: unattended-upgrades
     state: restarted
 
 - name: restart ntp
-  service:
+  ansible.builtin.service:
     name: ntp
     state: restarted
 
 - name: update cache
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     update_cache: true
diff --git a/roles/sysconfig/tasks/locale.yml b/roles/sysconfig/tasks/locale.yml
index c8d2d4e7ae9515a3257ab11f0635db9528d69b5d..4e93ac338490308ee186efd3ebbcbb7aef5904b1 100644
--- a/roles/sysconfig/tasks/locale.yml
+++ b/roles/sysconfig/tasks/locale.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: install locale packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ locale_packages }}"
@@ -16,7 +16,7 @@
 
 - name: set locale
   notify: update locale
-  copy:
+  ansible.builtin.copy:
     dest: /etc/default/locale
     mode: '644'
     content: |
@@ -26,7 +26,7 @@
 
 - name: set locale.gen
   notify: update locale
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/locale.gen
     regexp: '^(?:# )?({{ init_locale }}.*)$'
     backrefs: true
diff --git a/roles/sysconfig/tasks/logs.yml b/roles/sysconfig/tasks/logs.yml
index 3946bce28ba906e249f7acfa460d419708129341..395d042f69f83cbc1c086b3c3443446d9607468d 100644
--- a/roles/sysconfig/tasks/logs.yml
+++ b/roles/sysconfig/tasks/logs.yml
@@ -1,6 +1,6 @@
 ---
 - name: install logs packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ sysconfig_logs_packages }}"
@@ -9,13 +9,13 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: start rsyslog
-  systemd:
+  ansible.builtin.systemd:
     name: rsyslog
     enabled: true
     state: started
 
 - name: ensure journald logs persistence is enabled
-  file:
+  ansible.builtin.file:
     path: /var/log/journal
     state: directory
     mode: '755'
diff --git a/roles/sysconfig/tasks/main.yml b/roles/sysconfig/tasks/main.yml
index ffb616bf7d8f960f13482d5b6b16af1b353913af..0a6d4b24612d379af4ca58992fc7f118f9bc344d 100644
--- a/roles/sysconfig/tasks/main.yml
+++ b/roles/sysconfig/tasks/main.yml
@@ -4,7 +4,7 @@
 # Upgrade already installed packages to latest version and clean system
 
 - name: apt update
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     update_cache: true
@@ -14,7 +14,7 @@
   changed_when: false
 
 - name: apt dist upgrade
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     upgrade: dist
@@ -23,7 +23,7 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: apt clean and autoremove
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     autoclean: true
@@ -35,7 +35,7 @@
 # Install new packages and remove conflicts
 
 - name: install system utilities
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ sysconfig_packages }}"
@@ -45,7 +45,7 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: remove conflicting packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name:
@@ -62,7 +62,7 @@
 # Enable automatic security upgrades
 
 - name: install unattended-upgrades
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: unattended-upgrades
@@ -72,7 +72,7 @@
   until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
 
 - name: enable unattended upgrades
-  copy:
+  ansible.builtin.copy:
     dest: /etc/apt/apt.conf.d/20auto-upgrades
     content: |
       APT::Periodic::Update-Package-Lists "1";
@@ -80,28 +80,28 @@
     mode: '644'
 
 - name: remove old kernel with unattended-upgrades
-  replace:
+  ansible.builtin.replace:
     dest: /etc/apt/apt.conf.d/50unattended-upgrades
     regexp: '^//Unattended-Upgrade::Remove-Unused-Kernel-Packages.*$'
     replace: 'Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";'
   notify: restart unattended-upgrades
 
 - name: allow automatic updates for ubicast security
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/apt/apt.conf.d/50unattended-upgrades
     insertafter: '^Unattended-Upgrade::Origins-Pattern {$'
     line: '        "origin=UbiCast,label=UbiCast-Security";'
     backup: true
 
 - name: enable root login via ssh with key
-  replace:
+  ansible.builtin.replace:
     dest: /etc/ssh/sshd_config
     regexp: "^#PermitRootLogin (yes|without-password|prohibit-password)"
     replace: "PermitRootLogin without-password"
   notify: restart sshd
 
 - name: remove disabled root login
-  replace:
+  ansible.builtin.replace:
     dest: /root/.ssh/authorized_keys
     regexp: "^no-port-forwarding,(.+) ssh-"
     replace: "ssh-"
@@ -117,7 +117,7 @@
     ferm_input_rules: "{{ sysconfig_ferm_input_rules }}"
     ferm_output_rules: "{{ sysconfig_ferm_output_rules }}"
     ferm_global_settings: "{{ sysconfig_ferm_global_settings }}"
-  include_role:
+  ansible.builtin.include_role:
     name: ferm-configure
 
 - include_tasks: logs.yml
diff --git a/roles/sysconfig/tasks/ntp.yml b/roles/sysconfig/tasks/ntp.yml
index f96507d72c2c22619eb0e386ad3d3298cf367e95..f0e6d6383c61abdf1df39bbb90fdaf1625b911e4 100644
--- a/roles/sysconfig/tasks/ntp.yml
+++ b/roles/sysconfig/tasks/ntp.yml
@@ -1,13 +1,13 @@
 ---
 - name: create systemd-timesync service config directory
-  file:
+  ansible.builtin.file:
     path: /lib/systemd/system/systemd-timesyncd.service.d
     state: directory
     mode: 0755
 
 - name: ntp add condition to systemd-timesyncd service
   notify: systemd daemon reload
-  copy:
+  ansible.builtin.copy:
     dest: /lib/systemd/system/systemd-timesyncd.service.d/disable-with-time-daemon.conf
     mode: '644'
     content: |
@@ -20,13 +20,13 @@
 
 - name: ntp disable systemd-timesyncd service
   notify: restart ntp
-  systemd:
+  ansible.builtin.systemd:
     name: systemd-timesyncd
     enabled: false
     state: stopped
 
 - name: ntp install
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: ntp
@@ -37,14 +37,14 @@
 
 - name: ntp config
   notify: restart ntp
-  template:
+  ansible.builtin.template:
     backup: true
     src: ntp.conf.j2
     dest: /etc/ntp.conf
     mode: '644'
 
 - name: ensure ntp is running
-  service:
+  ansible.builtin.service:
     name: ntp
     enabled: true
     state: started
diff --git a/roles/sysconfig/tasks/repos.yml b/roles/sysconfig/tasks/repos.yml
index c2d10afef5afa8c118faddff0d4be754ff94301e..653aea4fffaccdeb7bcb24629528db307938a894 100644
--- a/roles/sysconfig/tasks/repos.yml
+++ b/roles/sysconfig/tasks/repos.yml
@@ -5,7 +5,7 @@
     - not offline_mode | d(false)
     - ansible_distribution == 'Ubuntu'
   notify: update cache
-  copy:
+  ansible.builtin.copy:
     dest: /etc/apt/sources.list
     mode: '644'
     content: |
@@ -19,7 +19,7 @@
     - not offline_mode | d(false)
     - ansible_distribution == 'Debian'
   notify: update cache
-  copy:
+  ansible.builtin.copy:
     dest: /etc/apt/sources.list
     mode: '644'
     content: |
@@ -29,21 +29,21 @@
 
 - name: add ubicast apt repo key
   when: not offline_mode | d(false)
-  apt_key:
+  ansible.builtin.apt_key:
     url: https://{{ repos_skyreach_host }}/media/public.gpg
 
 - name: add ubicast apt repo
   when:
     - not offline_mode | d(false)
     - repos_skyreach_token | d(false)
-  apt_repository:
+  ansible.builtin.apt_repository:
     repo: deb https://{{ repos_skyreach_host }} packaging/apt/{{ repos_skyreach_token }}/
     filename: ubicast
     update_cache: true
 
 - name: add ubicast security apt repo
   when: not offline_mode | d(false)
-  apt_repository:
+  ansible.builtin.apt_repository:
     repo: deb https://{{ repos_skyreach_host }} packaging/apt/ubicast-security-updates/
     filename: ubicast-secu
     update_cache: true
diff --git a/roles/tester/tasks/main.yml b/roles/tester/tasks/main.yml
index ea60d4d1422654db8524c6c4c849dba6199aa6c9..5b148f460ad332523b5d15c084891dde68a9c720 100644
--- a/roles/tester/tasks/main.yml
+++ b/roles/tester/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: install tester packages
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "{{ tester_packages }}"
diff --git a/roles/users/handlers/main.yml b/roles/users/handlers/main.yml
index fa217d1484825522405fd5d3b4f2182bb227be19..cbc6f332ac43dd2b59fe05cc1eadd3472a872924 100644
--- a/roles/users/handlers/main.yml
+++ b/roles/users/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: restart sshd
-  service:
+  ansible.builtin.service:
     name: sshd
     state: restarted
 
diff --git a/roles/users/tasks/main.yml b/roles/users/tasks/main.yml
index e4fb980cf2717686bda53e105083967b34af6c8c..b4a4a12be07cf29c8266d4b631772694e91475df 100644
--- a/roles/users/tasks/main.yml
+++ b/roles/users/tasks/main.yml
@@ -2,13 +2,13 @@
 
 - name: create users groups
   loop: "{{ users }}"
-  group:
+  ansible.builtin.group:
     name: "{{ item.name }}"
     state: present
 
 - name: create users
   loop: "{{ users }}"
-  user:
+  ansible.builtin.user:
     name: "{{ item.name }}"
     group: "{{ item.name }}"
     shell: /bin/bash
@@ -22,53 +22,53 @@
 
 - name: set users passwords
   loop: "{{ users }}"
-  user:
+  ansible.builtin.user:
     name: "{{ item.name }}"
     password: "{{ item.passwd }}"
     update_password: always
 
 - name: copy .bashrc
   loop: "{{ users }}"
-  copy:
+  ansible.builtin.copy:
     src: .bashrc
     dest: ~{{ item.name }}/.bashrc
     mode: '644'
 
 - name: copy .vimrc
   loop: "{{ users }}"
-  copy:
+  ansible.builtin.copy:
     src: .vimrc
     dest: ~{{ item.name }}/.vimrc
     mode: '644'
 
 - name: copy .bashrc for root
   when: users_root_change
-  copy:
+  ansible.builtin.copy:
     src: .bashrc
     dest: ~root/.bashrc
     mode: '644'
 
 - name: copy .vimrc for root
   when: users_root_change
-  copy:
+  ansible.builtin.copy:
     src: .vimrc
     dest: ~root/.vimrc
     mode: '644'
 
 - name: set users allowed ssh keys
   loop: "{{ users | product(users_ssh_authorized_keys) | list }}"
-  authorized_key:
+  ansible.posix.authorized_key:
     user: "{{ item[0].name }}"
     key: "{{ item[1] }}"
 
 - name: set root allowed ssh keys
   loop: "{{ users_ssh_authorized_keys }}"
-  authorized_key:
+  ansible.posix.authorized_key:
     user: root
     key: "{{ item }}"
 
 - name: sudoers without password
-  copy:
+  ansible.builtin.copy:
     dest: /etc/sudoers.d/nopasswd
     validate: visudo -cf %s
     mode: '440'
@@ -77,7 +77,7 @@
 
 - name: install ubicast ssh access
   when: not offline_mode | d(false)
-  apt:
+  ansible.builtin.apt:
     force_apt_get: true
     install_recommends: false
     name: "ubicast-ssh-access"