From 5362c5119f4844930c2b6eb2f765c01549e2f0ee Mon Sep 17 00:00:00 2001
From: Nicolas KAROLAK <nicolas@karolak.fr>
Date: Wed, 25 Mar 2020 14:57:56 +0000
Subject: [PATCH] Merge envsetup3 | fixes #31425

---
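Notes (not applied by git am): a minimal usage sketch of the Ansible-based layout
introduced by this patch, assuming the inventories/example, requirements.txt,
ansible.cfg and site.yml files added below; the hosts in inventories/example are
placeholders and the local virtualenv path is an assumption (the new .gitignore
only hints at .venv/):

    python3 -m venv .venv && . .venv/bin/activate   # local virtualenv (assumed layout)
    pip install -r requirements.txt                 # project requirements (Ansible et al. assumed)
    ansible-playbook -i inventories/example/hosts site.yml

The Makefile added by this patch also wraps the checks used in .gitlab-ci.yml
(make lint, make test via Molecule).
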
 .ansible-lint                                 |    3 +
 .devcontainer/Dockerfile                      |   78 +-
 .devcontainer/Dockerfile.root                 |    5 +
 .devcontainer/devcontainer.json               |   32 +-
 .devcontainer/docker-compose.yml              |   16 +-
 .editorconfig                                 |   25 +-
 .flake8                                       |   12 +
 .gitattributes                                |    1 +
 .gitignore                                    |   75 +-
 .gitlab-ci.yml                                |   81 +-
 .yamllint                                     |   23 +
 1.Base/1.Utilities/0_setup.py                 |   49 -
 1.Base/2.ubicast_shell_access/0_setup.py      |   69 -
 1.Base/3.admin_shell_account/0_setup.py       |   30 -
 1.Base/4.cockpit/0_setup.py                   |   26 -
 .../2.Install_FTP_watch_folder/0_setup.py     |   83 -
 .../mediaimport_create_ftp_user.sh            |   15 -
 .../mediaimport_remove_ftp_user.sh            |   17 -
 .../on_ftp_upload.py                          |   71 -
 .../pure-ftpd-common                          |   26 -
 .../2.Install_FTP_watch_folder/readme.txt     |   31 -
 .../remove_empty_dirs.py                      |   45 -
 11.VM/1.Export_VM/0_setup.sh                  |   37 -
 11.VM/2.Export_VM_local/0_setup.sh            |   27 -
 12.Netcapture/1.Install_Netcapture/0_setup.py |   75 -
 13.Demokit/1.Deploy_demokit/0_setup.py        |   58 -
 .../mirisconf/recorderd/room-a.json           |   82 -
 .../1.Deploy_demokit/publish_zip_by_url.py    |   80 -
 13.Demokit/2.Reset_demokit/0_setup.py         |   27 -
 .../3.Generate_SSL_certificate/0_setup.py     |   21 -
 14.Dell/1.Dell_openmanage/0_setup.sh          |   28 -
 2.Common_services/1.Postfix/0_setup.py        |   82 -
 2.Common_services/2.NTP/0_setup.py            |   36 -
 2.Common_services/2.NTP/ntp.conf              |   70 -
 2.Common_services/3.PostgreSQL/0_setup.py     |   18 -
 2.Common_services/4.Wowza/0_setup.py          |  106 --
 2.Common_services/4.Wowza/Proxy.xml           |   18 -
 2.Common_services/4.Wowza/Tune.xml            |   38 -
 2.Common_services/5.Nginx/0_setup.py          |   66 -
 2.Common_services/6.Munin/0_setup.sh          |   39 -
 2.Common_services/7.LetsEncrypt/0_setup.py    |   95 -
 2.Common_services/7.LetsEncrypt/hook_mkdir.sh |   10 -
 .../7.LetsEncrypt/hook_reload.sh              |    9 -
 2.Common_services/8.Fail2ban/0_setup.py       |   88 -
 .../8.Fail2ban/filter.d/mediaserver.conf      |   13 -
 .../8.Fail2ban/jail.d/mediaserver.conf        |    9 -
 .../8.Fail2ban/jail.d/mirismanager.conf       |    9 -
 .../8.Fail2ban/jail.d/monitor.conf            |    9 -
 .../1.Shell_fake_action/0_setup.sh            |   12 -
 .../2.Python_fake_action/0_setup.py           |   13 -
 .../1.Download_envsetup_config/0_setup.py     |   74 -
 .../fill_empty_conf.sh                        |   44 -
 .../2.Proxy_settings/0_setup.py               |   56 -
 .../3.APT_upgrade/0_setup.sh                  |   63 -
 .../3.APT_upgrade/sources-deb.list            |    5 -
 .../3.APT_upgrade/sources-ubu.list            |    4 -
 4.Monitor/1.Install_monitor/0_setup.sh        |   23 -
 .../1.Install_MediaServer/0_setup.sh          |   26 -
 5.MediaServer/2.Bench_tools/0_setup.sh        |    3 -
 .../1.Install_Miris_Manager/0_setup.sh        |   40 -
 .../2.Configure_apt_cacher_ng/0_setup.py      |   30 -
 7.MediaWorker/1.Celerity_server/0_setup.py    |   18 -
 .../1.Celerity_server/celerity-config.py      |   12 -
 7.MediaWorker/2.Celerity_workers/0_setup.py   |   18 -
 .../2.Celerity_workers/celerity-config.py     |   12 -
 8.MediaCache/1.Install_cache/0_setup.py       |   23 -
 8.MediaCache/1.Install_cache/crossdomain.xml  |    6 -
 8.MediaCache/1.Install_cache/index.html       |   17 -
 .../1.Install_cache/nginx-limits.conf         |    2 -
 8.MediaCache/1.Install_cache/vhost_cache.conf |   63 -
 8.MediaCache/2.Install_ferm/0_setup.sh        |    6 -
 8.MediaCache/2.Install_ferm/ferm.conf         |   43 -
 9.MediaVault/1.Install_MediaVault/0_setup.py  |   34 -
 9.MediaVault/1.Install_MediaVault/README      |    3 -
 .../excluded_patterns.txt                     |   12 -
 .../1.Install_MediaVault/rsync_tmbackup.sh    |  511 ------
 .../0_setup.sh                                |   55 -
 Makefile                                      |  116 ++
 README.md                                     |   46 +-
 ansible.cfg                                   |   38 +
 deprecated-conf.sh                            |   31 -
 doc/config.md                                 |  123 ++
 doc/contrib.md                                |   59 +
 doc/deploy.md                                 |  105 ++
 doc/install.md                                |  134 ++
 envsetup.py                                   |  194 --
 getenvsetup.sh                                |   68 +-
 inventories/example/group_vars/all.yml        |    9 +
 .../example/host_vars/mymediaserver.yml       |    5 +
 .../example/host_vars/mymediavault.yml        |    5 +
 .../example/host_vars/mymediaworker.yml       |    5 +
 .../example/host_vars/mynetcapture.yml        |    5 +
 inventories/example/hosts                     |   42 +
 .../local-full/host_vars/localhost.dist.yml   |    6 +
 inventories/local-full/hosts                  |   25 +
 .../local-server/host_vars/localhost.dist.yml |    6 +
 inventories/local-server/hosts                |   22 +
 .../local-vault/host_vars/localhost.dist.yml  |    6 +
 inventories/local-vault/hosts                 |    4 +
 .../local-worker/host_vars/localhost.dist.yml |    8 +
 inventories/local-worker/hosts                |    8 +
 launcher.sh                                   |  189 --
 library/nmcli.py                              | 1571 +++++++++++++++++
 library/source_file.py                        |  154 ++
 molecule/default/Dockerfile.j2                |    7 +
 molecule/default/molecule.yml                 |   54 +
 molecule/default/tests/test_000_python3.py    |   15 +
 molecule/default/tests/test_010_conf.py       |   63 +
 molecule/default/tests/test_011_init.py       |   80 +
 molecule/default/tests/test_012_postfix.py    |   63 +
 molecule/default/tests/test_013_ntp.py        |   38 +
 molecule/default/tests/test_020_nginx.py      |   32 +
 molecule/default/tests/test_021_monitor.py    |   63 +
 molecule/default/tests/test_022_postgres.py   |   38 +
 molecule/default/tests/test_030_manager.py    |   57 +
 molecule/default/tests/test_040_celerity.py   |   35 +
 molecule/default/tests/test_041_worker.py     |   30 +
 molecule/default/tests/test_050_server.py     |   57 +
 molecule/default/tests/test_060_import.py     |   84 +
 molecule/default/tests/test_070_netcapture.py |   39 +
 packer/aio.yml                                |   80 +
 packer/base.yml                               |   69 +
 packer/celerity.yml                           |   74 +
 packer/custom/example.yml                     |   84 +
 packer/files/preseed.cfg                      |   59 +
 packer/files/root.cfg                         |    8 +
 packer/manager.yml                            |   75 +
 packer/scripts/cleanup-buster.sh              |   29 +
 packer/scripts/reboot.sh                      |    5 +
 packer/scripts/root.sh                        |   14 +
 packer/scripts/upgrade-buster.sh              |   13 +
 packer/scripts/upgrade.sh                     |   12 +
 packer/scripts/yml2json                       |    8 +
 packer/server.yml                             |   76 +
 packer/worker.yml                             |   74 +
 playbooks/bench-server.yml                    |   16 +
 playbooks/bench-worker.yml                    |   16 +
 playbooks/celerity.yml                        |   14 +
 playbooks/cluster.yml                         |   16 +
 playbooks/import.yml                          |   14 +
 playbooks/includes/base.yml                   |   15 +
 playbooks/includes/celerity.yml               |   12 +
 playbooks/includes/certificates.yml           |   20 +
 playbooks/includes/check_docker.yml           |   19 +
 playbooks/includes/cluster.yml                |   12 +
 playbooks/includes/conf.yml                   |   13 +
 playbooks/includes/firewall.yml               |   13 +
 playbooks/includes/import.yml                 |   12 +
 playbooks/includes/init.yml                   |   17 +
 playbooks/includes/manager.yml                |   15 +
 playbooks/includes/monitor.yml                |   15 +
 playbooks/includes/netcapture.yml             |   12 +
 playbooks/includes/network.yml                |   14 +
 playbooks/includes/postgres.yml               |   12 +
 playbooks/includes/python.yml                 |   14 +
 playbooks/includes/server.yml                 |   15 +
 playbooks/includes/vault.yml                  |   12 +
 playbooks/includes/worker.yml                 |   12 +
 playbooks/includes/wowza.yml                  |   12 +
 playbooks/manager.yml                         |   16 +
 playbooks/migrate-debian.yml                  |  116 ++
 playbooks/monitor.yml                         |   15 +
 playbooks/netcapture.yml                      |   14 +
 playbooks/pod.yml                             |  226 +++
 playbooks/rocketchat.yml                      |   10 +
 playbooks/server.yml                          |   16 +
 playbooks/tests.yml                           |   24 +
 playbooks/upgrade.yml                         |   21 +
 playbooks/vault.yml                           |   14 +
 playbooks/worker.yml                          |   14 +
 playbooks/wowza.yml                           |   14 +
 plugins/action/source_file.py                 |   28 +
 requirements.dev.in                           |    7 +
 requirements.dev.txt                          |   92 +
 requirements.in                               |    3 +
 requirements.txt                              |   15 +
 roles/bench-server/defaults/main.yml          |   14 +
 roles/bench-server/handlers/main.yml          |   12 +
 roles/bench-server/tasks/main.yml             |   45 +
 .../templates/bench-server.service.j2         |    9 +
 .../templates/bench-streaming.conf.j2         |   13 +
 roles/bench-worker/defaults/main.yml          |   14 +
 roles/bench-worker/handlers/main.yml          |   12 +
 roles/bench-worker/tasks/main.yml             |   42 +
 .../templates/bench-worker.service.j2         |   11 +
 .../templates/mediaserver-benchmark-start.j2  |    7 +
 roles/celerity/defaults/main.yml              |   44 +
 roles/celerity/handlers/main.yml              |    8 +
 roles/celerity/tasks/main.yml                 |   43 +
 .../celerity/templates/celerity-config.py.j2  |   12 +
 roles/cluster/defaults/main.yml               |   72 +
 roles/cluster/handlers/main.yml               |   13 +
 roles/cluster/tasks/main.yml                  |  255 +++
 roles/cluster/templates/corosync.conf.j2      |   49 +
 roles/conf/defaults/main.yml                  |   22 +
 roles/conf/tasks/main.yml                     |  123 ++
 roles/fail2ban/defaults/main.yml              |   27 +
 roles/fail2ban/handlers/main.yml              |    8 +
 roles/fail2ban/tasks/main.yml                 |   42 +
 roles/fail2ban/templates/jail.local.j2        |    8 +
 roles/ferm/defaults/main.yml                  |   41 +
 roles/ferm/handlers/main.yml                  |   12 +
 roles/ferm/tasks/main.yml                     |   89 +
 roles/ferm/templates/ferm.conf.j2             |   73 +
 roles/import/defaults/main.yml                |   57 +
 .../cron.d => roles/import/files}/mediaimport |    1 -
 roles/import/files/mediaimport.py             |  163 ++
 roles/import/files/on-upload                  |    3 +
 roles/import/files/on-upload.go               |  141 ++
 roles/import/handlers/main.yml                |   26 +
 roles/import/tasks/main.yml                   |  177 ++
 roles/import/templates/mediaimport.json.j2    |   15 +
 roles/import/templates/sftp_config.j2         |   26 +
 roles/init/defaults/main.yml                  |    8 +
 roles/init/tasks/main.yml                     |   15 +
 roles/letsencrypt/defaults/main.yml           |    8 +
 roles/letsencrypt/handlers/main.yml           |    8 +
 roles/letsencrypt/tasks/main.yml              |  123 ++
 roles/locale/defaults/main.yml                |   11 +
 roles/locale/handlers/main.yml                |   11 +
 roles/locale/tasks/main.yml                   |   34 +
 roles/manager/defaults/main.yml               |   47 +
 roles/manager/files/set_site_url.py           |   27 +
 roles/manager/handlers/main.yml               |   13 +
 roles/manager/tasks/main.yml                  |   93 +
 roles/monitor/defaults/main.yml               |   38 +
 roles/monitor/handlers/main.yml               |    8 +
 roles/monitor/tasks/main.yml                  |   70 +
 roles/netcapture/defaults/main.yml            |   14 +
 roles/netcapture/tasks/main.yml               |   73 +
 roles/netcapture/templates/miris-api.json.j2  |    4 +
 roles/netcapture/templates/netcapture.json.j2 |   10 +
 roles/network/defaults/main.yml               |   18 +
 roles/network/tasks/main.yml                  |   63 +
 roles/nginx/defaults/main.yml                 |   13 +
 roles/nginx/handlers/main.yml                 |    8 +
 roles/nginx/tasks/_certs.yml                  |   41 +
 roles/nginx/tasks/_config.yml                 |   12 +
 roles/nginx/tasks/_install.yml                |   16 +
 roles/nginx/tasks/main.yml                    |   13 +
 roles/ntp/defaults/main.yml                   |    5 +
 roles/ntp/handlers/main.yml                   |   12 +
 roles/ntp/tasks/main.yml                      |   48 +
 roles/ntp/templates/ntp.conf.j2               |   26 +
 roles/postfix/defaults/main.yml               |   15 +
 roles/postfix/handlers/main.yml               |   20 +
 roles/postfix/tasks/main.yml                  |   86 +
 .../postfix/templates/main.cf.j2              |   19 +-
 roles/postgres/defaults/main.yml              |   18 +
 roles/postgres/tasks/main.yml                 |   53 +
 roles/proxy/defaults/main.yml                 |   14 +
 roles/proxy/tasks/main.yml                    |   57 +
 roles/python/tasks/main.yml                   |   13 +
 roles/repos/defaults/main.yml                 |   11 +
 roles/repos/handlers/main.yml                 |    6 +
 roles/repos/tasks/main.yml                    |   36 +
 roles/rocketchat/.editorconfig                |    8 +
 roles/rocketchat/.gitignore                   |    1 +
 roles/rocketchat/.yamllint                    |   11 +
 roles/rocketchat/README.md                    |   41 +
 roles/rocketchat/defaults/main.yml            |   60 +
 roles/rocketchat/handlers/main.yml            |   20 +
 roles/rocketchat/meta/.galaxy_install_info    |    2 +
 roles/rocketchat/meta/main.yml                |   22 +
 .../rocketchat/molecule/default/Dockerfile.j2 |   14 +
 .../rocketchat/molecule/default/molecule.yml  |   23 +
 .../rocketchat/molecule/default/playbook.yml  |    5 +
 .../molecule/default/tests/test_default.py    |   95 +
 roles/rocketchat/requirements.dev.in          |    6 +
 roles/rocketchat/requirements.dev.txt         |  109 ++
 roles/rocketchat/requirements.in              |    1 +
 roles/rocketchat/requirements.txt             |   15 +
 roles/rocketchat/tasks/main.yml               |  162 ++
 .../templates/rocketchat.service.j2           |   15 +
 roles/server/defaults/main.yml                |   60 +
 roles/server/handlers/main.yml                |   12 +
 roles/server/tasks/main.yml                   |  128 ++
 roles/server/templates/celerity-config.py.j2  |   12 +
 roles/sysutils/defaults/main.yml              |   50 +
 roles/sysutils/tasks/main.yml                 |   21 +
 roles/users/defaults/main.yml                 |   15 +
 .../bashrc => roles/users/files/.bashrc       |    0
 .../vimrc => roles/users/files/.vimrc         |    0
 .../users/files}/ubicast_support.pub          |    0
 roles/users/handlers/main.yml                 |    8 +
 roles/users/tasks/main.yml                    |   66 +
 roles/vault/defaults/main.yml                 |   58 +
 roles/vault/handlers/main.yml                 |    7 +
 roles/vault/tasks/main.yml                    |  104 ++
 .../vault/templates/systemd-backup-service.j2 |    9 +
 roles/vault/templates/systemd-backup-timer.j2 |    9 +
 .../vault/templates/systemd-mailer-script.j2  |   11 +
 .../vault/templates/systemd-mailer-service.j2 |    8 +
 roles/worker/defaults/main.yml                |   31 +
 roles/worker/handlers/main.yml                |    8 +
 roles/worker/tasks/main.yml                   |   33 +
 roles/worker/templates/celerity-config.py.j2  |   14 +
 roles/wowza/defaults/main.yml                 |   72 +
 roles/wowza/handlers/main.yml                 |   13 +
 roles/wowza/tasks/main.yml                    |  122 ++
 roles/wowza/templates/Server.xml.j2           |  113 ++
 roles/wowza/templates/Tune.xml.j2             |   11 +
 roles/wowza/templates/VHost.xml.j2            |  338 ++++
 .../wowza/templates/live-application.xml.j2   |   50 +-
 setup.cfg                                     |   34 -
 setup.py                                      |    6 -
 site.yml                                      |   68 +
 tests/test_ssl.py                             |    7 +-
 308 files changed, 10197 insertions(+), 3475 deletions(-)
 create mode 100644 .ansible-lint
 create mode 100644 .devcontainer/Dockerfile.root
 create mode 100644 .flake8
 create mode 100644 .gitattributes
 create mode 100644 .yamllint
 delete mode 100644 1.Base/1.Utilities/0_setup.py
 delete mode 100644 1.Base/2.ubicast_shell_access/0_setup.py
 delete mode 100644 1.Base/3.admin_shell_account/0_setup.py
 delete mode 100644 1.Base/4.cockpit/0_setup.py
 delete mode 100644 10.MediaImport/2.Install_FTP_watch_folder/0_setup.py
 delete mode 100755 10.MediaImport/2.Install_FTP_watch_folder/mediaimport_create_ftp_user.sh
 delete mode 100755 10.MediaImport/2.Install_FTP_watch_folder/mediaimport_remove_ftp_user.sh
 delete mode 100755 10.MediaImport/2.Install_FTP_watch_folder/on_ftp_upload.py
 delete mode 100644 10.MediaImport/2.Install_FTP_watch_folder/pure-ftpd-common
 delete mode 100644 10.MediaImport/2.Install_FTP_watch_folder/readme.txt
 delete mode 100755 10.MediaImport/2.Install_FTP_watch_folder/remove_empty_dirs.py
 delete mode 100755 11.VM/1.Export_VM/0_setup.sh
 delete mode 100755 11.VM/2.Export_VM_local/0_setup.sh
 delete mode 100644 12.Netcapture/1.Install_Netcapture/0_setup.py
 delete mode 100644 13.Demokit/1.Deploy_demokit/0_setup.py
 delete mode 100755 13.Demokit/1.Deploy_demokit/mirisconf/recorderd/room-a.json
 delete mode 100755 13.Demokit/1.Deploy_demokit/publish_zip_by_url.py
 delete mode 100644 13.Demokit/2.Reset_demokit/0_setup.py
 delete mode 100644 13.Demokit/3.Generate_SSL_certificate/0_setup.py
 delete mode 100755 14.Dell/1.Dell_openmanage/0_setup.sh
 delete mode 100644 2.Common_services/1.Postfix/0_setup.py
 delete mode 100644 2.Common_services/2.NTP/0_setup.py
 delete mode 100644 2.Common_services/2.NTP/ntp.conf
 delete mode 100644 2.Common_services/3.PostgreSQL/0_setup.py
 delete mode 100644 2.Common_services/4.Wowza/0_setup.py
 delete mode 100644 2.Common_services/4.Wowza/Proxy.xml
 delete mode 100755 2.Common_services/4.Wowza/Tune.xml
 delete mode 100644 2.Common_services/5.Nginx/0_setup.py
 delete mode 100755 2.Common_services/6.Munin/0_setup.sh
 delete mode 100644 2.Common_services/7.LetsEncrypt/0_setup.py
 delete mode 100755 2.Common_services/7.LetsEncrypt/hook_mkdir.sh
 delete mode 100755 2.Common_services/7.LetsEncrypt/hook_reload.sh
 delete mode 100644 2.Common_services/8.Fail2ban/0_setup.py
 delete mode 100644 2.Common_services/8.Fail2ban/filter.d/mediaserver.conf
 delete mode 100644 2.Common_services/8.Fail2ban/jail.d/mediaserver.conf
 delete mode 100644 2.Common_services/8.Fail2ban/jail.d/mirismanager.conf
 delete mode 100644 2.Common_services/8.Fail2ban/jail.d/monitor.conf
 delete mode 100755 20.Envsetup_dev/1.Shell_fake_action/0_setup.sh
 delete mode 100644 20.Envsetup_dev/2.Python_fake_action/0_setup.py
 delete mode 100644 3.New_server_deployment/1.Download_envsetup_config/0_setup.py
 delete mode 100755 3.New_server_deployment/1.Download_envsetup_config/fill_empty_conf.sh
 delete mode 100644 3.New_server_deployment/2.Proxy_settings/0_setup.py
 delete mode 100755 3.New_server_deployment/3.APT_upgrade/0_setup.sh
 delete mode 100644 3.New_server_deployment/3.APT_upgrade/sources-deb.list
 delete mode 100644 3.New_server_deployment/3.APT_upgrade/sources-ubu.list
 delete mode 100755 4.Monitor/1.Install_monitor/0_setup.sh
 delete mode 100755 5.MediaServer/1.Install_MediaServer/0_setup.sh
 delete mode 100644 5.MediaServer/2.Bench_tools/0_setup.sh
 delete mode 100755 6.Miris_Manager/1.Install_Miris_Manager/0_setup.sh
 delete mode 100644 6.Miris_Manager/2.Configure_apt_cacher_ng/0_setup.py
 delete mode 100644 7.MediaWorker/1.Celerity_server/0_setup.py
 delete mode 100644 7.MediaWorker/1.Celerity_server/celerity-config.py
 delete mode 100644 7.MediaWorker/2.Celerity_workers/0_setup.py
 delete mode 100644 7.MediaWorker/2.Celerity_workers/celerity-config.py
 delete mode 100644 8.MediaCache/1.Install_cache/0_setup.py
 delete mode 100644 8.MediaCache/1.Install_cache/crossdomain.xml
 delete mode 100644 8.MediaCache/1.Install_cache/index.html
 delete mode 100644 8.MediaCache/1.Install_cache/nginx-limits.conf
 delete mode 100644 8.MediaCache/1.Install_cache/vhost_cache.conf
 delete mode 100755 8.MediaCache/2.Install_ferm/0_setup.sh
 delete mode 100644 8.MediaCache/2.Install_ferm/ferm.conf
 delete mode 100644 9.MediaVault/1.Install_MediaVault/0_setup.py
 delete mode 100644 9.MediaVault/1.Install_MediaVault/README
 delete mode 100644 9.MediaVault/1.Install_MediaVault/excluded_patterns.txt
 delete mode 100644 9.MediaVault/1.Install_MediaVault/rsync_tmbackup.sh
 delete mode 100755 9.MediaVault/2.Install_MediaVault_Burp_deprecated/0_setup.sh
 create mode 100644 Makefile
 create mode 100644 ansible.cfg
 delete mode 100644 deprecated-conf.sh
 create mode 100644 doc/config.md
 create mode 100644 doc/contrib.md
 create mode 100644 doc/deploy.md
 create mode 100644 doc/install.md
 delete mode 100755 envsetup.py
 create mode 100644 inventories/example/group_vars/all.yml
 create mode 100644 inventories/example/host_vars/mymediaserver.yml
 create mode 100644 inventories/example/host_vars/mymediavault.yml
 create mode 100644 inventories/example/host_vars/mymediaworker.yml
 create mode 100644 inventories/example/host_vars/mynetcapture.yml
 create mode 100644 inventories/example/hosts
 create mode 100644 inventories/local-full/host_vars/localhost.dist.yml
 create mode 100644 inventories/local-full/hosts
 create mode 100644 inventories/local-server/host_vars/localhost.dist.yml
 create mode 100644 inventories/local-server/hosts
 create mode 100644 inventories/local-vault/host_vars/localhost.dist.yml
 create mode 100644 inventories/local-vault/hosts
 create mode 100644 inventories/local-worker/host_vars/localhost.dist.yml
 create mode 100644 inventories/local-worker/hosts
 delete mode 100755 launcher.sh
 create mode 100644 library/nmcli.py
 create mode 100644 library/source_file.py
 create mode 100644 molecule/default/Dockerfile.j2
 create mode 100644 molecule/default/molecule.yml
 create mode 100644 molecule/default/tests/test_000_python3.py
 create mode 100644 molecule/default/tests/test_010_conf.py
 create mode 100644 molecule/default/tests/test_011_init.py
 create mode 100644 molecule/default/tests/test_012_postfix.py
 create mode 100644 molecule/default/tests/test_013_ntp.py
 create mode 100644 molecule/default/tests/test_020_nginx.py
 create mode 100644 molecule/default/tests/test_021_monitor.py
 create mode 100644 molecule/default/tests/test_022_postgres.py
 create mode 100644 molecule/default/tests/test_030_manager.py
 create mode 100644 molecule/default/tests/test_040_celerity.py
 create mode 100644 molecule/default/tests/test_041_worker.py
 create mode 100644 molecule/default/tests/test_050_server.py
 create mode 100644 molecule/default/tests/test_060_import.py
 create mode 100644 molecule/default/tests/test_070_netcapture.py
 create mode 100644 packer/aio.yml
 create mode 100644 packer/base.yml
 create mode 100644 packer/celerity.yml
 create mode 100644 packer/custom/example.yml
 create mode 100644 packer/files/preseed.cfg
 create mode 100644 packer/files/root.cfg
 create mode 100644 packer/manager.yml
 create mode 100644 packer/scripts/cleanup-buster.sh
 create mode 100644 packer/scripts/reboot.sh
 create mode 100644 packer/scripts/root.sh
 create mode 100644 packer/scripts/upgrade-buster.sh
 create mode 100644 packer/scripts/upgrade.sh
 create mode 100755 packer/scripts/yml2json
 create mode 100644 packer/server.yml
 create mode 100644 packer/worker.yml
 create mode 100755 playbooks/bench-server.yml
 create mode 100755 playbooks/bench-worker.yml
 create mode 100755 playbooks/celerity.yml
 create mode 100755 playbooks/cluster.yml
 create mode 100755 playbooks/import.yml
 create mode 100755 playbooks/includes/base.yml
 create mode 100755 playbooks/includes/celerity.yml
 create mode 100755 playbooks/includes/certificates.yml
 create mode 100755 playbooks/includes/check_docker.yml
 create mode 100755 playbooks/includes/cluster.yml
 create mode 100755 playbooks/includes/conf.yml
 create mode 100755 playbooks/includes/firewall.yml
 create mode 100755 playbooks/includes/import.yml
 create mode 100755 playbooks/includes/init.yml
 create mode 100755 playbooks/includes/manager.yml
 create mode 100755 playbooks/includes/monitor.yml
 create mode 100755 playbooks/includes/netcapture.yml
 create mode 100755 playbooks/includes/network.yml
 create mode 100755 playbooks/includes/postgres.yml
 create mode 100755 playbooks/includes/python.yml
 create mode 100755 playbooks/includes/server.yml
 create mode 100755 playbooks/includes/vault.yml
 create mode 100755 playbooks/includes/worker.yml
 create mode 100755 playbooks/includes/wowza.yml
 create mode 100755 playbooks/manager.yml
 create mode 100755 playbooks/migrate-debian.yml
 create mode 100755 playbooks/monitor.yml
 create mode 100755 playbooks/netcapture.yml
 create mode 100755 playbooks/pod.yml
 create mode 100755 playbooks/rocketchat.yml
 create mode 100755 playbooks/server.yml
 create mode 100755 playbooks/tests.yml
 create mode 100755 playbooks/upgrade.yml
 create mode 100755 playbooks/vault.yml
 create mode 100755 playbooks/worker.yml
 create mode 100755 playbooks/wowza.yml
 create mode 100644 plugins/action/source_file.py
 create mode 100644 requirements.dev.in
 create mode 100644 requirements.dev.txt
 create mode 100644 requirements.in
 create mode 100644 requirements.txt
 create mode 100644 roles/bench-server/defaults/main.yml
 create mode 100644 roles/bench-server/handlers/main.yml
 create mode 100644 roles/bench-server/tasks/main.yml
 create mode 100644 roles/bench-server/templates/bench-server.service.j2
 create mode 100644 roles/bench-server/templates/bench-streaming.conf.j2
 create mode 100644 roles/bench-worker/defaults/main.yml
 create mode 100644 roles/bench-worker/handlers/main.yml
 create mode 100644 roles/bench-worker/tasks/main.yml
 create mode 100644 roles/bench-worker/templates/bench-worker.service.j2
 create mode 100755 roles/bench-worker/templates/mediaserver-benchmark-start.j2
 create mode 100644 roles/celerity/defaults/main.yml
 create mode 100644 roles/celerity/handlers/main.yml
 create mode 100644 roles/celerity/tasks/main.yml
 create mode 100644 roles/celerity/templates/celerity-config.py.j2
 create mode 100644 roles/cluster/defaults/main.yml
 create mode 100644 roles/cluster/handlers/main.yml
 create mode 100644 roles/cluster/tasks/main.yml
 create mode 100644 roles/cluster/templates/corosync.conf.j2
 create mode 100644 roles/conf/defaults/main.yml
 create mode 100644 roles/conf/tasks/main.yml
 create mode 100644 roles/fail2ban/defaults/main.yml
 create mode 100644 roles/fail2ban/handlers/main.yml
 create mode 100644 roles/fail2ban/tasks/main.yml
 create mode 100644 roles/fail2ban/templates/jail.local.j2
 create mode 100644 roles/ferm/defaults/main.yml
 create mode 100644 roles/ferm/handlers/main.yml
 create mode 100644 roles/ferm/tasks/main.yml
 create mode 100644 roles/ferm/templates/ferm.conf.j2
 create mode 100644 roles/import/defaults/main.yml
 rename {10.MediaImport/2.Install_FTP_watch_folder/cron.d => roles/import/files}/mediaimport (99%)
 create mode 100644 roles/import/files/mediaimport.py
 create mode 100755 roles/import/files/on-upload
 create mode 100644 roles/import/files/on-upload.go
 create mode 100644 roles/import/handlers/main.yml
 create mode 100644 roles/import/tasks/main.yml
 create mode 100644 roles/import/templates/mediaimport.json.j2
 create mode 100644 roles/import/templates/sftp_config.j2
 create mode 100644 roles/init/defaults/main.yml
 create mode 100644 roles/init/tasks/main.yml
 create mode 100644 roles/letsencrypt/defaults/main.yml
 create mode 100644 roles/letsencrypt/handlers/main.yml
 create mode 100644 roles/letsencrypt/tasks/main.yml
 create mode 100644 roles/locale/defaults/main.yml
 create mode 100644 roles/locale/handlers/main.yml
 create mode 100644 roles/locale/tasks/main.yml
 create mode 100644 roles/manager/defaults/main.yml
 create mode 100644 roles/manager/files/set_site_url.py
 create mode 100644 roles/manager/handlers/main.yml
 create mode 100644 roles/manager/tasks/main.yml
 create mode 100644 roles/monitor/defaults/main.yml
 create mode 100644 roles/monitor/handlers/main.yml
 create mode 100644 roles/monitor/tasks/main.yml
 create mode 100644 roles/netcapture/defaults/main.yml
 create mode 100644 roles/netcapture/tasks/main.yml
 create mode 100644 roles/netcapture/templates/miris-api.json.j2
 create mode 100644 roles/netcapture/templates/netcapture.json.j2
 create mode 100644 roles/network/defaults/main.yml
 create mode 100644 roles/network/tasks/main.yml
 create mode 100644 roles/nginx/defaults/main.yml
 create mode 100644 roles/nginx/handlers/main.yml
 create mode 100644 roles/nginx/tasks/_certs.yml
 create mode 100644 roles/nginx/tasks/_config.yml
 create mode 100644 roles/nginx/tasks/_install.yml
 create mode 100644 roles/nginx/tasks/main.yml
 create mode 100644 roles/ntp/defaults/main.yml
 create mode 100644 roles/ntp/handlers/main.yml
 create mode 100644 roles/ntp/tasks/main.yml
 create mode 100644 roles/ntp/templates/ntp.conf.j2
 create mode 100644 roles/postfix/defaults/main.yml
 create mode 100644 roles/postfix/handlers/main.yml
 create mode 100644 roles/postfix/tasks/main.yml
 rename 2.Common_services/1.Postfix/main.cf => roles/postfix/templates/main.cf.j2 (66%)
 create mode 100644 roles/postgres/defaults/main.yml
 create mode 100644 roles/postgres/tasks/main.yml
 create mode 100644 roles/proxy/defaults/main.yml
 create mode 100644 roles/proxy/tasks/main.yml
 create mode 100644 roles/python/tasks/main.yml
 create mode 100644 roles/repos/defaults/main.yml
 create mode 100644 roles/repos/handlers/main.yml
 create mode 100644 roles/repos/tasks/main.yml
 create mode 100644 roles/rocketchat/.editorconfig
 create mode 100644 roles/rocketchat/.gitignore
 create mode 100644 roles/rocketchat/.yamllint
 create mode 100644 roles/rocketchat/README.md
 create mode 100644 roles/rocketchat/defaults/main.yml
 create mode 100644 roles/rocketchat/handlers/main.yml
 create mode 100644 roles/rocketchat/meta/.galaxy_install_info
 create mode 100644 roles/rocketchat/meta/main.yml
 create mode 100644 roles/rocketchat/molecule/default/Dockerfile.j2
 create mode 100644 roles/rocketchat/molecule/default/molecule.yml
 create mode 100644 roles/rocketchat/molecule/default/playbook.yml
 create mode 100644 roles/rocketchat/molecule/default/tests/test_default.py
 create mode 100644 roles/rocketchat/requirements.dev.in
 create mode 100644 roles/rocketchat/requirements.dev.txt
 create mode 100644 roles/rocketchat/requirements.in
 create mode 100644 roles/rocketchat/requirements.txt
 create mode 100644 roles/rocketchat/tasks/main.yml
 create mode 100644 roles/rocketchat/templates/rocketchat.service.j2
 create mode 100644 roles/server/defaults/main.yml
 create mode 100644 roles/server/handlers/main.yml
 create mode 100644 roles/server/tasks/main.yml
 create mode 100644 roles/server/templates/celerity-config.py.j2
 create mode 100644 roles/sysutils/defaults/main.yml
 create mode 100644 roles/sysutils/tasks/main.yml
 create mode 100644 roles/users/defaults/main.yml
 rename 1.Base/1.Utilities/bashrc => roles/users/files/.bashrc (100%)
 rename 1.Base/1.Utilities/vimrc => roles/users/files/.vimrc (100%)
 rename {1.Base/2.ubicast_shell_access => roles/users/files}/ubicast_support.pub (100%)
 create mode 100644 roles/users/handlers/main.yml
 create mode 100644 roles/users/tasks/main.yml
 create mode 100644 roles/vault/defaults/main.yml
 create mode 100644 roles/vault/handlers/main.yml
 create mode 100644 roles/vault/tasks/main.yml
 create mode 100644 roles/vault/templates/systemd-backup-service.j2
 create mode 100644 roles/vault/templates/systemd-backup-timer.j2
 create mode 100644 roles/vault/templates/systemd-mailer-script.j2
 create mode 100644 roles/vault/templates/systemd-mailer-service.j2
 create mode 100644 roles/worker/defaults/main.yml
 create mode 100644 roles/worker/handlers/main.yml
 create mode 100644 roles/worker/tasks/main.yml
 create mode 100644 roles/worker/templates/celerity-config.py.j2
 create mode 100644 roles/wowza/defaults/main.yml
 create mode 100644 roles/wowza/handlers/main.yml
 create mode 100644 roles/wowza/tasks/main.yml
 create mode 100644 roles/wowza/templates/Server.xml.j2
 create mode 100644 roles/wowza/templates/Tune.xml.j2
 create mode 100644 roles/wowza/templates/VHost.xml.j2
 rename 2.Common_services/4.Wowza/live-application.xml => roles/wowza/templates/live-application.xml.j2 (61%)
 delete mode 100644 setup.cfg
 delete mode 100644 setup.py
 create mode 100755 site.yml

diff --git a/.ansible-lint b/.ansible-lint
new file mode 100644
index 00000000..c81cf5b7
--- /dev/null
+++ b/.ansible-lint
@@ -0,0 +1,3 @@
+---
+
+...
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index c7cb68b0..248bb8cc 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,26 +1,24 @@
 FROM registry.ubicast.net/docker/debian-dev:latest
 
 # avoid warnings by switching to noninteractive
-ENV DEBIAN_FRONTEND=noninteractive
+ENV DEBIAN_FRONTEND noninteractive
 # local pyvenv to avoid conflicts with system
 ENV PYVENV ${HOME}/pyvenv
+# go path
+ENV GOPATH ${HOME}/go
 # add pyvenv to path
-ENV PATH ${PYVENV}/bin:${PATH}
+ENV PATH ${GOPATH}/bin:${PYVENV}/bin:/usr/local/go/bin:${PATH}
 
 RUN \
     # install required tools
     sudo apt-get update && \
     sudo apt-get install -y \
-        bsd-mailx \
-        python3-defusedxml \
-        python3-dnspython \
-        python3-openssl \
-        python3-psutil \
-        python3-psycopg2 \
-        python3-pydbus \
-        python3-requests \
-        python3-spf \
-        && \
+        libffi-dev \
+        libncurses5 \
+        libncursesw5 \
+        libssl-dev \
+        python3-netaddr \
+    && \
     # clean up
     sudo apt-get autoremove -y && \
     sudo apt-get clean -y && \
@@ -31,18 +29,58 @@ RUN \
     pip install -U \
         pip \
         wheel \
-    && \
+        && \
+    # import hashicorp gpg key
+    sudo gpg --keyserver hkp://eu.pool.sks-keyservers.net --recv-key 51852D87348FFC4C && \
+    :
+
+ARG PACKER_VERSION=1.4.5
+RUN \
+    # packer
+    sudo curl -LOSs https://releases.hashicorp.com/packer/${PACKER_VERSION}/packer_${PACKER_VERSION}_linux_amd64.zip && \
+    sudo curl -LOSs https://releases.hashicorp.com/packer/${PACKER_VERSION}/packer_${PACKER_VERSION}_SHA256SUMS && \
+    sudo curl -LOSs https://releases.hashicorp.com/packer/${PACKER_VERSION}/packer_${PACKER_VERSION}_SHA256SUMS.sig && \
+    sudo gpg --verify packer_${PACKER_VERSION}_SHA256SUMS.sig packer_${PACKER_VERSION}_SHA256SUMS && \
+    sudo shasum -a 256 -c packer_${PACKER_VERSION}_SHA256SUMS --ignore-missing && \
+    sudo unzip -d /usr/local/bin packer_${PACKER_VERSION}_linux_amd64.zip && \
+    sudo rm -f packer_${PACKER_VERSION}* && \
     :
 
+ARG GO_VERSION=1.13.4
 RUN \
-    # dev requirements
-    pip install \
-        black \
-        flake8 \
-        pylint \
-        pysnooper \
+    # golang
+    sudo curl -LOSs https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz && \
+    sudo tar -C /usr/local/ -xzf go${GO_VERSION}.linux-amd64.tar.gz && \
+    sudo rm -f go${GO_VERSION}* && \
+    go get \
+        github.com/mdempsky/gocode \
+        github.com/uudashr/gopkgs/cmd/gopkgs \
+        github.com/ramya-rao-a/go-outline \
+        github.com/stamblerre/gocode \
+        github.com/rogpeppe/godef \
+        github.com/sqs/goreturns \
+        golang.org/x/lint/golint \
     && \
     :
 
+COPY requirements.dev.txt .
+RUN \
+    # ansible & co
+    pip install -r requirements.dev.txt && \
+    sudo rm requirements.dev.txt && \
+    :
+
+ARG OVFTOOL_VERSION=4.3.0-13981069
+ARG OVFTOOL_NAME=VMware-ovftool-${OVFTOOL_VERSION}-lin.x86_64
+ARG OVFTOOL_URL=https://nextcloud.ubicast.net/s/LEcyMWG9BnKsrHX/download?path=%2FVMware%20Tools&files=${OVFTOOL_NAME}
+RUN \
+    # ovf tool
+    sudo curl -o ${OVFTOOL_NAME}.bundle -LSs "${OVFTOOL_URL}.bundle" && \
+    sudo curl -o ${OVFTOOL_NAME}.sha256 -LSs "${OVFTOOL_URL}.sha256" && \
+    sudo shasum -a 256 -c ${OVFTOOL_NAME}.sha256 --ignore-missing && \
+    sudo sh ${OVFTOOL_NAME}.bundle -p /usr/local --console --required --eulas-agreed && \
+    sudo rm ${OVFTOOL_NAME}.* && \
+    :
+
 # switch back to dialog for any ad-hoc use of apt-get
-ENV DEBIAN_FRONTEND=dialog
+ENV DEBIAN_FRONTEND dialog
diff --git a/.devcontainer/Dockerfile.root b/.devcontainer/Dockerfile.root
new file mode 100644
index 00000000..1205d0dd
--- /dev/null
+++ b/.devcontainer/Dockerfile.root
@@ -0,0 +1,5 @@
+FROM registry.ubicast.net/sys/envsetup
+
+ENV HOME /root
+
+USER root
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 870edb65..40321bc5 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -4,11 +4,37 @@
     "service": "app",
     "workspaceFolder": "/workspace",
     "extensions": [
-      // python
+      "vscoss.vscode-ansible",
+      "redhat.vscode-yaml",
+      "samuelcolvin.jinjahtml",
       "ms-python.python",
+      "ms-vscode.go"
+    ],
+    "vim-extensions": [
+      "deoplete-plugins/deoplete-jedi",
+      "psf/black"
     ],
     "settings": {
-        "python.pythonPath": "/home/vscode/pyvenv/bin/python",
+        "files.associations": {
+            "**/requirements.in/*.txt": "pip-requirements",
+            "requirements.*.txt": "pip-requirements",
+            "**/defaults/**/*": "ansible",
+            "**/tasks/**/*.yml": "ansible",
+            "**/handler/*.yml": "ansible",
+            "**/*_vars/**/*.yml": "ansible",
+            "**/roles/**/*.yml": "ansible",
+            "**/role.d/**/*.yml": "ansible",
+            "**/playbooks/**/*.yml": "ansible",
+            "**/playbook.d/**/*.yml": "ansible",
+            "**/*ansible*/**/*.yml": "ansible",
+            "**/vars/**/*.yml": "ansible",
+            "**/inventories/**/hosts": "ini",
+            "**/inventories/**/*": "ansible",
+            "**/inventory/**/*": "ansible",
+            "**/inventory.d/**/*": "ansible"
+        },
+        "python.pythonPath": "/home/code/pyvenv/bin/python",
         "python.formatting.provider": "black",
-    },
+        "python.linting.flake8Enabled": true
+    }
 }
diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml
index b17312a6..3ad46327 100644
--- a/.devcontainer/docker-compose.yml
+++ b/.devcontainer/docker-compose.yml
@@ -7,13 +7,19 @@ services:
     build:
       context: ".."
       dockerfile: ".devcontainer/Dockerfile"
-    user: "vscode"
+    user: "code"
     volumes:
-      - "~/.config/git:/home/vscode/.config/git:ro"
-      - "~/.ssh:/home/vscode/.ssh:ro"
-      - "${SSH_AUTH_SOCK}:/ssh-agent:ro"
-      - "/var/run/docker.sock:/var/run/docker.sock"
+      # workspace
       - "..:/workspace"
+      # vim config
+      - "$HOME/.config/nvim:/home/code/.config/nvim:ro"
+      # git config
+      - "$HOME/.config/git:/home/code/.config/git:ro"
+      # ssh config
+      - "$HOME/.ssh:/home/code/.ssh:ro"
+      - "$SSH_AUTH_SOCK:/ssh-agent:ro"
+      # docker socket
+      - "/var/run/docker.sock:/var/run/docker.sock"
     environment:
       - "SSH_AUTH_SOCK=/ssh-agent"
     working_dir: "/workspace"
diff --git a/.editorconfig b/.editorconfig
index 3c44241c..b7c4f15b 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -2,8 +2,31 @@ root = true
 
 [*]
 indent_style = space
-indent_size = 4
+indent_size = 2
 end_of_line = lf
 charset = utf-8
 trim_trailing_whitespace = true
 insert_final_newline = true
+
+[*.md]
+trim_trailing_whitespace = false
+
+[Makefile]
+indent_style = tab
+indent_size = 4
+
+[*.go]
+indent_style = tab
+indent_size = 4
+
+[*.py]
+indent_size = 4
+
+[*.sh]
+indent_size = 4
+
+[Dockerfile]
+indent_size = 4
+
+[*.pem]
+insert_final_newline = false
diff --git a/.flake8 b/.flake8
new file mode 100644
index 00000000..1059d326
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,12 @@
+[flake8]
+
+ignore =
+    E501
+    E265
+    W503
+    W505
+
+per-file-ignores =
+    roles/manager/files/set_site_url.py:E402
+    library/*:E402
+    library/nmcli.py:E402,F401
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..d80a9fe1
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+roles/import/files/on-upload filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
index 9f45b3fc..413bcdf5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,45 +1,42 @@
-*.py[cod]
-
-# C extensions
-*.so
-
-# Packages
-*.egg
-*.egg-info
-dist
-build
-eggs
-parts
-bin
-var
-sdist
-develop-eggs
-.installed.cfg
-lib
-lib64
-__pycache__
-
-# Installer logs
-pip-log.txt
-
-# Unit test / coverage reports
-.coverage
-.tox
-nosetests.xml
+# virtualenv
+.venv/
 
-# Envsetup
+# python
+__pycache__/
+*.pyc
+
+# ansible
+inventories/*
+!inventories/example
+!inventories/local*
+inventories/local*/host_vars/localhost.yml
+playbooks/_*
+roles/_*
+logs/
+
+# packer
+packer_cache/
+output/
+packer/custom/*
+!packer/custom/example.yml
+
+# ide
+.vscode/
+*.code-workspace
+.idea/
+*.sublime-workspace
+*.sublime-project
+
+# secrets
+.env/*
+!.env/_reset
+!.env/_config
+!.env/*example
+!.env/example
+
+# envsetup
 conf*.sh
 auto-generated-conf.sh
 log*.txt
 tests/ms-testing-suite
 *.log
-
-# virtualenv
-.venv/
-venv/
-
-# cache
-.mypy_cache/
-
-# settings
-.vscode/
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 8e9ac78f..71197610 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,10 +1,79 @@
-flake8:
-  image: python:3-alpine
+---
+
+image: registry.ubicast.net/mediaserver/envsetup
+
+stages:
+  - docker
+  - lint
+  - test
+
+before_script:
+  - make requirements-dev
+
+docker:build:
+  image: docker:stable
+  stage: docker
   tags:
     - docker
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME == "master"'
+      changes:
+        - .devcontainer/Dockerfile
+        - requirements.dev.txt
   before_script:
-    - python -m pip install --upgrade pip
-    - pip3 install flake8
+    - apk add make
+    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN registry.ubicast.net
   script:
-    # Run flake8 (pycodestyle + pyflakes) check.
-    - flake8 .
+    - make docker-build
+    - make docker-push
+
+docker:rebuild:
+  image: docker:stable
+  stage: docker
+  tags:
+    - docker
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "schedule"'
+    - if: '$CI_PIPELINE_SOURCE == "web"'
+  before_script:
+    - apk add make
+    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN registry.ubicast.net
+  script:
+    - make docker-build
+    - make docker-push
+
+lint:
+  image: registry.ubicast.net/mediaserver/envsetup
+  stage: lint
+  tags:
+    - docker
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "web"'
+    - if: '$CI_PIPELINE_SOURCE == "merge_requests"'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME == "master"'
+      changes:
+        - "*.yml"
+        - "*.py"
+  script:
+    - make lint
+
+test:
+  image: registry.ubicast.net/mediaserver/envsetup:root
+  stage: test
+  tags:
+    - docker
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "web"'
+    - if: '$CI_PIPELINE_SOURCE == "merge_requests"'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME == "master"'
+      changes:
+        - inventories/**/*
+        - library/**/*
+        - molecule/**/*
+        - playbooks/**/*
+        - plugins/**/*
+        - roles/**/*
+  script:
+    - make test
+
+...
diff --git a/.yamllint b/.yamllint
new file mode 100644
index 00000000..faf11063
--- /dev/null
+++ b/.yamllint
@@ -0,0 +1,23 @@
+---
+
+extends: default
+
+ignore: |
+  .venv/
+
+rules:
+  braces:
+    min-spaces-inside-empty: 0
+    max-spaces-inside-empty: 0
+    min-spaces-inside: 1
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    min-spaces-inside-empty: 0
+    max-spaces-inside-empty: 0
+    min-spaces-inside: 1
+    max-spaces-inside: 1
+    level: error
+  line-length: disable
+
+...
diff --git a/1.Base/1.Utilities/0_setup.py b/1.Base/1.Utilities/0_setup.py
deleted file mode 100644
index f31d25c5..00000000
--- a/1.Base/1.Utilities/0_setup.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python3
-
-import utils
-
-packages = [
-    "apt-utils",
-    "bmon",
-    "curl",
-    "gawk",
-    "git",
-    "host",
-    "htop",
-    "iotop",
-    "ipython3",
-    "lm-sensors",
-    "make",
-    "net-tools",
-    "netcat",
-    "nfs-client",
-    "pciutils",
-    "pwgen",
-    "python3-openssl",
-    "python3-psutil",
-    "python3-requests",
-    "python3-spf",
-    "rsync",
-    "smartmontools",
-    "sudo",
-    "vim",
-]
-
-
-def setup(interactive=True):
-    dir_path = utils.get_dir(__file__)
-    cmds = [
-        "apt-get clean",
-        "apt-get update",
-        "apt-get install --yes {}".format(" ".join(packages)),
-        # Locale
-        "locale -a",
-        "update-locale LANG='C.UTF-8' LANGUAGE='C.UTF-8' LC_ALL='C.UTF-8' LC_ADDRESS='C.UTF-8' LC_COLLATE='C.UTF-8' LC_CTYPE='C.UTF-8' LC_IDENTIFICATION='C.UTF-8' LC_MEASUREMENT='C.UTF-8' LC_MESSAGES='C.UTF-8' LC_MONETARY='C.UTF-8' LC_NAME='C.UTF-8' LC_NUMERIC='C.UTF-8' LC_PAPER='C.UTF-8' LC_TELEPHONE='C.UTF-8' LC_TIME='C.UTF-8'",
-        # Copy vimrc file
-        "cp '{}/vimrc' /root/.vimrc".format(dir_path),
-        # Copy bashrc file
-        "cp '{}/bashrc' /root/.bashrc".format(dir_path),
-        # Generate SSH key if not already done
-        "[ -f /root/.ssh/id_rsa ] || ssh-keygen -b 4096 -t rsa -f /root/.ssh/id_rsa -P ''",
-    ]
-    utils.run_commands(cmds)
diff --git a/1.Base/2.ubicast_shell_access/0_setup.py b/1.Base/2.ubicast_shell_access/0_setup.py
deleted file mode 100644
index 09bf4bfa..00000000
--- a/1.Base/2.ubicast_shell_access/0_setup.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import os
-import subprocess
-
-import utils
-
-
-def add_allowed_keys(path, keys):
-    content = ''
-    if os.path.exists(path):
-        with open(path, 'r') as fo:
-            content = fo.read()
-    elif not os.path.exists(os.path.dirname(path)):
-        os.makedirs(os.path.dirname(path))
-    new_content = content.strip() + '\n'
-    for key in keys:
-        key = key.strip()
-        if not key:
-            continue
-        if key not in new_content:
-            new_content += key + '\n'
-            utils.log('The key "%s" will be added in "%s".' % (key.split(' ')[-1], path))
-        else:
-            utils.log('The key "%s" is already in "%s".' % (key.split(' ')[-1], path))
-    if new_content != content:
-        with open(path, 'w') as fo:
-            fo.write(new_content)
-        utils.log('The file "%s" has been updated.' % path)
-    else:
-        utils.log('The file "%s" is already up to date.' % path)
-
-
-def setup(interactive=True):
-    dir_path = utils.get_dir(__file__)
-    # Set allowed SSH keys
-    allowed_keys = utils.get_conf('SSH_ALLOWED_KEYS', '').strip().split('\n')
-    with open('%s/ubicast_support.pub' % dir_path, 'r') as fo:
-        support_key = fo.read()
-    allowed_keys.append(support_key.strip())
-    add_allowed_keys('/root/.ssh/authorized_keys', allowed_keys)
-    add_allowed_keys('/home/ubicast/.ssh/authorized_keys', allowed_keys)
-    # Create / update ubicast account
-    cmds = list()
-    cmds.append('echo "Checking ubicast account"')
-    code, out = utils.exec_cmd(['id', 'ubicast'])
-    if code != 0:
-        cmds.append('useradd -m -s /bin/bash ubicast')
-        out = ''
-    if 'sudo' not in out:
-        cmds.append('usermod -aG sudo ubicast')
-    cmds.append('cp "/root/.bashrc" "/home/ubicast/.bashrc"')
-    cmds.append('chown ubicast:ubicast /home/ubicast')
-    cmds.append('chown ubicast:ubicast /home/ubicast/.bashrc')
-    # Set SSH files permissions
-    cmds.append('echo "Set SSH files permissions"')
-    cmds.append('chmod 700 /root/.ssh')
-    cmds.append('chmod 700 /home/ubicast/.ssh')
-    cmds.append('chown -R ubicast:ubicast /home/ubicast/.ssh')
-
-    utils.run_commands(cmds)
-    # Set ubicast password if any
-    pwd = utils.get_conf('SHELL_UBICAST_PWD')
-    if pwd:
-        p = subprocess.Popen(['passwd', '-q', 'ubicast'], stdin=subprocess.PIPE)
-        p.communicate(input=b'%(pwd)s\n%(pwd)s' % {b'pwd': pwd.encode('utf-8')})
-        if p.returncode != 0:
-            raise Exception('Failed to set ubicast account password.')
-        utils.log('\033[1;33m The ubicast account password has been set. \033[0m')
diff --git a/1.Base/3.admin_shell_account/0_setup.py b/1.Base/3.admin_shell_account/0_setup.py
deleted file mode 100644
index 5c71b3d2..00000000
--- a/1.Base/3.admin_shell_account/0_setup.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import subprocess
-
-import utils
-
-
-def setup(interactive=True):
-    # Create / update admin account
-    utils.log('Checking admin account')
-    cmds = list()
-    code, out = utils.exec_cmd(['id', 'admin'])
-    if code != 0:
-        cmds.append('groupadd -f admin')
-        cmds.append('useradd -m -g admin -s /bin/bash admin')
-        out = ''
-    if 'sudo' not in out:
-        cmds.append('usermod -aG sudo admin')
-    cmds.append('mkdir -p /home/admin/.ssh')
-    cmds.append('chmod 700 /home/admin/.ssh')
-    cmds.append('chown -R admin:admin /home/admin/.ssh')
-    utils.run_commands(cmds)
-    # Set password if any
-    pwd = utils.get_conf('SHELL_ADMIN_PWD')
-    if pwd:
-        p = subprocess.Popen(['passwd', '-q', 'admin'], stdin=subprocess.PIPE)
-        p.communicate(input=b'%(pwd)s\n%(pwd)s' % {b'pwd': pwd.encode('utf-8')})
-        if p.returncode != 0:
-            raise Exception('Failed to set admin account password.')
-        utils.log('\033[1;33m The admin account password has been set. \033[0m')
diff --git a/1.Base/4.cockpit/0_setup.py b/1.Base/4.cockpit/0_setup.py
deleted file mode 100644
index f8a23921..00000000
--- a/1.Base/4.cockpit/0_setup.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python3
-import utils
-import os
-
-
-def setup(interactive=False):
-    if os.system('lsb_release -a | grep "18.04"') != 0:
-        utils.log('Skipping cockpit setup because OS is not Ubuntu 18.04.')
-        return
-    cmds = [
-        'apt install -y software-properties-common',
-        'add-apt-repository universe',
-        'apt update',
-        'apt install -y cockpit packagekit ifupdown',
-        'apt purge --auto-remove -y netplan.io',
-        'mkdir -p /etc/NetworkManager/conf.d',
-        'touch /etc/NetworkManager/conf.d/10-globally-managed-devices.conf',
-    ]
-    if os.path.exists('/run/systemd/resolve/resolv.conf'):
-        cmds.append('rm /etc/resolv.conf && cp /run/systemd/resolve/resolv.conf /etc/resolv.conf')
-
-    cmds.extend([
-        'systemctl disable --now systemd-resolved.service',
-        'printf "[main]\ndns=default" > /etc/NetworkManager/conf.d/disable-systemd-resolved.conf',
-    ])
-    utils.run_commands(cmds)
diff --git a/10.MediaImport/2.Install_FTP_watch_folder/0_setup.py b/10.MediaImport/2.Install_FTP_watch_folder/0_setup.py
deleted file mode 100644
index d6a04c8a..00000000
--- a/10.MediaImport/2.Install_FTP_watch_folder/0_setup.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import os
-import socket
-import utils
-
-
-def setup(interactive=True):
-    dir_path = utils.get_dir(__file__)
-    pwd_path = '/etc/pure-ftpd/pureftpd.passwd'
-    # Get passwords
-    ftpincoming_users = utils.get_conf('FTP_INCOMING_USERS', '').strip(',').split(',')
-    if not ftpincoming_users:
-        raise Exception('No users specified in the configuration FTP_INCOMING_USERS variable.')
-    for user in ftpincoming_users:
-        if ':' not in user or user.count(':') > 1:
-            raise Exception('Invalid user/pass definition, separator not present or too many detected')
-    # Run commands
-    cmds = [
-        'apt-get install --yes pure-ftpd python3-unidecode openssl ubicast-mediaimport',
-        dict(line='adduser --disabled-login --gecos "" --shell /bin/false ftp', cond='id ftp', cond_neg=True, cond_skip=True),
-        'mkdir -p /usr/local/bin',
-        'cp "%s/mediaimport_create_ftp_user.sh" /usr/local/bin' % (dir_path),
-        'cp "%s/mediaimport_remove_ftp_user.sh" /usr/local/bin' % (dir_path),
-        'cp "%s/cron.d/"* /etc/cron.d' % dir_path,
-        'mkdir -p /home/ftp/storage',
-        'mkdir -p /home/ftp/storage/incoming',
-        'mkdir -p /home/ftp/storage/watchfolder',
-        # Config
-        'echo "no" > /etc/pure-ftpd/conf/AllowDotFiles',
-        'echo "yes" > /etc/pure-ftpd/conf/CallUploadScript',
-        'echo "yes" > /etc/pure-ftpd/conf/ChrootEveryone',
-        'echo "yes" > /etc/pure-ftpd/conf/DontResolve',
-        'echo "no" > /etc/pure-ftpd/conf/PAMAuthentication',
-        'echo 1 > /etc/pure-ftpd/conf/TLS',
-        'openssl req -x509 -nodes -days 7300 -newkey rsa:2048 -keyout /etc/ssl/private/pure-ftpd.pem -out /etc/ssl/private/pure-ftpd.pem -subj "/C=FR/ST=NA/L=Paris/O=Ubicast/CN=%s"' % socket.gethostname(),
-        # Post upload script
-        'cp "%s/on_ftp_upload.py" /home/ftp/on_ftp_upload.py' % dir_path,
-        'chown ftp:ftp /home/ftp/on_ftp_upload.py',
-        'chmod +x /home/ftp/on_ftp_upload.py',
-        'pure-uploadscript -p /home/ftp/.on_upload.pid -B -g $(id -g ftp) -r /home/ftp/on_ftp_upload.py -u $(id -u ftp)',
-        'cp "%s/pure-ftpd-common" /etc/default/pure-ftpd-common.tmp' % dir_path,
-        'sed "s/UPLOADUID=UID/UPLOADUID=$(id -u ftp)/g" /etc/default/pure-ftpd-common.tmp > /etc/default/pure-ftpd-common.tmp2',
-        'mv -f /etc/default/pure-ftpd-common.tmp2 /etc/default/pure-ftpd-common.tmp',
-        'sed "s/UPLOADGID=GID/UPLOADGID=$(id -g ftp)/g" /etc/default/pure-ftpd-common.tmp > /etc/default/pure-ftpd-common.tmp2',
-        'mv -f /etc/default/pure-ftpd-common.tmp2 /etc/default/pure-ftpd-common',
-        'rm -f /etc/default/pure-ftpd-common.tmp',
-        'cp "%s/remove_empty_dirs.py" /etc/cron.hourly/remove_empty_dirs.py' % dir_path,
-        # Create FTP accounts
-        '([ -f "%s" ] || [ -f "%s" ] && cp "%s" "%s") || true' % (pwd_path + '.back', pwd_path, pwd_path, pwd_path + '.back'),
-        '([ -f "%s" ] && mv -f "%s" pureftpd.passwd.tmp) || true' % (pwd_path, pwd_path),
-    ]
-    for ftpuser in ftpincoming_users:
-        login, password = ftpuser.split(':')
-        # FTP_INCOMING_USERS='user/subfolder:pass
-        if '/' in login:
-            folder = login
-            login = login.split('/')[0]
-        else:
-            folder = login
-        cmds.extend([
-            'mkdir -p /home/ftp/storage/incoming/%s' % folder,
-            'mkdir -p /home/ftp/storage/watchfolder/%s' % folder,
-            '"%s/mediaimport_create_ftp_user.sh" %s "%s"' % (dir_path, login, password),
-        ])
-    cmds.extend([
-        'chmod -R 775 /home/ftp/storage/incoming',
-        'chmod -R 775 /home/ftp/storage/watchfolder',
-        'chown -R ftp:ftp /home/ftp/storage',
-        'rm -f pureftpd.passwd.tmp',
-        'pure-pw mkdb',
-        'ln -sfn /etc/pure-ftpd/conf/PureDB /etc/pure-ftpd/auth/50puredb',
-        '/etc/init.d/pure-ftpd force-reload',
-    ])
-
-    try:
-        utils.run_commands(cmds)
-    except Exception:
-        raise
-    finally:
-        # Restore password conf if required
-        if os.path.exists('pureftpd.passwd.tmp'):
-            os.rename('pureftpd.passwd.tmp', pwd_path)
diff --git a/10.MediaImport/2.Install_FTP_watch_folder/mediaimport_create_ftp_user.sh b/10.MediaImport/2.Install_FTP_watch_folder/mediaimport_create_ftp_user.sh
deleted file mode 100755
index f62f3628..00000000
--- a/10.MediaImport/2.Install_FTP_watch_folder/mediaimport_create_ftp_user.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-if [ $# -le 1 ]
-then
-    echo "Usage: $0 username password"
-    exit 1
-fi
-
-mkdir -p /home/ftp/storage/incoming/$1
-mkdir -p /home/ftp/storage/watchfolder/$1
-echo -e "$2\n$2" | pure-pw useradd $1 -u ftp -d /home/ftp/storage/incoming/$1
-chmod -R 775 /home/ftp/storage/incoming
-chmod -R 775 /home/ftp/storage/watchfolder
-chown -R ftp:ftp /home/ftp/storage
-pure-pw mkdb
-/etc/init.d/pure-ftpd force-reload
diff --git a/10.MediaImport/2.Install_FTP_watch_folder/mediaimport_remove_ftp_user.sh b/10.MediaImport/2.Install_FTP_watch_folder/mediaimport_remove_ftp_user.sh
deleted file mode 100755
index 0982bd00..00000000
--- a/10.MediaImport/2.Install_FTP_watch_folder/mediaimport_remove_ftp_user.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-if [ $# -le 0 ]
-then
-    echo "Usage: $0 username"
-    exit 1
-fi
-
-read -p "This will remove all files in /home/ftp/storage/incoming/$1, are you sure? " -n 1 -r
-echo    # (optional) move to a new line
-if [[ $REPLY =~ ^[Yy]$ ]]
-then
-    rm -rf /home/ftp/storage/incoming/$1
-    rm -rf /home/ftp/storage/watchfolder/$1
-    pure-pw userdel $1
-    pure-pw mkdb
-    /etc/init.d/pure-ftpd force-reload
-fi
diff --git a/10.MediaImport/2.Install_FTP_watch_folder/on_ftp_upload.py b/10.MediaImport/2.Install_FTP_watch_folder/on_ftp_upload.py
deleted file mode 100755
index 57ab8a7e..00000000
--- a/10.MediaImport/2.Install_FTP_watch_folder/on_ftp_upload.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import logging
-import os
-import shutil
-import sys
-import unicodedata
-# command line
-# pure-uploadscript -p /home/ftp/.on_ftp_upload.pid -B -g 1001 -r /home/ftp/on_ftp_upload.py -u 1001
-
-BASE_DIR = '/home/ftp/storage/'
-INCOMING_DIR = BASE_DIR + 'incoming/'
-DEST_DIR = BASE_DIR + 'watchfolder/'
-ALLOWED_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-.'
-
-LOG_FILE = '/home/ftp/on_ftp_upload.log'
-LOG_LEVEL = 'INFO'
-
-
-def clean_name(name):
-    # strip accents (NFD decomposition) and replace characters not in ALLOWED_CHARS with underscores
-    return ''.join((c if c in ALLOWED_CHARS else '_') for c in unicodedata.normalize('NFD', name) if unicodedata.category(c) != 'Mn')
-
-
-if __name__ == '__main__':
-    # setup logging
-    logging.basicConfig(
-        filename=LOG_FILE,
-        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
-        level=getattr(logging, LOG_LEVEL),
-    )
-
-    try:
-        logging.debug('Starting script')
-        # see man pure-uploadscript
-        if len(sys.argv) < 2:
-            logging.info('Not enough arguments')
-            sys.exit(1)
-
-        src_path = sys.argv[1]
-        if not src_path.startswith(BASE_DIR):
-            logging.info('File %s will not be moved because it is not in base dir', src_path)
-            sys.exit(0)
-
-        # remove special characters
-        name = os.path.basename(src_path)
-        new_name = clean_name(name)
-        if name != new_name:
-            new_path = os.path.join(os.path.dirname(src_path), new_name)
-            os.rename(src_path, new_path)
-            logging.info('File %s has been renamed to %s', src_path, new_path)
-            src_path = new_path
-
-        # move file
-        if not src_path.startswith(INCOMING_DIR):
-            logging.info('File %s will not be moved because it is not in the incoming dir', src_path)
-            sys.exit(0)
-
-        dest_path = src_path.replace(INCOMING_DIR, DEST_DIR)
-
-        if not os.path.exists(os.path.dirname(dest_path)):
-            os.system('mkdir -p -m 775 "%s"' % os.path.dirname(dest_path))
-
-        logging.info('Moving %s to %s', src_path, dest_path)
-        shutil.move(src_path, dest_path)
-        logging.info('Done')
-    except Exception as e:
-        logging.error('Failed to move file %s. Error: %s', src_path, e)
-        sys.exit(1)
-    else:
-        sys.exit(0)
diff --git a/10.MediaImport/2.Install_FTP_watch_folder/pure-ftpd-common b/10.MediaImport/2.Install_FTP_watch_folder/pure-ftpd-common
deleted file mode 100644
index c5d42089..00000000
--- a/10.MediaImport/2.Install_FTP_watch_folder/pure-ftpd-common
+++ /dev/null
@@ -1,26 +0,0 @@
-# Configuration for pure-ftpd
-# (this file is sourced by /bin/sh, edit accordingly)
-
-# STANDALONE_OR_INETD
-# valid values are "standalone" and "inetd".
-# Any change here overrides the setting in debconf.
-STANDALONE_OR_INETD=standalone
-
-# VIRTUALCHROOT:
-# whether to use binary with virtualchroot support
-# valid values are "true" or "false" 
-# Any change here overrides the setting in debconf.
-VIRTUALCHROOT=false
-
-# UPLOADSCRIPT: if this is set and the daemon is run in standalone mode,
-# pure-uploadscript will also be run to spawn the program given below
-# for handling uploads. see /usr/share/doc/pure-ftpd/README.gz or
-# pure-uploadscript(8)
-
-# example: UPLOADSCRIPT=/usr/local/sbin/uploadhandler.pl
-UPLOADSCRIPT=/home/ftp/on_ftp_upload.py
-
-# if set, pure-uploadscript will spawn $UPLOADSCRIPT running as the
-# given uid and gid
-UPLOADUID=UID
-UPLOADGID=GID
diff --git a/10.MediaImport/2.Install_FTP_watch_folder/readme.txt b/10.MediaImport/2.Install_FTP_watch_folder/readme.txt
deleted file mode 100644
index 7330d190..00000000
--- a/10.MediaImport/2.Install_FTP_watch_folder/readme.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Firewall
-========
-
-# echo "40110 40210" > /etc/pure-ftpd/conf/PassivePortRange
-
-ferm.conf
-        # FTP upload for MediaFolder
-        saddr $NET_FTP proto tcp dport (ftp 40110:40210) ACCEPT;
-        mod helper helper ftp ACCEPT;
-
-Debugging
-=========
-
-To enable debugging within pure-ftpd, run:
-
-echo yes > /etc/pure-ftpd/conf/VerboseLog
-
-Logs are then visible with:
-
-journalctl -f
-
-Create new users
-================
-
-root@worker1:/home/ftp/storage/incoming# mediaimport_create_ftp_user.sh ubicast pass
-
-Reset user password
-===================
-
-pure-pw passwd username
-pure-pw mkdb
diff --git a/10.MediaImport/2.Install_FTP_watch_folder/remove_empty_dirs.py b/10.MediaImport/2.Install_FTP_watch_folder/remove_empty_dirs.py
deleted file mode 100755
index 17c50913..00000000
--- a/10.MediaImport/2.Install_FTP_watch_folder/remove_empty_dirs.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-'''
-Script to remove empty dirs from FTP incoming dir.
-'''
-import datetime
-import os
-import shutil
-import sys
-import traceback
-
-
-INCOMING_DIR = '/home/ftp/storage/incoming/'
-DAYS_OLD = 1
-
-
-def _can_be_removed(path):
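-    # a directory is removable if it contains no files (only removable subdirectories) and its mtime is older than DAYS_OLD days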
-    if not os.path.isdir(path):
-        return False
-    for name in os.listdir(path):
-        subpath = os.path.join(path, name)
-        if not _can_be_removed(subpath):
-            return False
-    mtime = os.path.getmtime(path)
-    mtime = datetime.datetime.fromtimestamp(mtime)
-    if mtime < datetime.datetime.now() - datetime.timedelta(days=DAYS_OLD):
-        return True
-    return False
-
-
-if __name__ == '__main__':
-    script_name = os.path.basename(__file__)
-    try:
-        if not os.path.isdir(INCOMING_DIR):
-            print('%s: The FTP incoming dir does not exist (%s).' % (script_name, INCOMING_DIR))
-            sys.exit(1)
-
-        for name in os.listdir(INCOMING_DIR):
-            path = os.path.join(INCOMING_DIR, name)
-            if _can_be_removed(path):
-                shutil.rmtree(path)
-                print('%s: Dir "%s" removed.' % (script_name, path))
-    except Exception:
-        print('%s: Script crashed:\n%s' % (script_name, traceback.format_exc()))
-        sys.exit(1)
diff --git a/11.VM/1.Export_VM/0_setup.sh b/11.VM/1.Export_VM/0_setup.sh
deleted file mode 100755
index 4f92fdb1..00000000
--- a/11.VM/1.Export_VM/0_setup.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-source /root/envsetup/global-conf.sh
-
-KEY=~/.ssh/ubicast_support
-VM_HYPER=$(grep VM_HYPER ${CONF} | awk -F "=" '{print$2}')
-VM_STORE=$(grep VM_STORE ${CONF} | head -1 | awk -F "=" '{print$2}')
-VM_STORE_LOCAL=$(grep VM_STORE_LOCAL ${CONF} | awk -F "=" '{print$2}')
-
-if ( test -z "$VM_NAME" ); then
-    VM_NAME=$(cat ${CONF} | egrep ^ETC_HOSTNAME | head -1 | awk -F "=" '{print$2}')
-else
-    VM_NAME=$(grep VM ${CONF} | awk -F "=" '{print$2}')
-fi
-
-ssh -i ${KEY} -o User=root ${VM_HYPER} "VBoxManage export ${VM_NAME} -o ${VM_STORE}/${VM_NAME}.ovf --ovf10"
-
-# vbox conf file created
-
-# generating vmware conf file
-cp ${VM_STORE_LOCAL}/${VM_NAME}.ovf ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<vssd:VirtualSystemType>virtualbox-2.2</vssd:VirtualSystemType>@<vssd:VirtualSystemType>vmx-07</vssd:VirtualSystemType>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<rasd:Caption>sataController0</rasd:Caption>@<rasd:Caption>SCSIController</rasd:Caption>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<rasd:Description>SATA Controller</rasd:Description>@<rasd:Description>SCSIController</rasd:Description>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<rasd:ElementName>sataController0</rasd:ElementName>@<rasd:ElementName>SCSIController</rasd:ElementName>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<rasd:ResourceSubType>AHCI</rasd:ResourceSubType>@<rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<rasd:ResourceType>20</rasd:ResourceType>@<rasd:ResourceType>6</rasd:ResourceType>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-
-# find the line range of the paragraph to delete (10 lines starting just before the AddressOnParent match)
-LIG=$(grep -n '<rasd:AddressOnParent>3</rasd:AddressOnParent>' ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf | awk -F ":" '{print$1}')
-LIG0=$(( $LIG - 1 ))
-LIG1=$(( $LIG0 + 9 ))
-sed -i "${LIG0},${LIG1}d" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-
-# converting disk to qemu image
-qemu-img convert -c -O qcow2 ${VM_STORE_LOCAL}/${VM_NAME}-disk1.vmdk ${VM_STORE_LOCAL}/${VM_NAME}.qcow2
-
-echo -e "${CYAN}Files are available at ${VM_STORE_LOCAL}${NC}"
diff --git a/11.VM/2.Export_VM_local/0_setup.sh b/11.VM/2.Export_VM_local/0_setup.sh
deleted file mode 100755
index 2c4a320c..00000000
--- a/11.VM/2.Export_VM_local/0_setup.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-source /root/envsetup/global-conf.sh
-
-VM_STORE=/home/jallary/ubicast/TMP/ENVOI
-VM_STORE_LOCAL=/home/jallary/ubicast/TMP/ENVOI
-# run from the local workstation
-VBoxManage export ${VM_NAME} -o ${VM_STORE}/${VM_NAME}.ovf --ovf10
-
-# generating vmware conf file
-cp ${VM_STORE_LOCAL}/${VM_NAME}.ovf ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<vssd:VirtualSystemType>virtualbox-2.2</vssd:VirtualSystemType>@<vssd:VirtualSystemType>vmx-07</vssd:VirtualSystemType>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<rasd:Caption>sataController0</rasd:Caption>@<rasd:Caption>SCSIController</rasd:Caption>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<rasd:Description>SATA Controller</rasd:Description>@<rasd:Description>SCSIController</rasd:Description>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<rasd:ElementName>sataController0</rasd:ElementName>@<rasd:ElementName>SCSIController</rasd:ElementName>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<rasd:ResourceSubType>AHCI</rasd:ResourceSubType>@<rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-sed -i "s@<rasd:ResourceType>20</rasd:ResourceType>@<rasd:ResourceType>6</rasd:ResourceType>@" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-
-# find the line range of the paragraph to delete (10 lines starting just before the AddressOnParent match)
-LIG=$(grep -n '<rasd:AddressOnParent>3</rasd:AddressOnParent>' ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf | awk -F ":" '{print$1}')
-LIG0=$(( $LIG - 1 ))
-LIG1=$(( $LIG0 + 9 ))
-sed -i "${LIG0},${LIG1}d" ${VM_STORE_LOCAL}/${VM_NAME}_vmware.ovf
-
-# converting disk to qemu image
-qemu-img convert -c -O qcow2 ${VM_STORE_LOCAL}/${VM_NAME}-disk1.vmdk ${VM_STORE_LOCAL}/${VM_NAME}.qcow2
-
-echo -e "${CYAN}Files are available at ${VM_STORE_LOCAL}${NC}"
diff --git a/12.Netcapture/1.Install_Netcapture/0_setup.py b/12.Netcapture/1.Install_Netcapture/0_setup.py
deleted file mode 100644
index db315657..00000000
--- a/12.Netcapture/1.Install_Netcapture/0_setup.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python3
-
-import utils
-import json
-import random
-import string
-
-
-class JsonConfig:
-    def __init__(self, path):
-        self.path = path
-        with open(path, 'r') as f:
-            self.conf = json.load(f)
-
-    def write(self):
-        with open(self.path, 'w') as f:
-            json.dump(self.conf, f, indent=2, sort_keys=True)
-
-    def set(self, key, val):
-        self.conf[key] = val
-
-    def get(self):
-        return self.conf
-
-
-def get_random_id(length=12):
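-    # random alphanumeric string from SystemRandom, used below as the Miris API auth_user_password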
-    return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(length))
-
-
-def write_netcapture_conf():
-    conf = JsonConfig('/etc/miris/netcapture.json.example')
-    conf.set('docker_registry_login', utils.get_conf('NETCAPTURE_DOCKER_LOGIN', ''))
-    conf.set('docker_registry_password', utils.get_conf('NETCAPTURE_DOCKER_PWD', ''))
-    conf.set('campusmanager_url', 'https://%s' % utils.get_conf('CM_SERVER_NAME', ''))
-    conf.path = '/etc/miris/netcapture.json'
-    conf.write()
-    return conf.get()
-
-
-def write_miris_conf():
-    conf = JsonConfig('/etc/miris/conf/api.json')
-    conf.set('auth_user_password', get_random_id())
-    conf.write()
-
-
-def setup(interactive=True):
-    if not utils.supported_platform():
-        utils.log("unsupported os", error=True)
-        exit(1)
-
-    dist, _ = utils.dist()
-
-    # Run commands
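-    # add the official Docker repository, then install docker-ce and the netcapture package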
-    cmds = [
-        'apt-get install --yes apt-transport-https ca-certificates curl gnupg-agent lsb-release software-properties-common',
-        f'curl -fsSL https://download.docker.com/linux/{dist}/gpg | apt-key add -',
-        f'add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/{dist} $(lsb_release -cs) stable"',
-        'apt-get update && apt-get install --yes docker-ce',
-        'apt-get install --yes python3-miris-netcapture',
-    ]
-
-    try:
-        utils.run_commands(cmds)
-        conf = write_netcapture_conf()
-        write_miris_conf()
-        cmds_post = list()
-        for f in ['media', 'conf']:
-            cmds_post.extend([
-                'mkdir -p %s' % conf['netcapture_%s_folder' % f],
-                'chgrp -R video %s' % conf['netcapture_%s_folder' % f],
-                'chmod -R 774 %s' % conf['netcapture_%s_folder' % f],
-            ])
-        utils.run_commands(cmds_post)
-    except Exception:
-        raise
diff --git a/13.Demokit/1.Deploy_demokit/0_setup.py b/13.Demokit/1.Deploy_demokit/0_setup.py
deleted file mode 100644
index 23861a72..00000000
--- a/13.Demokit/1.Deploy_demokit/0_setup.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import utils
-import json
-import subprocess
-import os
-
-CONTENT = [
-    "https://nextcloud.ubicast.net/s/LEcyMWG9BnKsrHX/download?path=%2FTradeshowDemoKit&files=medical_education.zip",
-    "https://nextcloud.ubicast.net/s/LEcyMWG9BnKsrHX/download?path=%2FTradeshowDemoKit&files=rich-media-sneak-peek.zip"
-]
-
-
-def setup(interactive=True):
-    if os.path.exists('/etc/nginx/sites-enabled/mediaserver-msuser.conf'):
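-        # a MediaServer vhost is installed: publish the demo zip archives through the MediaServer API client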
-        cmds = list()
-        options = {
-            "ms_url": utils.get_conf('MS_SERVER_NAME'),
-            "ms_apikey": utils.get_conf('MS_API_KEY')
-        }
-        cmd_template = "./publish_zip_by_url.py -w https://{ms_url} -u '%s' -a {ms_apikey}".format(**options)
-        # https://192.168.43.72/api/v2/search/?search=medical
-        for c in CONTENT:
-            cmd = cmd_template % c
-            cmds.append(cmd)
-        utils.run_commands(cmds)
-
-    if os.path.exists('/etc/miris/netcapture.json'):
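-        # netcapture is installed: disable the Miris Manager SSL check, deploy the demo recorder configuration and add an instance if none is listed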
-        cmds = list()
-        with open('/etc/miris/netcapture.json', 'r') as f:
-            c = json.load(f)
-        c['mirismanager_check_ssl'] = False
-        with open('/etc/miris/netcapture.json', 'w') as f:
-            json.dump(c, f)
-
-        cmds.extend([
-            'rsync -r mirisconf/ /etc/miris/conf',
-            'chgrp -R video /etc/miris/conf',
-            'chmod -R 774 /etc/miris/conf',
-        ])
-        if subprocess.getstatusoutput("netcapturectl ls")[0] != 0:
-            # start netcapture without hw accel
-            cmds.append('netcapturectl add')
-        utils.run_commands(cmds)
-
-        # try to enable hw accel if available
-        subprocess.getstatusoutput("apt install -y vainfo")
-        if subprocess.getstatusoutput("vainfo")[0] == 0:
-            cmds = list()
-            # hw acceleration requires boot-time module options so a reboot will be needed
-            c['enable_hw_acceleration'] = True
-            if subprocess.getstatusoutput('dmesg | grep "GuC: Loaded"')[0] != 0:
-                cmds.append('echo "options i915 enable_guc_loading=1 enable_guc_submission=1" > /etc/modprobe.d/netcapture.conf')
-                cmds.append('update-initramfs -u')
-                print('A reboot is required')
-            with open('/etc/miris/netcapture.json', 'w') as f:
-                json.dump(c, f)
-            utils.run_commands(cmds)
diff --git a/13.Demokit/1.Deploy_demokit/mirisconf/recorderd/room-a.json b/13.Demokit/1.Deploy_demokit/mirisconf/recorderd/room-a.json
deleted file mode 100755
index ed75f3ee..00000000
--- a/13.Demokit/1.Deploy_demokit/mirisconf/recorderd/room-a.json
+++ /dev/null
@@ -1,82 +0,0 @@
-{
-    "audio_output_enabled": false,
-    "videomixer_width": 2560,
-    "videomixer_height": 1440,
-    "gst_debug_string": "3",
-    "framerate": 25,
-    "autoremove": true,
-    "gop_size_s": 1,
-    "benchmark_dur_s": 10,
-    "benchmark_mode": false,
-    "videosources": [
-        {
-            "enabled": true,
-            "name": "sonycam_1",
-            "template": "rtspvsource",
-            "rtsp_uri": "rtsp://camera.demo/media/video1",
-            "width": 1280,
-            "height": 720,
-            "custom_delay_ms": 100
-        },
-        {
-            "enabled": true,
-            "name": "sonyve_1",
-            "template": "rtspvsource",
-            "rtsp_uri": "rtsp://display.demo/media/video1",
-            "width": 1920,
-            "height": 1080,
-            "custom_delay_ms": 150,
-            "enable_change_detection": true
-        }
-    ],
-    "audiosources": [
-        {
-            "enabled": true,
-            "name": "sonycam_1",
-            "template": "rtspasource",
-            "channels": 1,
-            "rate": 44100
-        },
-        {
-            "enabled": true,
-            "name": "sonyve_1",
-            "template": "rtspasource",
-            "channels": 1,
-            "rate": 44100
-        }
-    ],
-    "streams": [
-        {
-            "width": 2560,
-            "height": 1440,
-            "framerate": 30,
-            "h264profile": "baseline",
-            "video_bitrate": 5000000,
-            "audio_bitrate": 128000
-        },
-        {
-            "width": 1920,
-            "height": 1080,
-            "framerate": 30,
-            "h264profile": "baseline",
-            "video_bitrate": 4000000,
-            "audio_bitrate": 128000
-        },
-        {
-            "width": 1280,
-            "height": 720,
-            "framerate": 30,
-            "h264profile": "baseline",
-            "video_bitrate": 2500000,
-            "audio_bitrate": 128000
-        },
-        {
-            "width": 640,
-            "height": 360,
-            "framerate": 30,
-            "h264profile": "baseline",
-            "video_bitrate": 1000000,
-            "audio_bitrate": 128000
-        }
-    ]
-}
diff --git a/13.Demokit/1.Deploy_demokit/publish_zip_by_url.py b/13.Demokit/1.Deploy_demokit/publish_zip_by_url.py
deleted file mode 100755
index 598a3b5c..00000000
--- a/13.Demokit/1.Deploy_demokit/publish_zip_by_url.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2017, Florent Thiery, Stéphane Diemer
-import argparse
-import logging
-import os
-import requests
-import imp
-
-logger = logging.getLogger('mediaserver_client')
-
-MiB = 1024 * 1024
-session = None
-
-MS_CLIENT_URL = 'https://raw.githubusercontent.com/UbiCastTeam/mediaserver-client/acb84733ad342be1f1d8df23f791a591a57b2e1e/mediaserver_api_client.py'
-MS_CLIENT_PATH = '/tmp/mediaserver_api_client.py'
-
-
-def get_client_module():
-    # download and load client script
-    if os.path.exists(MS_CLIENT_PATH):
-        logger.info('MediaServer client is already downloaded: "%s".', MS_CLIENT_PATH)
-    else:
-        req = requests.get(MS_CLIENT_URL, timeout=10)
-        if not req.ok:
-            raise Exception('Failed to download MS client. Status: %s, response: %s' % (req.status_code, req.text))
-        with open(MS_CLIENT_PATH, 'w') as fo:
-            fo.write(req.text)
-        logger.info('MediaServer client downloaded: "%s".', MS_CLIENT_PATH)
-    ms_client = imp.load_source('ms_client', MS_CLIENT_PATH)
-    return ms_client
-
-
-if __name__ == '__main__':
-    log_format = '%(asctime)s %(name)s %(levelname)s %(message)s'
-    logging.basicConfig(level=logging.DEBUG, format=log_format)
-    urllib3_logger = logging.getLogger('requests.packages.urllib3')
-    urllib3_logger.setLevel(logging.WARNING)
-
-    parser = argparse.ArgumentParser(description='Import media from a zip archive into a portal from the command line')
-    parser.add_argument('-w', '--webtv', required=True, help='webtv url')
-    parser.add_argument('-u', '--url', required=True, help='media zip url')
-    parser.add_argument('-a', '--apikey', required=True, help='apikey')
-    args = parser.parse_args()
-
-    CONFIG = {
-        'SERVER_URL': args.webtv,
-        'API_KEY': args.apikey,
-        'PROXIES': {'http': '', 'https': ''},
-        'UPLOAD_CHUNK_SIZE': 5 * MiB,
-        'VERIFY_SSL': False,
-        'CLIENT_ID': 'python-api-client',
-        'USE_SESSION': False
-    }
-
-    mscli = get_client_module()
-
-    msc = mscli.MediaServerClient()
-    msc.config = CONFIG
-    # ping
-    print(msc.api('/', method='get'))
-
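-    # skip the upload if a media matching the archive file name is already present on the portal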
-    basename = os.path.basename(args.url)
-    r = msc.api('/search', method='get', params={'search': basename})
-    if r.get('videos'):
-        print('Video %s seems to be already present on the portal, not reuploading' % basename)
-    else:
-        with open('/tmp/file.zip', 'wb') as f:
-            f.write(requests.get(args.url).content)
-        # add media with a zip
-        # print(msc.add_media('Test multichunk upload zip', file_path='/tmp/file.zip'))
-        print(msc.add_media(file_path='/tmp/file.zip'))
-
-    # add user
-    # print(msc.api('users/add/', method='post', data={'email': 'test@test.com'}))
-
-    # add users with csv file; example file (header should be included):
-    # Firstname;Lastname;Email;Company
-    # Albert;Einstein;albert.einstein@test.com;Humanity
-    # msc.import_users_csv('users.csv')
diff --git a/13.Demokit/2.Reset_demokit/0_setup.py b/13.Demokit/2.Reset_demokit/0_setup.py
deleted file mode 100644
index af48fa06..00000000
--- a/13.Demokit/2.Reset_demokit/0_setup.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env python3
-import utils
-import json
-import os
-
-
-def setup(interactive=True):
-    with open('/etc/miris/netcapture.json', 'r') as f:
-        c = json.load(f)
-
-    # we want to preserve the netcapture instance currently registered in miris manager
-    folders_remove_cmd = 'rm -rf %s/*' % c['netcapture_media_folder']
-    nc_conf_folder = c['netcapture_conf_folder']
-    for f in ['recorderd', 'accounts', 'targets']:
-        folders_remove_cmd += ' %s' % os.path.join(nc_conf_folder, f)
-
-    cmds = [
-        'msinstaller.py msuser delete',
-        'msinstaller.py msuser add',
-        folders_remove_cmd,
-        'apt-get install -y python3-miris-netcapture --reinstall -o Dpkg::Options::="--force-confask,confnew,confmiss"',
-        'rsync -r ../1.Deploy_demokit/mirisconf/ /etc/miris/conf',
-        'chgrp -R video /etc/miris/conf',
-        'chmod -R 774 /etc/miris/conf',
-        'netcapturectl restart all',
-    ]
-    utils.run_commands(cmds)
diff --git a/13.Demokit/3.Generate_SSL_certificate/0_setup.py b/13.Demokit/3.Generate_SSL_certificate/0_setup.py
deleted file mode 100644
index 9d1be7f8..00000000
--- a/13.Demokit/3.Generate_SSL_certificate/0_setup.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import utils
-
-
-def setup(interactive=True):
-    # deploy nginx SSL certificates
-    domains = []
-    conf_name_items = ["MS_SERVER_NAME", "MONITOR_SERVER_NAME", "CM_SERVER_NAME", "CACHE_SERVER_NAME"]
-    for name in conf_name_items:
-        domain = utils.get_conf(name)
-        if domain:
-            domains.append(domain)
-    utils.mkcert(domains, ecc=False)
-    with open("/etc/nginx/conf.d/ssl_certificate.conf", "w") as ssl_conf_fh:
-        ssl_conf_fh.writelines([
-            "ssl_certificate /etc/ssl/envsetup/cert.pem;\n",
-            "ssl_certificate_key /etc/ssl/envsetup/key.pem;\n"
-        ])
-    cmds = ["systemctl restart nginx"]
-    utils.run_commands(cmds)
diff --git a/14.Dell/1.Dell_openmanage/0_setup.sh b/14.Dell/1.Dell_openmanage/0_setup.sh
deleted file mode 100755
index aae8ebd3..00000000
--- a/14.Dell/1.Dell_openmanage/0_setup.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-source ../../global-conf.sh
-
-# DELL server - install dell openmanage
-# http://linux.dell.com/repo/community/openmanage/
-if ( ! dpkg -l | grep dmidecode ); then
-	apt-get install -y dmidecode
-fi
-
-if ( dmidecode | grep Dell ); then
-	if ( grep -qa container=lxc /proc/1/environ ); then
-		echo "The system is running in a LXC container, no Dell package will be installed."
-		exit 0
-	fi
-
-	echo 'deb http://linux.dell.com/repo/community/openmanage/930/bionic bionic main' > /etc/apt/sources.list.d/linux.dell.com.sources.list
-	gpg --keyserver pool.sks-keyservers.net --recv-key 1285491434D8786F
-	gpg -a --export 1285491434D8786F | sudo apt-key add -
-
-	apt-get update
-	apt-get install -y srvadmin-all
-
-	# dsm_om_connsvc is the service to start a web interface on https://<ip_address>:1311/
-	# by default, the interface is only available to the root user
-	# sed -i "s@^root@root,admin@" /opt/dell/srvadmin/etc/omarolemap
-	systemctl enable dsm_om_connsvc
-	systemctl start dsm_om_connsvc
-fi
diff --git a/2.Common_services/1.Postfix/0_setup.py b/2.Common_services/1.Postfix/0_setup.py
deleted file mode 100644
index 9807ca0f..00000000
--- a/2.Common_services/1.Postfix/0_setup.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import utils
-
-
-def setup(interactive=True):
-    # Get hostname
-    utils.log('Getting system hostname.')
-    code, hostname = utils.exec_cmd('hostname')
-    if code == 0:
-        utils.log('Hostname is %s.' % hostname)
-    else:
-        raise Exception('Failed to get hostname.')
-    # Install and configure postfix
-    dir_path = utils.get_dir(__file__)
-    server = utils.get_conf('EMAIL_SMTP_SERVER', '')
-    cmds = [
-        'DEBIAN_FRONTEND=noninteractive apt-get install -y postfix',
-        dict(line='write', template='%s/main.cf' % dir_path, target='/etc/postfix/main.cf', params=(
-            ('{{ hostname }}', hostname),
-            ('{{ smtp }}', server),
-        )),
-    ]
-    # Configure mail aliases
-    if not server:
-        # without an SMTP relay, emails are not always delivered to Google mailing lists unless the mailname is ubicast.eu and DNS SPF records are set
-        mailname = 'ubicast.eu'
-    else:
-        mailname = utils.get_conf('MS_SERVER_NAME')
-        if not mailname or mailname == 'mediaserver':
-            mailname = hostname
-    cmds.extend([
-        'echo "%s" > /etc/mailname' % mailname,
-        'rgrep "root:" /etc/aliases || echo "root: sysadmin@ubicast.eu" >> /etc/aliases',
-        'newaliases',
-    ])
-    # Configure mail sender
-    sender = utils.get_conf('EMAIL_SENDER', '').strip(' \t@')
-    if sender and sender.count('@') != 1:
-        utils.log('Invalid sender address: "%s" (the sender address must contain exactly one "@").' % sender)
-        sender = None
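-    # when EMAIL_SENDER is empty or invalid, fall back to <hostname>@<first configured server name> (MediaServer, Miris Manager, then Monitor)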
-    if not sender and hostname:
-        if utils.get_conf('MS_SERVER_NAME', '') not in ('', 'mediaserver'):
-            sender = '%s@%s' % (hostname, utils.get_conf('MS_SERVER_NAME'))
-        elif utils.get_conf('CM_SERVER_NAME', '') not in ('', 'mirismanager'):
-            sender = '%s@%s' % (hostname, utils.get_conf('CM_SERVER_NAME'))
-        elif utils.get_conf('MONITOR_SERVER_NAME', '') not in ('', 'monitor'):
-            sender = '%s@%s' % (hostname, utils.get_conf('MONITOR_SERVER_NAME'))
-    if not sender:
-        utils.warning('No sender address found.')
-    else:
-        utils.log('Sender address is "%s".' % sender)
-        cmds.extend([
-            'rm -f /etc/postfix/generic',
-            'echo "root@localhost %s" >> /etc/postfix/generic' % sender,
-            'echo "root@%s %s" >> /etc/postfix/generic' % (hostname, sender),
-            'echo "@%s %s" >> /etc/postfix/generic' % (hostname, sender),
-            'postmap hash:/etc/postfix/generic',
-            'sed -i "s/#smtp_generic_maps/smtp_generic_maps/" /etc/postfix/main.cf',
-        ])
-    cmds.append('service postfix restart')
-    utils.run_commands(cmds)
-    # Setup authentication if any
-    user = utils.get_conf('EMAIL_SMTP_USER')
-    pwd = utils.get_conf('EMAIL_SMTP_PWD')
-    if user and pwd:
-        utils.log('Enabling authentication for SMTP relay.')
-        with open('/etc/postfix/sasl-passwords', 'w') as fo:
-            fo.write('%s %s:%s\n' % (server, user, pwd))
-        auth_conf = '''
-# SMTP relay authentication
-smtp_sasl_auth_enable = yes
-smtp_sasl_password_maps = hash:/etc/postfix/sasl-passwords
-smtp_sasl_security_options = noanonymous
-'''
-        with open('/etc/postfix/main.cf', 'a') as fo:
-            fo.write(auth_conf)
-        cmds = [
-            'postmap hash:/etc/postfix/sasl-passwords',
-            'service postfix restart',
-        ]
-        utils.run_commands(cmds)
diff --git a/2.Common_services/2.NTP/0_setup.py b/2.Common_services/2.NTP/0_setup.py
deleted file mode 100644
index 087a1ad1..00000000
--- a/2.Common_services/2.NTP/0_setup.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import subprocess
-import utils
-
-
-def setup(interactive=True):
-    dir_path = utils.get_dir(__file__)
-    servers = ''
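-    # build one "pool" directive per NTP_SERVER entry; the distribution default pools are used when none is configured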
-    if utils.get_conf('NTP_SERVER'):
-        for server in utils.get_conf('NTP_SERVER').split(','):
-            if server.strip():
-                servers += 'pool %s\n' % server.strip()
-    if not servers:
-        if 'Ubuntu' in subprocess.getoutput('lsb_release -a'):
-            servers += 'pool 0.ubuntu.pool.ntp.org iburst\n'
-            servers += 'pool 1.ubuntu.pool.ntp.org iburst\n'
-            servers += 'pool 2.ubuntu.pool.ntp.org iburst\n'
-            servers += 'pool 3.ubuntu.pool.ntp.org iburst\n'
-            servers += 'pool ntp.ubuntu.com\n'
-        else:
-            servers += 'pool 0.debian.pool.ntp.org iburst\n'
-            servers += 'pool 1.debian.pool.ntp.org iburst\n'
-            servers += 'pool 2.debian.pool.ntp.org iburst\n'
-            servers += 'pool 3.debian.pool.ntp.org iburst\n'
-    cmds = [
-        # NTP
-        'timedatectl set-ntp false',
-        'DEBIAN_FRONTEND=noninteractive apt-get install -y ntp',
-        'echo "Replacing /etc/ntp.conf"',
-        dict(line='write', template='%s/ntp.conf' % dir_path, target='/etc/ntp.conf', params=(
-            ('{{ servers }}', servers),
-        )),
-        'systemctl restart ntp.service',
-    ]
-    utils.run_commands(cmds)
diff --git a/2.Common_services/2.NTP/ntp.conf b/2.Common_services/2.NTP/ntp.conf
deleted file mode 100644
index 09405534..00000000
--- a/2.Common_services/2.NTP/ntp.conf
+++ /dev/null
@@ -1,70 +0,0 @@
-# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
-
-driftfile /var/lib/ntp/ntp.drift
-
-# Leap seconds definition provided by tzdata
-leapfile /usr/share/zoneinfo/leap-seconds.list
-
-# Enable this if you want statistics to be logged.
-#statsdir /var/log/ntpstats/
-
-statistics loopstats peerstats clockstats
-filegen loopstats file loopstats type day enable
-filegen peerstats file peerstats type day enable
-filegen clockstats file clockstats type day enable
-
-# Specify one or more NTP servers.
-{{ servers }}
-
-# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
-# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
-# more information.
-#pool 0.ubuntu.pool.ntp.org iburst
-#pool 1.ubuntu.pool.ntp.org iburst
-#pool 2.ubuntu.pool.ntp.org iburst
-#pool 3.ubuntu.pool.ntp.org iburst
-
-# Use Ubuntu's ntp server as a fallback.
-#pool ntp.ubuntu.com
-
-# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
-# details.  The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
-# might also be helpful.
-#
-# Note that "restrict" applies to both servers and clients, so a configuration
-# that might be intended to block requests from certain clients could also end
-# up blocking replies from your own upstream servers.
-
-# By default, exchange time with everybody, but don't allow configuration.
-restrict -4 default kod notrap nomodify nopeer noquery limited
-restrict -6 default kod notrap nomodify nopeer noquery limited
-
-# Local users may interrogate the ntp server more closely.
-restrict 127.0.0.1
-restrict ::1
-
-# Needed for adding pool entries
-restrict source notrap nomodify noquery
-
-# Clients from this (example!) subnet have unlimited access, but only if
-# cryptographically authenticated.
-#restrict 192.168.123.0 mask 255.255.255.0 notrust
-
-
-# If you want to provide time to your local subnet, change the next line.
-# (Again, the address is an example only.)
-#broadcast 192.168.123.255
-
-# If you want to listen to time broadcasts on your local subnet, de-comment the
-# next lines.  Please do this only if you trust everybody on the network!
-#disable auth
-#broadcastclient
-
-# Changes required to use PPS synchronisation as explained in the documentation:
-#http://www.ntp.org/ntpfaq/NTP-s-config-adv.htm#AEN3918
-
-#server 127.127.8.1 mode 135 prefer    # Meinberg GPS167 with PPS
-#fudge 127.127.8.1 time1 0.0042        # relative to PPS for my hardware
-
-#server 127.127.22.1                   # ATOM(PPS)
-#fudge 127.127.22.1 flag3 1            # enable PPS API
diff --git a/2.Common_services/3.PostgreSQL/0_setup.py b/2.Common_services/3.PostgreSQL/0_setup.py
deleted file mode 100644
index 1139263b..00000000
--- a/2.Common_services/3.PostgreSQL/0_setup.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import utils
-
-
-def setup(interactive=True):
-    db_host = utils.get_conf('DB_HOST')
-    db_port = utils.get_conf('DB_PORT') or '5432'
-    if (db_host and not db_host.startswith('127') and db_host != 'localhost') or db_port != '5432':
-        utils.log('Skipping postgresql setup because the database host is set to "%s:%s".' % (db_host, db_port))
-        return
-    cmds = [
-        'DEBIAN_FRONTEND=noninteractive apt-get install -y postgresql',
-    ]
-    root_pwd = utils.get_conf('DB_PG_ROOT_PWD')
-    if root_pwd:
-        cmds.append('sudo su - postgres -c "psql -w -q -A -c \\"ALTER USER postgres WITH PASSWORD \'%s\';\\""' % root_pwd)
-    utils.run_commands(cmds)
diff --git a/2.Common_services/4.Wowza/0_setup.py b/2.Common_services/4.Wowza/0_setup.py
deleted file mode 100644
index a2e2a693..00000000
--- a/2.Common_services/4.Wowza/0_setup.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import os
-import re
-
-import utils
-
-
-def setup(interactive=True):
-    dir_path = utils.get_dir(__file__)
-    license = utils.get_conf('WOWZA_LICENSE')
-    if not license:
-        utils.log('No Wowza license set, skipping Wowza installation.')
-        return
-    wowza_setup_name = 'WowzaStreamingEngine-4.7.7-linux-x64-installer.deb'
-    utils.log('It may take a while to download the Wowza installer from the UbiCast server.')
-    cmds = [
-        'apt-get install -y openjdk-11-jre-headless',
-        # Get and install Wowza
-        '[ -f "/tmp/%(name)s" ] && (dpkg -I "/tmp/%(name)s" || rm "/tmp/%(name)s") || true' % {'name': wowza_setup_name},
-        '[ -f "/tmp/%(name)s" ] || wget -q "https://panel.ubicast.eu/media/storage/%(name)s" -O "/tmp/%(name)s"' % {'name': wowza_setup_name},
-        'dpkg -i "/tmp/%s"' % wowza_setup_name,
-        # Configure Wowza
-        'echo "%s" > /usr/local/WowzaStreamingEngine/conf/Server.license' % license,
-        'echo "ubicast %s admin" > /usr/local/WowzaStreamingEngine/conf/admin.password' % utils.get_conf('WOWZA_MANAGER_PWD'),
-        'chmod +x /usr/local/WowzaStreamingEngine/logs',
-        'cp -R /usr/local/WowzaStreamingEngine/examples/LiveVideoStreaming/conf/live /usr/local/WowzaStreamingEngine/conf/',
-        'mkdir -p /usr/local/WowzaStreamingEngine/applications/live',
-        dict(line='write', template='%s/live-application.xml' % dir_path, target='/usr/local/WowzaStreamingEngine/conf/live/Application.xml', backup=True, params=(
-            ('{{ live_pwd }}', utils.get_conf('WOWZA_LIVE_PWD')),
-        )),
-        'cp "%s/Tune.xml" /usr/local/WowzaStreamingEngine/conf/Tune.xml' % dir_path,
-        'sed -i "s@#### BEGIN INIT INFO@### BEGIN INIT INFO@" /etc/init.d/WowzaStreamingEngine',
-        'sed -i "s@#### BEGIN INIT INFO@### BEGIN INIT INFO@" /etc/init.d/WowzaStreamingEngineManager',
-        'sed -i "s@<IPAddress>*</IPAddress>@<IPAddress>127.0.0.1</IPAddress>@" /usr/local/WowzaStreamingEngine/conf/Server.xml',
-        'sed -i "s@<IpAddress>*</IpAddress>@<IpAddress>127.0.0.1</IpAddress>@" /usr/local/WowzaStreamingEngine/conf/Server.xml',
-        '''gawk -i inplace '/<IpAddress>/{c++; if (c==3) {sub("*","127.0.0.1"); c=0}}1' /usr/local/WowzaStreamingEngine/conf/VHost.xml''',
-        'sed -i "s@war --httpPort@war --httpListenAddress=127.0.0.1 --httpPort@" /usr/local/WowzaStreamingEngine/manager/bin/startmgr.sh',
-        '''gawk -i inplace '/<Enable>/{c++; if (c==3) {sub("true","false"); c=0}}1' /usr/local/WowzaStreamingEngine/conf/Server.xml''',
-        'systemctl enable WowzaStreamingEngine',
-        'systemctl enable WowzaStreamingEngineManager',
-        'systemctl restart WowzaStreamingEngine',
-        'systemctl restart WowzaStreamingEngineManager',
-    ]
-    ms_conf = '/etc/mediaserver/lives_conf.py'
-    if os.path.exists(ms_conf):
-        utils.log('The file "%s" already exists, it will not be changed.' % ms_conf)
-    else:
-        cmds.extend([
-            'mkdir -p /etc/mediaserver',
-            'echo "RTMP_PWD = \'%s\'" > %s' % (utils.get_conf('WOWZA_LIVE_PWD'), ms_conf),
-        ])
-    if utils.get_conf('WOWZA_SERVER_NAME'):
-        cmds.append('mkdir -p /var/www/streaming')
-    if os.path.exists('/home/ftp/storage/www'):
-        cmds.extend([
-            '[ -d "/usr/local/WowzaStreamingEngine/content-back" ] || mv /usr/local/WowzaStreamingEngine/content /usr/local/WowzaStreamingEngine/content-back',
-            'ln -sfn /home/ftp/storage/www /usr/local/WowzaStreamingEngine/content',
-        ])
-    utils.run_commands(cmds)
-    # Write cron script to clean logs
-    with open('/etc/cron.daily/wowza-logs-cleaning', 'w') as fo:
-        fo.write('#!/bin/sh\nfind /usr/local/WowzaStreamingEngine/logs/ -type f -mtime +7 -delete')
-    os.chmod('/etc/cron.daily/wowza-logs-cleaning', 0o755)
-    # Proxy for license key
-    path = '/usr/local/WowzaStreamingEngine/conf/Server.xml'
-    with open(path, 'r') as fo:
-        content = fo.read()
-    start_index = content.rfind('<Properties>')
-    if start_index < 0:
-        raise ValueError('Unexpected content in "%s". Properties section not found.' % path)
-    start_index += len('<Properties>')
-    properties = content[start_index:]
-    end_index = properties.find('</Properties>')
-    if end_index < 0:
-        raise ValueError('Unexpected content in "%s". Properties section not found.' % path)
-    properties = properties[:end_index]
-    end_index += start_index
-    http_proxy = utils.get_conf('PROXY_HTTP')
-    if http_proxy:
-        regexp = r'http(s){0,1}://(([\w_\-]*)(:[\w_\-]*){0,1}@){0,1}([\w_\-\.]*)(:[\d]*){0,1}[/]*'
-        m = re.match(regexp, http_proxy)
-        if not m:
-            raise ValueError('Invalid value for PROXY_HTTP (value does not match regexp: %s).' % regexp)
-        https, creds, user, pwd, host, port = m.groups()
-        if port:
-            port = port.strip(':')
-        else:
-            port = '443' if https else '80'
-        pwd = pwd.strip(':') if pwd else ''
-        user = user if user else ''
-        if not host:
-            raise ValueError('Invalid value for PROXY_HTTP (no host found using regexp: %s).' % regexp)
-        with open('%s/Proxy.xml' % dir_path, 'r') as fo:
-            proxy_tplt = fo.read()
-        new_properties = proxy_tplt % dict(user=user, pwd=pwd, host=host, port=port)
-    else:
-        new_properties = ''
-    new_properties += '\n\t\t'
-    if properties != new_properties:
-        new_content = content[:start_index] + new_properties + content[end_index:]
-        with open(path, 'w') as fo:
-            fo.write(new_content)
-        utils.log('The file "%s" has been updated.' % path)
-    utils.log('Edit /usr/local/WowzaStreamingEngine/conf/admin.password to change the web manager access password.')
-    utils.log('Edit /usr/local/WowzaStreamingEngine/conf/Server.license to change the license key.')
diff --git a/2.Common_services/4.Wowza/Proxy.xml b/2.Common_services/4.Wowza/Proxy.xml
deleted file mode 100644
index 690e6c99..00000000
--- a/2.Common_services/4.Wowza/Proxy.xml
+++ /dev/null
@@ -1,18 +0,0 @@
-
-			<Property>
-				<Name>licenseServerProxyAddress</Name>
-				<Value>%(host)s</Value>
-			</Property>
-			<Property>
-				<Name>licenseServerProxyPort</Name>
-				<Value>%(port)s</Value>
-				<Type>Integer</Type>
-			</Property>
-			<Property>
-				<Name>licenseServerProxyUsername</Name>
-				<Value>%(user)s</Value>
-			</Property>
-			<Property>
-				<Name>licenseServerProxyPassword</Name>
-				<Value>%(pwd)s</Value>
-			</Property>
\ No newline at end of file
diff --git a/2.Common_services/4.Wowza/Tune.xml b/2.Common_services/4.Wowza/Tune.xml
deleted file mode 100755
index efc6cf09..00000000
--- a/2.Common_services/4.Wowza/Tune.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<Root>
-	<Tune>
-		<!--
-			HeapSize
-
-			${com.wowza.wms.TuningHeapSizeProduction} - Assumes Wowza Streaming Engine is only application running on server
-			${com.wowza.wms.TuningHeapSizeDevelopment} - Assumes Wowza Streaming Engine is sharing resources with other applications
-
-			or specify heap size directly (ex: <HeapSize>8000M</HeapSize>)
-		-->
-		<HeapSize>2000M</HeapSize>
-
-		<!--
-			GarbageCollector
-
-			${com.wowza.wms.TuningGarbageCollectorConcurrentDefault} - Concurrent Collector
-			${com.wowza.wms.TuningGarbageCollectorG1Default} - G1 (Garbage First) Collector (recommended)
-
-			or specify custom GC settings directly (ex: <GarbageCollector>-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:NewSize=512m</GarbageCollector>)
-		-->
-		<GarbageCollector>${com.wowza.wms.TuningGarbageCollectorG1Default}</GarbageCollector>
-
-		<!--
-			VM Options - other VM startup options
-
-			${com.wowza.wms.AppHome} - Application home directory
-			${com.wowza.wms.StartupDateTime} - Date and time the server was started
-		-->
-		<VMOptions>
-			<VMOption>-server</VMOption>
-			<VMOption>-Djava.net.preferIPv4Stack=false</VMOption>
-			<!-- <VMOption>-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath="${com.wowza.wms.AppHome}/logs"</VMOption> -->
-			<!-- <VMOption>-Duser.language=en -Duser.country=US -Dfile.encoding=Cp1252</VMOption> -->
-			<!-- <VMOption>-verbose:gc -Xloggc:"${com.wowza.wms.AppHome}/logs/gc_${com.wowza.wms.StartupDateTime}.log" -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintHeapAtGC -XX:+PrintGCApplicationConcurrentTime -XX:+PrintGCApplicationStoppedTime</VMOption> -->
-		</VMOptions>
-	</Tune>
-</Root>
diff --git a/2.Common_services/5.Nginx/0_setup.py b/2.Common_services/5.Nginx/0_setup.py
deleted file mode 100644
index 802df276..00000000
--- a/2.Common_services/5.Nginx/0_setup.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import os
-import re
-
-import utils
-
-
-def setup(interactive=True):
-    cmds = [
-        'apt-get remove -y apache2',
-        'apt-get install -y nginx',
-        'rm -f /etc/nginx/sites-enabled/default',
-        'rm -f /etc/nginx/sites-enabled/default.conf',
-    ]
-    hosts = list()
-    need_uwsgi = False
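-    # detect installed UbiCast applications to pick the vhost names and decide whether uwsgi is required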
-    # MediaServer vhost (mediaserver-msuser)
-    if os.path.exists('/home/msuser/msinstance'):
-        need_uwsgi = True
-        hosts.append(utils.get_conf('MS_SERVER_NAME') or 'mediaserver')
-    # Monitor vhost
-    if os.path.exists('/home/msmonitor/msmonitor'):
-        need_uwsgi = True
-        hosts.append(utils.get_conf('MONITOR_SERVER_NAME') or 'msmonitor')
-    # SkyReach vhost
-    if os.path.exists('/home/skyreach/htdocs'):
-        need_uwsgi = True
-        hosts.append(utils.get_conf('CM_SERVER_NAME') or 'mirismanager')
-    # TODO: get domains by parsing files in sites-enabled
-    if need_uwsgi:
-        cmds.append('apt-get install -y uwsgi uwsgi-plugin-python3')
-    utils.run_commands(cmds)
-    # Update hosts file
-    if hosts:
-        utils.add_hosts_to_localhost(hosts)
-    # Move ssl.conf
-    ssl_conf = '/etc/nginx/conf.d/ssl.conf'
-    if os.path.exists(ssl_conf):
-        utils.run_commands([
-            'grep ssl_certificate /etc/nginx/conf.d/ssl.conf > /etc/nginx/conf.d/ssl_certificate.conf',
-            'mv /etc/nginx/conf.d/ssl.conf /etc/nginx/conf.d/ssl.conf.old',
-        ])
-    # Update certificate in ssl_certificate.conf
-    ssl_conf = '/etc/nginx/conf.d/ssl_certificate.conf'
-    if not os.path.exists(ssl_conf):
-        utils.log('The SSL configuration file "%s" does not exist, SSL certificate not updated.' % ssl_conf)
-    else:
-        default_cert = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
-        ssl_cert = utils.get_conf('SSL_CERTIFICATE') or default_cert
-        ssl_key = utils.get_conf('SSL_CERTIFICATE_KEY') or '/etc/ssl/private/ssl-cert-snakeoil.key'
-        if ssl_cert == default_cert:
-            utils.log('The configuration uses the default certificate, no modification will be made in "%s".' % ssl_conf)
-        else:
-            with open(ssl_conf, 'r') as fo:
-                content = fo.read()
-            new_content = content
-            new_content = re.sub(r'ssl_certificate\s+([\w/\-\_\.]+);', 'ssl_certificate %s;' % ssl_cert, new_content)
-            new_content = re.sub(r'ssl_certificate_key\s+([\w/\-\_\.]+);', 'ssl_certificate_key %s;' % ssl_key, new_content)
-            if new_content != content:
-                with open(ssl_conf, 'w') as fo:
-                    fo.write(new_content)
-                utils.log('SSL configuration file "%s" updated.' % ssl_conf)
-            else:
-                utils.log('SSL configuration file "%s" already up to date.' % ssl_conf)
-    utils.run_commands(['nginx -t', 'service nginx restart'])
diff --git a/2.Common_services/6.Munin/0_setup.sh b/2.Common_services/6.Munin/0_setup.sh
deleted file mode 100755
index e53de4c4..00000000
--- a/2.Common_services/6.Munin/0_setup.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-source /root/envsetup/global-conf.sh
-
-# This script should be run after Nginx, MySQL and Wowza setup (if they should be installed)
-
-# install munin and configure it through ubicast-config
-if ( dpkg -s ubicast-config >/dev/null 2>&1 ); then
-	DEBIAN_FRONTEND=noninteractive apt-get install -y --reinstall ubicast-config
-else
-	DEBIAN_FRONTEND=noninteractive apt-get install -y ubicast-config
-fi
-
-# configure alerts
-# detect where to insert
-#LIG=$(grep -n contact.nagios.command /etc/munin/munin.conf | awk -F ":" '{print$1}')
-#LIG=$(( $LIG + 1 ))
-#head -n ${LIG} /etc/munin/munin.conf > /etc/munin/munin.conf.tmp
-
-#cat << EOF >> /etc/munin/munin.conf.tmp
-#contacts alert
-#contact.alert.command mail -a "From:sysadmin <sysadmin@ubicast.eu>" -s "[munin] Alert on ${var:host}" sysadmin@ubicast.eu
-#contact.alert.always_send warning critical
-
-#[${MS_SERVER_NAME}]
-#    address 127.0.0.1
-#    use_node_name yes
-#    memory.free.warning 512000000:
-#EOF
-
-#mv /etc/munin/munin.conf.tmp /etc/munin/munin.conf
-
-
-# new skin templates
-#cd /etc/munin
-#git clone https://panel.ubicast.eu/git/mediaserver/munin-monitoring-theme.git
-#mv /etc/munin/static /etc/munin/static.orig
-#mv /etc/munin/templates /etc/munin/templates.orig
-#cp -pr munin-monitoring-theme/templates/munstrap/static /etc/munin/
-#cp -pr munin-monitoring-theme/templates/munstrap/templates /etc/munin/
diff --git a/2.Common_services/7.LetsEncrypt/0_setup.py b/2.Common_services/7.LetsEncrypt/0_setup.py
deleted file mode 100644
index 047d9fd2..00000000
--- a/2.Common_services/7.LetsEncrypt/0_setup.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import os
-import re
-import subprocess
-
-import utils
-
-
-def setup(interactive=True):
-    # Move ssl.conf
-    ssl_conf = '/etc/nginx/conf.d/ssl.conf'
-    if os.path.exists(ssl_conf):
-        utils.run_commands([
-            'grep ssl_certificate /etc/nginx/conf.d/ssl.conf > /etc/nginx/conf.d/ssl_certificate.conf',
-            'mv /etc/nginx/conf.d/ssl.conf /etc/nginx/conf.d/ssl.conf.old',
-        ])
-    # Check if a custom SSL certificate is used
-    ssl_conf = '/etc/nginx/conf.d/ssl_certificate.conf'
-    if not os.path.exists(ssl_conf):
-        utils.log('The SSL configuration file "%s" does not exist, letsencrypt will not be used.' % ssl_conf)
-        return
-    default_cert = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
-    ssl_cert = utils.get_conf('SSL_CERTIFICATE') or default_cert
-    if ssl_cert != default_cert:
-        utils.log('The configuration does not use the default certificate, letsencrypt will not be used.')
-        return
-    # Install certbot
-    lsb_release = subprocess.run(['lsb_release', '-a'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
-    if b'Ubuntu' in lsb_release:
-        # Add repo if OS is Ubuntu
-        cmds = [
-            'apt-get update',
-            'apt-get install -y software-properties-common',
-            'add-apt-repository ppa:certbot/certbot -y',
-        ]
-    else:
-        cmds = []
-    cmds.extend([
-        'apt-get update',
-        'apt-get install -y certbot',
-    ])
-    utils.run_commands(cmds)
-    # Get system domains
-    domains = list()
-    nginx_dir = '/etc/nginx/sites-enabled'
-    names = os.listdir(nginx_dir)
-    names.sort()
-    for name in names:
-        path = os.path.join(nginx_dir, name)
-        with open(path, 'r') as fo:
-            vhost = fo.read()
-        vhost = re.sub(r'\s+', ' ', vhost)
-        matching = re.findall(r'[^#][ ]*server_name ([0-9a-zA-Z\.\-\_\ ]+);', vhost)
-        if not matching:
-            print('The server_name was not found in: "%s".' % path)
-            continue
-        matching = ' '.join(matching)
-        for domain in matching.strip().split(' '):
-            domain = domain.strip()
-            if domain and domain != 'localhost' and '.' in domain and domain not in domains:
-                domains.append(domain)
-    # Get certificates
-    cmds = [
-        'mkdir -p /tmp/letsencrypt',
-        'chmod 755 /tmp/letsencrypt',
-        'certbot certonly --agree-tos --no-eff-email --rsa-key-size 4096 --webroot --webroot-path /tmp/letsencrypt --domains "%s" --email sysadmin@ubicast.eu' % (','.join(domains)),
-    ]
-    utils.run_commands(cmds)
-    # Update Nginx configuration in ssl_certificate.conf
-    ssl_cert = '/etc/letsencrypt/live/%s/fullchain.pem' % domains[0]
-    ssl_key = '/etc/letsencrypt/live/%s/privkey.pem' % domains[0]
-    if not os.path.exists(ssl_cert):
-        raise Exception('The certificate file "%s" does not exist. Was it correctly created by the certbot command? Has it been moved? Check "/etc/letsencrypt/live/" to see if it is there.' % ssl_cert)
-    if not os.path.exists(ssl_key):
-        raise Exception('The key file "%s" does not exist. Was it correctly created by the certbot command? Has it been moved? Check "/etc/letsencrypt/live/" to see if it is there.' % ssl_key)
-    with open(ssl_conf, 'r') as fo:
-        content = fo.read()
-    new_content = content
-    new_content = re.sub(r'ssl_certificate\s+([\w/\-\_\.]+);', 'ssl_certificate %s;' % ssl_cert, new_content)
-    new_content = re.sub(r'ssl_certificate_key\s+([\w/\-\_\.]+);', 'ssl_certificate_key %s;' % ssl_key, new_content)
-    if new_content != content:
-        with open(ssl_conf, 'w') as fo:
-            fo.write(new_content)
-        utils.log('SSL configuration file "%s" updated.' % ssl_conf)
-    else:
-        utils.log('SSL configuration file "%s" already up to date.' % ssl_conf)
-    utils.run_commands(['nginx -t', 'systemctl restart nginx'])
-    # add pre and post certbot hooks
-    dir_path = utils.get_dir(__file__)
-    cmds = [
-        'cp %s/hook_mkdir.sh /etc/letsencrypt/renewal-hooks/pre/mkdir.sh' % dir_path,
-        'cp %s/hook_reload.sh /etc/letsencrypt/renewal-hooks/post/reload.sh' % dir_path,
-    ]
-    utils.run_commands(cmds)
diff --git a/2.Common_services/7.LetsEncrypt/hook_mkdir.sh b/2.Common_services/7.LetsEncrypt/hook_mkdir.sh
deleted file mode 100755
index 26e8553f..00000000
--- a/2.Common_services/7.LetsEncrypt/hook_mkdir.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env bash
-
-CERTBOT_DOCROOT=/tmp/letsencrypt
-
-if [ ! -d "$CERTBOT_DOCROOT" ]; then
-	mkdir -p "$CERTBOT_DOCROOT"
-	chmod 755 "$CERTBOT_DOCROOT"
-fi
-
-exit 0
diff --git a/2.Common_services/7.LetsEncrypt/hook_reload.sh b/2.Common_services/7.LetsEncrypt/hook_reload.sh
deleted file mode 100755
index 6d7592c4..00000000
--- a/2.Common_services/7.LetsEncrypt/hook_reload.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env bash
-
-if nginx -t > /dev/null 2>&1; then
-    systemctl reload nginx
-else
-    exit 1
-fi
-
-exit 0
diff --git a/2.Common_services/8.Fail2ban/0_setup.py b/2.Common_services/8.Fail2ban/0_setup.py
deleted file mode 100644
index b7a9421b..00000000
--- a/2.Common_services/8.Fail2ban/0_setup.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-
-import utils
-
-
-def setup(interactive=True):
-    # install fail2ban
-    utils.log('Install fail2ban')
-    cmds = [
-        'apt-get update',
-        'apt-get install -y --no-install-recommends fail2ban',
-    ]
-    utils.run_commands(cmds)
-    # configure fail2ban
-    utils.log('Configure fail2ban')
-    os.makedirs('/etc/fail2ban/filter.d', exist_ok=True)
-    os.makedirs('/etc/fail2ban/jail.d', exist_ok=True)
-    os.makedirs('/etc/fail2ban/action.d', exist_ok=True)
-    dir_path = utils.get_dir(__file__)
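-    # action_mwl bans and sends a notification e-mail with whois and log excerpts; action_ only bans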
-    action = 'action_mwl' if utils.get_conf('FAIL2BAN_SEND_EMAIL', '') == '1' else 'action_'
-    sender = utils.get_conf('EMAIL_SENDER', 'root@localhost')
-    destemail = utils.get_conf('FAIL2BAN_DEST_EMAIL', '') or utils.get_conf('EMAIL_ADMINS', 'root@localhost')
-    maxretry = utils.get_conf('FAIL2BAN_MAXRETRY', '6')
-    bantime = utils.get_conf('FAIL2BAN_BANTIME', '30')
-    enabled = 'true' if utils.get_conf('FAIL2BAN_ENABLED', '0') == '1' else 'false'
-    cmds = [
-        'rm -f /etc/fail2ban/jail.d/campusmanager.conf',
-        dict(
-            line='write',
-            template='%s/filter.d/mediaserver.conf' % dir_path,
-            target='/etc/fail2ban/filter.d/mediaserver.conf'
-        ),
-    ]
-    # if mediaserver is installed
-    if os.path.exists("/home/msuser/mstmp/mediaserver.log") or os.path.exists("/home/ubicasttv/mstmp/mediaserver.log"):
-        cmds.append(dict(
-            line='write',
-            template='%s/jail.d/mediaserver.conf' % dir_path,
-            target='/etc/fail2ban/jail.d/mediaserver.conf',
-            params=(
-                ('{{ action }}', action),
-                ('{{ sender }}', sender),
-                ('{{ destemail }}', destemail),
-                ('{{ maxretry }}', maxretry),
-                ('{{ bantime }}', bantime),
-                ('{{ enabled }}', enabled),
-            )
-        ))
-    # if mirismanager is installed
-    if os.path.exists("/home/skyreach/.skyreach/logs/skyreach.log"):
-        cmds.append(dict(
-            line='write',
-            template='%s/jail.d/mirismanager.conf' % dir_path,
-            target='/etc/fail2ban/jail.d/mirismanager.conf',
-            params=(
-                ('{{ action }}', action),
-                ('{{ sender }}', sender),
-                ('{{ destemail }}', destemail),
-                ('{{ maxretry }}', maxretry),
-                ('{{ bantime }}', bantime),
-                ('{{ enabled }}', enabled),
-            )
-        ))
-    # if monitor is installed
-    if os.path.exists("/home/msmonitor/msmonitor/logs/site.log"):
-        cmds.append(dict(
-            line='write',
-            template='%s/jail.d/monitor.conf' % dir_path,
-            target='/etc/fail2ban/jail.d/monitor.conf',
-            params=(
-                ('{{ action }}', action),
-                ('{{ sender }}', sender),
-                ('{{ destemail }}', destemail),
-                ('{{ maxretry }}', maxretry),
-                ('{{ bantime }}', bantime),
-                ('{{ enabled }}', enabled),
-            )
-        ))
-    utils.run_commands(cmds)
-    # restart fail2ban
-    utils.log('Enable and restart fail2ban')
-    cmds = [
-        'systemctl enable fail2ban',
-        'systemctl restart fail2ban',
-    ]
-    utils.run_commands(cmds)
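
The `write` entries above rely on a templating helper inside `utils` that is not part of this patch. As a rough sketch of the assumed behaviour (plain placeholder substitution into the target file), where `write_template` and the literal values are illustrative only:

```python
# Hypothetical equivalent of a dict(line='write', template=..., target=..., params=...)
# entry as consumed by utils.run_commands (assumption, not the actual implementation).
def write_template(template, target, params=()):
    with open(template, 'r') as fo:
        content = fo.read()
    for placeholder, value in params:
        content = content.replace(placeholder, value)
    with open(target, 'w') as fo:
        fo.write(content)


# Example rendering of the mediaserver jail with default-like values.
write_template(
    'jail.d/mediaserver.conf',
    '/etc/fail2ban/jail.d/mediaserver.conf',
    params=(
        ('{{ action }}', 'action_'),
        ('{{ sender }}', 'root@localhost'),
        ('{{ destemail }}', 'root@localhost'),
        ('{{ maxretry }}', '6'),
        ('{{ bantime }}', '30'),
        ('{{ enabled }}', 'false'),
    ),
)
```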
diff --git a/2.Common_services/8.Fail2ban/filter.d/mediaserver.conf b/2.Common_services/8.Fail2ban/filter.d/mediaserver.conf
deleted file mode 100644
index 508b3551..00000000
--- a/2.Common_services/8.Fail2ban/filter.d/mediaserver.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-# Fail2Ban filter for MediaServer
-
-[INCLUDES]
-
-before = common.conf
-after = mediaserver.local
-
-[Definition]
-
-failregex = INFO Wrong credentials given to login\. IP: <HOST>, username: \S+\.$
-            INFO Wrong crendentials given to login\. IP: <HOST>, username: \S+\.$
-
-ignoreregex =
diff --git a/2.Common_services/8.Fail2ban/jail.d/mediaserver.conf b/2.Common_services/8.Fail2ban/jail.d/mediaserver.conf
deleted file mode 100644
index cb12ccda..00000000
--- a/2.Common_services/8.Fail2ban/jail.d/mediaserver.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[mediaserver]
-enabled = {{ enabled }}
-filter = mediaserver
-maxretry = {{ maxretry }}
-bantime = {{ bantime }}
-logpath = /home/*/mstmp/mediaserver.log
-action = %({{ action }})s
-sender = {{ sender }}
-destemail = {{ destemail }}
diff --git a/2.Common_services/8.Fail2ban/jail.d/mirismanager.conf b/2.Common_services/8.Fail2ban/jail.d/mirismanager.conf
deleted file mode 100644
index 11a21642..00000000
--- a/2.Common_services/8.Fail2ban/jail.d/mirismanager.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[mirismanager]
-enabled = {{ enabled }}
-filter = mediaserver
-maxretry = {{ maxretry }}
-bantime = {{ bantime }}
-logpath = /home/skyreach/.skyreach/logs/skyreach.log
-action = %({{ action }})s
-sender = {{ sender }}
-destemail = {{ destemail }}
diff --git a/2.Common_services/8.Fail2ban/jail.d/monitor.conf b/2.Common_services/8.Fail2ban/jail.d/monitor.conf
deleted file mode 100644
index 386aef30..00000000
--- a/2.Common_services/8.Fail2ban/jail.d/monitor.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[monitor]
-enabled = {{ enabled }}
-filter = mediaserver
-maxretry = {{ maxretry }}
-bantime = {{ bantime }}
-logpath = /home/msmonitor/msmonitor/logs/site.log
-action = %({{ action }})s
-sender = {{ sender }}
-destemail = {{ destemail }}
diff --git a/20.Envsetup_dev/1.Shell_fake_action/0_setup.sh b/20.Envsetup_dev/1.Shell_fake_action/0_setup.sh
deleted file mode 100755
index 86dd1251..00000000
--- a/20.Envsetup_dev/1.Shell_fake_action/0_setup.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-source ../../global-conf.sh
-
-echo "Working dir: \"$(pwd)\"."
-
-if test -z "${ENVSETUP_BRANCH}"; then
-	echo "Test failed, the configuration \"ENVSETUP_BRANCH\" is empty."
-	exit 1
-fi
-
-echo "Branch is \"${ENVSETUP_BRANCH}\"."
-echo -e "${GREEN}Test OK.${NC}"
diff --git a/20.Envsetup_dev/2.Python_fake_action/0_setup.py b/20.Envsetup_dev/2.Python_fake_action/0_setup.py
deleted file mode 100644
index b443d010..00000000
--- a/20.Envsetup_dev/2.Python_fake_action/0_setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import os
-
-import utils
-
-
-def setup(interactive=True):
-    # retrieve a configuration attribute to test it
-    utils.log('Working dir: "%s".' % os.getcwd())
-    branch = utils.get_conf('ENVSETUP_BRANCH')
-    utils.log('Branch is "%s".' % branch)
-    utils.success('Test OK.')
diff --git a/3.New_server_deployment/1.Download_envsetup_config/0_setup.py b/3.New_server_deployment/1.Download_envsetup_config/0_setup.py
deleted file mode 100644
index 5c6ffa13..00000000
--- a/3.New_server_deployment/1.Download_envsetup_config/0_setup.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import os
-import subprocess
-
-import utils
-
-
-def setup(interactive=True):
-    # Generate SSH key if not already done
-    if not os.path.exists('/root/.ssh/id_rsa'):
-        code, out = utils.exec_cmd('ssh-keygen -b 4096 -t rsa -f /root/.ssh/id_rsa -P ""')
-        if code != 0:
-            raise Exception('Failed to create SSH key: ssh-keygen returned code %s:\n%s' % (code, out))
-    with open('/root/.ssh/id_rsa.pub', 'r') as fo:
-        public_key = fo.read()
-    # Get requests module
-    if subprocess.run(['dpkg', '-s', 'python3-requests'], stdout=subprocess.DEVNULL).returncode != 0:
-        cmds = [
-            'apt-get update',
-            'apt-get install -y python3-requests',
-        ]
-        utils.run_commands(cmds)
-    import requests
-    # Check skyreach url
-    verify = utils.get_conf('SKYREACH_SSL_VERIFY') != '0'
-    sk_url = utils.get_conf('SKYREACH_HOST')
-    if not sk_url:
-        raise Exception('No URL defined to contact Panel / Skyreach.')
-    sk_url = 'https://' + sk_url
-    req = requests.head(sk_url, verify=verify, timeout=20)
-    if req.status_code not in (301, 302):
-        raise Exception('Unexpected response from "%s": code %s, should have been 301 or 302.' % (sk_url, req.status_code))
-    # Get conf using API key if already set or using an activation key
-    api_key = utils.get_conf('SKYREACH_API_KEY')
-    act_key = utils.get_conf('SKYREACH_ACTIVATION_KEY')
-    req = None
-    if act_key == 'no-dl':
-        utils.log('\033[1;34m The activation key is set to "no-dl", skipping configuration download. \033[0m')
-        return
-    elif api_key:
-        req = requests.post(sk_url + '/erp/credentials/envsetup-conf.sh', params=dict(api_key=api_key), data=dict(public_key=public_key), verify=verify, timeout=20)
-    elif act_key:
-        req = requests.post(sk_url + '/erp/credentials/envsetup-conf.sh', data=dict(key=act_key, public_key=public_key), verify=verify, timeout=20)
-    if req is None:
-        utils.log('\033[1;33m No activation key nor API key are set, skipping configuration download. \033[0m')
-        return
-    # Write conf
-    if req.status_code != 200:
-        if len(req.text) > 300:
-            with open('/tmp/envsetup-conf-dl.txt', 'w') as fo:
-                fo.write(req.text)
-            raise Exception('Request on "%s" failed with status %s. Full response content available in "/tmp/envsetup-conf-dl.txt".' % (req.url, req.status_code))
-        else:
-            raise Exception('Request on "%s" failed with status %s. Response: "%s".' % (req.url, req.status_code, req.text))
-    path = os.path.join(os.path.dirname(os.path.dirname(utils.get_dir(__file__))), 'auto-generated-conf.sh')
-    utils.log('Configuration path: %s' % path)
-    with open(path, 'w') as fo:
-        fo.write(req.text)
-    utils.log('Configuration written.')
-
-    utils.log('Comment activation key in conf.sh')
-    path = os.path.join(os.path.dirname(os.path.dirname(utils.get_dir(__file__))), 'conf.sh')
-    with open(path, 'r') as fo:
-        content = fo.read()
-    content = content.replace('SKYREACH_ACTIVATION_KEY', '#SKYREACH_ACTIVATION_KEY').replace('##SKYREACH_ACTIVATION_KEY', '#SKYREACH_ACTIVATION_KEY')
-    with open(path, 'w') as fo:
-        fo.write(content)
-
-    utils.log('Autogenerate empty conf.')
-    cmds = [
-        'bash fill_empty_conf.sh',
-    ]
-    utils.run_commands(cmds)
diff --git a/3.New_server_deployment/1.Download_envsetup_config/fill_empty_conf.sh b/3.New_server_deployment/1.Download_envsetup_config/fill_empty_conf.sh
deleted file mode 100755
index 8031be0c..00000000
--- a/3.New_server_deployment/1.Download_envsetup_config/fill_empty_conf.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-source ../../global-conf.sh
-
-if ( dpkg -s pwgen >/dev/null 2>&1 ); then
-	echo "The pwgen package is already installed."
-else
-	apt-get install -y -o Dpkg::Options::="--force-confold" pwgen
-fi
-
-conf_path="../../auto-generated-conf.sh"
-
-# Autogenerate missing values
-if [ "${MS_ID}" = "" ]; then
-	MS_ID=$(echo "$(hostname)_msuser")
-	if ( cat "$conf_path" | grep "MS_ID" >/dev/null ); then
-		sed -i "s@^MS_ID=.*@MS_ID='${MS_ID}'@" "$conf_path"
-	else
-		echo "MS_ID='${MS_ID}'" >> "$conf_path"
-	fi
-	echo -e "${YELLOW}The config MS_ID has been set. If you forgot to fill it before, please change the value in the envsetup configuration.${NC}"
-	sleep 3
-fi
-if [ "${MS_API_KEY}" = "" ]; then
-	MS_API_KEY=$(echo "s$(pwgen 4)-$(pwgen 5)-$(pwgen 5)-$(pwgen 5)-$(pwgen 5)")
-	# respect API pattern
-	MS_API_KEY=$(echo $MS_API_KEY | sed "s@[iloILO]@$((${RANDOM} / 10000))@g")
-	if ( cat "$conf_path" | grep "MS_API_KEY" >/dev/null ); then
-		sed -i "s@^MS_API_KEY=.*@MS_API_KEY='${MS_API_KEY}'@" "$conf_path"
-	else
-		echo "MS_API_KEY='${MS_API_KEY}'" >> "$conf_path"
-	fi
-	echo -e "${YELLOW}The config MS_API_KEY has been set. If you forgot to fill it before, please change the value in the envsetup configuration.${NC}"
-	sleep 3
-fi
-if [ "${MS_SECRET}" = "secret" ]; then
-	MS_SECRET=$(echo "$(pwgen 40)")
-	if ( cat "$conf_path" | grep "MS_SECRET" >/dev/null ); then
-		sed -i "s@^MS_SECRET=.*@MS_SECRET='${MS_SECRET}'@" "$conf_path"
-	else
-		echo "MS_SECRET='${MS_SECRET}'" >> "$conf_path"
-	fi
-	echo -e "${YELLOW}The config MS_SECRET has been set. If you forgot to fill it before, please change the value in the envsetup configuration.${NC}"
-	sleep 3
-fi
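
The MS_API_KEY fallback above produces a key shaped like `sXXXX-XXXXX-XXXXX-XXXXX-XXXXX` with the ambiguous characters i/l/o replaced by a digit. A rough Python equivalent of the same idea, assuming a lowercase-plus-digits alphabet (real pwgen output also contains uppercase letters):

```python
import random
import string

# Assumption: restrict to lowercase letters and digits, minus the ambiguous i/l/o.
ALPHABET = [c for c in string.ascii_lowercase + string.digits if c not in 'ilo']


def _chunk(length):
    return ''.join(random.choice(ALPHABET) for _ in range(length))


def generate_ms_api_key():
    # Same shape as "s$(pwgen 4)-$(pwgen 5)-$(pwgen 5)-$(pwgen 5)-$(pwgen 5)".
    return 's%s-%s-%s-%s-%s' % (_chunk(4), _chunk(5), _chunk(5), _chunk(5), _chunk(5))
```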
diff --git a/3.New_server_deployment/2.Proxy_settings/0_setup.py b/3.New_server_deployment/2.Proxy_settings/0_setup.py
deleted file mode 100644
index ef315c58..00000000
--- a/3.New_server_deployment/2.Proxy_settings/0_setup.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import os
-
-import utils
-
-
-def setup(interactive=True):
-    # Get conf
-    http_proxy = utils.get_conf('PROXY_HTTP')
-    https_proxy = utils.get_conf('PROXY_HTTPS')
-    no_proxy = utils.get_conf('PROXY_EXCLUDE')
-    # Environment
-    environment_path = '/etc/environment'
-    environment = 'PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games"\n'
-    if http_proxy:
-        environment += 'http_proxy="%s"\n' % http_proxy
-        environment += 'HTTP_PROXY="%s"\n' % http_proxy
-    if https_proxy:
-        environment += 'https_proxy="%s"\n' % https_proxy
-        environment += 'HTTPS_PROXY="%s"\n' % https_proxy
-    if http_proxy or https_proxy:
-        no_proxy = no_proxy + ',' if no_proxy else ''
-        no_proxy += 'localhost'
-        no_proxy += ',' + utils.get_conf('MS_SERVER_NAME', 'mediaserver')
-        no_proxy += ',' + utils.get_conf('MONITOR_SERVER_NAME', 'monitor')
-        no_proxy += ',' + utils.get_conf('CM_SERVER_NAME', 'mirismanager')
-        environment += 'no_proxy="%s"\n' % no_proxy
-        environment += 'NO_PROXY="%s"\n' % no_proxy
-    # apt
-    apt_proxy_path = '/etc/apt/apt.conf.d/proxy'
-    apt_proxy = ''
-    if http_proxy:
-        apt_proxy += 'Acquire::http::Proxy "%s";\n' % http_proxy
-    if https_proxy:
-        apt_proxy += 'Acquire::https::Proxy "%s";\n' % https_proxy
-
-    # write changes
-    files = (
-        (environment_path, environment),
-        (apt_proxy_path, apt_proxy),
-    )
-    for path, content in files:
-        if os.path.exists(path):
-            with open(path, 'r') as fo:
-                current = fo.read()
-        else:
-            current = ''
-        if current != content:
-            if content:
-                with open(path, 'w') as fo:
-                    fo.write(content)
-                utils.log('File "%s" updated.' % path)
-            else:
-                os.remove(path)
-                utils.log('File "%s" removed.' % path)
diff --git a/3.New_server_deployment/3.APT_upgrade/0_setup.sh b/3.New_server_deployment/3.APT_upgrade/0_setup.sh
deleted file mode 100755
index cc5d3611..00000000
--- a/3.New_server_deployment/3.APT_upgrade/0_setup.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-source /root/envsetup/global-conf.sh
-
-DEBIAN_FRONTEND=noninteractive
-export DEBIAN_FRONTEND
-
-apt-get update
-apt-get install -y apt-transport-https gnupg lsb-release
-
-# move 50unattended-upgrades conf if any
-if [ -f '/etc/apt/apt.conf.d/50unattended-upgrades.ucf-dist' ]; then
-	mv '/etc/apt/apt.conf.d/50unattended-upgrades.ucf-dist' '/etc/apt/apt.conf.d/50unattended-upgrades'
-fi
-
-# update sources.list
-if ( ! test -f '/etc/apt/sources.list.save' ); then
-	cp -a '/etc/apt/sources.list' '/etc/apt/sources.list.save'
-fi
-rm -f /etc/apt/sources.list~
-if ( lsb_release -a | grep 'Ubuntu' >/dev/null 2>&1 ); then
-	echo 'Updating /etc/apt/sources.list file.'
-	cp 'sources-ubu.list' '/etc/apt/sources.list'
-	if ( lsb_release -a | grep '14.04' >/dev/null 2>&1 ); then
-		sed -i 's@bionic@trusty@' /etc/apt/sources.list
-	elif ( lsb_release -a | grep '16.04' >/dev/null 2>&1 ); then
-		sed -i 's@bionic@xenial@' /etc/apt/sources.list
-	fi
-else
-	# Debian
-	cp 'sources-deb.list' '/etc/apt/sources.list'
-fi
-
-# modify sources.list to use ubicast cache
-if ( ! test -z ${APT_CACHE_URL} ); then
-	if ( ! grep "${APT_CACHE_URL}" /etc/apt/sources.list >/dev/null ); then
-		echo "Updating /etc/apt/sources.list to use cache ${APT_CACHE_URL}."
-		sed -i "s@http://@${APT_CACHE_URL}@" /etc/apt/sources.list
-	fi
-fi
-
-# update packages
-apt-get update
-apt-get install -f -y -o Dpkg::Options::="--force-confold"
-apt-get dist-upgrade -y -o Dpkg::Options::="--force-confold"
-apt-get install -y apt-transport-https
-apt-get autoremove -y
-
-# APT panel
-if ( ! test -z ${SKYREACH_APT_TOKEN} ); then
-	echo "Adding skyreach.list to APT sources."
-	wget -q "https://${SKYREACH_HOST}/media/public.gpg" -O- | apt-key add -
-	echo "deb https://${SKYREACH_HOST} packaging/apt/${SKYREACH_APT_TOKEN}/" > /etc/apt/sources.list.d/skyreach.list
-fi
-
-# update
-apt-get update
-apt-get dist-upgrade -y -o Dpkg::Options::="--force-confold"
-
-# unattended-upgrades
-apt-get install -y unattended-upgrades
-
-# remove old kernels
-echo 'Unattended-Upgrade::Remove-Unused-Dependencies "true";' >> /etc/apt/apt.conf.d/50unattended-upgrades
diff --git a/3.New_server_deployment/3.APT_upgrade/sources-deb.list b/3.New_server_deployment/3.APT_upgrade/sources-deb.list
deleted file mode 100644
index 09940822..00000000
--- a/3.New_server_deployment/3.APT_upgrade/sources-deb.list
+++ /dev/null
@@ -1,5 +0,0 @@
-deb http://ftp.debian.org/debian buster main contrib non-free
-
-deb http://ftp.debian.org/debian buster-updates main contrib non-free
-
-deb http://security.debian.org buster/updates main contrib non-free
diff --git a/3.New_server_deployment/3.APT_upgrade/sources-ubu.list b/3.New_server_deployment/3.APT_upgrade/sources-ubu.list
deleted file mode 100644
index cc10e6e4..00000000
--- a/3.New_server_deployment/3.APT_upgrade/sources-ubu.list
+++ /dev/null
@@ -1,4 +0,0 @@
-deb http://archive.ubuntu.com/ubuntu/ bionic           main restricted universe multiverse
-deb http://archive.ubuntu.com/ubuntu/ bionic-updates   main restricted universe multiverse
-deb http://archive.ubuntu.com/ubuntu/ bionic-backports main restricted universe multiverse
-deb http://security.ubuntu.com/ubuntu bionic-security  main restricted universe multiverse
diff --git a/4.Monitor/1.Install_monitor/0_setup.sh b/4.Monitor/1.Install_monitor/0_setup.sh
deleted file mode 100755
index 117294b8..00000000
--- a/4.Monitor/1.Install_monitor/0_setup.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-source /root/envsetup/global-conf.sh
-
-# MS package install
-if ( dpkg -s ubicast-monitor-runtime >/dev/null 2>&1 ); then
-	DEBIAN_FRONTEND=noninteractive apt-get install -y --reinstall ubicast-monitor ubicast-monitor-runtime
-else
-	DEBIAN_FRONTEND=noninteractive apt-get install -y ubicast-monitor-runtime
-fi
-
-if [[ "${MONITOR_SHELL_PWD}" != "" ]]; then
-	echo -e "${MONITOR_SHELL_PWD}\n${MONITOR_SHELL_PWD}" | passwd -q msmonitor
-	echo -e "\033[1;33mThe msmonitor account password has been set.\033[0m"
-fi
-
-# configure nginx
-if [[ "${MONITOR_SERVER_NAME}" != "" ]]; then
-	/root/envsetup/set_app_domain.py mon $MONITOR_SERVER_NAME
-fi
-nginx -t
-service nginx restart
-
-systemctl enable msmonitor
diff --git a/5.MediaServer/1.Install_MediaServer/0_setup.sh b/5.MediaServer/1.Install_MediaServer/0_setup.sh
deleted file mode 100755
index 8a749556..00000000
--- a/5.MediaServer/1.Install_MediaServer/0_setup.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-source /root/envsetup/global-conf.sh
-
-# MS package install
-if ( dpkg -s ubicast-mediaserver-runtime >/dev/null 2>&1 ); then
-	DEBIAN_FRONTEND=noninteractive apt-get install -y --reinstall ubicast-mediaserver ubicast-mediaserver-runtime
-else
-	DEBIAN_FRONTEND=noninteractive apt-get install -y ubicast-mediaserver-runtime
-fi
-
-# set sender address
-sender="${EMAIL_SENDER}"
-if [[ "${sender}" == "" || "${sender}" == "noreply@ubicast.eu" ]]; then
-	if [[ "${MS_SERVER_NAME}" =~ .*\..* ]]; then
-		sender="noreply@${MS_SERVER_NAME}"
-	fi
-fi
-if [[ "${sender}" != "" && "${sender}" != "noreply@ubicast.eu" ]]; then
-	echo "Using ${sender} as sender address for MS."
-	sed -i "s?[# ]*DEFAULT_FROM_EMAIL.*?DEFAULT_FROM_EMAIL = '${sender}'?" /etc/mediaserver/msconf.py
-fi
-
-# MS instance
-msinstaller.py msuser
-
-systemctl enable mediaserver
diff --git a/5.MediaServer/2.Bench_tools/0_setup.sh b/5.MediaServer/2.Bench_tools/0_setup.sh
deleted file mode 100644
index 6f173e96..00000000
--- a/5.MediaServer/2.Bench_tools/0_setup.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-DEBIAN_FRONTEND=noninteractive apt-get install -y python-mediaserver-benchmark
diff --git a/6.Miris_Manager/1.Install_Miris_Manager/0_setup.sh b/6.Miris_Manager/1.Install_Miris_Manager/0_setup.sh
deleted file mode 100755
index a40d19b8..00000000
--- a/6.Miris_Manager/1.Install_Miris_Manager/0_setup.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-source /root/envsetup/global-conf.sh
-
-if ( apt-cache madison ubicast-skyreach-runtime | grep "ubicast-skyreach-runtime | " ); then
-	echo "The ubicast-skyreach-runtime package is available in APT repositories."
-else
-	echo "${YELLOW}The ubicast-skyreach-runtime package is not available in APT repositories, install skipped.${NC}"
-	exit 0
-fi
-
-# install cm
-if ( dpkg -s ubicast-skyreach-runtime >/dev/null 2>&1 ); then
-	DEBIAN_FRONTEND=noninteractive apt-get install -y --reinstall ubicast-skyreach ubicast-skyreach-runtime
-else
-	DEBIAN_FRONTEND=noninteractive apt-get install -y ubicast-skyreach-runtime
-fi
-# set sender address
-sender="${EMAIL_SENDER}"
-if [[ "${sender}" == "" || "${sender}" == "noreply@ubicast.eu" ]]; then
-	if [[ "${CM_SERVER_NAME}" =~ .*\..* ]]; then
-		sender="noreply@${CM_SERVER_NAME}"
-	fi
-fi
-if [[ "${sender}" != "" && "${sender}" != "noreply@ubicast.eu" ]]; then
-	echo "Using ${sender} as sender address for CM."
-	if grep "DEFAULT_FROM_EMAIL" /home/skyreach/htdocs/skyreach_site/settings_override.py >/dev/null; then
-		sed -i "s?[# ]*DEFAULT_FROM_EMAIL.*?DEFAULT_FROM_EMAIL = '${sender}'?" /home/skyreach/htdocs/skyreach_site/settings_override.py
-	else
-		echo "DEFAULT_FROM_EMAIL = '${sender}'" >> /home/skyreach/htdocs/skyreach_site/settings_override.py
-	fi
-fi
-
-# configure nginx
-if [[ "${CM_SERVER_NAME}" != "" ]]; then
-	/root/envsetup/set_app_domain.py mm $CM_SERVER_NAME
-fi
-nginx -t
-service nginx restart
-
-systemctl enable skyreach
diff --git a/6.Miris_Manager/2.Configure_apt_cacher_ng/0_setup.py b/6.Miris_Manager/2.Configure_apt_cacher_ng/0_setup.py
deleted file mode 100644
index 07a3b156..00000000
--- a/6.Miris_Manager/2.Configure_apt_cacher_ng/0_setup.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import os
-import re
-
-import utils
-
-
-def setup(interactive=True):
-    http_proxy = utils.get_conf('PROXY_HTTP')
-    # apt-cacher-ng
-    path = '/etc/apt-cacher-ng/acng.conf'
-    if os.path.exists(path):
-        with open(path, 'r') as fo:
-            current_conf = fo.read()
-        new_conf = re.sub(r'\nProxy:.*', '', current_conf).strip() + '\n\n'
-        if http_proxy:
-            new_conf += 'Proxy: %s\n' % http_proxy
-        if current_conf != new_conf:
-            with open(path, 'w') as fo:
-                fo.write(new_conf)
-            utils.log('File "%s" updated.' % path)
-        else:
-            utils.log('File "%s" already up to date.' % path)
-        cmds = [
-            'service apt-cacher-ng restart',
-        ]
-        utils.run_commands(cmds)
-    else:
-        utils.log('The configuration file "%s" does not exist, nothing to do.' % path)
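
The `re.sub` call above is what keeps this rewrite idempotent: any existing `Proxy:` line is stripped before the configured value is appended, so running the task twice yields the same file. A small illustration with made-up values:

```python
import re

current_conf = 'CacheDir: /var/cache/apt-cacher-ng\nProxy: http://old-proxy:3128\n'
http_proxy = 'http://proxy.example.com:3128'  # illustrative value for PROXY_HTTP

new_conf = re.sub(r'\nProxy:.*', '', current_conf).strip() + '\n\n'
if http_proxy:
    new_conf += 'Proxy: %s\n' % http_proxy

# new_conf now contains a single Proxy line:
#   CacheDir: /var/cache/apt-cacher-ng
#
#   Proxy: http://proxy.example.com:3128
```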
diff --git a/7.MediaWorker/1.Celerity_server/0_setup.py b/7.MediaWorker/1.Celerity_server/0_setup.py
deleted file mode 100644
index 719b72ed..00000000
--- a/7.MediaWorker/1.Celerity_server/0_setup.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import utils
-
-
-def setup(interactive=True):
-    dir_path = utils.get_dir(__file__)
-    cmds = [
-        'apt-get install --yes celerity-server',
-        dict(line='write', template='%s/celerity-config.py' % dir_path, target='/etc/celerity/config.py', params=(
-            ('{{ signing_key }}', utils.get_conf('CELERITY_SIGNING_KEY', 'undefined')),
-            ('{{ MS_SERVER_NAME }}', utils.get_conf('MS_SERVER_NAME', 'undefined')),
-            ('{{ MS_ID }}', utils.get_conf('MS_ID', 'MS_ID')),
-            ('{{ MS_API_KEY }}', utils.get_conf('MS_API_KEY', 'MS_API_KEY')),
-        )),
-        'service celerity-server restart',
-    ]
-    utils.run_commands(cmds)
diff --git a/7.MediaWorker/1.Celerity_server/celerity-config.py b/7.MediaWorker/1.Celerity_server/celerity-config.py
deleted file mode 100644
index 1c1b518c..00000000
--- a/7.MediaWorker/1.Celerity_server/celerity-config.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-SIGNING_KEY = '{{ signing_key }}'
-SERVER_URL = 'https://{{ MS_SERVER_NAME }}:6200'
-
-# WORKERS_COUNT = 2
-
-# MediaServer interactions
-MEDIASERVERS = {
-    '{{ MS_ID }}': {'url': 'https://{{ MS_SERVER_NAME }}', 'api_key': '{{ MS_API_KEY }}'},
-}
diff --git a/7.MediaWorker/2.Celerity_workers/0_setup.py b/7.MediaWorker/2.Celerity_workers/0_setup.py
deleted file mode 100644
index 1b2abcaf..00000000
--- a/7.MediaWorker/2.Celerity_workers/0_setup.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import utils
-
-
-def setup(interactive=True):
-    dir_path = utils.get_dir(__file__)
-    cmds = [
-        'apt-get install --yes celerity-workers',
-        dict(line='write', template='%s/celerity-config.py' % dir_path, target='/etc/celerity/config.py', params=(
-            ('{{ signing_key }}', utils.get_conf('CELERITY_SIGNING_KEY', 'undefined')),
-            ('{{ MS_SERVER_NAME }}', utils.get_conf('MS_SERVER_NAME', 'undefined')),
-            ('{{ MS_ID }}', utils.get_conf('MS_ID', 'MS_ID')),
-            ('{{ MS_API_KEY }}', utils.get_conf('MS_API_KEY', 'MS_API_KEY')),
-        )),
-        'service celerity-workers restart',
-    ]
-    utils.run_commands(cmds)
diff --git a/7.MediaWorker/2.Celerity_workers/celerity-config.py b/7.MediaWorker/2.Celerity_workers/celerity-config.py
deleted file mode 100644
index 1c1b518c..00000000
--- a/7.MediaWorker/2.Celerity_workers/celerity-config.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-SIGNING_KEY = '{{ signing_key }}'
-SERVER_URL = 'https://{{ MS_SERVER_NAME }}:6200'
-
-# WORKERS_COUNT = 2
-
-# MediaServer interactions
-MEDIASERVERS = {
-    '{{ MS_ID }}': {'url': 'https://{{ MS_SERVER_NAME }}', 'api_key': '{{ MS_API_KEY }}'},
-}
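
Both Celerity tasks render the same template; with illustrative values (ms.example.com, ms-example and placeholder keys, none of which are real defaults), the resulting /etc/celerity/config.py would presumably read:

```python
SIGNING_KEY = 'example-signing-key'         # from CELERITY_SIGNING_KEY
SERVER_URL = 'https://ms.example.com:6200'  # from MS_SERVER_NAME

# WORKERS_COUNT = 2

# MediaServer interactions
MEDIASERVERS = {
    'ms-example': {'url': 'https://ms.example.com', 'api_key': 'example-api-key'},
}
```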
diff --git a/8.MediaCache/1.Install_cache/0_setup.py b/8.MediaCache/1.Install_cache/0_setup.py
deleted file mode 100644
index a12e66f7..00000000
--- a/8.MediaCache/1.Install_cache/0_setup.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import utils
-
-
-def setup(interactive=True):
-    dir_path = utils.get_dir(__file__)
-    server_name = utils.get_conf('CACHE_SERVER_NAME') or 'cache'
-    source_server = utils.get_conf('CACHE_SOURCE') or 'http://undefined'
-    cmds = [
-        'apt-get remove -y apache2',
-        'apt-get install -y nginx',
-        'mkdir -p /var/www/cache',
-        'cp %s/index.html /var/www/cache/index.html' % dir_path,
-        'cp %s/crossdomain.xml /var/www/cache/crossdomain.xml' % dir_path,
-        'cp %s/nginx-limits.conf /etc/security/limits.d/nginx.conf' % dir_path,
-        'cp %s/vhost_cache.conf /etc/nginx/sites-available/cache.conf' % dir_path,
-        'sed -i "s@server_name cache;@server_name %s;@" "/etc/nginx/sites-available/cache.conf"' % server_name,
-        'sed -i "s@proxy_pass http://undefined;@proxy_pass %s;@" "/etc/nginx/sites-available/cache.conf"' % source_server,
-        'ln -sfn ../sites-available/cache.conf /etc/nginx/sites-enabled/cache.conf',
-    ]
-    utils.run_commands(cmds)
-    utils.add_hosts_to_localhost([server_name])
diff --git a/8.MediaCache/1.Install_cache/crossdomain.xml b/8.MediaCache/1.Install_cache/crossdomain.xml
deleted file mode 100644
index ee662133..00000000
--- a/8.MediaCache/1.Install_cache/crossdomain.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE cross-domain-policy SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd">
-<cross-domain-policy>
-	<allow-access-from domain="*" secure="false"/>
-	<site-control permitted-cross-domain-policies="all"/>
-</cross-domain-policy>
diff --git a/8.MediaCache/1.Install_cache/index.html b/8.MediaCache/1.Install_cache/index.html
deleted file mode 100644
index 2881bde2..00000000
--- a/8.MediaCache/1.Install_cache/index.html
+++ /dev/null
@@ -1,17 +0,0 @@
-<!DOCTYPE html>
-<html xmlns="http://www.w3.org/1999/xhtml">
-	<head>
-		<title>UbiCast cache server</title>
-		<style>
-			html { background: #222; color: #ddd; }
-			body { margin: 0 auto; max-width: 500px; }
-			a { color: #5cf; text-decoration: none; }
-			a:hover { text-decoration: underline; }
-		</style>
-	</head>
-	<body>
-		<h1>UbiCast cache server</h1>
-		<hr/>
-		<p>Powered by UbiCast -- <a href="https://www.ubicast.eu">https://www.ubicast.eu</a></p>
-	</body>
-</html>
diff --git a/8.MediaCache/1.Install_cache/nginx-limits.conf b/8.MediaCache/1.Install_cache/nginx-limits.conf
deleted file mode 100644
index d0f36474..00000000
--- a/8.MediaCache/1.Install_cache/nginx-limits.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-nginx soft nofile 20000
-nginx hard nofile 40000
diff --git a/8.MediaCache/1.Install_cache/vhost_cache.conf b/8.MediaCache/1.Install_cache/vhost_cache.conf
deleted file mode 100644
index 0f06debc..00000000
--- a/8.MediaCache/1.Install_cache/vhost_cache.conf
+++ /dev/null
@@ -1,63 +0,0 @@
-proxy_cache_path /tmp/nginx-uc-cache levels=1:2 keys_zone=uc-cache:10m max_size=10g inactive=300s;
-
-log_format cache '$remote_addr - $host [$time_local] "$request" $status '
-	'$body_bytes_sent "$http_referer" '
-	'rt=$request_time ut="$upstream_response_time" '
-	'cs=$upstream_cache_status';
-
-server {
-	listen 80 default_server;
-	listen 443 default_server ssl;
-	server_name cache;
-
-	root /var/www/cache/;
-
-	access_log /var/log/nginx/access_cache.log cache;
-	error_log /var/log/nginx/error_cache.log;
-
-	location /.well-known/acme-challenge {
-		default_type "text/plain";
-		root /tmp/letsencrypt;
-	}
-
-	location /crossdomain {
-	}
-
-	location /streaming/ {
-		# Live; expiration headers are defined by upstream (nginx/wowza)
-		rewrite ^/(.*)$ /$1? break;
-		proxy_pass http://undefined;
-		proxy_cache uc-cache;
-		# do not consider secure urls as new files
-		proxy_cache_key $scheme$proxy_host$uri;
-		# only one request at a time will be allowed to populate a new cache element
-		proxy_cache_lock on;
-		# hide upstream X-Cache header
-		proxy_hide_header X-Cache;
-		# add own X-Cache header
-		add_header X-Cache $upstream_cache_status;
-	}
-	location /resources/ {
-		# VOD
-		location ~ \.(m3u8|ts|mp4|mp3|webm|oga|ogv|ogg|mov|flv)$ {
-			rewrite ^/(.*)$ /$1? break;
-			proxy_pass http://undefined;
-			proxy_cache uc-cache;
-			# do not consider secure urls as new files
-			proxy_cache_key $scheme$proxy_host$uri;
-			# only one request at a time will be allowed to populate a new cache element
-			proxy_cache_lock on;
-			# how long should the data be kept in the cache
-			proxy_cache_valid 200 7d;
-			# instruct browser to cache this
-			expires 7d;
-			# headers
-			proxy_ignore_headers "Cache-Control" "X-Accel-Expires" "Expires";
-			add_header X-Cache $upstream_cache_status;
-		}
-	}
-	location / {
-		# only urls to video and audio files are allowed, discard any requested path for other urls
-		rewrite ^/(.*)$ /index.html? break;
-	}
-}
diff --git a/8.MediaCache/2.Install_ferm/0_setup.sh b/8.MediaCache/2.Install_ferm/0_setup.sh
deleted file mode 100755
index 2e701f46..00000000
--- a/8.MediaCache/2.Install_ferm/0_setup.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-source /root/envsetup/global-conf.sh
-
-DEBIAN_FRONTEND=noninteractive apt-get install -y ferm
-cp ferm.conf /etc/ferm/ferm.conf
-service ferm restart
diff --git a/8.MediaCache/2.Install_ferm/ferm.conf b/8.MediaCache/2.Install_ferm/ferm.conf
deleted file mode 100644
index 5f248a50..00000000
--- a/8.MediaCache/2.Install_ferm/ferm.conf
+++ /dev/null
@@ -1,43 +0,0 @@
-# -*- shell-script -*-
-#
-#  Configuration file for ferm(1).
-#
-
-table filter {
-    chain INPUT {
-        policy DROP;
-
-        # connection tracking
-        mod state state INVALID DROP;
-        mod state state (ESTABLISHED RELATED) ACCEPT;
-
-        # allow local packet
-        interface lo ACCEPT;
-
-        # respond to ping
-        proto icmp ACCEPT; 
-
-        # allow SSH connections
-        proto tcp dport ssh ACCEPT;
-
-        # http https
-        proto tcp dport (http https) ACCEPT;
-
-        # snmp
-        proto udp dport snmp ACCEPT;
-    }
-    chain OUTPUT {
-        policy ACCEPT;
-
-        # connection tracking
-        #mod state state INVALID DROP;
-        mod state state (ESTABLISHED RELATED) ACCEPT;
-    }
-    chain FORWARD {
-        policy DROP;
-
-        # connection tracking
-        mod state state INVALID DROP;
-        mod state state (ESTABLISHED RELATED) ACCEPT;
-    }
-}
diff --git a/9.MediaVault/1.Install_MediaVault/0_setup.py b/9.MediaVault/1.Install_MediaVault/0_setup.py
deleted file mode 100644
index 889d675b..00000000
--- a/9.MediaVault/1.Install_MediaVault/0_setup.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2017, Florent Thiery
-import utils
-import socket
-import os
-
-
-def setup(interactive=True):
-    CROND_TEMPLATE = '''# https://github.com/laurent22/rsync-time-backup
-0 22 * * * root /usr/local/sbin/rsync_tmbackup.sh /etc root@{backup_server}:/backup/{hostname}/etc /etc/backup/excluded_patterns.txt
-0 22 * * * root flock -n /etc/backup/home_backup.lock /usr/local/sbin/rsync_tmbackup.sh /home root@{backup_server}:/backup/{hostname}/home /etc/backup/excluded_patterns.txt'''
-    if os.path.exists('/data'):
-        CROND_TEMPLATE += '\n0 22 * * * root flock -n /etc/backup/data_backup.lock /usr/local/sbin/rsync_tmbackup.sh /data root@{backup_server}:/backup/{hostname}/data /etc/backup/excluded_patterns.txt'
-
-    # crontab files can only contain lowercase and dashes, no _ or .
-    cmds = [
-        'cp rsync_tmbackup.sh /usr/local/sbin',
-        'mkdir -p /etc/backup',
-        'cp excluded_patterns.txt /etc/backup',
-        'cp /tmp/backup.cron /etc/cron.d/backup',
-    ]
-
-    server = utils.get_conf('BACKUP_SERVER', '')
-    if server:
-        if os.path.exists('/etc/cron.d/backup.cron'):
-            print('/etc/cron.d/backup.cron already here, skipping install')
-        else:
-            with open('/tmp/backup.cron', 'w') as f:
-                CROND = CROND_TEMPLATE.format(backup_server=server, hostname=socket.gethostname())
-                f.write(CROND)
-            utils.run_commands(cmds)
-    else:
-        print('Missing BACKUP_SERVER in conf')
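
With illustrative values (backup server vault.example.com, hostname ms1), the generated /etc/cron.d/backup entry for /etc would read as below; in the script itself the hostname comes from socket.gethostname():

```python
CROND_TEMPLATE = (
    '# https://github.com/laurent22/rsync-time-backup\n'
    '0 22 * * * root /usr/local/sbin/rsync_tmbackup.sh /etc '
    'root@{backup_server}:/backup/{hostname}/etc /etc/backup/excluded_patterns.txt'
)

print(CROND_TEMPLATE.format(backup_server='vault.example.com', hostname='ms1'))
# 0 22 * * * root /usr/local/sbin/rsync_tmbackup.sh /etc \
#     root@vault.example.com:/backup/ms1/etc /etc/backup/excluded_patterns.txt
```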
diff --git a/9.MediaVault/1.Install_MediaVault/README b/9.MediaVault/1.Install_MediaVault/README
deleted file mode 100644
index 4afa9d3f..00000000
--- a/9.MediaVault/1.Install_MediaVault/README
+++ /dev/null
@@ -1,3 +0,0 @@
-The script comes from https://github.com/laurent22/rsync-time-backup/blob/master/rsync_tmbackup.sh
-
-The only changes are that we remove the --compress flag (video does not compress well) and add --timeout 30
diff --git a/9.MediaVault/1.Install_MediaVault/excluded_patterns.txt b/9.MediaVault/1.Install_MediaVault/excluded_patterns.txt
deleted file mode 100644
index 1edfb6d4..00000000
--- a/9.MediaVault/1.Install_MediaVault/excluded_patterns.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-- .zfs/
-- *.log
-- *.pyc
-- *.swp
-- *.pid
-- *chunked_*/
-- __pycache__/
-- apt-cacher-ng/
-- *.lock
-- .nfs*
-- *.m3u8
-- *.ts
diff --git a/9.MediaVault/1.Install_MediaVault/rsync_tmbackup.sh b/9.MediaVault/1.Install_MediaVault/rsync_tmbackup.sh
deleted file mode 100644
index 8b625b4f..00000000
--- a/9.MediaVault/1.Install_MediaVault/rsync_tmbackup.sh
+++ /dev/null
@@ -1,511 +0,0 @@
-#!/usr/bin/env bash
-
-APPNAME=$(basename $0 | sed "s/\.sh$//")
-
-# -----------------------------------------------------------------------------
-# Log functions
-# -----------------------------------------------------------------------------
-
-fn_log_info()  { echo "$APPNAME: $1"; }
-fn_log_warn()  { echo "$APPNAME: [WARNING] $1" 1>&2; }
-fn_log_error() { echo "$APPNAME: [ERROR] $1" 1>&2; }
-fn_log_info_cmd()  {
-	if [ -n "$SSH_DEST_FOLDER_PREFIX" ]; then
-		echo "$APPNAME: $SSH_CMD '$1'";
-	else
-		echo "$APPNAME: $1";
-	fi
-}
-
-# -----------------------------------------------------------------------------
-# Make sure everything really stops when CTRL+C is pressed
-# -----------------------------------------------------------------------------
-
-fn_terminate_script() {
-	fn_log_info "SIGINT caught."
-	exit 1
-}
-
-trap 'fn_terminate_script' SIGINT
-
-# -----------------------------------------------------------------------------
-# Small utility functions for reducing code duplication
-# -----------------------------------------------------------------------------
-fn_display_usage() {
-	echo "Usage: $(basename $0) [OPTION]... <[USER@HOST:]SOURCE> <[USER@HOST:]DESTINATION> [exclude-pattern-file]"
-	echo ""
-	echo "Options"
-	echo " -p, --port           SSH port."
-	echo " -h, --help           Display this help message."
-	echo " --rsync-get-flags    Display the default rsync flags that are used for backup."
-	echo " --rsync-set-flags    Set the rsync flags that are going to be used for backup."
-	echo " --log-dir            Set the log file directory. If this flag is set, generated files will"
-	echo "                      not be managed by the script - in particular they will not be"
-	echo "                      automatically deleted."
-	echo "                      Default: $LOG_DIR"
-	echo " --strategy           Set the expiration strategy. Default: \"1:1 30:7 365:30\" means after one"
-	echo "                      day, keep one backup per day. After 30 days, keep one backup every 7 days."
-	echo "                      After 365 days keep one backup every 30 days."
-	echo " --no-auto-expire     Disable automatically deleting backups when out of space. Instead an error"
-	echo "                      is logged, and the backup is aborted."
-	echo ""
-	echo "For more detailed help, please see the README file:"
-	echo ""
-	echo "https://github.com/laurent22/rsync-time-backup/blob/master/README.md"
-}
-
-fn_parse_date() {
-	# Converts YYYY-MM-DD-HHMMSS to YYYY-MM-DD HH:MM:SS and then to Unix Epoch.
-	case "$OSTYPE" in
-		linux*) date -d "${1:0:10} ${1:11:2}:${1:13:2}:${1:15:2}" +%s ;;
-		cygwin*) date -d "${1:0:10} ${1:11:2}:${1:13:2}:${1:15:2}" +%s ;;
-		darwin8*) yy=`expr ${1:0:4}`
-			mm=`expr ${1:5:2} - 1`
-			dd=`expr ${1:8:2}`
-			hh=`expr ${1:11:2}`
-			mi=`expr ${1:13:2}`
-			ss=`expr ${1:15:2}`
-			# Because under MacOS X Tiger 'date -j' doesn't work, we do this:
-			perl -e 'use Time::Local; print timelocal('$ss','$mi','$hh','$dd','$mm','$yy'),"\n";' ;;
-		darwin*) date -j -f "%Y-%m-%d-%H%M%S" "$1" "+%s" ;;
-		FreeBSD*) date -j -f "%Y-%m-%d-%H%M%S" "$1" "+%s" ;;
-	esac
-}
-
-fn_find_backups() {
-	fn_run_cmd "find "$DEST_FOLDER/" -maxdepth 1 -type d -name \"????-??-??-??????\" -prune | sort -r"
-}
-
-fn_expire_backup() {
-	# Double-check that we're on a backup destination to be completely
-	# sure we're deleting the right folder
-	if [ -z "$(fn_find_backup_marker "$(dirname -- "$1")")" ]; then
-		fn_log_error "$1 is not on a backup destination - aborting."
-		exit 1
-	fi
-
-	fn_log_info "Expiring $1"
-	fn_rm_dir "$1"
-}
-
-fn_expire_backups() {
-	local current_timestamp=$EPOCH
-	local last_kept_timestamp=9999999999
-
-	# Process each backup dir from most recent to oldest
-	for backup_dir in $(fn_find_backups | sort -r); do
-		local backup_date=$(basename "$backup_dir")
-		local backup_timestamp=$(fn_parse_date "$backup_date")
-
-		# Skip if failed to parse date...
-		if [ -z "$backup_timestamp" ]; then
-			fn_log_warn "Could not parse date: $backup_dir"
-			continue
-		fi
-
-		# Find which strategy token applies to this particular backup
-		for strategy_token in $(echo $EXPIRATION_STRATEGY | tr " " "\n" | sort -r -n); do
-			IFS=':' read -r -a t <<< "$strategy_token"
-
-			# After which date (relative to today) this token applies (X)
-			local cut_off_timestamp=$((current_timestamp - ${t[0]} * 86400))
-
-			# Every how many days should a backup be kept past the cut off date (Y)
-			local cut_off_interval=$((${t[1]} * 86400))
-
-			# If we've found the strategy token that applies to this backup
-			if [ "$backup_timestamp" -le "$cut_off_timestamp" ]; then
-
-				# Special case: if Y is "0" we delete every time
-				if [ $cut_off_interval -eq "0" ]; then
-					fn_expire_backup "$backup_dir"
-					break
-				fi
-
-				# Check if the current backup is in the interval between
-				# the last backup that was kept and Y
-				local interval_since_last_kept=$((last_kept_timestamp - backup_timestamp))
-				if [ "$interval_since_last_kept" -lt "$cut_off_interval" ]; then
-					# Yes: Delete that one
-					fn_expire_backup "$backup_dir"
-				else
-					# No: Keep it
-					last_kept_timestamp=$backup_timestamp
-				fi
-				break
-			fi
-		done
-	done
-}
-
-fn_parse_ssh() {
-	# To keep compatibility with bash version < 3, we use grep
-	if echo "$DEST_FOLDER"|grep -Eq '^[A-Za-z0-9\._%\+\-]+@[A-Za-z0-9.\-]+\:.+$'
-	then
-		SSH_USER=$(echo "$DEST_FOLDER" | sed -E  's/^([A-Za-z0-9\._%\+\-]+)@([A-Za-z0-9.\-]+)\:(.+)$/\1/')
-		SSH_HOST=$(echo "$DEST_FOLDER" | sed -E  's/^([A-Za-z0-9\._%\+\-]+)@([A-Za-z0-9.\-]+)\:(.+)$/\2/')
-		SSH_DEST_FOLDER=$(echo "$DEST_FOLDER" | sed -E  's/^([A-Za-z0-9\._%\+\-]+)@([A-Za-z0-9.\-]+)\:(.+)$/\3/')
-		SSH_CMD="ssh -p $SSH_PORT ${SSH_USER}@${SSH_HOST}"
-		SSH_DEST_FOLDER_PREFIX="${SSH_USER}@${SSH_HOST}:"
-	elif echo "$SRC_FOLDER"|grep -Eq '^[A-Za-z0-9\._%\+\-]+@[A-Za-z0-9.\-]+\:.+$'
-	then
-		SSH_USER=$(echo "$SRC_FOLDER" | sed -E  's/^([A-Za-z0-9\._%\+\-]+)@([A-Za-z0-9.\-]+)\:(.+)$/\1/')
-		SSH_HOST=$(echo "$SRC_FOLDER" | sed -E  's/^([A-Za-z0-9\._%\+\-]+)@([A-Za-z0-9.\-]+)\:(.+)$/\2/')
-		SSH_SRC_FOLDER=$(echo "$SRC_FOLDER" | sed -E  's/^([A-Za-z0-9\._%\+\-]+)@([A-Za-z0-9.\-]+)\:(.+)$/\3/')
-		SSH_CMD="ssh -p $SSH_PORT ${SSH_USER}@${SSH_HOST}"
-		SSH_SRC_FOLDER_PREFIX="${SSH_USER}@${SSH_HOST}:"
-	fi
-}
-
-fn_run_cmd() {
-	if [ -n "$SSH_DEST_FOLDER_PREFIX" ]
-	then
-		eval "$SSH_CMD '$1'"
-	else
-		eval $1
-	fi
-}
-
-fn_find() {
-	fn_run_cmd "find '$1'"  2>/dev/null
-}
-
-fn_get_absolute_path() {
-	fn_run_cmd "cd '$1';pwd"
-}
-
-fn_mkdir() {
-	fn_run_cmd "mkdir -p -- '$1'"
-}
-
-# Removes a file or symlink - not for directories
-fn_rm_file() {
-	fn_run_cmd "rm -f -- '$1'"
-}
-
-fn_rm_dir() {
-	fn_run_cmd "rm -rf -- '$1'"
-}
-
-fn_touch() {
-	fn_run_cmd "touch -- '$1'"
-}
-
-fn_ln() {
-	fn_run_cmd "ln -s -- '$1' '$2'"
-}
-
-# -----------------------------------------------------------------------------
-# Source and destination information
-# -----------------------------------------------------------------------------
-SSH_USER=""
-SSH_HOST=""
-SSH_DEST_FOLDER=""
-SSH_SRC_FOLDER=""
-SSH_CMD=""
-SSH_DEST_FOLDER_PREFIX=""
-SSH_SRC_FOLDER_PREFIX=""
-SSH_PORT="22"
-
-SRC_FOLDER=""
-DEST_FOLDER=""
-EXCLUSION_FILE=""
-LOG_DIR="$HOME/.$APPNAME"
-AUTO_DELETE_LOG="1"
-EXPIRATION_STRATEGY="1:1 30:7 365:30"
-AUTO_EXPIRE="1"
-
-RSYNC_FLAGS="-D --numeric-ids --links --hard-links --one-file-system --itemize-changes --times --recursive --perms --owner --group --stats --human-readable --timeout 30"
-
-while :; do
-	case $1 in
-		-h|-\?|--help)
-			fn_display_usage
-			exit
-			;;
-		-p|--port)
-			shift
-			SSH_PORT=$1
-			;;
-		--rsync-get-flags)
-			shift
-			echo $RSYNC_FLAGS
-			exit
-			;;
-		--rsync-set-flags)
-			shift
-			RSYNC_FLAGS="$1"
-			;;
-		--strategy)
-			shift
-			EXPIRATION_STRATEGY="$1"
-			;;
-		--log-dir)
-			shift
-			LOG_DIR="$1"
-			AUTO_DELETE_LOG="0"
-			;;
-		--no-auto-expire)
-			AUTO_EXPIRE="0"
-			;;
-		--)
-			shift
-			SRC_FOLDER="$1"
-			DEST_FOLDER="$2"
-			EXCLUSION_FILE="$3"
-			break
-			;;
-		-*)
-			fn_log_error "Unknown option: \"$1\""
-			fn_log_info ""
-			fn_display_usage
-			exit 1
-			;;
-		*)
-			SRC_FOLDER="$1"
-			DEST_FOLDER="$2"
-			EXCLUSION_FILE="$3"
-			break
-	esac
-
-	shift
-done
-
-# Display usage information if required arguments are not passed
-if [[ -z "$SRC_FOLDER" || -z "$DEST_FOLDER" ]]; then
-	fn_display_usage
-	exit 1
-fi
-
-# Strips off last slash from dest. Note that it means the root folder "/"
-# will be represented as an empty string "", which is fine
-# with the current script (since a "/" is added when needed)
-# but still something to keep in mind.
-# However, due to this behavior we delay stripping the last slash for
-# the source folder until after parsing for ssh usage.
-
-DEST_FOLDER="${DEST_FOLDER%/}"
-
-fn_parse_ssh
-
-if [ -n "$SSH_DEST_FOLDER" ]; then
-	DEST_FOLDER="$SSH_DEST_FOLDER"
-fi
-
-if [ -n "$SSH_SRC_FOLDER" ]; then
-	SRC_FOLDER="$SSH_SRC_FOLDER"
-fi
-
-# Now strip off last slash from source folder.
-SRC_FOLDER="${SRC_FOLDER%/}"
-
-for ARG in "$SRC_FOLDER" "$DEST_FOLDER" "$EXCLUSION_FILE"; do
-	if [[ "$ARG" == *"'"* ]]; then
-		fn_log_error 'Source and destination directories may not contain single quote characters.'
-		exit 1
-	fi
-done
-
-# -----------------------------------------------------------------------------
-# Check that the destination drive is a backup drive
-# -----------------------------------------------------------------------------
-
-# TODO: check that the destination supports hard links
-
-fn_backup_marker_path() { echo "$1/backup.marker"; }
-fn_find_backup_marker() { fn_find "$(fn_backup_marker_path "$1")" 2>/dev/null; }
-
-if [ -z "$(fn_find_backup_marker "$DEST_FOLDER")" ]; then
-	fn_log_info "Safety check failed - the destination does not appear to be a backup folder or drive (marker file not found)."
-	fn_log_info "If it is indeed a backup folder, you may add the marker file by running the following command:"
-	fn_log_info ""
-	fn_log_info_cmd "mkdir -p -- \"$DEST_FOLDER\" ; touch \"$(fn_backup_marker_path "$DEST_FOLDER")\""
-	fn_log_info ""
-	exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# Setup additional variables
-# -----------------------------------------------------------------------------
-
-# Date logic
-NOW=$(date +"%Y-%m-%d-%H%M%S")
-EPOCH=$(date "+%s")
-KEEP_ALL_DATE=$((EPOCH - 86400))       # 1 day ago
-KEEP_DAILIES_DATE=$((EPOCH - 2678400)) # 31 days ago
-
-export IFS=$'\n' # Better for handling spaces in filenames.
-DEST="$DEST_FOLDER/$NOW"
-PREVIOUS_DEST="$(fn_find_backups | head -n 1)"
-INPROGRESS_FILE="$DEST_FOLDER/backup.inprogress"
-MYPID="$$"
-
-# -----------------------------------------------------------------------------
-# Create log folder if it doesn't exist
-# -----------------------------------------------------------------------------
-
-if [ ! -d "$LOG_DIR" ]; then
-	fn_log_info "Creating log folder in '$LOG_DIR'..."
-	mkdir -- "$LOG_DIR"
-fi
-
-# -----------------------------------------------------------------------------
-# Handle case where a previous backup failed or was interrupted.
-# -----------------------------------------------------------------------------
-
-if [ -n "$(fn_find "$INPROGRESS_FILE")" ]; then
-	if [ "$OSTYPE" == "cygwin" ]; then
-		# 1. Grab the PID of previous run from the PID file
-		RUNNINGPID="$(fn_run_cmd "cat $INPROGRESS_FILE")"
-
-		# 2. Get the command for the process currently running under that PID and look for our script name
-		RUNNINGCMD="$(procps -wwfo cmd -p $RUNNINGPID --no-headers | grep "$APPNAME")"
-
-		# 3. Grab the exit code from grep (0=found, 1=not found)
-		GREPCODE=$?
-
-		# 4. if found, assume backup is still running
-		if [ "$GREPCODE" = 0 ]; then
-			fn_log_error "Previous backup task is still active - aborting (command: $RUNNINGCMD)."
-			exit 1
-		fi
-	else
-		RUNNINGPID="$(fn_run_cmd "cat $INPROGRESS_FILE")"
-		if [ "$RUNNINGPID" = "$(pgrep -o -f "$APPNAME")" ]; then
-			fn_log_error "Previous backup task is still active - aborting."
-			exit 1
-		fi
-	fi
-
-	if [ -n "$PREVIOUS_DEST" ]; then
-		# - Last backup is moved to current backup folder so that it can be resumed.
-		# - 2nd to last backup becomes last backup.
-		fn_log_info "$SSH_DEST_FOLDER_PREFIX$INPROGRESS_FILE already exists - the previous backup failed or was interrupted. Backup will resume from there."
-		fn_run_cmd "mv -- $PREVIOUS_DEST $DEST"
-		if [ "$(fn_find_backups | wc -l)" -gt 1 ]; then
-			PREVIOUS_DEST="$(fn_find_backups | sed -n '2p')"
-		else
-			PREVIOUS_DEST=""
-		fi
-		# update PID to current process to avoid multiple concurrent resumes
-		fn_run_cmd "echo $MYPID > $INPROGRESS_FILE"
-	fi
-fi
-
-# Run in a loop to handle the "No space left on device" logic.
-while : ; do
-
-	# -----------------------------------------------------------------------------
-	# Check if we are doing an incremental backup (if previous backup exists).
-	# -----------------------------------------------------------------------------
-
-	LINK_DEST_OPTION=""
-	if [ -z "$PREVIOUS_DEST" ]; then
-		fn_log_info "No previous backup - creating new one."
-	else
-		# If the path is relative, it needs to be relative to the destination. To keep
-		# it simple, just use an absolute path. See http://serverfault.com/a/210058/118679
-		PREVIOUS_DEST="$(fn_get_absolute_path "$PREVIOUS_DEST")"
-		fn_log_info "Previous backup found - doing incremental backup from $SSH_DEST_FOLDER_PREFIX$PREVIOUS_DEST"
-		LINK_DEST_OPTION="--link-dest='$PREVIOUS_DEST'"
-	fi
-
-	# -----------------------------------------------------------------------------
-	# Create destination folder if it doesn't already exists
-	# -----------------------------------------------------------------------------
-
-	if [ -z "$(fn_find "$DEST -type d" 2>/dev/null)" ]; then
-		fn_log_info "Creating destination $SSH_DEST_FOLDER_PREFIX$DEST"
-		fn_mkdir "$DEST"
-	fi
-
-	# -----------------------------------------------------------------------------
-	# Purge certain old backups before beginning new backup.
-	# -----------------------------------------------------------------------------
-
-	fn_expire_backups
-
-	# -----------------------------------------------------------------------------
-	# Start backup
-	# -----------------------------------------------------------------------------
-
-	LOG_FILE="$LOG_DIR/$(date +"%Y-%m-%d-%H%M%S").log"
-
-	fn_log_info "Starting backup..."
-	fn_log_info "From: $SSH_SRC_FOLDER_PREFIX$SRC_FOLDER/"
-	fn_log_info "To:   $SSH_DEST_FOLDER_PREFIX$DEST/"
-
-	CMD="rsync"
-	if [ -n "$SSH_CMD" ]; then
-		CMD="$CMD  -e 'ssh -p $SSH_PORT -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'"
-	fi
-	CMD="$CMD $RSYNC_FLAGS"
-	CMD="$CMD --log-file '$LOG_FILE'"
-	if [ -n "$EXCLUSION_FILE" ]; then
-		# We've already checked that $EXCLUSION_FILE doesn't contain a single quote
-		CMD="$CMD --exclude-from '$EXCLUSION_FILE'"
-	fi
-	CMD="$CMD $LINK_DEST_OPTION"
-	CMD="$CMD -- '$SSH_SRC_FOLDER_PREFIX$SRC_FOLDER/' '$SSH_DEST_FOLDER_PREFIX$DEST/'"
-
-	fn_log_info "Running command:"
-	fn_log_info "$CMD"
-
-	fn_run_cmd "echo $MYPID > $INPROGRESS_FILE"
-	eval $CMD
-
-	# -----------------------------------------------------------------------------
-	# Check if we ran out of space
-	# -----------------------------------------------------------------------------
-
-	NO_SPACE_LEFT="$(grep "No space left on device (28)\|Result too large (34)" "$LOG_FILE")"
-
-	if [ -n "$NO_SPACE_LEFT" ]; then
-
-		if [[ $AUTO_EXPIRE == "0" ]]; then
-			fn_log_error "No space left on device, and automatic purging of old backups is disabled."
-			exit 1
-		fi
-
-		fn_log_warn "No space left on device - removing oldest backup and resuming."
-
-		if [[ "$(fn_find_backups | wc -l)" -lt "2" ]]; then
-			fn_log_error "No space left on device, and no old backup to delete."
-			exit 1
-		fi
-
-		fn_expire_backup "$(fn_find_backups | tail -n 1)"
-
-		# Resume backup
-		continue
-	fi
-
-	# -----------------------------------------------------------------------------
-	# Check whether rsync reported any errors
-	# -----------------------------------------------------------------------------
-
-	EXIT_CODE="1"
-	if [ -n "$(grep "rsync error:" "$LOG_FILE")" ]; then
-		fn_log_error "Rsync reported an error. Run this command for more details: grep -E 'rsync:|rsync error:' '$LOG_FILE'"
-	elif [ -n "$(grep "rsync:" "$LOG_FILE")" ]; then
-		fn_log_warn "Rsync reported a warning. Run this command for more details: grep -E 'rsync:|rsync error:' '$LOG_FILE'"
-	else
-		fn_log_info "Backup completed without errors."
-		if [[ $AUTO_DELETE_LOG == "1" ]]; then
-			rm -f -- "$LOG_FILE"
-		fi
-		EXIT_CODE="0"
-	fi
-
-	# -----------------------------------------------------------------------------
-	# Add symlink to last backup
-	# -----------------------------------------------------------------------------
-
-	fn_rm_file "$DEST_FOLDER/latest"
-	fn_ln "$(basename -- "$DEST")" "$DEST_FOLDER/latest"
-
-	fn_rm_file "$INPROGRESS_FILE"
-
-	exit $EXIT_CODE
-done
-
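The `--strategy` handling in `fn_expire_backups` above is terse in shell; the same idea in Python, as a sketch only (for each `X:Y` token, once a backup is older than X days keep one backup every Y days, examining backups newest first):

```python
def backups_to_expire(backup_timestamps, now, strategy='1:1 30:7 365:30'):
    """Return the backup timestamps (Unix epoch) that the strategy would expire.

    Each "X:Y" token means: once a backup is older than X days, keep only
    one backup every Y days (Y == 0 expires every matching backup).
    """
    tokens = sorted(
        (tuple(int(part) for part in token.split(':')) for token in strategy.split()),
        reverse=True,  # largest X first, like `sort -r -n` in the shell version
    )
    expired = []
    last_kept = float('inf')
    for timestamp in sorted(backup_timestamps, reverse=True):  # newest first
        for days, interval in tokens:
            if timestamp <= now - days * 86400:
                if interval == 0 or last_kept - timestamp < interval * 86400:
                    expired.append(timestamp)
                else:
                    last_kept = timestamp
                break
    return expired
```

With the default strategy this thins daily backups older than 30 days to one per week and backups older than a year to one per month.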
diff --git a/9.MediaVault/2.Install_MediaVault_Burp_deprecated/0_setup.sh b/9.MediaVault/2.Install_MediaVault_Burp_deprecated/0_setup.sh
deleted file mode 100755
index 43ad62f5..00000000
--- a/9.MediaVault/2.Install_MediaVault_Burp_deprecated/0_setup.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-set -e
-source /root/envsetup/global-conf.sh
-
-apt-get install -y dialog build-essential
-
-cd /root
-rm -rf burp-custom
-git clone https://panel.ubicast.eu/git/mediaserver/burp-custom.git burp-custom
-
-# set conf
-sed -i "s@^BURP_STATUS_IP=.*@BURP_STATUS_IP=${BURP_STATUS_IP}@" /root/burp-custom/burp-custom.sh
-sed -i "s@^BURPUI_PASSWORD=.*@BURPUI_PASSWORD=${BURPUI_PASSWORD}@" /root/burp-custom/burp-custom.sh
-sed -i "s@^BURP_SERVER=.*@BURP_SERVER=${BURP_SERVER}@" /root/burp-custom/burp-custom.sh
-sed -i "s/^BURP_MAIL_DEST=.*/BURP_MAIL_DEST=${BURP_MAIL_DEST}/" /root/burp-custom/burp-custom.sh
-sed -i "s@^BURP_CLIENT_NAME=.*@BURP_CLIENT_NAME=${BURP_CLIENT_NAME}@" /root/burp-custom/burp-custom.sh
-sed -i 's@^echo "password = ${BURP_CLIENT_PASSWORD}".*@@' /root/burp-custom/burp-custom.sh
-sed -i 's@^echo "restore_client = ${BURP_CLIENT_NAME}".*@@' /root/burp-custom/burp-custom.sh
-
-# run burp-custom.sh install_fullserver
-cd /root/burp-custom
-bash /root/burp-custom/burp-custom.sh install_fullserver
-
-# get admin_shell
-mv /root/burp-custom/admin_shell.sh /home/admin/admin_shell.sh
-# get MS_restore.sh
-cp /root/burp-custom/MS_restore.sh /home/admin/MS_restore.sh
-
-# MS_restore ajout clef ssh ??
-sed -i "s@scp /root@scp -i /home/admin/.ssh/id_rsa /root@" /home/admin/MS_restore.sh
-sed -i "s@ssh ${CLIENT}@ssh -i /home/admin/.ssh/id_rsa root\@${CLIENT}@" /home/admin/MS_restore.sh
-
-
-# correct rights
-echo "bash /home/admin/admin_shell.sh" > /home/admin/.bash_profile
-
-# generate admin ssh key
-if ( ! test -f /home/admin/.ssh/id_rsa ); then
-	mkdir -p /home/admin/.ssh
-	chown -R admin:admin /home/admin/.ssh
-	su admin -c 'ssh-keygen -t rsa -N "" -f /home/admin/.ssh/id_rsa'
-fi
-
-# permit admin to transmit burp-custom
-cp /root/burp-custom/burp-custom.sh /home/admin/
-cp /root/burp-custom/burp*.bz2 /home/admin/
-# restore permissions
-chown -R admin:admin /home/admin/
-
-# mail alias for admin
-echo "admin: ${BURP_MAIL_DEST}" >> /etc/aliases
-newaliases
-
-# give root rights to admin
-#sed -i "s@admin:x:.*@admin:x:0:0::/home/admin:/bin/bash@" /etc/passwd
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..c07fa9b6
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,116 @@
+SHELL := /bin/bash
+DOCKER_IMAGE_NAME := registry.ubicast.net/mediaserver/envsetup
+ifdef debug
+	MOLECULE_FLAGS += --debug
+endif
+ifdef keep
+	MOLECULE_TEST_FLAGS += --destroy=never
+endif
+VENV := /tmp/pyvensetup
+PIP_BIN = $(shell command -v $(VENV)/bin/pip3 || command -v pip3 || echo pip3)
+PIP_COMPILE_BIN = $(shell command -v $(VENV)/bin/pip-compile || command -v pip-compile)
+ANSIBLE_BIN = $(shell command -v ansible || command -v $(VENV)/bin/ansible)
+ANSIBLE_PLAYBOOK_BIN = $(shell command -v ansible-playbook || command -v $(VENV)/bin/ansible-playbook)
+ANSIBLE_LINT_BIN = $(shell command -v ansible-lint || command -v $(VENV)/bin/ansible-lint)
+YAMLLINT_BIN = $(shell command -v yamllint || command -v $(VENV)/bin/yamllint)
+FLAKE8_BIN = $(shell command -v flake8 || command -v $(VENV)/bin/flake8)
+
+.PHONY: all
+## TARGET: DESCRIPTION: ARGS
+all: help
+
+.PHONY: venv
+## venv: Install python3-venv and create a temporary virtualenv
+venv:
+	-@command -v apt-get >/dev/null && apt-get update && apt-get install -y python3-venv
+	@command -v $(PIP_BIN) > /dev/null || python3 -m venv $(VENV)
+
+## requirements.txt: Update requirements and their dependencies
+## requirements.dev.txt: Update development requirements and their dependencies
+%.txt: %.in
+	$(PIP_COMPILE_BIN) -U $^ -o $@
+	chmod 644 $@
+
+.PHONY: install
+## install: Install requirements
+install: venv
+	$(PIP_BIN) install -U pip wheel
+	$(PIP_BIN) install -r requirements.txt
+
+.PHONY: install-dev
+## install-dev: Install development requirements
+install-dev: requirements
+	$(PIP_BIN) install -r requirements.dev.txt
+
+.PHONY: lint
+## lint: Run linters on the project
+lint:
+	$(FLAKE8_BIN) .
+	$(YAMLLINT_BIN) .
+	$(ANSIBLE_LINT_BIN) playbooks/*.yml
+
+.PHONY: test
+## test: Run development tests on the project : debug=1, keep=1
+test:
+ifndef SKYREACH_SYSTEM_KEY
+	$(error SKYREACH_SYSTEM_KEY is undefined)
+endif
+	molecule $(MOLECULE_FLAGS) test $(MOLECULE_TEST_FLAGS)
+
+.PHONY: deploy
+## deploy: Run deployment playbooks : i=<inventory-path>, l=<host-or-group>, t=<tag>
+deploy:
+ifndef i
+	$(error i is undefined)
+endif
+ifndef l
+	$(eval l=all)
+endif
+ifndef t
+	$(eval t=all)
+endif
+	$(ANSIBLE_BIN) -i $(i) -l $(l) -m ping all
+	$(ANSIBLE_PLAYBOOK_BIN) -i $(i) site.yml -e conf_update=true -l $(l) -t $(t)
+
+.PHONY: image-validate
+## image-validate: Check that Packer image is valid : build=<path-to-packer-file>
+image-validate:
+ifndef build
+	$(error build is undefined)
+endif
+	cat $(build) | ./packer/scripts/yml2json | packer validate -
+
+.PHONY: image
+## image: Run Packer image build : build=<path-to-packer-file>
+image: image-validate
+	cat $(build) | ./packer/scripts/yml2json | packer build -force -
+
+.PHONY: docker-build
+## docker-build: Run docker image build for CI and devcontainer
+docker-build: docker-pull
+	docker build -t $(DOCKER_IMAGE_NAME) -f .devcontainer/Dockerfile .
+	docker build -t $(DOCKER_IMAGE_NAME):root -f .devcontainer/Dockerfile.root .
+
+.PHONY: docker-rebuild
+## docker-rebuild: Force docker image rebuild
+docker-rebuild:
+	docker build --pull --no-cache -t $(DOCKER_IMAGE_NAME) -f .devcontainer/Dockerfile .
+	docker build --pull --no-cache -t $(DOCKER_IMAGE_NAME):root -f .devcontainer/Dockerfile.root .
+
+.PHONY: docker-pull
+## docker-pull: Pull Docker image from registry
+docker-pull:
+	-docker pull $(DOCKER_IMAGE_NAME)
+	-docker pull $(DOCKER_IMAGE_NAME):root
+
+.PHONY: docker-push
+## docker-push: Push Docker image to registry
+docker-push:
+	docker push $(DOCKER_IMAGE_NAME)
+	docker push $(DOCKER_IMAGE_NAME):root
+
+.PHONY: help
+## help: Print this help message
+help:
+	@echo -e "Usage: \n"
+	@sed -n 's/^##//p' ${MAKEFILE_LIST} | column -t -s ':' | sed -e 's/^/ /'
diff --git a/README.md b/README.md
index 287ae707..604152bb 100644
--- a/README.md
+++ b/README.md
@@ -1,38 +1,20 @@
-# EnvSetup
+# EnvSetup3
 
-Script to setup and configure softwares used in our servers.
+## Usage
 
-## Dependencies
+How to deploy UbiCast products:
 
-* python3
+- [Installation of required tools](/doc/install.md)
+- [Configuration of the controller and inventory](/doc/config.md)
+- [Deployment of UbiCast software](/doc/deploy.md)
+- [Build an image]() (TODO)
 
-## How to add a software setup
+## Development
 
-* Add a folder next to "envsetup.py" named using the following pattern: "<number>.<name>".
-    The number should not be used by any other setup.
+How to contribute:
 
-* Add a file named "0_setup.py" or "0_setup.sh" in this folder.
-
-* If the file you use is the python one ("0_setup.py"), you should add a function named setup in it.
-    The file will be run with python3.
-    For example:
-    ```python
-    def setup(interactive=True):
-        pass
-    ```
-
-## Important notes
-
-* All setup actions should be safe to be run multiple times (to allow reconfiguration of the service).
-* All the adjustable configurations must be in the "conf.sh" file.
-
-## Test EnvSetup loading
-
-Two fake tasks are used to test that EnvSetup can correctly be loaded.
-
-These tasks can be started with the following commands:
-
-```bash
-./envsetup.py -d 201
-./envsetup.py -d 202
-```
+- [EnvSetup3 contributing guide](/doc/contrib.md)
+- [Ansible documentation](https://docs.ansible.com/ansible/latest/)
+  - [Molecule documentation](https://molecule.readthedocs.io/en/latest/)
+  - [TestInfra documentation](https://testinfra.readthedocs.io/en/latest/)
+- [Packer documentation](http://packer.io/docs/index.html)
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 00000000..76f3db3f
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,38 @@
+[defaults]
+
+# logging
+log_path = logs/envsetup.log
+
+# use python3 by default
+interpreter_python = /usr/bin/python3
+
+# disable output for skipped hosts and tasks
+display_skipped_hosts = false
+# skip ssh host key checking
+host_key_checking = false
+# disable creation of *.retry files when playbook fails
+retry_files_enabled = false
+
+# connect as root on hosts
+remote_user = root
+
+# custom path for roles
+roles_path = roles
+# custom path for modules
+library = library
+# custom path for action plugins
+action_plugins = plugins/action
+
+# improve output format (with line return)
+stdout_callback = debug
+
+# ignore files directory
+inventory_ignore_patterns = files
+
+[ssh_connection]
+
+# add custom ssh options
+ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
+
+# use scp instead of sftp
+scp_if_ssh = true
diff --git a/deprecated-conf.sh b/deprecated-conf.sh
deleted file mode 100644
index 74cbfc48..00000000
--- a/deprecated-conf.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-# DO NOT EDIT THIS FILE!
-# Put your local configuration in conf.sh
-
-# -- Database --
-DB_MYSQL_ROOT_PWD=
-
-# -- Backup server specific (burp) --
-BURP_STATUS_IP=
-BURPUI_PASSWORD='1234'
-# FQDN
-BURP_SERVER=''
-BURP_MAIL_DEST='sysadmin@ubicast.eu'
-# default mediaserver
-BURP_CLIENT_NAME=
-BURP_CLIENT_MAIL_DEST='sysadmin@ubicast.eu'
-
-# -- FTP --
-# move uploaded files into hotfolder
-# login:pass CSV separated
-#FTP_INCOMING_USERS='ftpuser1:ftppass1,ftpuser2:ftppass2'
-FTP_INCOMING_USERS=
-
-# -- HOTFOLDER --
-# csv-separated
-HOTFOLDERS='/home/ftp/storage/hotfolder'
-
-# -- Network configuration --
-NETWORK_DNS1=
-NETWORK_DNS2=
-NETWORK_DNS3=
diff --git a/doc/config.md b/doc/config.md
new file mode 100644
index 00000000..32a0e2b3
--- /dev/null
+++ b/doc/config.md
@@ -0,0 +1,123 @@
+# Configuration
+
+## SSH
+
+The Ansible deployment is done over SSH, so you must be able to connect to all the involved hosts using SSH public key authentication.
+
+If you or your team do not have an SSH key pair, create one with `ssh-keygen`:
+
+```sh
+ssh-keygen -t ed25519
+```
+
+The SSH **public key** (`~/.ssh/id_ed25519.pub`) must then be added to the authorized keys (`~/.ssh/authorized_keys`) of the `root` account of all the involved hosts (mymediaserver, mymediaworker, mymediavault, etc.).
+
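+One way to do this is with `ssh-copy-id` (the host names below are the ones used in the examples of this page; adapt them to your case):
+
+```sh
+# copy the public key to the root account of each host
+ssh-copy-id -i ~/.ssh/id_ed25519.pub root@mymediaserver
+ssh-copy-id -i ~/.ssh/id_ed25519.pub root@mymediaworker
+ssh-copy-id -i ~/.ssh/id_ed25519.pub root@mymediavault
+```
+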
+It is also recommended to use an SSH config file for your hosts, especially if some of them are behind a bastion host. For example, let's assume that "mymediaserver" is used as a bastion to reach "mymediaworker" and "mymediavault":
+
+```ssh
+Host mymediaserver
+  Hostname      1.2.3.4
+  IdentityFile  ~/.ssh/id_ed25519
+  User          root
+
+Host mymediaworker
+  Hostname      10.0.0.2
+  IdentityFile  ~/.ssh/id_ed25519
+  User          root
+  # use mymediaserver as a jump host
+  ProxyJump     mymediaserver
+
+Host mymediavault
+  Hostname      10.0.0.3
+  IdentityFile  ~/.ssh/id_ed25519
+  User          root
+  # use mymediaserver as a jump host
+  ProxyJump     mymediaserver
+```
+
+## Customer information
+
+The customer must provide some information about their network, desired URLs, etc. You can use this [deployment form template](https://docs.google.com/document/d/13_t6LqlIkIMo3KEOsLWKfk_kB3Xw1JHktOOFbCHhxwY/) as a base to send to the customer.
+
+When the deployment form is completed by the customer, send it to the [UbiCast deployment team](mailto:deploiements@ubicast.eu), who will enter the data into the [fleet management](https://mirismanager.ubicast.eu/) tool.
+
+Once the deployment team has updated the data for the hosts, the deployment can begin.
+
+## Inventory
+
+Make a copy of the `example` inventory and customize it with the customer information:
+
+```sh
+cp -r inventories/example inventories/my-customer
+```
+
+### Hosts and Groups
+
+Edit `inventories/my-customer/hosts` to match the `my-customer` infrastructure.
+
+For example, if there are only a MediaServer, a MediaWorker and a MediaVault, you can remove all other hosts and groups:
+
+```ini
+mymediaserver
+mymediaworker
+mymediavault
+
+[monitor]
+mymediaserver
+
+[postgres]
+mymediaserver
+
+[manager]
+mymediaserver
+
+[server]
+mymediaserver
+
+[wowza]
+mymediaserver
+
+[celerity]
+mymediaserver
+
+[worker]
+mymediaworker
+
+[vault]
+mymediavault
+```
+
+### Variables
+
+You **must at least** configure:
+- the `skyreach_system_key` value in `inventories/my-customer/host_vars/<host>.yml` for each host
+
+If you want to set/override a variable for:
+- all: `inventories/my-customer/group_vars/all.yml`.
+- a group: `inventories/my-customer/group_vars/<group>.yml`.
+- a host: `inventories/my-customer/host_vars/<host>.yml`.
+
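+As an illustration, a minimal `inventories/my-customer/host_vars/mymediaserver.yml` could look like the sketch below (the key value is a placeholder to replace with the real system key):
+
+```yaml
+---
+
+# system key of the host, as registered in the fleet management tool
+skyreach_system_key: changeme
+
+...
+```
+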
+## Testing
+
+Make sure Ansible can connect to all the hosts:
+
+```sh
+ansible -i inventories/my-customer -m ping all
+```
+
+If it works, the output should look like this:
+
+```
+mymediaserver | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+mymediaworker | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+mymediavault | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+```
diff --git a/doc/contrib.md b/doc/contrib.md
new file mode 100644
index 00000000..0ff3022c
--- /dev/null
+++ b/doc/contrib.md
@@ -0,0 +1,59 @@
+# Contributing guide
+
+- Work in a Docker container:
+
+```sh
+docker run \
+  `# run an interactive pseudo-TTY` \
+  -it \
+  `# remove the container once you leave it` \
+  --rm \
+  `# share the current directory` \
+  -v $(pwd):/workspace \
+  `# share your SSH configuration` \
+  -v $HOME/.ssh:/home/code/.ssh:ro \
+  `# share your SSH agent` \
+  -v $SSH_AUTH_SOCK:/ssh-agent:ro \
+  `# let container know where is mapped the SSH agent` \
+  -e SSH_AUTH_SOCK=/ssh-agent \
+  `# container image to use` \
+  registry.ubicast.net/mediaserver/envsetup \
+  `# executable to run` \
+  bash
+```
+
+- Install development requirements:
+
+```sh
+make requirements-dev
+```
+
+- Quickly check that your "code" is compliant:
+
+```sh
+make lint
+```
+
+- Run Ansible tests:
+
+```sh
+make test
+
+# show debug logs
+make test debug=1
+
+# do not destroy the test containers
+make test keep=1
+```
+
+- If you add or modify a role, please write relevant tests in `molecule/default/tests` (a minimal example follows).
+
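+As an illustration, a minimal TestInfra test could look like the sketch below (the file name, package and service names are placeholders, not actual role contents):
+
+```python
+# molecule/default/tests/test_example.py (hypothetical file name)
+
+
+def test_package_installed(host):
+    # `host` is the fixture provided by testinfra
+    assert host.package("nginx").is_installed
+
+
+def test_service_running_and_enabled(host):
+    service = host.service("nginx")
+    assert service.is_running
+    assert service.is_enabled
+```
+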
+- Run packer validation:
+
+```sh
+# validate `packer/base.yml` file
+make image-validate build=packer/base.yml
+
+# validate `packer/custom/my-customer-server.yml` file
+make image-validate build=packer/custom/my-customer-server.yml
+```
diff --git a/doc/deploy.md b/doc/deploy.md
new file mode 100644
index 00000000..f8f19397
--- /dev/null
+++ b/doc/deploy.md
@@ -0,0 +1,105 @@
+# Deployment
+
+## Remotely
+
+### All services
+
+```sh
+make deploy i=inventories/my-customer
+```
+
+### MediaWorker
+
+```sh
+make deploy i=inventories/my-customer l=worker
+```
+
+### Monitor
+
+```sh
+make deploy i=inventories/my-customer l=monitor
+```
+
+### MirisManager
+
+```sh
+make deploy i=inventories/my-customer l=manager
+```
+
+### MediaServer
+
+```sh
+make deploy i=inventories/my-customer l=server
+```
+
+### MediaImport
+
+```sh
+make deploy i=inventories/my-customer l=import
+```
+
+### MediaVault
+
+```sh
+make deploy i=inventories/my-customer l=vault
+```
+
+### Celerity
+
+```sh
+make deploy i=inventories/my-customer l=celerity
+```
+
+### Wowza
+
+```sh
+make deploy i=inventories/my-customer l=wowza
+```
+
+### Postgres
+
+```sh
+make deploy i=inventories/my-customer l=postgres
+```
+
+### Netcapture
+
+```sh
+make deploy i=inventories/my-customer l=netcapture
+```
+
+## Locally
+
+Instead of deploying all hosts remotely through SSH, you can clone the envsetup repository on the server as `root` in `~/envsetup`, enter the directory, configure the activation or system key (a sketch follows the commands) and run one of these commands:
+
+```sh
+make deploy i=inventories/local-server
+make deploy i=inventories/local-worker
+make deploy i=inventories/local-vault
+```
+
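+As a sketch of the key configuration (mirroring what `getenvsetup.sh` does for a local server; adjust the inventory and the key value):
+
+```sh
+# create the host_vars file from the provided template
+cp -u inventories/local-server/host_vars/localhost.dist.yml \
+    inventories/local-server/host_vars/localhost.yml
+
+# set the activation key (replace XXX-XXX-XXX-XXX with the real key)
+sed -i "s/skyreach_activation_key:.*/skyreach_activation_key: XXX-XXX-XXX-XXX/" \
+    inventories/local-server/host_vars/localhost.yml
+```
+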
+## Known issues
+
+- Proxy
+
+If outgoing traffic on the remote hosts is only allowed through a proxy, the deployment will fail:
+it will not be able to clone the repository locally nor fetch the host configuration file from mirismanager.ubicast.eu.
+
+For "remote" deployment you have to set the proxy settings in the inventory variables, in `inventories/my-customer/group_vars/all.yml`:
+
+```yaml
+---
+
+[...]
+
+proxy_http: http://proxy.my-customer.net:3128
+proxy_https: http://proxy.my-customer.net:3128
+```
+
+For "local" deployment you have to manually set the proxy settings in the `/etc/environment` file:
+
+```
+HTTP_PROXY="http://proxy.my-customer.net:3128"
+HTTPS_PROXY="http://proxy.my-customer.net:3128"
+NO_PROXY="localhost,127.0.0.1,::1,mymediaserver.my-customer.net"
+```
diff --git a/doc/install.md b/doc/install.md
new file mode 100644
index 00000000..74855491
--- /dev/null
+++ b/doc/install.md
@@ -0,0 +1,134 @@
+# Installation
+
+This has only been tested on Linux, but it should work the same way on macOS or Windows WSL.
+
+## Repository
+
+Clone this repository on your computer:
+
+```sh
+git clone https://git.ubicast.net/mediaserver/envsetup.git
+
+# enter inside the cloned repository
+cd envsetup/
+```
+
+Every command is assumed to be run from the envsetup directory.
+
+## Make
+
+For convenience we use the `make` tool, so please install it.
+
+For example, on a Debian-based distribution:
+
+```sh
+sudo apt install make
+```
+
+Otherwise you can look at the `Makefile` to see which commands are run.
+
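+The `Makefile` is self-documenting: its `help` target prints the list of available targets with their descriptions, and it is also the default target, so a bare `make` shows the same output.
+
+```sh
+# list the available make targets
+make help
+```
+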
+## Ansible
+
+There are many ways to install Ansible; choose one below.
+
+### Virtual environment
+
+```sh
+make requirements
+```
+
+Alternatively you can manually create a virtual environment with [Python's venv](https://docs.python.org/3/library/venv.html) or with the [virtualenv](https://virtualenv.pypa.io/en/stable/) package. This example uses the former.
+
+```sh
+# create the venv
+python3 -m venv .venv
+
+# activate the venv
+source .venv/bin/activate
+
+# install requirements (inside the activated venv)
+python3 -m pip install -U pip wheel
+python3 -m pip install -r requirements.txt
+```
+
+If you want to exit the venv:
+
+```sh
+deactivate
+```
+
+### Docker
+
+If you do not want to bother with Python, a virtual environment and Ansible on your computer (even though everything is isolated inside a venv), you can use [Docker](https://docs.docker.com/install/).
+
+```sh
+docker run \
+  `# run an interactive pseudo-TTY` \
+  -it \
+  `# remove the container once you leave it` \
+  --rm \
+  `# share the current directory` \
+  -v $(pwd):/workspace \
+  `# share your SSH configuration` \
+  -v $HOME/.ssh:/home/code/.ssh:ro \
+  `# share your SSH agent` \
+  -v $SSH_AUTH_SOCK:/ssh-agent:ro \
+  `# let container know where is mapped the SSH agent` \
+  -e SSH_AUTH_SOCK=/ssh-agent \
+  `# container image to use` \
+  registry.ubicast.net/mediaserver/envsetup \
+  `# executable to run` \
+  bash
+```
+
+Make sure to share your SSH configuration with the Docker container; this may require adapting the example command.
+
+### Pip
+
+You can also install Ansible and its dependencies directly on your operating system, but it is not recommended since it may conflict with existing packages.
+
+This will install the requirements into your user's path (`~/.local/bin` must be in your `PATH` for this to work):
+
+```sh
+python3 -m pip install --user -r requirements.txt
+```
+
+Even less recommended, install into the system's path:
+
+```sh
+sudo python3 -m pip install -r requirements.txt
+```
+
+### Distribution packages
+
+Depending on your distribution you may be able to install Ansible from repositories, but the provided version may be outdated (< 2.8 will certainly not work).
+
+For example, on a Debian-based distribution:
+
+```sh
+sudo apt install \
+  ansible \
+  python3-netaddr \
+  python3-pyyaml
+```
+
+Take a look at the requirements listed in the `requirements.in` file.
+
+## Testing
+
+To make sure Ansible is properly installed, run this command:
+
+```sh
+ansible --version
+```
+
+The output should look like this:
+
+```
+ansible 2.9.1
+  config file = /workspace/ansible.cfg
+  configured module search path = ['/workspace/library']
+  ansible python module location = /home/code/pyvenv/lib/python3.7/site-packages/ansible
+  executable location = /home/code/pyvenv/bin/ansible
+  python version = 3.7.3 (default, Apr  3 2019, 05:39:12) [GCC 8.3.0]
+```
diff --git a/envsetup.py b/envsetup.py
deleted file mode 100755
index f66e4a91..00000000
--- a/envsetup.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-'''
-Environment setup script for MediaServer
-'''
-import importlib.util
-import os
-import subprocess
-import sys
-import traceback
-
-import utils
-from utils import log
-
-
-class EnvSetup():
-    USAGE = '''%s [-d] [-h] [<action id>]
-    -d: debug mode (can be started with non root users).
-    -h: show this message.
-    action id: specify which action should be started (non interactive).''' % __file__
-    PY_SETUP_NAME = '0_setup.py'
-    BASH_SETUP_NAME = '0_setup.sh'
-
-    def __init__(self, *args):
-        self.display_header()
-        args = list(args)
-        # Check if help is required
-        if '-h' in args:
-            log('USAGE: ' + self.USAGE)
-            sys.exit(0)
-        # Check current dir
-        root_dir = utils.get_dir(__file__)
-        if root_dir != '':
-            os.chdir(root_dir)
-        self.root_dir = root_dir
-        # Add to python path
-        if root_dir not in sys.path:
-            sys.path.append(root_dir)
-        # Get available actions
-        self.actions = self.discover_actions()
-        if not self.actions:
-            log('No action available.')
-            sys.exit(1)
-        # Check that this script is run by root
-        self.debug = '-d' in args
-        if self.debug:
-            args.remove('-d')
-        whoami = subprocess.check_output(['whoami']).decode('utf-8').strip()
-        if whoami != 'root' and not self.debug:
-            log('This script should be run as root user.')
-            sys.exit(1)
-        # Load conf
-        conf = utils.load_conf()
-        if not conf:
-            log('No configuration loaded.')
-            sys.exit(1)
-        if args:
-            # Run command
-            for arg in args:
-                self.run(arg, interactive=False)
-        else:
-            # Open main menu
-            self.menu()
-
-    def display_header(self):
-        log('\033[96m----------------------------------\033[0m')
-        log('\033[96m- UbiCast environment setup tool -\033[0m')
-        log('\033[96m----------------------------------\033[0m')
-
-    def discover_actions(self):
-        actions = list()
-        for section_name in os.listdir(self.root_dir):
-            section_path = os.path.join(self.root_dir, section_name)
-            if not os.path.isdir(section_path):
-                continue
-            try:
-                section_index = int(section_name.split('.')[0])
-            except ValueError:
-                continue
-            section_label = section_name[len(str(section_index)) + 1:].strip().replace('_', ' ')
-            if not section_label:
-                log('No label found for dir %s.' % section_name)
-                continue
-            actions.append(dict(index=int(str(section_index) + '0'), label=section_label, path=section_path, fct=None))
-
-            for name in os.listdir(section_path):
-                path = os.path.join(section_path, name)
-                if not os.path.isdir(path):
-                    continue
-                try:
-                    index = int(str(section_index) + name.split('.')[0])
-                except ValueError:
-                    continue
-                label = name[len(str(index)) + 1 - len(str(section_index)):].strip().replace('_', ' ')
-                if not label:
-                    log('No label found for dir %s.' % name)
-                    continue
-
-                if os.path.isfile(os.path.join(path, self.PY_SETUP_NAME)):
-                    spec = importlib.util.spec_from_file_location('setup_%s' % name, os.path.join(path, self.PY_SETUP_NAME))
-                    setup_module = importlib.util.module_from_spec(spec)
-                    spec.loader.exec_module(setup_module)
-                    actions.append(dict(index=index, label=label, path=path, fct=setup_module.setup))
-                elif os.path.isfile(os.path.join(path, self.BASH_SETUP_NAME)):
-                    actions.append(dict(index=index, label=label, path=path, fct='bash -e "%s"' % os.path.join(path, self.BASH_SETUP_NAME)))
-        actions.sort(key=lambda a: a['index'])
-        return actions
-
-    def menu(self):
-        # Show main menu
-        log('Actions:')
-        for action in self.actions:
-            if action['fct']:
-                log('  %s: %s' % (action['index'], action['label']))
-            else:
-                log('  \033[1;94m%s\033[0m' % (action['label']))
-        log('')
-        log('  t: Run tests')
-        log('  c: Configuration status')
-        log('  e: Exit\n')
-        log('Info:')
-        log('\033[0;36m  To setup a system entirely for a determined purpose (Worker, MS, CM, ...), you should use the launcher:\033[0m')
-        log('\033[0;36m  bash /root/envsetup/launcher.sh\033[0m')
-        log('\nWhat action do you want to start ?')
-        try:
-            target = input('---> ').strip()
-        except (KeyboardInterrupt, EOFError):
-            log('')
-            target = 'e'
-        self.run(target)
-
-    def run(self, target, interactive=True):
-        if target == 'e':
-            log('Exit')
-            sys.exit(0)
-        exit_code = 0
-        if target == 't':
-            # Run tests
-            args = [os.path.join(self.root_dir, 'tester.py'), 'tester.py']
-            if self.debug:
-                args.append('-d')
-            os.execl(*args)
-        elif target == 'c':
-            # Display current configuration
-            log('Configuration status:')
-            override = utils.get_conf('_override')
-            if not override:
-                log('Configuration status not available.')
-            else:
-                log('Is default | Name | Value')
-                for name, is_overriden in override.items():
-                    is_default = '\033[93m no ' if is_overriden else '\033[94m yes'
-                    log('%s\033[0m | \033[95m%s\033[0m | \033[96m%s\033[0m' % (is_default, name, utils.get_conf(name)))
-        else:
-            # Run an action
-            found = False
-            for action in self.actions:
-                if target == str(action['index']) and action['fct']:
-                    found = True
-                    log('Starting action %s: %s setup.' % (action['index'], action['label']))
-                    try:
-                        os.chdir(action['path'])
-                        if isinstance(action['fct'], str):
-                            utils.run_commands([action['fct']])
-                        else:
-                            action['fct'](interactive)
-                    except Exception as e:
-                        exit_code = 1
-                        if isinstance(action['fct'], str):
-                            log(action['fct'])
-                        else:
-                            log(traceback.format_exc())
-                        log('Unable to setup %s:\n%s\n' % (action['label'], e))
-                    else:
-                        log('%s setup complete.\n' % action['label'])
-                    os.chdir(self.root_dir)
-                    break
-            if not found:
-                exit_code = 1
-                log('Invalid action requested: "%s".' % target)
-        if interactive:
-            try:
-                input('Press enter to continue.')
-            except (KeyboardInterrupt, EOFError):
-                log('')
-                sys.exit(exit_code)
-            self.display_header()
-            self.menu()
-        else:
-            sys.exit(exit_code)
-
-
-if __name__ == '__main__':
-    EnvSetup(*sys.argv[1:])
diff --git a/getenvsetup.sh b/getenvsetup.sh
index b6c090af..538fbae5 100755
--- a/getenvsetup.sh
+++ b/getenvsetup.sh
@@ -1,25 +1,55 @@
 #!/bin/bash
+
+# check root
 if [ "$EUID" -ne 0 ]
   then echo "Please run as root"
-  exit
+  exit 1
 fi
-add-apt-repository universe || true  # ubuntu only
-apt update
-apt full-upgrade -y
-apt install -y git
-cd /root
-read -p "HTTP proxy (e.g. proxy:8080, enter to skip):" PROXY
-if [ -z "$PROXY" ]; then
-    git clone https://panel.ubicast.eu/git/mediaserver/envsetup.git -b stable
-else
-    git -c "http.proxy=$PROXY" clone https://panel.ubicast.eu/git/mediaserver/envsetup.git -b stable
+
+# ubuntu only
+if grep -qi ubuntu /etc/issue; then
+    add-apt-repository universe
+fi
+
+# install required tools
+apt-get update
+apt-get install -y git make
+
+# target selection
+choices="1:2:3"
+while [[ ":${choices}:" != *:${choice}:* ]]; do
+    echo -e "What are you deploying?"
+    echo -e "\\t1. MediaServer (default)"
+    echo -e "\\t2. MediaWorker"
+    echo -e "\\t3. MediaVault"
+    read -r -p "Choice: [1] " choice
+    [ -z "$choice" ] && choice=1
+done
+
+[ $choice = 1 ] && target=server
+[ $choice = 2 ] && target=worker
+[ $choice = 3 ] && target=vault
+
+# configure proxy
+read -r -p "HTTP proxy (e.g. proxy.example.net:3128, enter to skip):" proxy
+if [ "$proxy" ]; then
     git config --global http.sslVerify false
-    echo "SKYREACH_SSL_VERIFY='0'" >> /root/envsetup/conf.sh
-    echo "Remember to run this after this script completes:"
-    echo "export https_proxy='http://proxy:8080'"
-    echo "export http_proxy='http://proxy:8080'"
+    git config --global http.proxy "$proxy"
+    echo "HTTP_PROXY=\"http://$proxy\"" >> /etc/environment
+    echo "HTTPS_PROXY=\"http://$proxy\"" >> /etc/environment
+fi
+
+# clone envsetup
+git clone https://mirismanager.ubicast.eu/git/mediaserver/envsetup.git -b stable /root/envsetup
+cd /root/envsetup || exit
+
+# activation key
+read -r -p "Activation key (e.g. XXX-XXX-XXX-XXX): " key
+if [ "$key" ]; then
+    cp -u /root/envsetup/inventories/local-${target}/host_vars/localhost.dist.yml /root/envsetup/inventories/local-${target}/host_vars/localhost.yml
+    sed -i "s/skyreach_activation_key:.*/skyreach_activation_key: ${key}/g" /root/envsetup/inventories/local-${target}/host_vars/localhost.yml
 fi
-cd envsetup
-read -p "Paste deployment key (e.g. XXX-XXX-XXX-XXX):" KEY
-echo "SKYREACH_ACTIVATION_KEY='$KEY'" >> /root/envsetup/conf.sh
-echo "All done, you can now run /root/envsetup/launcher.sh something"
+
+# deploy target
+make requirements
+make deploy i=inventories/local-${target}
diff --git a/inventories/example/group_vars/all.yml b/inventories/example/group_vars/all.yml
new file mode 100644
index 00000000..fcbcbead
--- /dev/null
+++ b/inventories/example/group_vars/all.yml
@@ -0,0 +1,9 @@
+---
+
+# enable letsencrypt certificate
+letsencrypt_enabled: false
+
+# update conf.sh
+conf_update: false
+
+...
diff --git a/inventories/example/host_vars/mymediaserver.yml b/inventories/example/host_vars/mymediaserver.yml
new file mode 100644
index 00000000..60702606
--- /dev/null
+++ b/inventories/example/host_vars/mymediaserver.yml
@@ -0,0 +1,5 @@
+---
+
+skyreach_system_key: changeme
+
+...
diff --git a/inventories/example/host_vars/mymediavault.yml b/inventories/example/host_vars/mymediavault.yml
new file mode 100644
index 00000000..60702606
--- /dev/null
+++ b/inventories/example/host_vars/mymediavault.yml
@@ -0,0 +1,5 @@
+---
+
+skyreach_system_key: changeme
+
+...
diff --git a/inventories/example/host_vars/mymediaworker.yml b/inventories/example/host_vars/mymediaworker.yml
new file mode 100644
index 00000000..60702606
--- /dev/null
+++ b/inventories/example/host_vars/mymediaworker.yml
@@ -0,0 +1,5 @@
+---
+
+skyreach_system_key: changeme
+
+...
diff --git a/inventories/example/host_vars/mynetcapture.yml b/inventories/example/host_vars/mynetcapture.yml
new file mode 100644
index 00000000..60702606
--- /dev/null
+++ b/inventories/example/host_vars/mynetcapture.yml
@@ -0,0 +1,5 @@
+---
+
+skyreach_system_key: changeme
+
+...
diff --git a/inventories/example/hosts b/inventories/example/hosts
new file mode 100644
index 00000000..760a16da
--- /dev/null
+++ b/inventories/example/hosts
@@ -0,0 +1,42 @@
+; For hosts parameters see:
+; https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#connecting-to-hosts-behavioral-inventory-parameters
+
+; hosts list
+
+mymediaserver ansible_host=10.0.0.1
+mymediaworker ansible_host=10.0.0.2
+
+; groups list and their members
+
+[monitor]
+mymediaserver
+
+[postgres]
+mymediaserver
+
+[manager]
+mymediaserver
+
+[server]
+mymediaserver
+
+[wowza]
+mymediaserver
+
+[celerity]
+mymediaserver
+
+[worker]
+mymediaworker
+
+[vault]
+
+[import]
+
+[netcapture]
+
+[bench_server]
+
+[bench_worker]
+
+; vim:ft=dosini
diff --git a/inventories/local-full/host_vars/localhost.dist.yml b/inventories/local-full/host_vars/localhost.dist.yml
new file mode 100644
index 00000000..d3e5920e
--- /dev/null
+++ b/inventories/local-full/host_vars/localhost.dist.yml
@@ -0,0 +1,6 @@
+---
+
+skyreach_system_key:
+skyreach_activation_key:
+
+...
diff --git a/inventories/local-full/hosts b/inventories/local-full/hosts
new file mode 100644
index 00000000..5047b2e4
--- /dev/null
+++ b/inventories/local-full/hosts
@@ -0,0 +1,25 @@
+localhost ansible_connection=local
+
+[monitor]
+localhost
+
+[postgres]
+localhost
+
+[manager]
+localhost
+
+[server]
+localhost
+
+[wowza]
+localhost
+
+[celerity]
+localhost
+
+[worker]
+localhost
+
+[import]
+localhost
diff --git a/inventories/local-server/host_vars/localhost.dist.yml b/inventories/local-server/host_vars/localhost.dist.yml
new file mode 100644
index 00000000..d3e5920e
--- /dev/null
+++ b/inventories/local-server/host_vars/localhost.dist.yml
@@ -0,0 +1,6 @@
+---
+
+skyreach_system_key:
+skyreach_activation_key:
+
+...
diff --git a/inventories/local-server/hosts b/inventories/local-server/hosts
new file mode 100644
index 00000000..ec3c7bcc
--- /dev/null
+++ b/inventories/local-server/hosts
@@ -0,0 +1,22 @@
+localhost ansible_connection=local
+
+[monitor]
+localhost
+
+[postgres]
+localhost
+
+[manager]
+localhost
+
+[server]
+localhost
+
+[wowza]
+localhost
+
+[celerity]
+localhost
+
+[import]
+localhost
diff --git a/inventories/local-vault/host_vars/localhost.dist.yml b/inventories/local-vault/host_vars/localhost.dist.yml
new file mode 100644
index 00000000..d3e5920e
--- /dev/null
+++ b/inventories/local-vault/host_vars/localhost.dist.yml
@@ -0,0 +1,6 @@
+---
+
+skyreach_system_key:
+skyreach_activation_key:
+
+...
diff --git a/inventories/local-vault/hosts b/inventories/local-vault/hosts
new file mode 100644
index 00000000..8acb6f25
--- /dev/null
+++ b/inventories/local-vault/hosts
@@ -0,0 +1,4 @@
+localhost ansible_connection=local
+
+[vault]
+localhost
diff --git a/inventories/local-worker/host_vars/localhost.dist.yml b/inventories/local-worker/host_vars/localhost.dist.yml
new file mode 100644
index 00000000..ed267074
--- /dev/null
+++ b/inventories/local-worker/host_vars/localhost.dist.yml
@@ -0,0 +1,8 @@
+---
+
+skyreach_system_key:
+skyreach_activation_key:
+
+...
diff --git a/inventories/local-worker/hosts b/inventories/local-worker/hosts
new file mode 100644
index 00000000..1de03526
--- /dev/null
+++ b/inventories/local-worker/hosts
@@ -0,0 +1,8 @@
+localhost ansible_connection=local
+
+[worker]
+localhost
+
+; empty group required for ferm role
+[server]
+[celerity]
diff --git a/launcher.sh b/launcher.sh
deleted file mode 100755
index c91ec9ff..00000000
--- a/launcher.sh
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/bin/bash
-set -e
-# This script installs a MediaServer/Worker/Miris Manager/Cache server/MediaVault using envsetup on a system
-
-# Prerequisites
-# 1. create DNS entries
-#   alpha.ubicast.net
-#   alpha-mm.ubicast.net
-#   alpha-mon.ubicast.net
-# 2. create system in panel
-# 3. launcher.sh
-
-# log
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-mkdir -p "${DIR}/log"
-LOG_FILE="${DIR}/log/launcher_$(date +%F).log"
-exec > >(tee -i ${LOG_FILE})
-
-# log envsetup version
-version="$( cd "${DIR}" && git log -1 --pretty='format:%H %ad' --date=iso )"
-echo "Envsetup version:"
-echo "$version"
-
-# envsetup action calls
-init() {
-	python3 -u /root/envsetup/update_envsetup.py
-
-	python3 -u /root/envsetup/envsetup.py 31
-	python3 -u /root/envsetup/envsetup.py 32
-	python3 -u /root/envsetup/envsetup.py 33
-
-	python3 -u /root/envsetup/envsetup.py 11
-	python3 -u /root/envsetup/envsetup.py 12
-	python3 -u /root/envsetup/envsetup.py 13
-	python3 -u /root/envsetup/envsetup.py 14
-
-	python3 -u /root/envsetup/envsetup.py 21
-	python3 -u /root/envsetup/envsetup.py 22
-	python3 -u /root/envsetup/envsetup.py 28
-}
-
-monitor() {
-	python3 -u /root/envsetup/envsetup.py 25
-	python3 -u /root/envsetup/envsetup.py 26
-	python3 -u /root/envsetup/envsetup.py 41
-	python3 -u /root/envsetup/envsetup.py 28
-}
-
-mediaserver() {
-	python3 -u /root/envsetup/envsetup.py 23
-	python3 -u /root/envsetup/envsetup.py 24
-	python3 -u /root/envsetup/envsetup.py 71
-	python3 -u /root/envsetup/envsetup.py 25
-	python3 -u /root/envsetup/envsetup.py 51
-	python3 -u /root/envsetup/envsetup.py 25
-	python3 -u /root/envsetup/envsetup.py 28
-}
-
-worker() {
-	python3 -u /root/envsetup/envsetup.py 72
-	python3 -u /root/envsetup/envsetup.py 26
-}
-
-mirismanager() {
-	python3 -u /root/envsetup/envsetup.py 23
-	python3 -u /root/envsetup/envsetup.py 25
-	python3 -u /root/envsetup/envsetup.py 61
-	python3 -u /root/envsetup/envsetup.py 62
-	python3 -u /root/envsetup/envsetup.py 25
-	python3 -u /root/envsetup/envsetup.py 28
-}
-
-cache() {
-	python3 -u /root/envsetup/envsetup.py 81
-	python3 -u /root/envsetup/envsetup.py 82
-	python3 -u /root/envsetup/envsetup.py 25
-	python3 -u /root/envsetup/envsetup.py 26
-}
-
-bench() {
-	python3 -u /root/envsetup/envsetup.py 52
-}
-
-tests() {
-	python3 -u /root/envsetup/tester.py
-}
-
-backup_server() {
-	python3 -u /root/envsetup/envsetup.py 91
-}
-
-exportvm() {
-	python3 -u /root/envsetup/envsetup.py 111
-}
-
-exportvm_local() {
-	python3 -u /root/envsetup/envsetup.py 112
-}
-
-case "$1" in
-	"ms")
-		init
-		monitor
-		mirismanager
-		mediaserver
-		[[ $2 = "--skip-tests" ]] || tests
-	;;
-
-	"w")
-		init
-		worker
-		[[ $2 = "--skip-tests" ]] || tests
-	;;
-
-	"mm")
-		init
-		monitor
-		mirismanager
-		[[ $2 = "--skip-tests" ]] || tests
-	;;
-
-	"cache")
-		init
-		cache
-	;;
-
-	"mv")
-		init
-		backup_server
-	;;
-
-	"demokit")
-        init
-        monitor
-        mirismanager
-        # install celerity-worker first so that vod test works
-        python3 -u /root/envsetup/envsetup.py 72
-        mediaserver
-        # install netcapture
-        python3 -u /root/envsetup/envsetup.py 121
-        # demokit scripts: deploy videos, custom conf, etc
-        python3 -u /root/envsetup/envsetup.py 131
-        # deploy ssl self-signed certificates
-        python3 -u /root/envsetup/envsetup.py 133
-        [[ $2 = "--skip-tests" ]] || tests
-	;;
-
-    "demokit-reset")
-        # purge content
-        python3 -u /root/envsetup/envsetup.py 132
-        # demokit scripts: deploy videos, custom conf, etc
-        python3 -u /root/envsetup/envsetup.py 131
-    ;;
-
-	"b")
-		init
-		bench
-	;;
-
-	"exportvm")
-		CONF="$2"
-		exportvm
-	;;
-
-	"exportvm_local")
-		VM_NAME="$2"
-		exportvm_local
-	;;
-
-	*)
-		echo "
-Usage: $0 [arg] [--skip-tests]
-ms              build a MediaServer & Miris Manager
-w               build a Worker
-mm              build a Miris Manager
-cache           build a Cache server (munin needs manual configuration on master AND node)
-mv              build a MediaVault
-demokit         build a full demo kit
-demokit-reset   purge all content from demokit
-
-After usage a log file will be generated under /root/$(date +%F)_envsetup.log
-
-From admin machine:
-Usage: $0 exportvm
-exportvm        [conf_file]    export VM from an hypervisor
-exportvm_local  [vm name]      export VM from local vbox
-"
-	;;
-esac
diff --git a/library/nmcli.py b/library/nmcli.py
new file mode 100644
index 00000000..2d6d46ee
--- /dev/null
+++ b/library/nmcli.py
@@ -0,0 +1,1571 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Chris Long <alcamie@gmail.com> <chlong@redhat.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: nmcli
+author:
+- Chris Long (@alcamie101)
+short_description: Manage Networking
+requirements:
+- dbus
+- NetworkManager-libnm (or NetworkManager-glib on older systems)
+- nmcli
+version_added: "2.0"
+description:
+    - Manage the network devices. Create, modify and manage various connection and device type e.g., ethernet, teams, bonds, vlans etc.
+    - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager-nmlib,
+      libsemanage-python, policycoreutils-python.'
+    - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-glib,
+      libnm-qt-devel.x86_64, nm-connection-editor.x86_64, libsemanage-python, policycoreutils-python.'
+    - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager,
+      python-dbus (or python3-dbus, depending on the Python version in use), libnm-dev.'
+    - 'On older Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager,
+      python-dbus (or python3-dbus, depending on the Python version in use), libnm-glib-dev.'
+options:
+    state:
+        description:
+            - Whether the device should exist or not, taking action if the state is different from what is stated.
+        type: str
+        required: true
+        choices: [ absent, present ]
+    autoconnect:
+        description:
+            - Whether the connection should start on boot.
+            - Whether the connection profile can be automatically activated
+        type: bool
+        default: yes
+    activate:
+        description:
+            - Whether the connection should be activated.
+        type: bool
+        default: yes
+    conn_name:
+        description:
+            - 'Where conn_name will be the name used to call the connection. when not provided a default name is generated: <type>[-<ifname>][-<num>]'
+        type: str
+        required: true
+    ifname:
+        description:
+            - The interface to bind the connection to.
+            - The connection will only be applicable to this interface name.
+            - A special value of C('*') can be used for interface-independent connections.
+            - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
+            - This parameter defaults to C(conn_name) when left unset.
+        type: str
+    type:
+        description:
+            - This is the type of device or network connection that you wish to create or modify.
+            - Type C(generic) is added in Ansible 2.5.
+        type: str
+        choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, ipip, sit, team, team-slave, vlan, vxlan ]
+    mode:
+        description:
+            - This is the type of device or network connection that you wish to create for a bond, team or bridge.
+        type: str
+        choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ]
+        default: balance-rr
+    master:
+        description:
+            - Master <master (ifname, or connection UUID or conn_name) of bridge, team, bond master connection profile.
+        type: str
+    ip4:
+        description:
+            - The IPv4 address to this interface.
+            - Use the format C(192.0.2.24/24).
+        type: str
+    gw4:
+        description:
+            - The IPv4 gateway for this interface.
+            - Use the format C(192.0.2.1).
+        type: str
+    dns4:
+        description:
+            - A list of up to 3 dns servers.
+            - IPv4 format e.g. to add two IPv4 DNS server addresses, use C(192.0.2.53 198.51.100.53).
+        type: list
+    dns4_search:
+        description:
+            - A list of DNS search domains.
+        type: list
+        version_added: '2.5'
+    ip6:
+        description:
+            - The IPv6 address to this interface.
+            - Use the format C(abbe::cafe).
+        type: str
+    gw6:
+        description:
+            - The IPv6 gateway for this interface.
+            - Use the format C(2001:db8::1).
+        type: str
+    dns6:
+        description:
+            - A list of up to 3 dns servers.
+            - IPv6 format e.g. to add two IPv6 DNS server addresses, use C(2001:4860:4860::8888 2001:4860:4860::8844).
+        type: list
+    dns6_search:
+        description:
+            - A list of DNS search domains.
+        type: list
+        version_added: '2.5'
+    mtu:
+        description:
+            - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
+            - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
+            - This parameter defaults to C(1500) when unset.
+        type: int
+    dhcp_client_id:
+        description:
+            - DHCP Client Identifier sent to the DHCP server.
+        type: str
+        version_added: "2.5"
+    primary:
+        description:
+            - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
+        type: str
+    miimon:
+        description:
+            - This is only used with bond - miimon.
+            - This parameter defaults to C(100) when unset.
+        type: int
+    downdelay:
+        description:
+            - This is only used with bond - downdelay.
+        type: int
+    updelay:
+        description:
+            - This is only used with bond - updelay.
+        type: int
+    arp_interval:
+        description:
+            - This is only used with bond - ARP interval.
+        type: int
+    arp_ip_target:
+        description:
+            - This is only used with bond - ARP IP target.
+        type: str
+    stp:
+        description:
+            - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge.
+        type: bool
+        default: yes
+    priority:
+        description:
+            - This is only used with 'bridge' - sets STP priority.
+        type: int
+        default: 128
+    forwarddelay:
+        description:
+            - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds.
+        type: int
+        default: 15
+    hellotime:
+        description:
+            - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds.
+        type: int
+        default: 2
+    maxage:
+        description:
+            - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds.
+        type: int
+        default: 20
+    ageingtime:
+        description:
+            - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds.
+        type: int
+        default: 300
+    mac:
+        description:
+            - This is only used with bridge - MAC address of the bridge.
+            - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel.
+    slavepriority:
+        description:
+            - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave.
+        type: int
+        default: 32
+    path_cost:
+        description:
+            - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave.
+        type: int
+        default: 100
+    hairpin:
+        description:
+            - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
+              frame was received on.
+        type: bool
+        default: yes
+    vlanid:
+        description:
+            - This is only used with VLAN - VLAN ID in range <0-4095>.
+        type: int
+    vlandev:
+        description:
+            - This is only used with VLAN - parent device this VLAN is on, can use ifname.
+        type: str
+    flags:
+        description:
+            - This is only used with VLAN - flags.
+        type: str
+    ingress:
+        description:
+            - This is only used with VLAN - VLAN ingress priority mapping.
+        type: str
+    egress:
+        description:
+            - This is only used with VLAN - VLAN egress priority mapping.
+        type: str
+    vxlan_id:
+        description:
+            - This is only used with VXLAN - VXLAN ID.
+        type: int
+        version_added: "2.8"
+    vxlan_remote:
+       description:
+            - This is only used with VXLAN - VXLAN destination IP address.
+       type: str
+       version_added: "2.8"
+    vxlan_local:
+       description:
+            - This is only used with VXLAN - VXLAN local IP address.
+       type: str
+       version_added: "2.8"
+    ip_tunnel_dev:
+        description:
+            - This is used with IPIP/SIT - parent device this IPIP/SIT tunnel, can use ifname.
+        type: str
+        version_added: "2.8"
+    ip_tunnel_remote:
+       description:
+            - This is used with IPIP/SIT - IPIP/SIT destination IP address.
+       type: str
+       version_added: "2.8"
+    ip_tunnel_local:
+       description:
+            - This is used with IPIP/SIT - IPIP/SIT local IP address.
+       type: str
+       version_added: "2.8"
+'''
+
+EXAMPLES = r'''
+# These examples are using the following inventory:
+#
+# ## Directory layout:
+#
+# |_/inventory/cloud-hosts
+# |           /group_vars/openstack-stage.yml
+# |           /host_vars/controller-01.openstack.host.com
+# |           /host_vars/controller-02.openstack.host.com
+# |_/playbook/library/nmcli.py
+# |          /playbook-add.yml
+# |          /playbook-del.yml
+# ```
+#
+# ## inventory examples
+# ### groups_vars
+# ```yml
+# ---
+# #devops_os_define_network
+# storage_gw: "192.0.2.254"
+# external_gw: "198.51.100.254"
+# tenant_gw: "203.0.113.254"
+#
+# #Team vars
+# nmcli_team:
+#   - conn_name: tenant
+#     ip4: '{{ tenant_ip }}'
+#     gw4: '{{ tenant_gw }}'
+#   - conn_name: external
+#     ip4: '{{ external_ip }}'
+#     gw4: '{{ external_gw }}'
+#   - conn_name: storage
+#     ip4: '{{ storage_ip }}'
+#     gw4: '{{ storage_gw }}'
+# nmcli_team_slave:
+#   - conn_name: em1
+#     ifname: em1
+#     master: tenant
+#   - conn_name: em2
+#     ifname: em2
+#     master: tenant
+#   - conn_name: p2p1
+#     ifname: p2p1
+#     master: storage
+#   - conn_name: p2p2
+#     ifname: p2p2
+#     master: external
+#
+# #bond vars
+# nmcli_bond:
+#   - conn_name: tenant
+#     ip4: '{{ tenant_ip }}'
+#     gw4: ''
+#     mode: balance-rr
+#   - conn_name: external
+#     ip4: '{{ external_ip }}'
+#     gw4: ''
+#     mode: balance-rr
+#   - conn_name: storage
+#     ip4: '{{ storage_ip }}'
+#     gw4: '{{ storage_gw }}'
+#     mode: balance-rr
+# nmcli_bond_slave:
+#   - conn_name: em1
+#     ifname: em1
+#     master: tenant
+#   - conn_name: em2
+#     ifname: em2
+#     master: tenant
+#   - conn_name: p2p1
+#     ifname: p2p1
+#     master: storage
+#   - conn_name: p2p2
+#     ifname: p2p2
+#     master: external
+#
+# #ethernet vars
+# nmcli_ethernet:
+#   - conn_name: em1
+#     ifname: em1
+#     ip4: '{{ tenant_ip }}'
+#     gw4: '{{ tenant_gw }}'
+#   - conn_name: em2
+#     ifname: em2
+#     ip4: '{{ tenant_ip1 }}'
+#     gw4: '{{ tenant_gw }}'
+#   - conn_name: p2p1
+#     ifname: p2p1
+#     ip4: '{{ storage_ip }}'
+#     gw4: '{{ storage_gw }}'
+#   - conn_name: p2p2
+#     ifname: p2p2
+#     ip4: '{{ external_ip }}'
+#     gw4: '{{ external_gw }}'
+# ```
+#
+# ### host_vars
+# ```yml
+# ---
+# storage_ip: "192.0.2.91/23"
+# external_ip: "198.51.100.23/21"
+# tenant_ip: "203.0.113.77/23"
+# ```
+
+
+
+## playbook-add.yml example
+
+---
+- hosts: openstack-stage
+  remote_user: root
+  tasks:
+
+  - name: install needed network manager libs
+    package:
+      name:
+        - NetworkManager-libnm
+        - nm-connection-editor
+        - libsemanage-python
+        - policycoreutils-python
+      state: present
+
+##### Working with all cloud nodes - Teaming
+  - name: Try nmcli add team - conn_name only & ip4 gw4
+    nmcli:
+      type: team
+      conn_name: '{{ item.conn_name }}'
+      ip4: '{{ item.ip4 }}'
+      gw4: '{{ item.gw4 }}'
+      state: present
+    with_items:
+      - '{{ nmcli_team }}'
+
+  - name: Try nmcli add teams-slave
+    nmcli:
+      type: team-slave
+      conn_name: '{{ item.conn_name }}'
+      ifname: '{{ item.ifname }}'
+      master: '{{ item.master }}'
+      state: present
+    with_items:
+      - '{{ nmcli_team_slave }}'
+
+###### Working with all cloud nodes - Bonding
+  - name: Try nmcli add bond - conn_name only & ip4 gw4 mode
+    nmcli:
+      type: bond
+      conn_name: '{{ item.conn_name }}'
+      ip4: '{{ item.ip4 }}'
+      gw4: '{{ item.gw4 }}'
+      mode: '{{ item.mode }}'
+      state: present
+    with_items:
+      - '{{ nmcli_bond }}'
+
+  - name: Try nmcli add bond-slave
+    nmcli:
+      type: bond-slave
+      conn_name: '{{ item.conn_name }}'
+      ifname: '{{ item.ifname }}'
+      master: '{{ item.master }}'
+      state: present
+    with_items:
+      - '{{ nmcli_bond_slave }}'
+
+##### Working with all cloud nodes - Ethernet
+  - name: Try nmcli add Ethernet - conn_name only & ip4 gw4
+    nmcli:
+      type: ethernet
+      conn_name: '{{ item.conn_name }}'
+      ip4: '{{ item.ip4 }}'
+      gw4: '{{ item.gw4 }}'
+      state: present
+    with_items:
+      - '{{ nmcli_ethernet }}'
+
+## playbook-del.yml example
+- hosts: openstack-stage
+  remote_user: root
+  tasks:
+
+  - name: Try nmcli del team - multiple
+    nmcli:
+      conn_name: '{{ item.conn_name }}'
+      state: absent
+    with_items:
+      - conn_name: em1
+      - conn_name: em2
+      - conn_name: p1p1
+      - conn_name: p1p2
+      - conn_name: p2p1
+      - conn_name: p2p2
+      - conn_name: tenant
+      - conn_name: storage
+      - conn_name: external
+      - conn_name: team-em1
+      - conn_name: team-em2
+      - conn_name: team-p1p1
+      - conn_name: team-p1p2
+      - conn_name: team-p2p1
+      - conn_name: team-p2p2
+
+  - name: Add an Ethernet connection with static IP configuration
+    nmcli:
+    conn_name: my-eth1
+    ifname: eth1
+    type: ethernet
+    ip4: 192.0.2.100/24
+    gw4: 192.0.2.1
+    state: present
+
+  - name: Add an Team connection with static IP configuration
+    nmcli:
+      conn_name: my-team1
+      ifname: my-team1
+      type: team
+      ip4: 192.0.2.100/24
+      gw4: 192.0.2.1
+      state: present
+      autoconnect: yes
+
+  - name: Optionally, at the same time specify IPv6 addresses for the device
+    nmcli:
+      conn_name: my-eth1
+      ifname: eth1
+      type: ethernet
+      ip4: 192.0.2.100/24
+      gw4: 192.0.2.1
+      ip6: 2001:db8::cafe
+      gw6: 2001:db8::1
+      state: present
+
+  - name: Add two IPv4 DNS server addresses
+    nmcli:
+      conn_name: my-eth1
+      type: ethernet
+      dns4:
+      - 192.0.2.53
+      - 198.51.100.53
+      state: present
+
+  - name: Make a profile usable for all compatible Ethernet interfaces
+    nmcli:
+      ctype: ethernet
+      name: my-eth1
+      ifname: '*'
+      state: present
+
+  - name: Change the property of a setting e.g. MTU
+    nmcli:
+      conn_name: my-eth1
+      mtu: 9000
+      type: ethernet
+      state: present
+
+  - name: Add VxLan
+    nmcli:
+      type: vxlan
+      conn_name: vxlan_test1
+      vxlan_id: 16
+      vxlan_local: 192.168.1.2
+      vxlan_remote: 192.168.1.5
+
+  - name: Add ipip
+    nmcli:
+      type: ipip
+      conn_name: ipip_test1
+      ip_tunnel_dev: eth0
+      ip_tunnel_local: 192.168.1.2
+      ip_tunnel_remote: 192.168.1.5
+
+  - name: Add sit
+    nmcli:
+      type: sit
+      conn_name: sit_test1
+      ip_tunnel_dev: eth0
+      ip_tunnel_local: 192.168.1.2
+      ip_tunnel_remote: 192.168.1.5
+
+# nmcli exits with status 0 if it succeeds and exits with a status greater
+# than zero when there is a failure. The following list of status codes may be
+# returned:
+#
+#     - 0 Success - indicates the operation succeeded
+#     - 1 Unknown or unspecified error
+#     - 2 Invalid user input, wrong nmcli invocation
+#     - 3 Timeout expired (see --wait option)
+#     - 4 Connection activation failed
+#     - 5 Connection deactivation failed
+#     - 6 Disconnecting device failed
+#     - 7 Connection deletion failed
+#     - 8 NetworkManager is not running
+#     - 9 nmcli and NetworkManager versions mismatch
+#     - 10 Connection, device, or access point does not exist.
+'''
+
+RETURN = r"""#
+"""
+
+import traceback
+
+DBUS_IMP_ERR = None
+try:
+    import dbus
+    HAVE_DBUS = True
+except ImportError:
+    DBUS_IMP_ERR = traceback.format_exc()
+    HAVE_DBUS = False
+
+NM_CLIENT_IMP_ERR = None
+HAVE_NM_CLIENT = True
+try:
+    import gi
+    gi.require_version('NM', '1.0')
+    from gi.repository import NM
+except (ImportError, ValueError):
+    try:
+        import gi
+        gi.require_version('NMClient', '1.0')
+        gi.require_version('NetworkManager', '1.0')
+        from gi.repository import NetworkManager, NMClient
+    except (ImportError, ValueError):
+        NM_CLIENT_IMP_ERR = traceback.format_exc()
+        HAVE_NM_CLIENT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class Nmcli(object):
+    """
+    This is the generic nmcli manipulation class that is subclassed based on platform.
+    A subclass may wish to override the following action methods:-
+            - create_connection()
+            - delete_connection()
+            - modify_connection()
+            - show_connection()
+            - up_connection()
+            - down_connection()
+    All subclasses MUST define platform and distribution (which may be None).
+    """
+
+    platform = 'Generic'
+    distribution = None
+    if HAVE_DBUS:
+        bus = dbus.SystemBus()
+    # The following is going to be used in dbus code
+    DEVTYPES = {
+        1: "Ethernet",
+        2: "Wi-Fi",
+        5: "Bluetooth",
+        6: "OLPC",
+        7: "WiMAX",
+        8: "Modem",
+        9: "InfiniBand",
+        10: "Bond",
+        11: "VLAN",
+        12: "ADSL",
+        13: "Bridge",
+        14: "Generic",
+        15: "Team",
+        16: "VxLan",
+        17: "ipip",
+        18: "sit",
+    }
+    STATES = {
+        0: "Unknown",
+        10: "Unmanaged",
+        20: "Unavailable",
+        30: "Disconnected",
+        40: "Prepare",
+        50: "Config",
+        60: "Need Auth",
+        70: "IP Config",
+        80: "IP Check",
+        90: "Secondaries",
+        100: "Activated",
+        110: "Deactivating",
+        120: "Failed"
+    }
+
+    def __init__(self, module):
+        self.module = module
+        self.state = module.params['state']
+        self.autoconnect = module.params['autoconnect']
+        self.activate = module.params['activate']
+        self.conn_name = module.params['conn_name']
+        self.master = module.params['master']
+        self.ifname = module.params['ifname']
+        self.type = module.params['type']
+        self.ip4 = module.params['ip4']
+        self.gw4 = module.params['gw4']
+        self.dns4 = ' '.join(module.params['dns4']) if module.params.get('dns4') else None
+        self.dns4_search = ' '.join(module.params['dns4_search']) if module.params.get('dns4_search') else None
+        self.ip6 = module.params['ip6']
+        self.gw6 = module.params['gw6']
+        self.dns6 = ' '.join(module.params['dns6']) if module.params.get('dns6') else None
+        self.dns6_search = ' '.join(module.params['dns6_search']) if module.params.get('dns6_search') else None
+        self.mtu = module.params['mtu']
+        self.stp = module.params['stp']
+        self.priority = module.params['priority']
+        self.mode = module.params['mode']
+        self.miimon = module.params['miimon']
+        self.primary = module.params['primary']
+        self.downdelay = module.params['downdelay']
+        self.updelay = module.params['updelay']
+        self.arp_interval = module.params['arp_interval']
+        self.arp_ip_target = module.params['arp_ip_target']
+        self.slavepriority = module.params['slavepriority']
+        self.forwarddelay = module.params['forwarddelay']
+        self.hellotime = module.params['hellotime']
+        self.maxage = module.params['maxage']
+        self.ageingtime = module.params['ageingtime']
+        self.hairpin = module.params['hairpin']
+        self.path_cost = module.params['path_cost']
+        self.mac = module.params['mac']
+        self.vlanid = module.params['vlanid']
+        self.vlandev = module.params['vlandev']
+        self.flags = module.params['flags']
+        self.ingress = module.params['ingress']
+        self.egress = module.params['egress']
+        self.vxlan_id = module.params['vxlan_id']
+        self.vxlan_local = module.params['vxlan_local']
+        self.vxlan_remote = module.params['vxlan_remote']
+        self.ip_tunnel_dev = module.params['ip_tunnel_dev']
+        self.ip_tunnel_local = module.params['ip_tunnel_local']
+        self.ip_tunnel_remote = module.params['ip_tunnel_remote']
+        self.nmcli_bin = self.module.get_bin_path('nmcli', True)
+        self.dhcp_client_id = module.params['dhcp_client_id']
+
+    def execute_command(self, cmd, use_unsafe_shell=False, data=None):
+        return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
+
+    def merge_secrets(self, proxy, config, setting_name):
+        try:
+            # returns a dict of dicts mapping name::setting, where setting is a dict
+            # mapping key::value.  Each member of the 'setting' dict is a secret
+            secrets = proxy.GetSecrets(setting_name)
+
+            # Copy the secrets into our connection config
+            for setting in secrets:
+                for key in secrets[setting]:
+                    config[setting_name][key] = secrets[setting][key]
+        except Exception:
+            pass
+
+    def dict_to_string(self, d):
+        # Try to trivially translate a dictionary's elements into nice string
+        # formatting.
+        dstr = ""
+        for key in d:
+            val = d[key]
+            str_val = ""
+            add_string = True
+            if isinstance(val, dbus.Array):
+                for elt in val:
+                    if isinstance(elt, dbus.Byte):
+                        str_val += "%s " % int(elt)
+                    elif isinstance(elt, dbus.String):
+                        str_val += "%s" % elt
+            elif isinstance(val, dbus.Dictionary):
+                dstr += self.dict_to_string(val)
+                add_string = False
+            else:
+                str_val = val
+            if add_string:
+                dstr += "%s: %s\n" % (key, str_val)
+        return dstr
+
+    def connection_to_string(self, config):
+        # dump a connection configuration to use in list_connection_info
+        setting_list = []
+        for setting_name in config:
+            setting_list.append(self.dict_to_string(config[setting_name]))
+        return setting_list
+
+    @staticmethod
+    def bool_to_string(boolean):
+        if boolean:
+            return "yes"
+        else:
+            return "no"
+
+    def list_connection_info(self):
+        # Ask the settings service for the list of connections it provides
+        bus = dbus.SystemBus()
+
+        service_name = "org.freedesktop.NetworkManager"
+        settings = None
+        try:
+            proxy = bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings")
+            settings = dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings")
+        except dbus.exceptions.DBusException as e:
+            self.module.fail_json(msg="Unable to read Network Manager settings from DBus system bus: %s" % to_native(e),
+                                  details="Please check if NetworkManager is installed and"
+                                          " service network-manager is started.")
+        connection_paths = settings.ListConnections()
+        connection_list = []
+        # List each connection's name, UUID, and type
+        for path in connection_paths:
+            con_proxy = bus.get_object(service_name, path)
+            settings_connection = dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection")
+            config = settings_connection.GetSettings()
+
+            # Now get secrets too; we grab the secrets for each type of connection
+            # (since there isn't a "get all secrets" call because most of the time
+            # you only need 'wifi' secrets or '802.1x' secrets, not everything) and
+            # merge them into the configuration data, to be used at a later stage.
+            self.merge_secrets(settings_connection, config, '802-11-wireless')
+            self.merge_secrets(settings_connection, config, '802-11-wireless-security')
+            self.merge_secrets(settings_connection, config, '802-1x')
+            self.merge_secrets(settings_connection, config, 'gsm')
+            self.merge_secrets(settings_connection, config, 'cdma')
+            self.merge_secrets(settings_connection, config, 'ppp')
+
+            # Get the details of the 'connection' setting
+            s_con = config['connection']
+            connection_list.append(s_con['id'])
+            connection_list.append(s_con['uuid'])
+            connection_list.append(s_con['type'])
+            connection_list.append(self.connection_to_string(config))
+        return connection_list
+
+    def connection_exists(self):
+        # use the connection name to check whether a connection with that name already exists
+        connections = self.list_connection_info()
+
+        for con_item in connections:
+            if self.conn_name == con_item:
+                return True
+
+    def down_connection(self):
+        cmd = [self.nmcli_bin, 'con', 'down', self.conn_name]
+        return self.execute_command(cmd)
+
+    def up_connection(self):
+        cmd = [self.nmcli_bin, 'con', 'up', self.conn_name]
+        return self.execute_command(cmd)
+
+    def create_connection_team(self):
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'team', 'con-name']
+        # format for creating team interface
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+
+        options = {
+            'ipv4.address': self.ip4,
+            'ipv4.gateway': self.gw4,
+            'ipv6.address': self.ip6,
+            'ipv6.gateway': self.gw6,
+            'autoconnect': self.bool_to_string(self.autoconnect),
+            'ipv4.dns-search': self.dns4_search,
+            'ipv6.dns-search': self.dns6_search,
+            'ipv4.dhcp-client-id': self.dhcp_client_id,
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def modify_connection_team(self):
+        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
+        options = {
+            'ipv4.address': self.ip4,
+            'ipv4.gateway': self.gw4,
+            'ipv4.dns': self.dns4,
+            'ipv6.address': self.ip6,
+            'ipv6.gateway': self.gw6,
+            'ipv6.dns': self.dns6,
+            'autoconnect': self.bool_to_string(self.autoconnect),
+            'ipv4.dns-search': self.dns4_search,
+            'ipv6.dns-search': self.dns6_search,
+            'ipv4.dhcp-client-id': self.dhcp_client_id,
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def create_connection_team_slave(self):
+        cmd = [self.nmcli_bin, 'connection', 'add', 'type', self.type, 'con-name']
+        # format for creating team-slave interface
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+        if self.master is not None:
+            cmd.append('master')
+            cmd.append(self.master)
+        return cmd
+
+    def modify_connection_team_slave(self):
+        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name, 'connection.master', self.master]
+        # format for modifying team-slave interface
+        if self.mtu is not None:
+            cmd.append('802-3-ethernet.mtu')
+            cmd.append(self.mtu)
+        return cmd
+
+    def create_connection_bond(self):
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'bond', 'con-name']
+        # format for creating bond interface
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+        options = {
+            'mode': self.mode,
+            'ipv4.address': self.ip4,
+            'ipv4.gateway': self.gw4,
+            'ipv6.address': self.ip6,
+            'ipv6.gateway': self.gw6,
+            'autoconnect': self.bool_to_string(self.autoconnect),
+            'ipv4.dns-search': self.dns4_search,
+            'ipv6.dns-search': self.dns6_search,
+            'miimon': self.miimon,
+            'downdelay': self.downdelay,
+            'updelay': self.updelay,
+            'arp-interval': self.arp_interval,
+            'arp-ip-target': self.arp_ip_target,
+            'primary': self.primary,
+            'ipv4.dhcp-client-id': self.dhcp_client_id,
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+        return cmd
+
+    def modify_connection_bond(self):
+        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
+        # format for modifying bond interface
+
+        options = {
+            'ipv4.address': self.ip4,
+            'ipv4.gateway': self.gw4,
+            'ipv4.dns': self.dns4,
+            'ipv6.address': self.ip6,
+            'ipv6.gateway': self.gw6,
+            'ipv6.dns': self.dns6,
+            'autoconnect': self.bool_to_string(self.autoconnect),
+            'ipv4.dns-search': self.dns4_search,
+            'ipv6.dns-search': self.dns6_search,
+            'miimon': self.miimon,
+            'downdelay': self.downdelay,
+            'updelay': self.updelay,
+            'arp-interval': self.arp_interval,
+            'arp-ip-target': self.arp_ip_target,
+            'ipv4.dhcp-client-id': self.dhcp_client_id,
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def create_connection_bond_slave(self):
+        cmd = [self.nmcli_bin, 'connection', 'add', 'type', 'bond-slave', 'con-name']
+        # format for creating bond-slave interface
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+        if self.master is not None:
+            cmd.append('master')
+            cmd.append(self.master)
+        return cmd
+
+    def modify_connection_bond_slave(self):
+        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name, 'connection.master', self.master]
+        # format for modifying bond-slave interface
+        return cmd
+
+    def create_connection_ethernet(self, conn_type='ethernet'):
+        # format for creating ethernet interface
+        # To add an Ethernet connection with static IP configuration, issue a command as follows
+        # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
+        # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
+        cmd = [self.nmcli_bin, 'con', 'add', 'type']
+        if conn_type == 'ethernet':
+            cmd.append('ethernet')
+        elif conn_type == 'generic':
+            cmd.append('generic')
+        cmd.append('con-name')
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+
+        options = {
+            'ipv4.address': self.ip4,
+            'ipv4.gateway': self.gw4,
+            'ipv6.address': self.ip6,
+            'ipv6.gateway': self.gw6,
+            'autoconnect': self.bool_to_string(self.autoconnect),
+            'ipv4.dns-search': self.dns4_search,
+            'ipv6.dns-search': self.dns6_search,
+            'ipv4.dhcp-client-id': self.dhcp_client_id,
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def modify_connection_ethernet(self, conn_type='ethernet'):
+        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
+        # format for modifying ethernet interface
+        # To modify an Ethernet connection with static IP configuration, issue a command as follows
+        # - nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
+        # nmcli con mod con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
+        options = {
+            'ipv4.address': self.ip4,
+            'ipv4.gateway': self.gw4,
+            'ipv4.dns': self.dns4,
+            'ipv6.address': self.ip6,
+            'ipv6.gateway': self.gw6,
+            'ipv6.dns': self.dns6,
+            'autoconnect': self.bool_to_string(self.autoconnect),
+            'ipv4.dns-search': self.dns4_search,
+            'ipv6.dns-search': self.dns6_search,
+            '802-3-ethernet.mtu': self.mtu,
+            'ipv4.dhcp-client-id': self.dhcp_client_id,
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                if key == '802-3-ethernet.mtu' and conn_type != 'ethernet':
+                    continue
+                cmd.extend([key, value])
+
+        return cmd
+
+    def create_connection_bridge(self):
+        # format for creating bridge interface
+        # To add a bridge connection with static IP configuration, issue a command as follows
+        # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=bridge ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
+        # nmcli con add con-name my-eth1 ifname eth1 type bridge ip4 192.0.2.100/24 gw4 192.0.2.1
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'bridge', 'con-name']
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+
+        options = {
+            'ip4': self.ip4,
+            'gw4': self.gw4,
+            'ip6': self.ip6,
+            'gw6': self.gw6,
+            'autoconnect': self.bool_to_string(self.autoconnect),
+            'bridge.ageing-time': self.ageingtime,
+            'bridge.forward-delay': self.forwarddelay,
+            'bridge.hello-time': self.hellotime,
+            'bridge.mac-address': self.mac,
+            'bridge.max-age': self.maxage,
+            'bridge.priority': self.priority,
+            'bridge.stp': self.bool_to_string(self.stp)
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def modify_connection_bridge(self):
+        # format for modifying bridge interface
+        # To modify a bridge connection with static IP configuration, issue a command as follows
+        # - nmcli: name=mod conn_name=my-eth1 ifname=eth1 type=bridge ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
+        # nmcli con mod my-eth1 ifname eth1 type bridge ip4 192.0.2.100/24 gw4 192.0.2.1
+        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
+
+        options = {
+            'ip4': self.ip4,
+            'gw4': self.gw4,
+            'ip6': self.ip6,
+            'gw6': self.gw6,
+            'autoconnect': self.bool_to_string(self.autoconnect),
+            'bridge.ageing-time': self.ageingtime,
+            'bridge.forward-delay': self.forwarddelay,
+            'bridge.hello-time': self.hellotime,
+            'bridge.mac-address': self.mac,
+            'bridge.max-age': self.maxage,
+            'bridge.priority': self.priority,
+            'bridge.stp': self.bool_to_string(self.stp)
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def create_connection_bridge_slave(self):
+        # format for creating bridge-slave interface
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'bridge-slave', 'con-name']
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+
+        options = {
+            'master': self.master,
+            'bridge-port.path-cost': self.path_cost,
+            'bridge-port.hairpin': self.bool_to_string(self.hairpin),
+            'bridge-port.priority': self.slavepriority,
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def modify_connection_bridge_slave(self):
+        # format for modifying bridge-slave interface
+        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
+        options = {
+            'master': self.master,
+            'bridge-port.path-cost': self.path_cost,
+            'bridge-port.hairpin': self.bool_to_string(self.hairpin),
+            'bridge-port.priority': self.slavepriority,
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def create_connection_vlan(self):
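+        # format for creating vlan interface (illustrative example, mirroring the
+        # comments of the ethernet/bridge methods above; values are placeholders):
+        # - nmcli: conn_name=my-vlan10 ifname=vlan10 type=vlan vlandev=eth0 vlanid=10 state=present
+        # nmcli con add type vlan con-name my-vlan10 ifname vlan10 dev eth0 id 10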
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'vlan', 'con-name']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        else:
+            cmd.append('vlan%s' % self.vlanid)
+
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+        else:
+            cmd.append('vlan%s' % self.vlanid)
+
+        params = {'dev': self.vlandev,
+                  'id': self.vlanid,
+                  'ip4': self.ip4 or '',
+                  'gw4': self.gw4 or '',
+                  'ip6': self.ip6 or '',
+                  'gw6': self.gw6 or '',
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+
+        return cmd
+
+    def modify_connection_vlan(self):
+        cmd = [self.nmcli_bin, 'con', 'mod']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        else:
+            cmd.append('vlan%s' % self.vlanid)
+
+        params = {'vlan.parent': self.vlandev,
+                  'vlan.id': self.vlanid,
+                  'ipv4.address': self.ip4 or '',
+                  'ipv4.gateway': self.gw4 or '',
+                  'ipv4.dns': self.dns4 or '',
+                  'ipv6.address': self.ip6 or '',
+                  'ipv6.gateway': self.gw6 or '',
+                  'ipv6.dns': self.dns6 or '',
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+
+        for k, v in params.items():
+            cmd.extend([k, v])
+
+        return cmd
+
+    def create_connection_vxlan(self):
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'vxlan', 'con-name']
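+        # illustrative example of the command built below (values are placeholders):
+        # nmcli con add type vxlan con-name my-vxlan100 ifname vxlan100 vxlan.id 100 vxlan.local 192.0.2.1 vxlan.remote 192.0.2.2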
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        else:
+            cmd.append('vxlan%s' % self.vxlan_id)
+
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+        else:
+            cmd.append('vxlan%s' % self.vxlan_id)
+
+        params = {'vxlan.id': self.vxlan_id,
+                  'vxlan.local': self.vxlan_local,
+                  'vxlan.remote': self.vxlan_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+
+        return cmd
+
+    def modify_connection_vxlan(self):
+        cmd = [self.nmcli_bin, 'con', 'mod']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        else:
+            cmd.append('vxlan%s' % self.vxlan_id)
+
+        params = {'vxlan.id': self.vxlan_id,
+                  'vxlan.local': self.vxlan_local,
+                  'vxlan.remote': self.vxlan_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+        return cmd
+
+    def create_connection_ipip(self):
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'ip-tunnel', 'mode', 'ipip', 'con-name']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.ip_tunnel_dev is not None:
+            cmd.append('ipip%s' % self.ip_tunnel_dev)
+
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+        else:
+            cmd.append('ipip%s' % self.ip_tunnel_dev)
+
+        if self.ip_tunnel_dev is not None:
+            cmd.append('dev')
+            cmd.append(self.ip_tunnel_dev)
+
+        params = {'ip-tunnel.local': self.ip_tunnel_local,
+                  'ip-tunnel.remote': self.ip_tunnel_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+
+        return cmd
+
+    def modify_connection_ipip(self):
+        cmd = [self.nmcli_bin, 'con', 'mod']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.ip_tunnel_dev is not None:
+            cmd.append('ipip%s' % self.ip_tunnel_dev)
+
+        params = {'ip-tunnel.local': self.ip_tunnel_local,
+                  'ip-tunnel.remote': self.ip_tunnel_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+        return cmd
+
+    def create_connection_sit(self):
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'ip-tunnel', 'mode', 'sit', 'con-name']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.ip_tunnel_dev is not None:
+            cmd.append('sit%s' % self.ip_tunnel_dev)
+
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+        else:
+            cmd.append('sit%s' % self.ip_tunnel_dev)
+
+        if self.ip_tunnel_dev is not None:
+            cmd.append('dev')
+            cmd.append(self.ip_tunnel_dev)
+
+        params = {'ip-tunnel.local': self.ip_tunnel_local,
+                  'ip-tunnel.remote': self.ip_tunnel_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+
+        return cmd
+
+    def modify_connection_sit(self):
+        cmd = [self.nmcli_bin, 'con', 'mod']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.ip_tunnel_dev is not None:
+            cmd.append('sit%s' % self.ip_tunnel_dev)
+
+        params = {'ip-tunnel.local': self.ip_tunnel_local,
+                  'ip-tunnel.remote': self.ip_tunnel_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+        return cmd
+
+    def create_connection(self):
+        cmd = []
+        if self.type == 'team':
+            if (self.dns4 is not None) or (self.dns6 is not None):
+                cmd = self.create_connection_team()
+                self.execute_command(cmd)
+                cmd = self.modify_connection_team()
+                if self.activate:
+                    self.execute_command(cmd)
+                    return self.up_connection()
+                else:
+                    return self.execute_command(cmd)
+            else:
+                cmd = self.create_connection_team()
+        elif self.type == 'team-slave':
+            if self.mtu is not None:
+                cmd = self.create_connection_team_slave()
+                self.execute_command(cmd)
+                cmd = self.modify_connection_team_slave()
+                return self.execute_command(cmd)
+            else:
+                cmd = self.create_connection_team_slave()
+        elif self.type == 'bond':
+            if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
+                cmd = self.create_connection_bond()
+                self.execute_command(cmd)
+                cmd = self.modify_connection_bond()
+                if self.activate:
+                    self.execute_command(cmd)
+                    return self.up_connection()
+                else:
+                    return self.execute_command(cmd)
+            else:
+                cmd = self.create_connection_bond()
+        elif self.type == 'bond-slave':
+            cmd = self.create_connection_bond_slave()
+        elif self.type == 'ethernet':
+            if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
+                cmd = self.create_connection_ethernet()
+                self.execute_command(cmd)
+                cmd = self.modify_connection_ethernet()
+                if self.activate:
+                    self.execute_command(cmd)
+                    return self.up_connection()
+                else:
+                    return self.execute_command(cmd)
+            else:
+                cmd = self.create_connection_ethernet()
+        elif self.type == 'bridge':
+            cmd = self.create_connection_bridge()
+        elif self.type == 'bridge-slave':
+            cmd = self.create_connection_bridge_slave()
+        elif self.type == 'vlan':
+            cmd = self.create_connection_vlan()
+        elif self.type == 'vxlan':
+            cmd = self.create_connection_vxlan()
+        elif self.type == 'ipip':
+            cmd = self.create_connection_ipip()
+        elif self.type == 'sit':
+            cmd = self.create_connection_sit()
+        elif self.type == 'generic':
+            cmd = self.create_connection_ethernet(conn_type='generic')
+
+        if cmd:
+            return self.execute_command(cmd)
+        else:
+            self.module.fail_json(msg="Type of device or network connection is required "
+                                      "while performing 'create' operation. Please specify 'type' as an argument.")
+
+    def remove_connection(self):
+        # self.down_connection()
+        cmd = [self.nmcli_bin, 'con', 'del', self.conn_name]
+        return self.execute_command(cmd)
+
+    def modify_connection(self):
+        cmd = []
+        if self.type == 'team':
+            cmd = self.modify_connection_team()
+        elif self.type == 'team-slave':
+            cmd = self.modify_connection_team_slave()
+        elif self.type == 'bond':
+            cmd = self.modify_connection_bond()
+        elif self.type == 'bond-slave':
+            cmd = self.modify_connection_bond_slave()
+        elif self.type == 'ethernet':
+            cmd = self.modify_connection_ethernet()
+        elif self.type == 'bridge':
+            cmd = self.modify_connection_bridge()
+        elif self.type == 'bridge-slave':
+            cmd = self.modify_connection_bridge_slave()
+        elif self.type == 'vlan':
+            cmd = self.modify_connection_vlan()
+        elif self.type == 'vxlan':
+            cmd = self.modify_connection_vxlan()
+        elif self.type == 'ipip':
+            cmd = self.modify_connection_ipip()
+        elif self.type == 'sit':
+            cmd = self.modify_connection_sit()
+        elif self.type == 'generic':
+            cmd = self.modify_connection_ethernet(conn_type='generic')
+        if cmd:
+            return self.execute_command(cmd)
+        else:
+            self.module.fail_json(msg="Type of device or network connection is required "
+                                      "while performing 'modify' operation. Please specify 'type' as an argument.")
+
+
+def main():
+    # Parse module arguments
+    module = AnsibleModule(
+        argument_spec=dict(
+            autoconnect=dict(type='bool', default=True),
+            activate=dict(type='bool', default=True),
+            state=dict(type='str', required=True, choices=['absent', 'present']),
+            conn_name=dict(type='str', required=True),
+            master=dict(type='str'),
+            ifname=dict(type='str'),
+            type=dict(type='str',
+                      choices=['bond', 'bond-slave', 'bridge', 'bridge-slave', 'ethernet', 'generic', 'ipip', 'sit', 'team', 'team-slave', 'vlan', 'vxlan']),
+            ip4=dict(type='str'),
+            gw4=dict(type='str'),
+            dns4=dict(type='list'),
+            dns4_search=dict(type='list'),
+            dhcp_client_id=dict(type='str'),
+            ip6=dict(type='str'),
+            gw6=dict(type='str'),
+            dns6=dict(type='list'),
+            dns6_search=dict(type='list'),
+            # Bond Specific vars
+            mode=dict(type='str', default='balance-rr',
+                      choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']),
+            miimon=dict(type='int'),
+            downdelay=dict(type='int'),
+            updelay=dict(type='int'),
+            arp_interval=dict(type='int'),
+            arp_ip_target=dict(type='str'),
+            primary=dict(type='str'),
+            # general usage
+            mtu=dict(type='int'),
+            mac=dict(type='str'),
+            # bridge specific vars
+            stp=dict(type='bool', default=True),
+            priority=dict(type='int', default=128),
+            slavepriority=dict(type='int', default=32),
+            forwarddelay=dict(type='int', default=15),
+            hellotime=dict(type='int', default=2),
+            maxage=dict(type='int', default=20),
+            ageingtime=dict(type='int', default=300),
+            hairpin=dict(type='bool', default=True),
+            path_cost=dict(type='int', default=100),
+            # vlan specific vars
+            vlanid=dict(type='int'),
+            vlandev=dict(type='str'),
+            flags=dict(type='str'),
+            ingress=dict(type='str'),
+            egress=dict(type='str'),
+            # vxlan specific vars
+            vxlan_id=dict(type='int'),
+            vxlan_local=dict(type='str'),
+            vxlan_remote=dict(type='str'),
+            # ip-tunnel specific vars
+            ip_tunnel_dev=dict(type='str'),
+            ip_tunnel_local=dict(type='str'),
+            ip_tunnel_remote=dict(type='str'),
+        ),
+        supports_check_mode=True,
+    )
+
+    if not HAVE_DBUS:
+        module.fail_json(msg=missing_required_lib('dbus'), exception=DBUS_IMP_ERR)
+
+    if not HAVE_NM_CLIENT:
+        module.fail_json(msg=missing_required_lib('NetworkManager glib API'), exception=NM_CLIENT_IMP_ERR)
+
+    nmcli = Nmcli(module)
+
+    (rc, out, err) = (None, '', '')
+    result = {'conn_name': nmcli.conn_name, 'state': nmcli.state}
+
+    # check for issues
+    if nmcli.conn_name is None:
+        nmcli.module.fail_json(msg="Please specify a name for the connection")
+    # team-slave checks
+    if nmcli.type == 'team-slave' and nmcli.master is None:
+        nmcli.module.fail_json(msg="Please specify a name for the master")
+    if nmcli.type == 'team-slave' and nmcli.ifname is None:
+        nmcli.module.fail_json(msg="Please specify an interface name for the connection")
+
+    if nmcli.state == 'absent':
+        if nmcli.connection_exists():
+            if module.check_mode:
+                module.exit_json(changed=True)
+            (rc, out, err) = nmcli.down_connection()
+            (rc, out, err) = nmcli.remove_connection()
+            if rc != 0:
+                module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+
+    elif nmcli.state == 'present':
+        if nmcli.connection_exists():
+            # modify connection (note: this function is check mode aware)
+            # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
+            result['Exists'] = 'Connections do exist so we are modifying them'
+            if module.check_mode:
+                module.exit_json(changed=True)
+            (rc, out, err) = nmcli.modify_connection()
+        if not nmcli.connection_exists():
+            result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
+            if module.check_mode:
+                module.exit_json(changed=True)
+            (rc, out, err) = nmcli.create_connection()
+        if rc is not None and rc != 0:
+            module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
+
+    if rc is None:
+        result['changed'] = False
+    else:
+        result['changed'] = True
+    if out:
+        result['stdout'] = out
+    if err:
+        result['stderr'] = err
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/library/source_file.py b/library/source_file.py
new file mode 100644
index 00000000..2b527598
--- /dev/null
+++ b/library/source_file.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2019, Nicolas Karolak <nicolas.karolak@ubicast.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    "metadata_version": "1.1",
+    "status": ["preview"],
+    "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+module: source_file
+short_description: Source remote bash/dotenv file
+description:
+    - This module is used to register host variables from a remote bash/dotenv-like file.
+    - It handles boolean values (`MY_VAR=1`) and has basic handling of lists (`MY_VAR=one,two,three`) and dictionaries (`MY_VAR=a=1;b=2;c=3`).
+version_added: "2.8"
+author: "Nicolas Karolak (@nikaro)"
+options:
+    path:
+        description:
+            - Path to the file to source.
+        required: true
+        type: path
+    prefix:
+        description:
+            - Prefix to add to the registered variable name.
+        required: false
+        default: ""
+        type: str
+    lower:
+        description:
+            - Whether to lowercase the variable name or not.
+        required: false
+        default: false
+        type: bool
+notes:
+    - Check mode (`check_mode`) is supported.
+"""
+
+EXAMPLES = """
+- name: source envsetup file
+  source_file:
+    prefix: envsetup_
+    path: /root/envsetup/conf.sh
+    lower: true
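+
+# A minimal follow-up sketch: use one of the registered facts in a later task
+# (the variable name below is hypothetical and assumes conf.sh defines
+# SKYREACH_API_KEY).
+- name: show a sourced variable
+  debug:
+    var: envsetup_skyreach_api_key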
+"""
+
+RETURN = """
+ansible_facts:
+    description: Registered variables.
+    returned: on success
+    type: dict
+    sample:
+        key: value
+"""
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.parsing.convert_bool import BOOLEANS, boolean
+from ansible.module_utils.six import string_types
+
+
+def run_module():
+    module_args = {
+        "path": {"type": "path", "required": True},
+        "prefix": {"type": "str", "required": False, "default": ""},
+        "lower": {"type": "bool", "required": False, "default": False},
+    }
+
+    result = {"changed": False}
+
+    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+
+    path = module.params["path"]
+    prefix = module.params["prefix"]
+    lower = boolean(module.params["lower"])
+    variables = {}
+    regex_valid_name = re.compile("^[a-zA-Z][a-zA-Z0-9_-]*$")
+    regex_key_value = re.compile(
+        "^(?P<key>[a-zA-Z][a-zA-Z0-9_-]*)=(?:[\'\"])?(?P<value>(?:[^\'\"])*)(?:[\'\"])?$",
+        re.MULTILINE
+    )
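+    # the regex above captures simple one-per-line assignments such as
+    # KEY=value, KEY="value" or KEY='value' (surrounding quotes are stripped)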
+
+    if not os.path.isfile(path):
+        module.fail_json(msg="'%s' does not exist or is not a file" % path, **result)
+
+    if prefix and not regex_valid_name.match(prefix):
+        module.fail_json(
+            msg="'%s' is not a valid prefix it must starts with a letter or underscore"
+            " character, and contains only letters, numbers and underscores" % prefix,
+            **result
+        )
+
+    with open(path) as path_fh:
+        # load file content and get all "key=value"
+        content = path_fh.read()
+        content_match = regex_key_value.findall(content)
+
+        for key, value in content_match:
+            # merge prefix + key
+            if prefix:
+                key = "%s%s" % (prefix, key)
+
+            # lower key
+            if lower:
+                key = key.lower()
+
+            # check key validity
+            if not regex_valid_name.match(key):
+                module.fail_json(
+                    msg="'%s' is not a valid variable name it must starts with a letter or "
+                    "underscore character, and contains only letters, numbers and underscores"
+                    % key,
+                    **result
+                )
+
+            # handle list value
+            if "," in value:
+                value = re.split("[,\n]", value)
+
+            # handle dict value
+            if ";" in value and "=" in value:
+                value = {i.split("=")[0]: i.split("=")[1] for i in value.split(";")}
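+            # e.g. "one,two,three" -> ["one", "two", "three"]
+            #      "a=1;b=2;c=3"   -> {"a": "1", "b": "2", "c": "3"}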
+
+            # handle bool value
+            if isinstance(value, string_types) and value.lower() in BOOLEANS:
+                value = boolean(value)
+
+            # build variables dict
+            variables[key] = value
+
+            result["changed"] = True
+
+            if not module.check_mode:
+                result["ansible_facts"] = variables
+
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/molecule/default/Dockerfile.j2 b/molecule/default/Dockerfile.j2
new file mode 100644
index 00000000..36166d64
--- /dev/null
+++ b/molecule/default/Dockerfile.j2
@@ -0,0 +1,7 @@
+# Molecule managed
+
+{% if item.registry is defined %}
+FROM {{ item.registry.url }}/{{ item.image }}
+{% else %}
+FROM {{ item.image }}
+{% endif %}
diff --git a/molecule/default/molecule.yml b/molecule/default/molecule.yml
new file mode 100644
index 00000000..4d9fe5f0
--- /dev/null
+++ b/molecule/default/molecule.yml
@@ -0,0 +1,54 @@
+---
+
+dependency:
+  name: galaxy
+driver:
+  name: docker
+lint:
+  name: yamllint
+platforms:
+  - name: debian-buster-${CI_PIPELINE_ID:-default}
+    image: registry.ubicast.net/docker/debian-systemd:buster
+    command: /lib/systemd/systemd
+    privileged: true
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:ro
+    tmpfs:
+      - /tmp
+      - /run
+      - /run/lock
+    groups:
+      - celerity
+      - manager
+      - monitor
+      - postgres
+      - server
+      - worker
+      - import
+      - netcapture
+provisioner:
+  name: ansible
+  env:
+    ANSIBLE_ROLES_PATH: ../../roles
+    ANSIBLE_LIBRARY: ../../library
+    ANSIBLE_ACTION_PLUGINS: ../../plugins/action
+    ANSIBLE_PYTHON_INTERPRETER: /usr/bin/python3
+  lint:
+    name: ansible-lint
+    env:
+      ANSIBLE_ROLES_PATH: ../../roles
+      ANSIBLE_LIBRARY: ../../library
+      ANSIBLE_ACTION_PLUGINS: ../../plugins/action
+      ANSIBLE_PYTHON_INTERPRETER: /usr/bin/python3
+  inventory:
+    group_vars:
+      all:
+        ansible_python_interpreter: /usr/bin/python3
+  playbooks:
+    converge: ../../site.yml
+verifier:
+  name: testinfra
+  lint:
+    name: flake8
+    options:
+      max-line-length: 90
diff --git a/molecule/default/tests/test_000_python3.py b/molecule/default/tests/test_000_python3.py
new file mode 100644
index 00000000..edfcc5a7
--- /dev/null
+++ b/molecule/default/tests/test_000_python3.py
@@ -0,0 +1,15 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_python3_is_installed(host):
+    p = host.package("python3")
+
+    assert p.is_installed
+    assert p.version.startswith("3.")
diff --git a/molecule/default/tests/test_010_conf.py b/molecule/default/tests/test_010_conf.py
new file mode 100644
index 00000000..313b6b2d
--- /dev/null
+++ b/molecule/default/tests/test_010_conf.py
@@ -0,0 +1,63 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_git_is_installed(host):
+    p = host.package("git")
+
+    assert p.is_installed
+
+
+def test_envsetup_repo(host):
+    f = host.file("/root/envsetup")
+
+    assert f.exists
+    assert f.is_directory
+    assert f.user == "root"
+    assert f.group == "root"
+
+
+def test_root_privkey(host):
+    f = host.file("/root/.ssh/id_ed25519")
+
+    assert f.exists
+    assert f.is_file
+    assert f.user == "root"
+    assert f.group == "root"
+    assert f.mode == 0o600
+
+
+def test_root_pubkey(host):
+    f = host.file("/root/.ssh/id_ed25519.pub")
+
+    assert f.exists
+    assert f.is_file
+    assert f.user == "root"
+    assert f.group == "root"
+    assert f.mode == 0o644
+
+
+def test_generated_conf_sh_file(host):
+    f = host.file("/root/envsetup/auto-generated-conf.sh")
+
+    assert f.exists
+    assert f.is_file
+    assert f.user == "root"
+    assert f.group == "root"
+    assert f.contains("Generated by")
+    assert f.contains("SKYREACH_API_KEY=")
+
+
+def test_conf_sh_file(host):
+    f = host.file("/root/envsetup/conf.sh")
+
+    assert f.exists
+    assert f.is_file
+    assert f.user == "root"
+    assert f.group == "root"
diff --git a/molecule/default/tests/test_011_init.py b/molecule/default/tests/test_011_init.py
new file mode 100644
index 00000000..4941f6b5
--- /dev/null
+++ b/molecule/default/tests/test_011_init.py
@@ -0,0 +1,80 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_apt_source_skyreach_file(host):
+    f = host.file("/etc/apt/sources.list.d/skyreach.list")
+
+    assert f.exists
+    assert f.is_file
+    assert f.user == "root"
+    assert f.group == "root"
+    assert f.contains("deb http")
+
+
+def test_requests_is_installed(host):
+    p = host.package("python3-requests")
+
+    assert p.is_installed
+    assert p.version.startswith("2.")
+
+
+def test_locale_file(host):
+    f = host.file("/etc/default/locale")
+
+    assert f.exists
+    assert f.is_file
+    assert f.user == "root"
+    assert f.group == "root"
+    assert f.contains("LANGUAGE=")
+
+
+def test_ubicast_user(host):
+    u = host.user("ubicast")
+
+    assert u.name == "ubicast"
+    assert u.home == "/home/ubicast"
+    assert "sudo" in u.groups
+    assert u.expiration_date is None
+
+
+def test_bashrc_file(host):
+    f = host.file("/root/.bashrc")
+
+    assert f.exists
+
+
+def test_vimrc_file(host):
+    f = host.file("/root/.vimrc")
+
+    assert f.exists
+
+
+def test_authorized_keys_file(host):
+    f = host.file("/root/.ssh/authorized_keys")
+
+    assert f.exists
+    assert f.is_file
+    assert f.user == "root"
+    assert f.group == "root"
+    assert f.contains(
+        "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCr2IJlzvLlLxa2PyGhydAlz/PAOj240g8anQmY5"
+        "8X+llirLHIOlkdJXBqf94jAeZkweWpoE41RdmKPUQEz4pCO09dGJaZD4lv1NtDhrhNwTmoOnyFcko"
+        "PimR6DX6+UMM9wUmfti/ytljbVEVVo/pRacXmczeumDaci3uYTURyliuAR9h3zbIMQ6D2COESXjpt"
+        "WmEwawE9grsTfJi84Q+XIBPvXRHjjceB5hejUMWuf7xc6GH9WIo5REh3qTUvgtxHtIGLQ3ImOzrbC"
+        "sEhENrBWds0qH0pIuH0lykWGR6pumpPxLzXcVho+e/UJgUrEg5u6/58aizqJTkxFJMa8ciYz "
+        "support@ubicast"
+    )
+
+
+def test_journal_file(host):
+    f = host.file("/var/log/journal")
+
+    assert f.exists
+    assert f.is_directory
diff --git a/molecule/default/tests/test_012_postfix.py b/molecule/default/tests/test_012_postfix.py
new file mode 100644
index 00000000..59be2872
--- /dev/null
+++ b/molecule/default/tests/test_012_postfix.py
@@ -0,0 +1,63 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_postfix_is_installed(host):
+    p = host.package("postfix")
+
+    assert p.is_installed
+
+
+def test_postfix_main(host):
+    f = host.file("/etc/postfix/main.cf")
+
+    assert f.exists
+
+
+def test_mailname(host):
+    f = host.file("/etc/mailname")
+
+    assert f.exists
+
+
+def test_aliases(host):
+    f = host.file("/etc/aliases")
+
+    assert f.exists
+    assert f.contains("devnull:")
+    assert f.contains("root:")
+
+
+def test_postfix_virtual(host):
+    f = host.file("/etc/postfix/virtual")
+
+    assert f.exists
+    assert f.contains("postmaster@")
+    assert f.contains("bounces@")
+    assert f.contains("noreply@")
+
+
+def test_postfix_generic(host):
+    f = host.file("/etc/postfix/generic")
+
+    assert f.exists
+    assert f.contains("root@")
+
+
+def test_postfix_service(host):
+    s = host.service("postfix")
+
+    assert s.is_running
+    assert s.is_enabled
+
+
+def test_postfix_listen(host):
+    s = host.socket("tcp://127.0.0.1:25")
+
+    assert s.is_listening
diff --git a/molecule/default/tests/test_013_ntp.py b/molecule/default/tests/test_013_ntp.py
new file mode 100644
index 00000000..30700392
--- /dev/null
+++ b/molecule/default/tests/test_013_ntp.py
@@ -0,0 +1,38 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_ntp_is_installed(host):
+    p = host.package("ntp")
+
+    assert p.is_installed
+
+
+def test_systemd_timesyncd_override(host):
+    f = host.file(
+        "/lib/systemd/system/systemd-timesyncd.service.d/disable-with-time-daemon.conf"
+    )
+
+    assert f.exists
+    assert f.contains("[Unit]")
+    assert f.contains("ConditionFileIsExecutable=!")
+
+
+def test_systemd_timesyncd_disabled(host):
+    s = host.service("systemd-timesyncd")
+
+    assert not s.is_running
+    assert not s.is_enabled
+
+
+def test_ntp_service(host):
+    s = host.service("ntp")
+
+    assert s.is_running
+    assert s.is_enabled
diff --git a/molecule/default/tests/test_020_nginx.py b/molecule/default/tests/test_020_nginx.py
new file mode 100644
index 00000000..6ece959c
--- /dev/null
+++ b/molecule/default/tests/test_020_nginx.py
@@ -0,0 +1,32 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_apache2_is_installed(host):
+    p = host.package("apache2")
+
+    assert not p.is_installed
+
+
+def test_nginx_is_installed(host):
+    p = host.package("nginx")
+
+    assert p.is_installed
+
+
+def test_nginx_removed_default(host):
+    f = host.file("/etc/nginx/sites-enabled/default.conf")
+
+    assert not f.exists
+
+
+def test_nginx_removed_old_ssl(host):
+    f = host.file("/etc/nginx/conf.d/ssl.conf")
+
+    assert not f.exists
diff --git a/molecule/default/tests/test_021_monitor.py b/molecule/default/tests/test_021_monitor.py
new file mode 100644
index 00000000..aa58302a
--- /dev/null
+++ b/molecule/default/tests/test_021_monitor.py
@@ -0,0 +1,63 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_munin_is_installed(host):
+    p = host.package("ubicast-config")
+
+    assert p.is_installed
+
+
+def test_monitor_is_installed(host):
+    p = host.package("ubicast-monitor")
+
+    assert p.is_installed
+
+
+def test_monitor_runtime_is_installed(host):
+    p = host.package("ubicast-monitor-runtime")
+
+    assert p.is_installed
+
+
+def test_monitor_user(host):
+    u = host.user("msmonitor")
+
+    assert u.name == "msmonitor"
+
+
+def test_monitor_nginx(host):
+    f = host.file("/etc/nginx/sites-available/msmonitor.conf")
+
+    assert f.exists
+
+
+def test_monitor_service(host):
+    s = host.service("msmonitor")
+
+    assert s.is_running
+    assert s.is_enabled
+
+
+def test_monitor_socket(host):
+    s = host.socket("tcp://0.0.0.0:443")
+
+    assert s.is_listening
+
+
+def test_fail2ban_conf(host):
+    f = host.file("/etc/fail2ban/jail.d/monitor.local")
+
+    assert f.exists
+
+
+def test_fail2ban_service(host):
+    s = host.service("fail2ban")
+
+    assert s.is_running
diff --git a/molecule/default/tests/test_022_postgres.py b/molecule/default/tests/test_022_postgres.py
new file mode 100644
index 00000000..68119ad2
--- /dev/null
+++ b/molecule/default/tests/test_022_postgres.py
@@ -0,0 +1,38 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_psycopg2_is_installed(host):
+    p = host.package("python3-psycopg2")
+
+    assert p.is_installed
+
+
+def test_postgres_is_installed(host):
+    p = host.package("postgresql")
+
+    assert p.is_installed
+
+
+def test_postgres_user(host):
+    u = host.user("postgres")
+
+    assert u.name == "postgres"
+
+
+def test_postgres_service(host):
+    s = host.service("postgresql")
+
+    assert s.is_running
+
+
+def test_postgres_socket(host):
+    s = host.socket("tcp://127.0.0.1:5432")
+
+    assert s.is_listening
diff --git a/molecule/default/tests/test_030_manager.py b/molecule/default/tests/test_030_manager.py
new file mode 100644
index 00000000..d4912341
--- /dev/null
+++ b/molecule/default/tests/test_030_manager.py
@@ -0,0 +1,57 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_manager_is_installed(host):
+    p = host.package("ubicast-skyreach")
+
+    assert p.is_installed
+
+
+def test_manager_runtime_is_installed(host):
+    p = host.package("ubicast-skyreach-runtime")
+
+    assert p.is_installed
+
+
+def test_manager_user(host):
+    u = host.user("skyreach")
+
+    assert u.name == "skyreach"
+
+
+def test_manager_nginx(host):
+    f = host.file("/etc/nginx/sites-available/skyreach.conf")
+
+    assert f.exists
+
+
+def test_manager_service(host):
+    s = host.service("skyreach")
+
+    assert s.is_running
+    assert s.is_enabled
+
+
+def test_manager_socket(host):
+    s = host.socket("tcp://0.0.0.0:443")
+
+    assert s.is_listening
+
+
+def test_fail2ban_conf(host):
+    f = host.file("/etc/fail2ban/jail.d/manager.local")
+
+    assert f.exists
+
+
+def test_fail2ban_service(host):
+    s = host.service("fail2ban")
+
+    assert s.is_running
diff --git a/molecule/default/tests/test_040_celerity.py b/molecule/default/tests/test_040_celerity.py
new file mode 100644
index 00000000..5bbb2d76
--- /dev/null
+++ b/molecule/default/tests/test_040_celerity.py
@@ -0,0 +1,35 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_celerity_is_installed(host):
+    p = host.package("celerity-server")
+
+    assert p.is_installed
+
+
+def test_celerity_config(host):
+    f = host.file("/etc/celerity/config.py")
+
+    assert f.exists
+    assert f.contains("SIGNING_KEY =")
+    assert f.contains("MEDIASERVERS =")
+
+
+def test_celerity_service(host):
+    s = host.service("celerity-server")
+
+    assert s.is_running
+    assert s.is_enabled
+
+
+def test_celerity_socket(host):
+    s = host.socket("tcp://0.0.0.0:6200")
+
+    assert s.is_listening
diff --git a/molecule/default/tests/test_041_worker.py b/molecule/default/tests/test_041_worker.py
new file mode 100644
index 00000000..a0f825a7
--- /dev/null
+++ b/molecule/default/tests/test_041_worker.py
@@ -0,0 +1,30 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_celerity_is_installed(host):
+    p = host.package("celerity-workers")
+
+    assert p.is_installed
+
+
+def test_celerity_config(host):
+    f = host.file("/etc/celerity/config.py")
+
+    assert f.exists
+    assert f.contains("SIGNING_KEY =")
+    assert f.contains("SERVER_URL =")
+    assert f.contains("WORKERS_COUNT =")
+
+
+def test_celerity_service(host):
+    s = host.service("celerity-workers")
+
+    assert s.is_running
+    assert s.is_enabled
diff --git a/molecule/default/tests/test_050_server.py b/molecule/default/tests/test_050_server.py
new file mode 100644
index 00000000..bf2eaa58
--- /dev/null
+++ b/molecule/default/tests/test_050_server.py
@@ -0,0 +1,57 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_server_is_installed(host):
+    p = host.package("ubicast-mediaserver")
+
+    assert p.is_installed
+
+
+def test_server_runtime_is_installed(host):
+    p = host.package("ubicast-mediaserver-runtime")
+
+    assert p.is_installed
+
+
+def test_server_user(host):
+    u = host.user("msuser")
+
+    assert u.name == "msuser"
+
+
+def test_server_nginx(host):
+    f = host.file("/etc/nginx/sites-available/mediaserver-msuser.conf")
+
+    assert f.exists
+
+
+def test_server_service(host):
+    s = host.service("mediaserver")
+
+    assert s.is_running
+    assert s.is_enabled
+
+
+def test_server_socket(host):
+    s = host.socket("tcp://0.0.0.0:443")
+
+    assert s.is_listening
+
+
+def test_fail2ban_conf(host):
+    f = host.file("/etc/fail2ban/jail.d/server.local")
+
+    assert f.exists
+
+
+def test_fail2ban_service(host):
+    s = host.service("fail2ban")
+
+    assert s.is_running
diff --git a/molecule/default/tests/test_060_import.py b/molecule/default/tests/test_060_import.py
new file mode 100644
index 00000000..72ecace9
--- /dev/null
+++ b/molecule/default/tests/test_060_import.py
@@ -0,0 +1,84 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+# TODO: ubicast-mediaimport when released
+def test_import_is_installed(host):
+    p = host.package("python3-mediaserver-mediaimport")
+
+    assert p.is_installed
+
+
+def test_ftp_is_installed(host):
+    p = host.package("pure-ftpd")
+
+    assert p.is_installed
+
+
+def test_ssh_is_installed(host):
+    p = host.package("openssh-server")
+
+    assert p.is_installed
+
+
+def test_sftp_is_installed(host):
+    p = host.package("mysecureshell")
+
+    assert p.is_installed
+
+
+def test_mediaimport_conf(host):
+    f = host.file("/etc/mediaserver/mediaimport.json")
+
+    assert f.exists
+
+
+def test_mediaimport_service(host):
+    s = host.service("mediaimport")
+
+    assert s.is_running
+    assert s.is_enabled
+
+
+def test_ftp_service(host):
+    s = host.service("pure-ftpd")
+
+    assert s.is_running
+    assert s.is_enabled
+
+
+def test_sftp_service(host):
+    s = host.service("mysecureshell")
+
+    assert s.is_running
+    assert s.is_enabled
+
+
+def test_ftp_socket(host):
+    s = host.socket("tcp://0.0.0.0:21")
+
+    assert s.is_listening
+
+
+def test_sftp_socket(host):
+    s = host.socket("tcp://0.0.0.0:22")
+
+    assert s.is_listening
+
+
+def test_fail2ban_conf(host):
+    f = host.file("/etc/fail2ban/jail.d/pure-ftpd.local")
+
+    assert f.exists
+
+
+def test_fail2ban_service(host):
+    s = host.service("fail2ban")
+
+    assert s.is_running
diff --git a/molecule/default/tests/test_070_netcapture.py b/molecule/default/tests/test_070_netcapture.py
new file mode 100644
index 00000000..45fc1ceb
--- /dev/null
+++ b/molecule/default/tests/test_070_netcapture.py
@@ -0,0 +1,39 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_netcapture_is_installed(host):
+    p = host.package("python3-miris-netcapture")
+
+    assert p.is_installed
+
+
+def test_docker_is_installed(host):
+    p = host.package("docker-ce")
+
+    assert p.is_installed
+
+
+def test_netcapture_conf(host):
+    f = host.file("/etc/miris/netcapture.json")
+
+    assert f.exists
+
+
+def test_miris_api_conf(host):
+    f = host.file("/etc/miris/conf/api.json")
+
+    assert f.exists
+
+
+def test_docker_service(host):
+    s = host.service("docker")
+
+    assert s.is_running
+    assert s.is_enabled
diff --git a/packer/aio.yml b/packer/aio.yml
new file mode 100644
index 00000000..cdd6bacd
--- /dev/null
+++ b/packer/aio.yml
@@ -0,0 +1,80 @@
+---
+
+# build trigger: 201907171352
+
+variables:
+  ssh_auth_keys: "{{ env `PACKER_SSH_AUTH_KEYS` }}"
+  esx_host: "{{ env `PACKER_ESX_HOST` }}"
+  esx_datastore: "{{ env `PACKER_ESX_DATASTORE` }}"
+  esx_username: "{{ env `PACKER_ESX_USERNAME` }}"
+  esx_password: "{{ env `PACKER_ESX_PASSWORD` }}"
+  network_ip: "{{ env `NETWORK_IP` }}"
+  network_mask: "{{ env `NETWORK_MASK` }}"
+  network_gateway: "{{ env `NETWORK_GATEWAY` }}"
+  network_dns: "{{ env `NETWORK_DNS` }}"
+  skyreach_system_key: "{{ env `SKYREACH_SYSTEM_KEY` }}"
+
+builders:
+  - type: vmware-iso
+    vm_name: ubicast-aio
+    display_name: ubicast-aio
+    output_directory: output
+    guest_os_type: debian10-64
+    iso_urls:
+      - http://debian.univ-lorraine.fr/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+      - https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+    iso_checksum: 23237b0a100a860b3dc7ffcfb5baae4bed5460ac5f3f2b929df3154f3319b9809055b695264586f60289cc6cb25077c12938cc612fee01756bfa779c87d5a315
+    iso_checksum_type: sha512
+    remote_type: esx5
+    remote_host: "{{ user `esx_host` }}"
+    remote_datastore: "{{ user `esx_datastore` }}"
+    remote_username: "{{ user `esx_username` }}"
+    remote_password: "{{ user `esx_password` }}"
+    format: ova
+    tools_upload_flavor: linux
+    vnc_disable_password: true
+    cpus: 2
+    memory: 2048
+    vmx_data:
+      ethernet0.networkName: VM Network
+    boot_command:
+      - "<esc><wait>"
+      - "/install.amd/vmlinuz "
+      - "initrd=/install.amd/initrd.gz "
+      - "hostname={{ .Name }} "
+      - "domain= "
+      - "auto=true "
+      - "url=https://www.ubicast.eu/media/downloads/sys/preseed.cfg "
+      - "vga=788 noprompt quiet --<enter>"
+    ssh_username: root
+    ssh_password: ubicast
+    ssh_timeout: 15m
+    headless: false
+    shutdown_command: shutdown -P now
+
+provisioners:
+  - type: shell
+    pause_before: 10s
+    environment_vars:
+      - PACKER_SSH_AUTH_KEYS={{ user `ssh_auth_keys` }}
+    scripts:
+      - packer/scripts/root.sh
+      - packer/scripts/upgrade.sh
+  - type: ansible
+    ansible_env_vars:
+      - NETWORK_IP={{ user `network_ip` }}
+      - NETWORK_MASK={{ user `network_mask` }}
+      - NETWORK_GATEWAY={{ user `network_gateway` }}
+      - NETWORK_DNS={{ user `network_dns` }}
+      - SKYREACH_SYSTEM_KEY={{ user `skyreach_system_key` }}
+    playbook_file: site.yml
+    groups:
+      - monitor
+      - postgres
+      - manager
+      - wowza
+      - celerity
+      - server
+      - worker
+
+...
diff --git a/packer/base.yml b/packer/base.yml
new file mode 100644
index 00000000..0f1f55a8
--- /dev/null
+++ b/packer/base.yml
@@ -0,0 +1,69 @@
+---
+
+# build trigger: 201907171352
+
+variables:
+  ssh_auth_keys: "{{ env `PACKER_SSH_AUTH_KEYS` }}"
+  esx_host: "{{ env `PACKER_ESX_HOST` }}"
+  esx_datastore: "{{ env `PACKER_ESX_DATASTORE` }}"
+  esx_username: "{{ env `PACKER_ESX_USERNAME` }}"
+  esx_password: "{{ env `PACKER_ESX_PASSWORD` }}"
+
+builders:
+  - type: vmware-iso
+    vm_name: ubicast-base
+    display_name: ubicast-base
+    output_directory: output
+    guest_os_type: debian10-64
+    iso_urls:
+      - http://debian.univ-lorraine.fr/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+      - https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+    iso_checksum: 23237b0a100a860b3dc7ffcfb5baae4bed5460ac5f3f2b929df3154f3319b9809055b695264586f60289cc6cb25077c12938cc612fee01756bfa779c87d5a315
+    iso_checksum_type: sha512
+    remote_type: esx5
+    remote_host: "{{ user `esx_host` }}"
+    remote_datastore: "{{ user `esx_datastore` }}"
+    remote_username: "{{ user `esx_username` }}"
+    remote_password: "{{ user `esx_password` }}"
+    format: ova
+    tools_upload_flavor: linux
+    vnc_disable_password: true
+    cpus: 2
+    memory: 2048
+    vmx_data:
+      ethernet0.networkName: VM Network
+    boot_command:
+      - "<esc><wait>"
+      - "/install.amd/vmlinuz "
+      - "initrd=/install.amd/initrd.gz "
+      - "hostname={{ .Name }} "
+      - "domain= "
+      - "auto=true "
+      - "url=https://www.ubicast.eu/media/downloads/sys/preseed.cfg "
+      - "vga=788 noprompt quiet --<enter>"
+    ssh_username: root
+    ssh_password: ubicast
+    ssh_timeout: 15m
+    headless: false
+    shutdown_command: shutdown -P now
+
+provisioners:
+  - type: file
+    source: packer/files/root.cfg
+    destination: /tmp/99_root.cfg
+  - type: shell
+    expect_disconnect: true
+    pause_before: 10s
+    environment_vars:
+      - PACKER_SSH_AUTH_KEYS={{ user `ssh_auth_keys` }}
+    scripts:
+      - packer/scripts/root.sh
+      - packer/scripts/upgrade.sh
+      - packer/scripts/upgrade-buster.sh
+      - packer/scripts/reboot.sh
+  - type: shell
+    pause_before: 30s
+    scripts:
+      - packer/scripts/cleanup-buster.sh
+
+...
diff --git a/packer/celerity.yml b/packer/celerity.yml
new file mode 100644
index 00000000..58fc059a
--- /dev/null
+++ b/packer/celerity.yml
@@ -0,0 +1,74 @@
+---
+
+# build trigger: 201907171352
+
+variables:
+  ssh_auth_keys: "{{ env `PACKER_SSH_AUTH_KEYS` }}"
+  esx_host: "{{ env `PACKER_ESX_HOST` }}"
+  esx_datastore: "{{ env `PACKER_ESX_DATASTORE` }}"
+  esx_username: "{{ env `PACKER_ESX_USERNAME` }}"
+  esx_password: "{{ env `PACKER_ESX_PASSWORD` }}"
+  network_ip: "{{ env `NETWORK_IP` }}"
+  network_mask: "{{ env `NETWORK_MASK` }}"
+  network_gateway: "{{ env `NETWORK_GATEWAY` }}"
+  network_dns: "{{ env `NETWORK_DNS` }}"
+  skyreach_system_key: "{{ env `SKYREACH_SYSTEM_KEY` }}"
+
+builders:
+  - type: vmware-iso
+    vm_name: ubicast-celerity
+    display_name: ubicast-celerity
+    output_directory: output
+    guest_os_type: debian10-64
+    iso_urls:
+      - http://debian.univ-lorraine.fr/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+      - https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+    iso_checksum: 23237b0a100a860b3dc7ffcfb5baae4bed5460ac5f3f2b929df3154f3319b9809055b695264586f60289cc6cb25077c12938cc612fee01756bfa779c87d5a315
+    iso_checksum_type: sha512
+    remote_type: esx5
+    remote_host: "{{ user `esx_host` }}"
+    remote_datastore: "{{ user `esx_datastore` }}"
+    remote_username: "{{ user `esx_username` }}"
+    remote_password: "{{ user `esx_password` }}"
+    format: ova
+    tools_upload_flavor: linux
+    vnc_disable_password: true
+    cpus: 2
+    memory: 2048
+    vmx_data:
+      ethernet0.networkName: VM Network
+    boot_command:
+      - "<esc><wait>"
+      - "/install.amd/vmlinuz "
+      - "initrd=/install.amd/initrd.gz "
+      - "hostname={{ .Name }} "
+      - "domain= "
+      - "auto=true "
+      - "url=https://www.ubicast.eu/media/downloads/sys/preseed.cfg "
+      - "vga=788 noprompt quiet --<enter>"
+    ssh_username: root
+    ssh_password: ubicast
+    ssh_timeout: 15m
+    headless: false
+    shutdown_command: shutdown -P now
+
+provisioners:
+  - type: shell
+    pause_before: 10s
+    environment_vars:
+      - PACKER_SSH_AUTH_KEYS={{ user `ssh_auth_keys` }}
+    scripts:
+      - packer/scripts/root.sh
+      - packer/scripts/upgrade.sh
+  - type: ansible
+    ansible_env_vars:
+      - NETWORK_IP={{ user `network_ip` }}
+      - NETWORK_MASK={{ user `network_mask` }}
+      - NETWORK_GATEWAY={{ user `network_gateway` }}
+      - NETWORK_DNS={{ user `network_dns` }}
+      - SKYREACH_SYSTEM_KEY={{ user `skyreach_system_key` }}
+    playbook_file: playbooks/celerity.yml
+    groups:
+      - celerity
+
+...
diff --git a/packer/custom/example.yml b/packer/custom/example.yml
new file mode 100644
index 00000000..7b04dc74
--- /dev/null
+++ b/packer/custom/example.yml
@@ -0,0 +1,84 @@
+---
+
+variables:
+  name: "{{ env `PACKER_NAME` }}"
+  ssh_auth_keys: "{{ env `PACKER_SSH_AUTH_KEYS` }}"
+  esx_host: "{{ env `PACKER_ESX_HOST` }}"
+  esx_datastore: "{{ env `PACKER_ESX_DATASTORE` }}"
+  esx_username: "{{ env `PACKER_ESX_USERNAME` }}"
+  esx_password: "{{ env `PACKER_ESX_PASSWORD` }}"
+  network_ip: "{{ env `NETWORK_IP` }}"
+  network_mask: "{{ env `NETWORK_MASK` }}"
+  network_gateway: "{{ env `NETWORK_GATEWAY` }}"
+  network_dns: "{{ env `NETWORK_DNS` }}"
+  skyreach_system_key: "{{ env `SKYREACH_SYSTEM_KEY` }}"
+
+builders:
+  - type: vmware-iso
+    vm_name: ubicast-{{ user `name` }}
+    display_name: ubicast-{{ user `name` }}
+    output_directory: output
+    guest_os_type: debian10-64
+    iso_urls:
+      - http://debian.univ-lorraine.fr/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+      - https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+    iso_checksum: 23237b0a100a860b3dc7ffcfb5baae4bed5460ac5f3f2b929df3154f3319b9809055b695264586f60289cc6cb25077c12938cc612fee01756bfa779c87d5a315
+    iso_checksum_type: sha512
+    remote_type: esx5
+    remote_host: "{{ user `esx_host` }}"
+    remote_datastore: "{{ user `esx_datastore` }}"
+    remote_username: "{{ user `esx_username` }}"
+    remote_password: "{{ user `esx_password` }}"
+    format: ova
+    tools_upload_flavor: linux
+    vnc_disable_password: true
+    cpus: 2
+    memory: 2048
+    disk_type_id: zeroedthick
+    skip_compaction: true
+    vmx_data:
+      ethernet0.networkName: VM Network
+    boot_command:
+      - "<esc><wait>"
+      - "/install.amd/vmlinuz "
+      - "initrd=/install.amd/initrd.gz "
+      - "hostname={{ .Name }} "
+      - "domain= "
+      - "auto=true "
+      - "url=https://nextcloud.ubicast.net/s/LEcyMWG9BnKsrHX/download?path=%2F&files=preseed.cfg "
+      - "vga=788 noprompt quiet --<enter>"
+    ssh_username: root
+    ssh_password: ubicast
+    ssh_timeout: 15m
+    headless: false
+    shutdown_command: shutdown -P now
+    # Uncomment if you have to do custom/manual export:
+    # keep_registered: true
+    # skip_export: true
+
+provisioners:
+  - type: shell
+    pause_before: 10s
+    environment_vars:
+      - PACKER_SSH_AUTH_KEYS={{ user `ssh_auth_keys` }}
+    scripts:
+      - packer/scripts/root.sh
+      - packer/scripts/upgrade.sh
+  - type: ansible
+    ansible_env_vars:
+      - NETWORK_IP={{ user `network_ip` }}
+      - NETWORK_MASK={{ user `network_mask` }}
+      - NETWORK_GATEWAY={{ user `network_gateway` }}
+      - NETWORK_DNS={{ user `network_dns` }}
+      - SKYREACH_SYSTEM_KEY={{ user `skyreach_system_key` }}
+    playbook_file: site.yml
+    groups:
+      - monitor
+      - postgres
+      - manager
+      - wowza
+      - celerity
+      - server
+      - worker
+
+...
diff --git a/packer/files/preseed.cfg b/packer/files/preseed.cfg
new file mode 100644
index 00000000..7c359882
--- /dev/null
+++ b/packer/files/preseed.cfg
@@ -0,0 +1,59 @@
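+# Unattended Debian 10 installation answers: English locale, guided LVM
+# partitioning, root SSH login enabled, minimal package set.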
+d-i debian-installer/language string en
+d-i debian-installer/country string FR
+d-i debian-installer/locale string en_US.UTF-8
+d-i localechooser/supported-locales multiselect fr_FR.UTF-8
+
+d-i keyboard-configuration/xkb-keymap select us
+
+d-i netcfg/choose_interface select auto
+
+d-i netcfg/get_hostname string mediaserver
+d-i netcfg/get_domain string
+
+d-i hw-detect/load_firmware boolean true
+
+d-i mirror/country string manual
+d-i mirror/http/hostname string deb.debian.org
+d-i mirror/http/directory string /debian
+d-i mirror/http/proxy string
+
+d-i passwd/root-login boolean true
+d-i passwd/root-password password ubicast
+d-i passwd/root-password-again password ubicast
+d-i passwd/make-user boolean false
+
+d-i clock-setup/utc boolean true
+d-i time/zone string UTC
+
+d-i partman-auto/method string lvm
+d-i partman-auto/choose_recipe select multi
+d-i partman-auto-lvm/guided_size string max
+d-i partman-md/device_remove_md boolean true
+d-i partman-lvm/device_remove_lvm boolean true
+d-i partman-lvm/confirm boolean true
+d-i partman-lvm/confirm_nooverwrite boolean true
+d-i partman-partitioning/confirm_write_new_label boolean true
+d-i partman/choose_partition select finish
+d-i partman/confirm boolean true
+d-i partman/confirm_nooverwrite boolean true
+
+d-i grub-installer/only_debian boolean true
+d-i grub-installer/with_other_os boolean true
+d-i grub-installer/bootdev string default
+
+d-i apt-setup/non-free boolean true
+d-i apt-setup/contrib boolean true
+d-i apt-setup/cdrom/set-first boolean false
+d-i apt-setup/cdrom/set-next boolean false
+d-i apt-setup/cdrom/set-failed boolean false
+d-i pkgsel/include string git openssh-server python3 sudo
+d-i pkgsel/upgrade select none
+
+tasksel tasksel/first multiselect standard
+
+popularity-contest popularity-contest/participate boolean false
+
+d-i finish-install/reboot_in_progress note
+
+d-i preseed/late_command string \
+  in-target sh -c 'sed -i "s/^#PermitRootLogin.*\$/PermitRootLogin yes/g" /etc/ssh/sshd_config';
diff --git a/packer/files/root.cfg b/packer/files/root.cfg
new file mode 100644
index 00000000..17e78fb5
--- /dev/null
+++ b/packer/files/root.cfg
@@ -0,0 +1,8 @@
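+# cloud-init override installed by packer/scripts/root.sh: keep the root
+# account enabled and use it as the default user.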
+disable_root: false
+system_info:
+  default_user:
+    name: root
+    lock_passwd: True
+    gecos: root
+    groups: [root]
+    shell: /bin/bash
diff --git a/packer/manager.yml b/packer/manager.yml
new file mode 100644
index 00000000..7e7ea5a6
--- /dev/null
+++ b/packer/manager.yml
@@ -0,0 +1,75 @@
+---
+
+# build trigger: 201907171352
+
+variables:
+  ssh_auth_keys: "{{ env `PACKER_SSH_AUTH_KEYS` }}"
+  esx_host: "{{ env `PACKER_ESX_HOST` }}"
+  esx_datastore: "{{ env `PACKER_ESX_DATASTORE` }}"
+  esx_username: "{{ env `PACKER_ESX_USERNAME` }}"
+  esx_password: "{{ env `PACKER_ESX_PASSWORD` }}"
+  network_ip: "{{ env `NETWORK_IP` }}"
+  network_mask: "{{ env `NETWORK_MASK` }}"
+  network_gateway: "{{ env `NETWORK_GATEWAY` }}"
+  network_dns: "{{ env `NETWORK_DNS` }}"
+  skyreach_system_key: "{{ env `SKYREACH_SYSTEM_KEY` }}"
+
+builders:
+  - type: vmware-iso
+    vm_name: ubicast-manager
+    display_name: ubicast-manager
+    output_directory: output
+    guest_os_type: debian10-64
+    iso_urls:
+      - http://debian.univ-lorraine.fr/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+      - https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+    iso_checksum: 23237b0a100a860b3dc7ffcfb5baae4bed5460ac5f3f2b929df3154f3319b9809055b695264586f60289cc6cb25077c12938cc612fee01756bfa779c87d5a315
+    iso_checksum_type: sha512
+    remote_type: esx5
+    remote_host: "{{ user `esx_host` }}"
+    remote_datastore: "{{ user `esx_datastore` }}"
+    remote_username: "{{ user `esx_username` }}"
+    remote_password: "{{ user `esx_password` }}"
+    format: ova
+    tools_upload_flavor: linux
+    vnc_disable_password: true
+    cpus: 2
+    memory: 2048
+    vmx_data:
+      ethernet0.networkName: VM Network
+    boot_command:
+      - "<esc><wait>"
+      - "/install.amd/vmlinuz "
+      - "initrd=/install.amd/initrd.gz "
+      - "hostname={{ .Name }} "
+      - "domain= "
+      - "auto=true "
+      - "url=https://www.ubicast.eu/media/downloads/sys/preseed.cfg "
+      - "vga=788 noprompt quiet --<enter>"
+    ssh_username: root
+    ssh_password: ubicast
+    ssh_timeout: 15m
+    headless: false
+    shutdown_command: shutdown -P now
+
+provisioners:
+  - type: shell
+    pause_before: 10s
+    environment_vars:
+      - PACKER_SSH_AUTH_KEYS={{ user `ssh_auth_keys` }}
+    scripts:
+      - packer/scripts/root.sh
+      - packer/scripts/upgrade.sh
+  - type: ansible
+    ansible_env_vars:
+      - NETWORK_IP={{ user `network_ip` }}
+      - NETWORK_MASK={{ user `network_mask` }}
+      - NETWORK_GATEWAY={{ user `network_gateway` }}
+      - NETWORK_DNS={{ user `network_dns` }}
+      - SKYREACH_SYSTEM_KEY={{ user `skyreach_system_key` }}
+    playbook_file: playbooks/manager.yml
+    groups:
+      - postgres
+      - manager
+
+...
diff --git a/packer/scripts/cleanup-buster.sh b/packer/scripts/cleanup-buster.sh
new file mode 100644
index 00000000..f96b0da5
--- /dev/null
+++ b/packer/scripts/cleanup-buster.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+export DEBIAN_FRONTEND=noninteractive
+export UCF_FORCE_CONFFNEW=1
+export APT="DEBIAN_FRONTEND=noninteractive apt-get -y -q -o Dpkg::Options::=--force-confnew"
+
+sudo $APT autoremove --purge
+
+sudo $APT --auto-remove purge \
+  awscli \
+  gcc-6-base \
+  initscripts \
+  insserv \
+  linux-image-4.9.* \
+  nano \
+  python2 \
+  startpar \
+  sysv-rc \
+  || :
+
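+# purge orphaned packages until deborphan reports none left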
+while [ "$(deborphan)" ]; do
+  sudo $APT --auto-remove purge $(deborphan)
+done
+
+if [ "$(dpkg -l | grep ^rc | awk '{ print $2 }')" ]; then
+  dpkg -l | grep ^rc | awk '{ print $2 }' | xargs sudo dpkg -P
+fi
+
+exit 0
diff --git a/packer/scripts/reboot.sh b/packer/scripts/reboot.sh
new file mode 100644
index 00000000..43ebe369
--- /dev/null
+++ b/packer/scripts/reboot.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+sudo shutdown -r now
+
+exit 0
diff --git a/packer/scripts/root.sh b/packer/scripts/root.sh
new file mode 100644
index 00000000..cfa8e196
--- /dev/null
+++ b/packer/scripts/root.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
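+# install the SSH public keys provided by Packer for the root account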
+sudo mkdir -p /root/.ssh
+sudo chmod 700 /root/.ssh
+echo -e "$PACKER_SSH_AUTH_KEYS" | sudo tee /root/.ssh/authorized_keys
+
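+# install the cloud-init override uploaded by the file provisioner, if any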
+sudo mkdir -p /etc/cloud/cloud.cfg.d
+if [ -f /tmp/99_root.cfg ]; then
+  sudo mv /tmp/99_root.cfg /etc/cloud/cloud.cfg.d/99_root.cfg
+  sudo chown root:root /etc/cloud/cloud.cfg.d/99_root.cfg
+  sudo chmod 644 /etc/cloud/cloud.cfg.d/99_root.cfg
+fi
+
+exit 0
diff --git a/packer/scripts/upgrade-buster.sh b/packer/scripts/upgrade-buster.sh
new file mode 100644
index 00000000..358cf965
--- /dev/null
+++ b/packer/scripts/upgrade-buster.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
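+# switch the APT sources from stretch to buster and dist-upgrade to Debian 10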
+export DEBIAN_FRONTEND=noninteractive
+export UCF_FORCE_CONFFNEW=1
+export APT="DEBIAN_FRONTEND=noninteractive apt-get -y -q -o Dpkg::Options::=--force-confnew"
+
+sudo sed -i 's/stretch/buster/g' /etc/apt/sources.list
+
+sudo $APT update
+sudo $APT upgrade
+sudo $APT dist-upgrade
+
+exit 0
diff --git a/packer/scripts/upgrade.sh b/packer/scripts/upgrade.sh
new file mode 100644
index 00000000..35073e85
--- /dev/null
+++ b/packer/scripts/upgrade.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
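+# remove unattended-upgrades, then install provisioning tools and bring the system up to date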
+export DEBIAN_FRONTEND=noninteractive
+export UCF_FORCE_CONFFNEW=1
+export APT="DEBIAN_FRONTEND=noninteractive apt-get -y -q -o Dpkg::Options::=--force-confnew"
+
+sudo $APT purge unattended-upgrades
+sudo $APT update
+sudo $APT install apt-utils deborphan git
+sudo $APT dist-upgrade
+
+exit 0
diff --git a/packer/scripts/yml2json b/packer/scripts/yml2json
new file mode 100755
index 00000000..b56f08a8
--- /dev/null
+++ b/packer/scripts/yml2json
@@ -0,0 +1,8 @@
+#!/usr/bin/env python3
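+# Convert a YAML Packer template read from stdin to JSON on stdout,
+# e.g.: packer/scripts/yml2json < packer/aio.yml > aio.json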
+
+import json
+import sys
+import yaml
+
+y = yaml.safe_load(sys.stdin.read())
+print(json.dumps(y))
diff --git a/packer/server.yml b/packer/server.yml
new file mode 100644
index 00000000..f821ad67
--- /dev/null
+++ b/packer/server.yml
@@ -0,0 +1,76 @@
+---
+
+# build trigger: 201907171352
+
+variables:
+  ssh_auth_keys: "{{ env `PACKER_SSH_AUTH_KEYS` }}"
+  esx_host: "{{ env `PACKER_ESX_HOST` }}"
+  esx_datastore: "{{ env `PACKER_ESX_DATASTORE` }}"
+  esx_username: "{{ env `PACKER_ESX_USERNAME` }}"
+  esx_password: "{{ env `PACKER_ESX_PASSWORD` }}"
+  network_ip: "{{ env `NETWORK_IP` }}"
+  network_mask: "{{ env `NETWORK_MASK` }}"
+  network_gateway: "{{ env `NETWORK_GATEWAY` }}"
+  network_dns: "{{ env `NETWORK_DNS` }}"
+  skyreach_system_key: "{{ env `SKYREACH_SYSTEM_KEY` }}"
+
+builders:
+  - type: vmware-iso
+    vm_name: ubicast-server
+    display_name: ubicast-server
+    output_directory: output
+    guest_os_type: debian10-64
+    iso_urls:
+      - http://debian.univ-lorraine.fr/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+      - https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+    iso_checksum: 23237b0a100a860b3dc7ffcfb5baae4bed5460ac5f3f2b929df3154f3319b9809055b695264586f60289cc6cb25077c12938cc612fee01756bfa779c87d5a315
+    iso_checksum_type: sha512
+    remote_type: esx5
+    remote_host: "{{ user `esx_host` }}"
+    remote_datastore: "{{ user `esx_datastore` }}"
+    remote_username: "{{ user `esx_username` }}"
+    remote_password: "{{ user `esx_password` }}"
+    format: ova
+    tools_upload_flavor: linux
+    vnc_disable_password: true
+    cpus: 2
+    memory: 2048
+    vmx_data:
+      ethernet0.networkName: VM Network
+    boot_command:
+      - "<esc><wait>"
+      - "/install.amd/vmlinuz "
+      - "initrd=/install.amd/initrd.gz "
+      - "hostname={{ .Name }} "
+      - "domain= "
+      - "auto=true "
+      - "url=https://www.ubicast.eu/media/downloads/sys/preseed.cfg "
+      - "vga=788 noprompt quiet --<enter>"
+    ssh_username: root
+    ssh_password: ubicast
+    ssh_timeout: 15m
+    headless: false
+    shutdown_command: shutdown -P now
+
+provisioners:
+  - type: shell
+    pause_before: 10s
+    environment_vars:
+      - PACKER_SSH_AUTH_KEYS={{ user `ssh_auth_keys` }}
+    scripts:
+      - packer/scripts/root.sh
+      - packer/scripts/upgrade.sh
+  - type: ansible
+    ansible_env_vars:
+      - NETWORK_IP={{ user `network_ip` }}
+      - NETWORK_MASK={{ user `network_mask` }}
+      - NETWORK_GATEWAY={{ user `network_gateway` }}
+      - NETWORK_DNS={{ user `network_dns` }}
+      - SKYREACH_SYSTEM_KEY={{ user `skyreach_system_key` }}
+    playbook_file: playbooks/server.yml
+    groups:
+      - monitor
+      - postgres
+      - server
+
+...
diff --git a/packer/worker.yml b/packer/worker.yml
new file mode 100644
index 00000000..aff48f1d
--- /dev/null
+++ b/packer/worker.yml
@@ -0,0 +1,74 @@
+---
+
+# build trigger: 201907171352
+
+variables:
+  ssh_auth_keys: "{{ env `PACKER_SSH_AUTH_KEYS` }}"
+  esx_host: "{{ env `PACKER_ESX_HOST` }}"
+  esx_datastore: "{{ env `PACKER_ESX_DATASTORE` }}"
+  esx_username: "{{ env `PACKER_ESX_USERNAME` }}"
+  esx_password: "{{ env `PACKER_ESX_PASSWORD` }}"
+  network_ip: "{{ env `NETWORK_IP` }}"
+  network_mask: "{{ env `NETWORK_MASK` }}"
+  network_gateway: "{{ env `NETWORK_GATEWAY` }}"
+  network_dns: "{{ env `NETWORK_DNS` }}"
+  skyreach_system_key: "{{ env `SKYREACH_SYSTEM_KEY` }}"
+
+builders:
+  - type: vmware-iso
+    vm_name: ubicast-worker
+    display_name: ubicast-worker
+    output_directory: output
+    guest_os_type: debian10-64
+    iso_urls:
+      - http://debian.univ-lorraine.fr/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+      - https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-10.1.0-amd64-netinst.iso
+    iso_checksum: 23237b0a100a860b3dc7ffcfb5baae4bed5460ac5f3f2b929df3154f3319b9809055b695264586f60289cc6cb25077c12938cc612fee01756bfa779c87d5a315
+    iso_checksum_type: sha512
+    remote_type: esx5
+    remote_host: "{{ user `esx_host` }}"
+    remote_datastore: "{{ user `esx_datastore` }}"
+    remote_username: "{{ user `esx_username` }}"
+    remote_password: "{{ user `esx_password` }}"
+    format: ova
+    tools_upload_flavor: linux
+    vnc_disable_password: true
+    cpus: 2
+    memory: 2048
+    vmx_data:
+      ethernet0.networkName: VM Network
+    boot_command:
+      - "<esc><wait>"
+      - "/install.amd/vmlinuz "
+      - "initrd=/install.amd/initrd.gz "
+      - "hostname={{ .Name }} "
+      - "domain= "
+      - "auto=true "
+      - "url=https://www.ubicast.eu/media/downloads/sys/preseed.cfg "
+      - "vga=788 noprompt quiet --<enter>"
+    ssh_username: root
+    ssh_password: ubicast
+    ssh_timeout: 15m
+    headless: false
+    shutdown_command: shutdown -P now
+
+provisioners:
+  - type: shell
+    pause_before: 10s
+    environment_vars:
+      - PACKER_SSH_AUTH_KEYS={{ user `ssh_auth_keys` }}
+    scripts:
+      - packer/scripts/root.sh
+      - packer/scripts/upgrade.sh
+  - type: ansible
+    ansible_env_vars:
+      - NETWORK_IP={{ user `network_ip` }}
+      - NETWORK_MASK={{ user `network_mask` }}
+      - NETWORK_GATEWAY={{ user `network_gateway` }}
+      - NETWORK_DNS={{ user `network_dns` }}
+      - SKYREACH_SYSTEM_KEY={{ user `skyreach_system_key` }}
+    playbook_file: playbooks/worker.yml
+    groups:
+      - worker
+
+...
diff --git a/playbooks/bench-server.yml b/playbooks/bench-server.yml
new file mode 100755
index 00000000..0ef8cecb
--- /dev/null
+++ b/playbooks/bench-server.yml
@@ -0,0 +1,16 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+
+- name: DEPLOY BENCHMARK SERVER
+  hosts:
+    - bench_server
+  tags:
+    - bench_server
+  roles:
+    - bench-server
+
+...
diff --git a/playbooks/bench-worker.yml b/playbooks/bench-worker.yml
new file mode 100755
index 00000000..6cb6031f
--- /dev/null
+++ b/playbooks/bench-worker.yml
@@ -0,0 +1,16 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+
+- name: DEPLOY BENCHMARK WORKERS
+  hosts:
+    - bench_worker
+  tags:
+    - bench_worker
+  roles:
+    - bench-worker
+
+...
diff --git a/playbooks/celerity.yml b/playbooks/celerity.yml
new file mode 100755
index 00000000..7777414e
--- /dev/null
+++ b/playbooks/celerity.yml
@@ -0,0 +1,14 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+- import_playbook: includes/init.yml
+- import_playbook: includes/base.yml
+
+- import_playbook: includes/celerity.yml
+
+- import_playbook: includes/network.yml
+
+...
diff --git a/playbooks/cluster.yml b/playbooks/cluster.yml
new file mode 100755
index 00000000..432b1462
--- /dev/null
+++ b/playbooks/cluster.yml
@@ -0,0 +1,16 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+- import_playbook: includes/init.yml
+- import_playbook: includes/base.yml
+- when: firewall_enabled | default(false) | bool
+  import_playbook: includes/firewall.yml
+
+- import_playbook: includes/cluster.yml
+
+- import_playbook: includes/network.yml
+
+...
diff --git a/playbooks/import.yml b/playbooks/import.yml
new file mode 100755
index 00000000..10fe65e6
--- /dev/null
+++ b/playbooks/import.yml
@@ -0,0 +1,14 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+- import_playbook: includes/init.yml
+- import_playbook: includes/base.yml
+
+- import_playbook: includes/import.yml
+
+- import_playbook: includes/network.yml
+
+...
diff --git a/playbooks/includes/base.yml b/playbooks/includes/base.yml
new file mode 100755
index 00000000..890d6ec1
--- /dev/null
+++ b/playbooks/includes/base.yml
@@ -0,0 +1,15 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: BASE
+  hosts:
+    - all
+  tags:
+    - always
+    - base
+  roles:
+    - postfix
+    - ntp
+    - fail2ban
+
+...
diff --git a/playbooks/includes/celerity.yml b/playbooks/includes/celerity.yml
new file mode 100755
index 00000000..b4d9f597
--- /dev/null
+++ b/playbooks/includes/celerity.yml
@@ -0,0 +1,12 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: CELERITY
+  hosts:
+    - celerity
+  tags:
+    - celerity
+  roles:
+    - celerity
+
+...
diff --git a/playbooks/includes/certificates.yml b/playbooks/includes/certificates.yml
new file mode 100755
index 00000000..5f059109
--- /dev/null
+++ b/playbooks/includes/certificates.yml
@@ -0,0 +1,20 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: CERTIFICATES
+  hosts:
+    - monitor
+    - manager
+    - server
+  tags:
+    - monitor
+    - manager
+    - server
+    - letsencrypt
+  roles:
+    - role: letsencrypt
+      when:
+        - letsencrypt_enabled is defined
+        - letsencrypt_enabled
+
+...
diff --git a/playbooks/includes/check_docker.yml b/playbooks/includes/check_docker.yml
new file mode 100755
index 00000000..026e58ad
--- /dev/null
+++ b/playbooks/includes/check_docker.yml
@@ -0,0 +1,19 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: CHECK IF RUNNING IN DOCKER
+  hosts:
+    - all
+  tags:
+    - always
+    - check
+  tasks:
+    - name: check .dockerenv presence
+      register: check_if_docker
+      stat:
+        path: /.dockerenv
+    - name: set docker flag variable
+      set_fact:
+        in_docker: "{{ check_if_docker.stat.isreg is defined and check_if_docker.stat.isreg }}"
+
+...
diff --git a/playbooks/includes/cluster.yml b/playbooks/includes/cluster.yml
new file mode 100755
index 00000000..a7b072f1
--- /dev/null
+++ b/playbooks/includes/cluster.yml
@@ -0,0 +1,12 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: CLUSTER
+  hosts:
+    - cluster
+  tags:
+    - cluster
+  roles:
+    - cluster
+
+...
diff --git a/playbooks/includes/conf.yml b/playbooks/includes/conf.yml
new file mode 100755
index 00000000..e9ce9e78
--- /dev/null
+++ b/playbooks/includes/conf.yml
@@ -0,0 +1,13 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: LOAD CONF
+  hosts:
+    - all
+  tags:
+    - always
+    - conf
+  roles:
+    - conf
+
+...
diff --git a/playbooks/includes/firewall.yml b/playbooks/includes/firewall.yml
new file mode 100755
index 00000000..b37ad0f8
--- /dev/null
+++ b/playbooks/includes/firewall.yml
@@ -0,0 +1,13 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: FIREWALL
+  hosts:
+    - all
+  tags:
+    - always
+    - firewall
+  roles:
+    - ferm
+
+...
diff --git a/playbooks/includes/import.yml b/playbooks/includes/import.yml
new file mode 100755
index 00000000..944c9ba7
--- /dev/null
+++ b/playbooks/includes/import.yml
@@ -0,0 +1,12 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: MEDIAIMPORT
+  hosts:
+    - import
+  tags:
+    - import
+  roles:
+    - import
+
+...
diff --git a/playbooks/includes/init.yml b/playbooks/includes/init.yml
new file mode 100755
index 00000000..06ba396e
--- /dev/null
+++ b/playbooks/includes/init.yml
@@ -0,0 +1,17 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: INIT
+  hosts:
+    - all
+  tags:
+    - always
+    - init
+  roles:
+    - init
+    - repos
+    - sysutils
+    - locale
+    - users
+
+...
diff --git a/playbooks/includes/manager.yml b/playbooks/includes/manager.yml
new file mode 100755
index 00000000..ff6cfc37
--- /dev/null
+++ b/playbooks/includes/manager.yml
@@ -0,0 +1,15 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: MIRISMANAGER
+  hosts:
+    - manager
+  tags:
+    - manager
+  vars:
+    nginx_server_name: "{{ manager_hostname | default(envsetup_cm_server_name, true) }}"
+  roles:
+    - nginx
+    - manager
+
+...
diff --git a/playbooks/includes/monitor.yml b/playbooks/includes/monitor.yml
new file mode 100755
index 00000000..358de25f
--- /dev/null
+++ b/playbooks/includes/monitor.yml
@@ -0,0 +1,15 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: MSMONITOR
+  hosts:
+    - monitor
+  tags:
+    - monitor
+  vars:
+    nginx_server_name: "{{ monitor_hostname | default(envsetup_monitor_server_name, true) }}"
+  roles:
+    - nginx
+    - monitor
+
+...
diff --git a/playbooks/includes/netcapture.yml b/playbooks/includes/netcapture.yml
new file mode 100755
index 00000000..fae8ca2c
--- /dev/null
+++ b/playbooks/includes/netcapture.yml
@@ -0,0 +1,12 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: NETCAPTURE
+  hosts:
+    - netcapture
+  tags:
+    - netcapture
+  roles:
+    - netcapture
+
+...
diff --git a/playbooks/includes/network.yml b/playbooks/includes/network.yml
new file mode 100755
index 00000000..92f4d62b
--- /dev/null
+++ b/playbooks/includes/network.yml
@@ -0,0 +1,14 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: CUSTOMIZE NETWORK SETTINGS
+  hosts:
+    - all
+  tags:
+    - always
+    - network
+  roles:
+    - network
+    - proxy
+
+...
diff --git a/playbooks/includes/postgres.yml b/playbooks/includes/postgres.yml
new file mode 100755
index 00000000..dafae998
--- /dev/null
+++ b/playbooks/includes/postgres.yml
@@ -0,0 +1,12 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: POSTGRESQL
+  hosts:
+    - postgres
+  tags:
+    - postgres
+  roles:
+    - postgres
+
+...
diff --git a/playbooks/includes/python.yml b/playbooks/includes/python.yml
new file mode 100755
index 00000000..b19c8a3b
--- /dev/null
+++ b/playbooks/includes/python.yml
@@ -0,0 +1,14 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: ENSURE PYTHON
+  hosts:
+    - all
+  tags:
+    - always
+    - python
+  gather_facts: false
+  roles:
+    - python
+
+...
diff --git a/playbooks/includes/server.yml b/playbooks/includes/server.yml
new file mode 100755
index 00000000..e35f0830
--- /dev/null
+++ b/playbooks/includes/server.yml
@@ -0,0 +1,15 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: MEDIASERVER
+  hosts:
+    - server
+  tags:
+    - server
+  vars:
+    nginx_server_name: "{{ server_hostname | default(envsetup_ms_server_name, true) }}"
+  roles:
+    - nginx
+    - server
+
+...
diff --git a/playbooks/includes/vault.yml b/playbooks/includes/vault.yml
new file mode 100755
index 00000000..89a7f49a
--- /dev/null
+++ b/playbooks/includes/vault.yml
@@ -0,0 +1,12 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: MEDIAVAULT
+  hosts:
+    - vault
+  tags:
+    - vault
+  roles:
+    - vault
+
+...
diff --git a/playbooks/includes/worker.yml b/playbooks/includes/worker.yml
new file mode 100755
index 00000000..7786fda4
--- /dev/null
+++ b/playbooks/includes/worker.yml
@@ -0,0 +1,12 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: MEDIAWORKER
+  hosts:
+    - worker
+  tags:
+    - worker
+  roles:
+    - worker
+
+...
diff --git a/playbooks/includes/wowza.yml b/playbooks/includes/wowza.yml
new file mode 100755
index 00000000..753f1671
--- /dev/null
+++ b/playbooks/includes/wowza.yml
@@ -0,0 +1,12 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: WOWZA
+  hosts:
+    - wowza
+  tags:
+    - wowza
+  roles:
+    - wowza
+
+...
diff --git a/playbooks/manager.yml b/playbooks/manager.yml
new file mode 100755
index 00000000..ea5f4af6
--- /dev/null
+++ b/playbooks/manager.yml
@@ -0,0 +1,16 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+- import_playbook: includes/init.yml
+- import_playbook: includes/base.yml
+
+- import_playbook: includes/postgres.yml
+- import_playbook: includes/manager.yml
+
+- import_playbook: includes/certificates.yml
+- import_playbook: includes/network.yml
+
+...
diff --git a/playbooks/migrate-debian.yml b/playbooks/migrate-debian.yml
new file mode 100755
index 00000000..6da1a765
--- /dev/null
+++ b/playbooks/migrate-debian.yml
@@ -0,0 +1,116 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: MIGRATE TO DEBIAN 10
+  hosts: all
+  tasks:
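+    # flow: check free disk space, upgrade and clean the current system, dump the
+    # MediaServer and Skyreach databases, switch APT sources to buster, then
+    # dist-upgrade to Debian 10 and reinstall the Debian versions of the packages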
+
+    - name: check / space
+      shell:
+        cmd: '[ $(df --output="avail" / | tail -n 1) -gt 4000000 ]'
+
+    - name: check /boot space
+      shell:
+        cmd: '[ $(df --output="avail" /boot | tail -n 1) -gt 300000 ]'
+
+    - name: dist-upgrade current ubuntu
+      apt:
+        force_apt_get: true
+        update_cache: true
+        dpkg_options: force-confnew
+        upgrade: dist
+
+    - name: autoremove current ubuntu
+      apt:
+        force_apt_get: true
+        autoclean: true
+        autoremove: true
+
+    - name: list ubicast packages
+      shell:
+        cmd: |
+          rm -f /root/ubicast-installed;
+          for pkg in 'ubicast-mediaserver' 'ubicast-mediaserver-runtime' 'ubicast-monitor' 'ubicast-monitor-runtime' 'ubicast-skyreach' 'ubicast-skyreach-runtime' 'celerity-server' 'celerity-workers' 'ubicast-config'; do
+            dpkg -s "$pkg" >/dev/null 2>&1 && echo -n "$pkg " | tee -a '/root/ubicast-installed';
+            echo '';
+          done
+
+    - name: dump mediaserver database
+      shell:
+        cmd: /usr/bin/mscontroller.py dump
+
+    - name: dump skyreach database
+      shell:
+        cmd: /home/skyreach/htdocs/skyreach_site/scripts/control.sh dump
+
+    - name: stop services
+      loop:
+        - nginx
+        - msmonitor
+        - mediaserver
+        - skyreach
+      systemd:
+        name: "{{ item }}"
+        state: stopped
+
+    - name: add debian keys
+      loop:
+        - https://ftp-master.debian.org/keys/archive-key-10.asc
+        - https://ftp-master.debian.org/keys/archive-key-10-security.asc
+      apt_key:
+        url: "{{ item }}"
+
+    - name: disable skyreach repository
+      shell:
+        cmd: mv -f /etc/apt/sources.list.d/skyreach.list /etc/apt/sources.list.d/skyreach.list.migrate
+
+    - name: update sources list
+      copy:
+        dest: /etc/apt/sources.list
+        content: |
+          deb http://ftp.debian.org/debian buster main contrib non-free
+          deb http://ftp.debian.org/debian buster-updates main contrib non-free
+          deb http://security.debian.org buster/updates main contrib non-free
+
+    - name: install debian keyring
+      apt:
+        force_apt_get: true
+        update_cache: true
+        name: debian-archive-keyring
+
+    - name: upgrade to debian
+      apt:
+        force_apt_get: true
+        update_cache: true
+        dpkg_options: force-confnew
+        upgrade: dist
+
+    - name: autoremove debian
+      apt:
+        force_apt_get: true
+        autoclean: true
+        autoremove: true
+
+    - name: install apt-show-versions
+      apt:
+        force_apt_get: true
+        name: apt-show-versions
+
+    - name: install debian versions of packages
+      shell:
+        cmd: apt-get install -y $(apt-show-versions | grep -P 'newer than version in archive' | awk -F: '{print $1"/buster"}')
+
+    - name: upgrade
+      apt:
+        force_apt_get: true
+        update_cache: true
+        dpkg_options: force-confnew
+        upgrade: dist
+
+    - name: autoremove
+      apt:
+        force_apt_get: true
+        autoclean: true
+        autoremove: true
+
+...
diff --git a/playbooks/monitor.yml b/playbooks/monitor.yml
new file mode 100755
index 00000000..fc8b4917
--- /dev/null
+++ b/playbooks/monitor.yml
@@ -0,0 +1,15 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+- import_playbook: includes/init.yml
+- import_playbook: includes/base.yml
+
+- import_playbook: includes/monitor.yml
+
+- import_playbook: includes/certificates.yml
+- import_playbook: includes/network.yml
+
+...
diff --git a/playbooks/netcapture.yml b/playbooks/netcapture.yml
new file mode 100755
index 00000000..7513cf46
--- /dev/null
+++ b/playbooks/netcapture.yml
@@ -0,0 +1,14 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+- import_playbook: includes/init.yml
+- import_playbook: includes/base.yml
+
+- import_playbook: includes/netcapture.yml
+
+- import_playbook: includes/network.yml
+
+...
diff --git a/playbooks/pod.yml b/playbooks/pod.yml
new file mode 100755
index 00000000..2ae27f6d
--- /dev/null
+++ b/playbooks/pod.yml
@@ -0,0 +1,226 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: POD INSTALLATION
+  hosts:
+    - pod
+
+  vars:
+
+    pod_elastic_version: 6
+    pod_version: 2.2.2
+    pod_project_path: /usr/local/pod
+    pod_application_path: "{{ pod_project_path }}/app"
+    pod_virtualenv_path: "{{ pod_project_path }}/venv"
+    pod_superuser_name: admin
+    pod_superuser_email: sysadmin+pod@ubicast.eu
+    pod_superuser_password: pLafk0tt
+    pod_site_name: pod.ubicast.net
+    pod_site_domain: pod.ubicast.net
+    pod_settings: |
+      SECRET_KEY = 'T4b4B8BEP7kfHoSx7s49aUCR7NiY8zeZNcmJpQzZYYCDNCTv284rjSB262JAB8nQ'
+      ALLOWED_HOSTS = ['{{ pod_site_domain }}', 'localhost', '127.0.0.1', '::1']
+
+  handlers:
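+    # each "flag ..." handler touches a marker file; the one-shot tasks below use
+    # it via "creates:" so they are not re-run on subsequent plays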
+
+    - name: restart elastic
+      systemd:
+        name: elasticsearch
+        state: restarted
+
+    - name: flag create_pod_index
+      become: true
+      become_user: pod
+      file:
+        path: "{{ pod_project_path }}/.create_pod_index"
+        state: touch
+
+    - name: flag initialize_database
+      become: true
+      become_user: pod
+      file:
+        path: "{{ pod_project_path }}/.initialize_database"
+        state: touch
+
+    - name: flag create_superuser
+      become: true
+      become_user: pod
+      file:
+        path: "{{ pod_project_path }}/.create_superuser"
+        state: touch
+
+    - name: flag config_site
+      become: true
+      become_user: pod
+      file:
+        path: "{{ pod_project_path }}/.config_site"
+        state: touch
+
+  tasks:
+
+    - name: os requirements
+      apt:
+        force_apt_get: true
+        name:
+          - build-essential
+          - ffmpeg
+          - ffmpegthumbnailer
+          - git
+          - imagemagick
+          - libjpeg-dev
+          - openjdk-11-jre
+          - policykit-1
+          - python3-dev
+          - python3-venv
+          - python3-wheel
+          - zlib1g-dev
+        state: present
+
+    - name: elastic key
+      apt_key:
+        keyserver: pgp.mit.edu
+        id: D88E42B4
+        state: present
+
+    - name: elastic repo
+      apt_repository:
+        repo: deb https://artifacts.elastic.co/packages/{{ pod_elastic_version }}.x/apt stable main
+        filename: elastic-{{ pod_elastic_version }}.x
+        state: present
+
+    - name: elastic package
+      apt:
+        force_apt_get: true
+        name:
+          - elasticsearch
+        state: present
+
+    - name: elastic cluster name
+      notify: restart elastic
+      lineinfile:
+        path: /etc/elasticsearch/elasticsearch.yml
+        regexp: '^#?cluster.name: '
+        line: 'cluster.name: pod-application'
+        state: present
+
+    - name: elastic node name
+      notify: restart elastic
+      lineinfile:
+        path: /etc/elasticsearch/elasticsearch.yml
+        regexp: '^#?node.name: '
+        line: 'node.name: pod-1'
+        state: present
+
+    - name: elastic discovery host
+      notify: restart elastic
+      lineinfile:
+        path: /etc/elasticsearch/elasticsearch.yml
+        line: 'discovery.zen.ping.unicast.hosts: ["127.0.0.1"]'
+        state: present
+
+    - name: elastic plugin analysis-icu
+      notify: restart elastic
+      command: /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu
+      args:
+        creates: /usr/share/elasticsearch/plugins/analysis-icu
+
+    - meta: flush_handlers
+
+    - name: elastic service
+      systemd:
+        name: elasticsearch
+        enabled: true
+        state: started
+
+    - name: pod group
+      group:
+        name: pod
+        system: true
+        state: present
+
+    - name: pod user
+      user:
+        name: pod
+        group: pod
+        system: true
+        password_lock: true
+        state: present
+
+    - name: pod project directory
+      file:
+        path: "{{ pod_project_path }}"
+        owner: pod
+        group: pod
+        state: directory
+
+    - name: pod repo
+      become: true
+      become_user: pod
+      git:
+        accept_hostkey: true
+        repo: https://github.com/esupportail/podv2.git
+        version: "{{ pod_version }}"
+        dest: "{{ pod_application_path }}"
+
+    - name: pod pip install
+      become: true
+      become_user: pod
+      pip:
+        virtualenv_command: /usr/bin/python3 -m venv
+        virtualenv_site_packages: true
+        virtualenv: "{{ pod_virtualenv_path }}"
+        requirements: "{{ pod_application_path }}/requirements.txt"
+        state: present
+
+    - name: pod settings
+      become: true
+      become_user: pod
+      copy:
+        dest: "{{ pod_application_path }}/pod/custom/settings_local.py"
+        content: "{{ pod_settings }}"
+
+    - name: pod elastic index video
+      become: true
+      become_user: pod
+      notify: flag create_pod_index
+      command: "python {{ pod_application_path }}/manage.py create_pod_index"
+      args:
+        chdir: "{{ pod_application_path }}"
+        creates: "{{ pod_project_path }}/.create_pod_index"
+      environment:
+        PATH: "{{ pod_virtualenv_path }}/bin:$PATH"
+
+    - name: pod initialize database
+      become: true
+      become_user: pod
+      notify: flag initialize_database
+      command: /usr/bin/sh {{ pod_application_path }}/create_data_base.sh
+      args:
+        chdir: "{{ pod_application_path }}"
+        creates: "{{ pod_project_path }}/.initialize_database"
+      environment:
+        PATH: "{{ pod_virtualenv_path }}/bin:$PATH"
+
+    - name: pod create superuser
+      become: true
+      become_user: pod
+      notify: flag create_superuser
+      shell: python manage.py shell -c "from django.contrib.auth.models import User; User.objects.create_superuser('{{ pod_superuser_name }}', '{{ pod_superuser_email }}', '{{ pod_superuser_password }}')"
+      args:
+        chdir: "{{ pod_application_path }}"
+        creates: "{{ pod_project_path }}/.create_superuser"
+      environment:
+        PATH: "{{ pod_virtualenv_path }}/bin:$PATH"
+
+    - name: pod configure site
+      become: true
+      become_user: pod
+      notify: flag config_site
+      shell: python manage.py shell -c "from django.contrib.sites.models import Site; Site.objects.filter(pk=1).update(name='{{ pod_site_name }}', domain='{{ pod_site_domain }}')"
+      args:
+        chdir: "{{ pod_application_path }}"
+        creates: "{{ pod_project_path }}/.config_site"
+      environment:
+        PATH: "{{ pod_virtualenv_path }}/bin:$PATH"
+
+...
diff --git a/playbooks/rocketchat.yml b/playbooks/rocketchat.yml
new file mode 100755
index 00000000..1853228c
--- /dev/null
+++ b/playbooks/rocketchat.yml
@@ -0,0 +1,10 @@
+#!/usr/bin/env ansible-playbook
+
+---
+
+- hosts: chat
+
+  roles:
+    - rocketchat
+
+...
diff --git a/playbooks/server.yml b/playbooks/server.yml
new file mode 100755
index 00000000..3bd48187
--- /dev/null
+++ b/playbooks/server.yml
@@ -0,0 +1,16 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+- import_playbook: includes/init.yml
+- import_playbook: includes/base.yml
+
+- import_playbook: includes/postgres.yml
+- import_playbook: includes/server.yml
+
+- import_playbook: includes/certificates.yml
+- import_playbook: includes/network.yml
+
+...
diff --git a/playbooks/tests.yml b/playbooks/tests.yml
new file mode 100755
index 00000000..df7fbc2a
--- /dev/null
+++ b/playbooks/tests.yml
@@ -0,0 +1,24 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: RUN TESTER
+  hosts: all
+  tags: tester
+  gather_facts: false
+  vars:
+    tester_reset_log: false
+  tasks:
+    - name: remove envsetup tester log
+      when: tester_reset_log
+      file:
+        path: /root/envsetup/log_tester.txt
+        state: absent
+    - name: envsetup tester
+      shell: |
+        set -o pipefail
+        python3 /root/envsetup/tester.py 2>&1 | tee /root/envsetup/log_tester.txt
+      args:
+        creates: /root/envsetup/log_tester.txt
+        executable: /bin/bash
+
+...
diff --git a/playbooks/upgrade.yml b/playbooks/upgrade.yml
new file mode 100755
index 00000000..8568f770
--- /dev/null
+++ b/playbooks/upgrade.yml
@@ -0,0 +1,21 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- name: UPGRADE SERVERS
+  hosts: all
+  tasks:
+
+    - name: apt-get dist-upgrade
+      when: ansible_os_family == "Debian"
+      apt:
+        force_apt_get: true
+        cache_valid_time: 3600
+        upgrade: dist
+
+    - name: yum upgrade
+      when: ansible_os_family == "RedHat"
+      yum:
+        name: "*"
+        state: latest
+
+...
diff --git a/playbooks/vault.yml b/playbooks/vault.yml
new file mode 100755
index 00000000..bf884c8e
--- /dev/null
+++ b/playbooks/vault.yml
@@ -0,0 +1,14 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+- import_playbook: includes/init.yml
+- import_playbook: includes/base.yml
+
+- import_playbook: includes/vault.yml
+
+- import_playbook: includes/network.yml
+
+...
diff --git a/playbooks/worker.yml b/playbooks/worker.yml
new file mode 100755
index 00000000..f02c1442
--- /dev/null
+++ b/playbooks/worker.yml
@@ -0,0 +1,14 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+- import_playbook: includes/init.yml
+- import_playbook: includes/base.yml
+
+- import_playbook: includes/worker.yml
+
+- import_playbook: includes/network.yml
+
+...
diff --git a/playbooks/wowza.yml b/playbooks/wowza.yml
new file mode 100755
index 00000000..f0882a11
--- /dev/null
+++ b/playbooks/wowza.yml
@@ -0,0 +1,14 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: includes/python.yml
+- import_playbook: includes/check_docker.yml
+- import_playbook: includes/conf.yml
+- import_playbook: includes/init.yml
+- import_playbook: includes/base.yml
+
+- import_playbook: includes/wowza.yml
+
+- import_playbook: includes/network.yml
+
+...
diff --git a/plugins/action/source_file.py b/plugins/action/source_file.py
new file mode 100644
index 00000000..d6088a20
--- /dev/null
+++ b/plugins/action/source_file.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+    """Plugin to set facts from variables sourced in `source_file` module."""
+
+    TRANSFERS_FILES = False
+
+    def run(self, tmp=None, task_vars=None):
+        self._supports_check_mode = True
+
+        result = super(ActionModule, self).run(tmp, task_vars)
+        del tmp  # tmp no longer has any effect
+
+        if self._task.args.get("path", None) is None:
+            result["failed"] = True
+            result["msg"] = "path arg needs to be provided"
+            return result
+
+        result.update(self._execute_module(task_vars=task_vars))
+
+        return result
diff --git a/requirements.dev.in b/requirements.dev.in
new file mode 100644
index 00000000..f453c2fe
--- /dev/null
+++ b/requirements.dev.in
@@ -0,0 +1,7 @@
+-r requirements.in
+ansible-lint
+molecule[docker] ~= 2.22
+pip-tools
+pre-commit
+pylint
+yamllint
diff --git a/requirements.dev.txt b/requirements.dev.txt
new file mode 100644
index 00000000..749a3210
--- /dev/null
+++ b/requirements.dev.txt
@@ -0,0 +1,92 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+#    pip-compile --output-file=requirements.dev.txt requirements.dev.in
+#
+ansible-lint==4.2.0       # via -r requirements.dev.in, molecule
+ansible==2.9.6            # via -r requirements.in, ansible-lint, molecule
+anyconfig==0.9.7          # via molecule
+appdirs==1.4.3            # via virtualenv
+arrow==0.15.5             # via jinja2-time
+aspy.yaml==1.3.0          # via pre-commit
+astroid==2.3.3            # via pylint
+attrs==19.3.0             # via pytest
+bcrypt==3.1.7             # via paramiko
+binaryornot==0.4.4        # via cookiecutter
+cerberus==1.3.2           # via molecule
+certifi==2019.11.28       # via requests
+cffi==1.14.0              # via bcrypt, cryptography, pynacl
+cfgv==3.1.0               # via pre-commit
+chardet==3.0.4            # via binaryornot, requests
+click-completion==0.5.2   # via molecule
+click==7.0                # via click-completion, cookiecutter, molecule, pip-tools, python-gilt
+colorama==0.4.3           # via molecule, python-gilt
+cookiecutter==1.7.0       # via molecule
+cryptography==2.8         # via ansible, paramiko
+distlib==0.3.0            # via virtualenv
+docker==4.2.0             # via molecule
+entrypoints==0.3          # via flake8
+fasteners==0.15           # via python-gilt
+filelock==3.0.12          # via virtualenv
+flake8==3.7.9             # via molecule
+future==0.18.2            # via cookiecutter
+git-url-parse==1.2.2      # via python-gilt
+identify==1.4.11          # via pre-commit
+idna==2.9                 # via requests
+importlib-metadata==1.5.0  # via pluggy, pre-commit, pytest, virtualenv
+isort==4.3.21             # via pylint
+jinja2-time==0.2.0        # via cookiecutter
+jinja2==2.11.1            # via ansible, click-completion, cookiecutter, jinja2-time, molecule
+lazy-object-proxy==1.4.3  # via astroid
+markupsafe==1.1.1         # via jinja2
+mccabe==0.6.1             # via flake8, pylint
+molecule[docker]==2.22    # via -r requirements.dev.in
+monotonic==1.5            # via fasteners
+more-itertools==8.2.0     # via pytest
+netaddr==0.7.19           # via -r requirements.in
+nodeenv==1.3.5            # via pre-commit
+packaging==20.3           # via pytest
+paramiko==2.7.1           # via molecule
+pathspec==0.7.0           # via yamllint
+pbr==5.4.4                # via git-url-parse, python-gilt
+pexpect==4.8.0            # via molecule
+pip-tools==4.5.1          # via -r requirements.dev.in
+pluggy==0.13.1            # via pytest
+poyo==0.5.0               # via cookiecutter
+pre-commit==1.21.0        # via -r requirements.dev.in, molecule
+psutil==5.7.0             # via molecule
+ptyprocess==0.6.0         # via pexpect
+py==1.8.1                 # via pytest
+pycodestyle==2.5.0        # via flake8
+pycparser==2.20           # via cffi
+pyflakes==2.1.1           # via flake8
+pylint==2.4.4             # via -r requirements.dev.in
+pynacl==1.3.0             # via paramiko
+pyparsing==2.4.6          # via packaging
+pytest==5.3.5             # via testinfra
+python-dateutil==2.8.1    # via arrow
+python-gilt==1.2.1        # via molecule
+pyyaml==5.3               # via -r requirements.in, ansible, ansible-lint, aspy.yaml, molecule, pre-commit, python-gilt, yamllint
+requests==2.23.0          # via cookiecutter, docker
+ruamel.yaml.clib==0.2.0   # via ruamel.yaml
+ruamel.yaml==0.16.10      # via ansible-lint
+sh==1.12.14               # via molecule, python-gilt
+shellingham==1.3.2        # via click-completion
+six==1.14.0               # via ansible-lint, astroid, bcrypt, click-completion, cryptography, docker, fasteners, molecule, packaging, pip-tools, pre-commit, pynacl, python-dateutil, testinfra, virtualenv, websocket-client
+tabulate==0.8.6           # via molecule
+testinfra==3.4.0          # via molecule
+toml==0.10.0              # via pre-commit
+tree-format==0.1.2        # via molecule
+typed-ast==1.4.1          # via astroid
+urllib3==1.25.8           # via requests
+virtualenv==20.0.8        # via pre-commit
+wcwidth==0.1.8            # via pytest
+websocket-client==0.57.0  # via docker
+whichcraft==0.6.1         # via cookiecutter
+wrapt==1.11.2             # via astroid
+yamllint==1.20.0          # via -r requirements.dev.in, molecule
+zipp==3.1.0               # via importlib-metadata
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools
diff --git a/requirements.in b/requirements.in
new file mode 100644
index 00000000..3f53a895
--- /dev/null
+++ b/requirements.in
@@ -0,0 +1,3 @@
+ansible ~= 2.9.0
+netaddr
+pyyaml
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..dd533699
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,15 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+#    pip-compile --output-file=requirements.txt requirements.in
+#
+ansible==2.9.6            # via -r requirements.in
+cffi==1.14.0              # via cryptography
+cryptography==2.8         # via ansible
+jinja2==2.11.1            # via ansible
+markupsafe==1.1.1         # via jinja2
+netaddr==0.7.19           # via -r requirements.in
+pycparser==2.20           # via cffi
+pyyaml==5.3               # via -r requirements.in, ansible
+six==1.14.0               # via cryptography
diff --git a/roles/bench-server/defaults/main.yml b/roles/bench-server/defaults/main.yml
new file mode 100644
index 00000000..41980209
--- /dev/null
+++ b/roles/bench-server/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+
+bench_server_packages:
+  - ubicast-benchmark
+
+bench_host: "{{ envsetup_ms_server_name | d() }}"
+bench_user: ubicast
+bench_password: "{{ envsetup_ms_superuser_pwd | d() }}"
+bench_oid:
+
+bench_stream_repo: https://git.ubicast.net/mediaserver/ms-testing-suite.git
+bench_host_api_key: "{{ envsetup_ms_api_key | d() }}"
+
+...
diff --git a/roles/bench-server/handlers/main.yml b/roles/bench-server/handlers/main.yml
new file mode 100644
index 00000000..8ec953f2
--- /dev/null
+++ b/roles/bench-server/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+
+- name: reload systemd daemon
+  systemd:
+    daemon_reload: true
+
+- name: restart bench-server
+  systemd:
+    name: bench-server
+    state: restarted
+
+...
diff --git a/roles/bench-server/tasks/main.yml b/roles/bench-server/tasks/main.yml
new file mode 100644
index 00000000..ac19976e
--- /dev/null
+++ b/roles/bench-server/tasks/main.yml
@@ -0,0 +1,45 @@
+---
+
+- name: install bench-server packages
+  apt:
+    force_apt_get: true
+    name: "{{ bench_server_packages }}"
+    state: present
+
+- name: deploy bench-server service
+  notify:
+    - reload systemd daemon
+    - restart bench-server
+  template:
+    src: bench-server.service.j2
+    dest: /etc/systemd/system/bench-server.service
+
+- name: ensure configuration directory exists
+  file:
+    path: /etc/mediaserver
+    state: directory
+
+- name: benchmark configuration settings
+  notify:
+    - restart bench-server
+  copy:
+    dest: /etc/mediaserver/bench.conf
+    content: |
+      BENCH_HOST=https://{{ bench_host }}
+      BENCH_USER={{ bench_user }}
+      BENCH_PASSWORD={{ bench_password }}
+      BENCH_OID={{ bench_oid }}
+
+- name: clone ms-testing-suite repository
+  git:
+    repo: "{{ bench_stream_repo }}"
+    version: stable
+    dest: /usr/share/ms-testing-suite
+    update: false
+
+- name: streaming configuration settings
+  template:
+    src: bench-streaming.conf.j2
+    dest: /etc/mediaserver/bench-streaming.conf
+
+...
diff --git a/roles/bench-server/templates/bench-server.service.j2 b/roles/bench-server/templates/bench-server.service.j2
new file mode 100644
index 00000000..2854b222
--- /dev/null
+++ b/roles/bench-server/templates/bench-server.service.j2
@@ -0,0 +1,9 @@
+[Unit]
+Description=Benchmark server
+
+[Service]
+Type=simple
+User=root
+Group=root
+EnvironmentFile=/etc/mediaserver/bench.conf
+ExecStart=/usr/bin/mediaserver-bench --host {{ bench_host }} -v -u {{ bench_user }} -p {{ bench_password }} --oid {{ bench_oid }}
diff --git a/roles/bench-server/templates/bench-streaming.conf.j2 b/roles/bench-server/templates/bench-streaming.conf.j2
new file mode 100644
index 00000000..1c8f9059
--- /dev/null
+++ b/roles/bench-server/templates/bench-streaming.conf.j2
@@ -0,0 +1,13 @@
+{
+    "SERVER_URL": "https://{{ bench_host }}",
+    "API_KEY": "{{ bench_host_api_key }}",
+    "OID": "{{ bench_oid }}",
+    "PROXIES": {
+        "http": "",
+        "https": ""
+    },
+    "VERIFY_SSL": false,
+    "EXTRA": {
+        "IGNORED_ROUTES": []
+    }
+}
diff --git a/roles/bench-worker/defaults/main.yml b/roles/bench-worker/defaults/main.yml
new file mode 100644
index 00000000..6527e729
--- /dev/null
+++ b/roles/bench-worker/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+
+bench_worker_packages:
+  - ubicast-benchmark
+
+bench_server: 127.0.0.1
+bench_time_stat: 15000
+
+bench_host: "{{ envsetup_ms_server_name | d() }}"
+bench_user: ubicast
+bench_password: "{{ envsetup_ms_superuser_pwd | d() }}"
+bench_oid:
+
+...
diff --git a/roles/bench-worker/handlers/main.yml b/roles/bench-worker/handlers/main.yml
new file mode 100644
index 00000000..e7642b11
--- /dev/null
+++ b/roles/bench-worker/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+
+- name: reload systemd daemon
+  systemd:
+    daemon_reload: true
+
+- name: restart bench-worker
+  systemd:
+    name: bench-worker
+    state: restarted
+
+...
diff --git a/roles/bench-worker/tasks/main.yml b/roles/bench-worker/tasks/main.yml
new file mode 100644
index 00000000..90265140
--- /dev/null
+++ b/roles/bench-worker/tasks/main.yml
@@ -0,0 +1,42 @@
+---
+
+- name: install bench-worker packages
+  apt:
+    force_apt_get: true
+    name: "{{ bench_worker_packages }}"
+    state: present
+
+- name: deploy worker launcher
+  notify:
+    - reload systemd daemon
+    - restart bench-worker
+  template:
+    src: mediaserver-benchmark-start.j2
+    dest: /usr/bin/mediaserver-benchmark-start
+    mode: 0755
+
+- name: deploy bench-worker service
+  notify: restart bench-worker
+  template:
+    src: bench-worker.service.j2
+    dest: /etc/systemd/system/bench-worker.service
+
+- name: ensure configuration directory exists
+  file:
+    path: /etc/mediaserver
+    state: directory
+
+- name: benchmark configuration settings
+  notify:
+    - restart bench-worker
+  copy:
+    dest: /etc/mediaserver/bench.conf
+    content: |
+      BENCH_SERVER={{ bench_server }}
+      BENCH_HOST=https://{{ bench_host }}
+      BENCH_USER={{ bench_user }}
+      BENCH_PASSWORD={{ bench_password }}
+      BENCH_OID={{ bench_oid }}
+      BENCH_TIME_STAT={{ bench_time_stat }}
+
+...
diff --git a/roles/bench-worker/templates/bench-worker.service.j2 b/roles/bench-worker/templates/bench-worker.service.j2
new file mode 100644
index 00000000..dfb52b53
--- /dev/null
+++ b/roles/bench-worker/templates/bench-worker.service.j2
@@ -0,0 +1,11 @@
+[Unit]
+Description=Benchmark worker
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+User=root
+Group=root
+EnvironmentFile=/etc/mediaserver/bench.conf
+ExecStart=/usr/bin/mediaserver-benchmark-start
+ExecStop=/usr/bin/pkill -9 locust
diff --git a/roles/bench-worker/templates/mediaserver-benchmark-start.j2 b/roles/bench-worker/templates/mediaserver-benchmark-start.j2
new file mode 100755
index 00000000..c0d23107
--- /dev/null
+++ b/roles/bench-worker/templates/mediaserver-benchmark-start.j2
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+END=$(nproc)
+echo "Launching $END clients"
+for (( c=0; c<$END; c++ )); do
+    mediaserver-bench --host $BENCH_HOST --master-host $BENCH_SERVER -v -u $BENCH_USER -p $BENCH_PASSWORD --oid $BENCH_OID --time-stat $BENCH_TIME_STAT --no-stream &
+done
diff --git a/roles/celerity/defaults/main.yml b/roles/celerity/defaults/main.yml
new file mode 100644
index 00000000..fac75691
--- /dev/null
+++ b/roles/celerity/defaults/main.yml
@@ -0,0 +1,44 @@
+---
+
+celerity_signing_key: "{{ envsetup_celerity_signing_key }}"
+celerity_server: "{{ envsetup_celerity_server | d(envsetup_ms_server_name, true) }}"
+
+celerity_workers_count: 2
+
+celerity_ms_id: "{{ envsetup_ms_id }}"
+celerity_ms_api_key: "{{ envsetup_ms_api_key }}"
+celerity_ms_hostname: "{{ envsetup_ms_server_name }}"
+celerity_ms_instances:
+  - ms_id: "{{ celerity_ms_id }}"
+    ms_api_key: "{{ celerity_ms_api_key }}"
+    ms_server_name: "{{ celerity_ms_hostname }}"
+
+celerity_fail2ban_enabled: "{{ envsetup_fail2ban_enabled | d(true) }}"
+celerity_f2b_filter:
+  name: celerity
+  content: |
+    [INCLUDES]
+    before = common.conf
+    [Definition]
+    # currently there is no login failure log in celerity so this is useless for now
+    failregex = INFO Wrong credentials given to login\. IP: <HOST>, username: \S+\.$
+    ignoreregex =
+celerity_f2b_jail:
+  name: celerity
+  content: |
+    [celerity]
+    logpath = /var/lib/celerity/twisted.log
+    enabled = {% if celerity_fail2ban_enabled | bool %}true{% else %}false{% endif %}
+
+celerity_firewall_enabled: true
+celerity_ferm_rules_filename: celerity
+celerity_ferm_input_rules:
+  - saddr: "{{ groups['worker'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list }}"
+    proto:
+      - tcp
+    dport:
+      - 6200
+celerity_ferm_output_rules: []
+celerity_ferm_global_settings:
+
+...
diff --git a/roles/celerity/handlers/main.yml b/roles/celerity/handlers/main.yml
new file mode 100644
index 00000000..f76e4aa3
--- /dev/null
+++ b/roles/celerity/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+
+- name: restart celerity-server
+  service:
+    name: celerity-server
+    state: restarted
+
+...
diff --git a/roles/celerity/tasks/main.yml b/roles/celerity/tasks/main.yml
new file mode 100644
index 00000000..995d95f9
--- /dev/null
+++ b/roles/celerity/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+
+- name: celerity server install
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: celerity-server
+
+- name: config celerity server
+  notify: restart celerity-server
+  template:
+    src: celerity-config.py.j2
+    dest: /etc/celerity/config.py
+
+- name: ensure celerity server is running
+  service:
+    name: celerity-server
+    enabled: true
+    state: started
+
+# FAIL2BAN
+
+- name: fail2ban
+  when: celerity_fail2ban_enabled
+  vars:
+    f2b_filter: "{{ celerity_f2b_filter }}"
+    f2b_jail: "{{ celerity_f2b_jail }}"
+  include_role:
+    name: fail2ban
+
+# FIREWALL
+
+- name: firewall
+  when: celerity_firewall_enabled
+  vars:
+    ferm_rules_filename: "{{ celerity_ferm_rules_filename }}"
+    ferm_input_rules: "{{ celerity_ferm_input_rules }}"
+    ferm_output_rules: "{{ celerity_ferm_output_rules }}"
+    ferm_global_settings: "{{ celerity_ferm_global_settings }}"
+  include_role:
+    name: ferm
+
+...
diff --git a/roles/celerity/templates/celerity-config.py.j2 b/roles/celerity/templates/celerity-config.py.j2
new file mode 100644
index 00000000..6e1ea1d9
--- /dev/null
+++ b/roles/celerity/templates/celerity-config.py.j2
@@ -0,0 +1,12 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+SIGNING_KEY = '{{ celerity_signing_key }}'
+SERVER_URL = 'https://{{ celerity_server }}:6200'
+
+WORKERS_COUNT = {{ celerity_workers_count }}
+
+# MediaServer interactions
+MEDIASERVERS = {
+    '{{ celerity_ms_id }}': {'url': 'https://{{ celerity_ms_hostname }}', 'api_key': '{{ celerity_ms_api_key }}'},
+}
diff --git a/roles/cluster/defaults/main.yml b/roles/cluster/defaults/main.yml
new file mode 100644
index 00000000..fe543d96
--- /dev/null
+++ b/roles/cluster/defaults/main.yml
@@ -0,0 +1,72 @@
+---
+
+cluster_nodes_packages:
+  - corosync
+  - corosync-qdevice
+  - pacemaker
+  - pcs
+
+cluster_qnet_packages:
+  - corosync-qnetd
+  - pacemaker
+  - pcs
+
+## COROSYNC
+
+cluster_name: ubicast-ha
+cluster_netaddr: "{{ ansible_default_ipv4.address }}"
+_cluster_nodes: |
+  {% for node in groups['cluster_nodes'] %}
+  - inventory: {{ node }}
+    address: {{ hostvars[node]['ansible_default_ipv4']['address'] }}
+    hostname: {{ hostvars[node]['ansible_hostname'] }}
+    id: {{ loop.index }}
+  {% endfor %}
+cluster_nodes: "{{ _cluster_nodes | from_yaml }}"
+cluster_master: "{{ groups['cluster_nodes'][0] }}"
+cluster_qnet:
+  inventory: "{{ groups['cluster_qnet'][0] }}"
+  address: "{{ hostvars[groups['cluster_qnet'][0]]['ansible_default_ipv4']['address'] }}"
+  hostname: "{{ hostvars[groups['cluster_qnet'][0]]['ansible_hostname'] }}"
+
+## PACEMAKER
+
+cluster_pcm_password: "{{ lookup('password', inventory_dir + '/files/cluster_pcm_password.txt length=32 chars=ascii_letters,digits') }}"
+cluster_virtual_ip:
+  address:
+  netmask:
+
+## FIREWALL
+
+cluster_fw_enabled: "{{ firewall_enabled | default(false) | bool }}"
+cluster_fw_filename: cluster
+cluster_fw_input:
+  - proto:
+      - udp
+    saddr: "{{ cluster_nodes.append(cluster_qnet) }}"
+    dport:
+      - 5404
+      - 5405
+    policy: accept
+  - proto:
+      - tcp
+    saddr: "{{ cluster_nodes.append(cluster_qnet) }}"
+    dport:
+      - 2224
+    policy: accept
+cluster_fw_output:
+  - proto:
+      - udp
+    daddr: "{{ cluster_nodes.append(cluster_qnet) }}"
+    dport:
+      - 5404
+      - 5405
+    policy: accept
+  - proto:
+      - tcp
+    daddr: "{{ cluster_nodes.append(cluster_qnet) }}"
+    dport:
+      - 2224
+    policy: accept
+
+...
diff --git a/roles/cluster/handlers/main.yml b/roles/cluster/handlers/main.yml
new file mode 100644
index 00000000..6e8f8443
--- /dev/null
+++ b/roles/cluster/handlers/main.yml
@@ -0,0 +1,13 @@
+---
+
+- name: restart services
+  loop:
+    - corosync
+    - pacemaker
+    - pcsd
+  register: cluster_services_handler
+  systemd:
+    name: "{{ item }}"
+    state: restarted
+
+...
diff --git a/roles/cluster/tasks/main.yml b/roles/cluster/tasks/main.yml
new file mode 100644
index 00000000..b0c08ed8
--- /dev/null
+++ b/roles/cluster/tasks/main.yml
@@ -0,0 +1,255 @@
+---
+
+- name: install nodes
+  when: "'cluster_nodes' in group_names"
+  apt:
+    force_apt_get: true
+    name: "{{ cluster_nodes_packages }}"
+    state: present
+
+- name: install qnet
+  when: "'cluster_qnet' in group_names"
+  apt:
+    force_apt_get: true
+    name: "{{ cluster_qnet_packages }}"
+    state: present
+
+- name: set hacluster password on cluster
+  user:
+    name: hacluster
+    password: "{{ cluster_pcm_password | password_hash('sha512', 'envsetup') }}"
+
+## COROSYNC FIXES
+
+- name: disable corosync on qnet
+  when: inventory_hostname == cluster_qnet.inventory
+  systemd:
+    name: corosync
+    enabled: false
+    state: stopped
+
+# https://bugs.launchpad.net/ubuntu/+source/corosync-qdevice/+bug/1809682/comments/6
+- name: fix qdevice init script on nodes
+  when: "'cluster_nodes' in group_names"
+  register: cluster_fix_qdevice
+  lineinfile:
+    path: /etc/init.d/corosync-qdevice
+    regexp: '^# Default-Start:'
+    line: '# Default-Start: 2 3 4 5'
+
+- name: reload qdevice init script on nodes
+  when: cluster_fix_qdevice is changed
+  shell: >
+    update-rc.d -f corosync-qdevice remove ;
+    update-rc.d -f corosync-qdevice defaults ;
+    systemctl enable corosync-qdevice ;
+
+## COROSYNC AUTHKEY
+
+- name: generate corosync authentication key on master
+  when: inventory_hostname == cluster_master
+  command: corosync-keygen -l
+  args:
+    creates: /etc/corosync/authkey
+
+- name: retrieve authentication key from master
+  when: inventory_hostname == cluster_master
+  register: cluster_authkey
+  slurp:
+    path: /etc/corosync/authkey
+
+- name: deploy corosync authkey on nodes
+  when: "'cluster_nodes' in group_names"
+  notify: restart services
+  copy:
+    dest: /etc/corosync/authkey
+    content: "{{ hostvars[groups['cluster_nodes'][0]]['cluster_authkey']['content'] | b64decode }}"
+    mode: 0600
+
+## COROSYNC SETTINGS
+
+- name: corosync configuration on nodes
+  when: "'cluster_nodes' in group_names"
+  notify: restart services
+  template:
+    src: corosync.conf.j2
+    dest: /etc/corosync/corosync.conf
+
+- name: corosync service directory on nodes
+  when: "'cluster_nodes' in group_names"
+  file:
+    path: /etc/corosync/service.d
+    state: directory
+
+- name: add pacemaker service on nodes
+  when: "'cluster_nodes' in group_names"
+  notify: restart services
+  copy:
+    dest: /etc/corosync/service.d/pcmk
+    content: |
+      service {
+        name: pacemaker
+        ver: 1
+      }
+
+- meta: flush_handlers
+
+## COROSYNC CERTIFICATES
+
+- name: generate qnet ca certificate on qnet
+  when: inventory_hostname == cluster_qnet.inventory
+  command: corosync-qnetd-certutil -i
+  args:
+    creates: /etc/corosync/qnetd/nssdb/qnetd-cacert.crt
+
+- name: retrieve qnet ca certificate from qnet
+  when: inventory_hostname == cluster_qnet.inventory
+  register: cluster_qnet_cacert
+  slurp:
+    path: /etc/corosync/qnetd/nssdb/qnetd-cacert.crt
+
+- name: save qnet ca certificate on nodes
+  when: "'cluster_nodes' in group_names"
+  copy:
+    dest: /tmp/qnetd-cacert.crt
+    content: "{{ hostvars[cluster_qnet.inventory]['cluster_qnet_cacert']['content'] | b64decode }}"
+    mode: 0644
+
+- name: initialize certificate database on nodes
+  when: "'cluster_nodes' in group_names"
+  command: corosync-qdevice-net-certutil -i -c /tmp/qnetd-cacert.crt
+  args:
+    creates: /etc/corosync/qdevice/net/nssdb/cert8.db
+
+- name: generate certificate request on master
+  when: inventory_hostname == cluster_master
+  command: corosync-qdevice-net-certutil -r -n {{ cluster_name }}
+  args:
+    creates: /etc/corosync/qdevice/net/nssdb/qdevice-net-node.crq
+
+- name: retrieve certificate request from master
+  when: inventory_hostname == cluster_master
+  register: cluster_csr
+  slurp:
+    path: /etc/corosync/qdevice/net/nssdb/qdevice-net-node.crq
+
+- name: save client certificate request on qnet
+  when: inventory_hostname == cluster_qnet.inventory
+  copy:
+    dest: /tmp/cluster-{{ cluster_name }}.csr
+    content: "{{ hostvars[cluster_master]['cluster_csr']['content'] | b64decode }}"
+    mode: 0640
+
+- name: sign client certificate request on qnet
+  when: inventory_hostname == cluster_qnet.inventory
+  command: corosync-qnetd-certutil -s -c /tmp/cluster-{{ cluster_name }}.csr -n {{ cluster_name }}
+  args:
+    creates: /etc/corosync/qnetd/nssdb/cluster-{{ cluster_name }}.crt
+
+- name: retrieve signed certificate from qnet
+  when: inventory_hostname == cluster_qnet.inventory
+  register: cluster_cert
+  slurp:
+    path: /etc/corosync/qnetd/nssdb/cluster-{{ cluster_name }}.crt
+
+- name: save signed certificate on master
+  when: inventory_hostname == cluster_master
+  copy:
+    dest: /tmp/cluster-{{ cluster_name }}.crt
+    content: "{{ hostvars[cluster_qnet.inventory]['cluster_cert']['content'] | b64decode }}"
+    mode: 0640
+
+- name: import certificate and export pk12 on master
+  when: inventory_hostname == cluster_master
+  command: corosync-qdevice-net-certutil -M -c /tmp/cluster-{{ cluster_name }}.crt
+  args:
+    creates: /etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12
+
+- name: retrieve pk12 from master
+  when: inventory_hostname == cluster_master
+  register: cluster_pk12
+  slurp:
+    path: /etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12
+
+- name: save pk12 on nodes
+  when: "'cluster_nodes' in group_names"
+  register: cluster_pk12_save
+  copy:
+    dest: /etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12
+    content: "{{ hostvars[groups['cluster_nodes'][0]]['cluster_pk12']['content'] | b64decode }}"
+    mode: 0600
+
+- name: import pk12 on nodes
+  when:
+    - "'cluster_nodes' in group_names"
+    - cluster_pk12_save is changed
+  notify: restart services
+  command: corosync-qdevice-net-certutil -m -c /etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12
+
+- name: clear nodes cache
+  when: inventory_hostname == cluster_master
+  shell: >
+    {% for item in cluster_nodes %}
+    pcs cluster node clear {{ item.hostname }} ;
+    {% endfor %}
+    touch /etc/corosync/.clear ;
+  args:
+    creates: /etc/corosync/.clear
+
+- name: authenticate cluster
+  when: inventory_hostname == cluster_master
+  shell: >
+    {% for item in cluster_nodes + [ cluster_qnet ] %}
+    pcs cluster auth --name {{ cluster_name }} {{ item.address }} -u hacluster -p {{ cluster_pcm_password }} ;
+    {% endfor %}
+    touch /etc/corosync/.auth ;
+  args:
+    creates: /etc/corosync/.auth
+
+- name: start cluster
+  when: inventory_hostname == cluster_master
+  shell: >
+    pcs cluster start --all ;
+    touch /etc/corosync/.start ;
+  args:
+    creates: /etc/corosync/.start
+
+- meta: flush_handlers
+
+- name: restart qdevice
+  when:
+    - "'cluster_nodes' in group_names"
+    - cluster_services_handler is changed
+  systemd:
+    name: corosync-qdevice
+    state: restarted
+
+## PACEMAKER
+
+# export config: pcs cluster cib clust_cfg
+# pcs property set stonith-enabled=false
+# pcs property set no-quorum-policy=stop
+# pcs resource defaults resource-stickiness=200
+# pcs resource create virtual_ip ocf:heartbeat:IPaddr2 ip={{ cluster_virtual_ip.address }} cidr_netmask={{ cluster_virtual_ip.netmask }} op monitor interval=30s
+# pcs resource create skyreach ...
+# pcs resource create celerity ...
+# pcs resource create wowza ...
+# pcs constraint colocation add skyreach virtual_ip INFINITY
+# pcs constraint colocation add celerity virtual_ip INFINITY
+# pcs constraint colocation add wowza virtual_ip INFINITY
+# pcs constraint order virtual_ip then skyreach
+# pcs constraint order virtual_ip then celerity
+# pcs constraint order virtual_ip then wowza
+
+## FIREWALL
+
+- name: firewall rules
+  when: cluster_fw_enabled
+  vars:
+    ferm_rules_filename: "{{ cluster_fw_filename }}"
+    ferm_input_rules: "{{ cluster_fw_input }}"
+    ferm_output_rules: "{{ cluster_fw_output }}"
+  include_role:
+    name: ferm
+
+...
diff --git a/roles/cluster/templates/corosync.conf.j2 b/roles/cluster/templates/corosync.conf.j2
new file mode 100644
index 00000000..a7368530
--- /dev/null
+++ b/roles/cluster/templates/corosync.conf.j2
@@ -0,0 +1,49 @@
+# Please read the corosync.conf.5 manual page
+totem {
+  version: 2
+  cluster_name: {{ cluster_name }}
+  # crypto_cipher: aes256
+  # crypto_hash: sha512
+  transport: udpu
+  interface {
+    ringnumber: 0
+    bindnetaddr: {{ cluster_netaddr }}
+    ttl: 1
+  }
+}
+
+logging {
+  logfile: /var/log/corosync/corosync.log
+  debug: off
+  logger_subsys {
+    subsys: QUORUM
+    debug: off
+  }
+  logger_subsys {
+    subsys: QDEVICE
+    debug: off
+  }
+}
+
+nodelist {
+{% for cluster_node in cluster_nodes %}
+  node {
+    ring0_addr: {{ cluster_node.address }}
+    name: {{ cluster_node.address }}
+    nodeid: {{ cluster_node.id }}
+  }
+{% endfor %}
+}
+
+quorum {
+  provider: corosync_votequorum
+  device {
+    votes: 1
+    model: net
+    net {
+      tls: on
+      algorithm: ffsplit
+      host: {{ cluster_qnet.address }}
+    }
+  }
+}
diff --git a/roles/conf/defaults/main.yml b/roles/conf/defaults/main.yml
new file mode 100644
index 00000000..9188e90d
--- /dev/null
+++ b/roles/conf/defaults/main.yml
@@ -0,0 +1,22 @@
+---
+
+conf_req_packages:
+  - ca-certificates
+  - git
+  - ssh-client
+
+conf_repo_url: https://mirismanager.ubicast.eu/git/mediaserver/envsetup.git
+conf_repo_version: stable
+conf_repo_dest: /root/envsetup
+
+conf_host: "{{ skyreach_host | default('panel.ubicast.eu', true) }}"
+conf_valid_cert: "{{ skyreach_valid_cert | default(true, true) }}"
+
+skyreach_activation_key: "{{ lookup('env', 'SKYREACH_ACTIVATION_KEY') }}"
+skyreach_system_key: "{{ lookup('env', 'SKYREACH_SYSTEM_KEY') }}"
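+# both keys default to the SKYREACH_* environment variables; as an alternative
+# they can be set in the inventory, e.g. with placeholder values:
+# skyreach_activation_key: "0123456789abcdef"
+# skyreach_system_key: "fedcba9876543210"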
+
+conf_update: false
+
+conf_debug: false
+
+...
diff --git a/roles/conf/tasks/main.yml b/roles/conf/tasks/main.yml
new file mode 100644
index 00000000..6667aa3c
--- /dev/null
+++ b/roles/conf/tasks/main.yml
@@ -0,0 +1,123 @@
+---
+
+- name: proxy
+  include_role:
+    name: proxy
+
+- name: install requirements
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ conf_req_packages }}"
+
+- name: clone envsetup repository
+  git:
+    repo: "{{ conf_repo_url }}"
+    version: "{{ conf_repo_version }}"
+    dest: "{{ conf_repo_dest }}"
+
+- name: generate root ssh key pair
+  register: conf_root
+  user:
+    name: root
+    generate_ssh_key: true
+    ssh_key_type: ed25519
+    ssh_key_file: .ssh/id_ed25519
+
+- name: check if auto-generated-conf.sh exists
+  check_mode: false
+  register: check_conf
+  stat:
+    path: "{{ conf_repo_dest }}/auto-generated-conf.sh"
+
+- name: check if conf.sh exists
+  check_mode: false
+  register: check_local_conf
+  stat:
+    path: "{{ conf_repo_dest }}/conf.sh"
+
+- name: download conf and update ssh public key with activation key
+  when: skyreach_activation_key | d(false)
+  register: conf_dl_ak
+  changed_when: conf_dl_ak.status == 200
+  failed_when:
+    - conf_dl_ak.status != 200
+    - not check_conf.stat.exists
+    - not skyreach_system_key
+  uri:
+    url: https://{{ conf_host }}/erp/credentials/envsetup-conf.sh
+    method: POST
+    body_format: form-urlencoded
+    body:
+      key: "{{ skyreach_activation_key }}"
+      public_key: "{{ conf_root.ssh_public_key }}"
+    return_content: true
+    validate_certs: "{{ conf_valid_cert }}"
+
+- name: download conf and update ssh public key with system key
+  when:
+    - not check_conf.stat.exists or conf_update
+    - skyreach_system_key | d(false)
+  register: conf_dl_sk
+  changed_when: conf_dl_sk.status == 200
+  failed_when:
+    - conf_dl_sk.status != 200
+    - not check_conf.stat.exists
+  uri:
+    url: https://{{ conf_host }}/erp/credentials/envsetup-conf.sh
+    method: POST
+    body_format: form-urlencoded
+    body:
+      api_key: "{{ skyreach_system_key }}"
+      public_key: "{{ conf_root.ssh_public_key }}"
+    return_content: true
+    validate_certs: "{{ conf_valid_cert }}"
+
+- name: save generated conf
+  loop:
+    - "{{ conf_dl_ak }}"
+    - "{{ conf_dl_sk }}"
+  when: item is changed
+  copy:
+    content: "{{ item.content }}"
+    dest: "{{ conf_repo_dest }}/auto-generated-conf.sh"
+    force: true
+    backup: true
+
+- name: touch local conf
+  file:
+    path: "{{ conf_repo_dest }}/conf.sh"
+    access_time: preserve
+    modification_time: preserve
+    state: touch
+
+- name: load global conf
+  changed_when: false
+  check_mode: false
+  source_file:
+    path: "{{ conf_repo_dest }}/global-conf.sh"
+    prefix: envsetup_
+    lower: true
+
+- name: load generated conf if exists
+  changed_when: false
+  check_mode: false
+  source_file:
+    path: "{{ conf_repo_dest }}/auto-generated-conf.sh"
+    prefix: envsetup_
+    lower: true
+
+- name: load local conf if exists
+  changed_when: false
+  check_mode: false
+  source_file:
+    path: "{{ conf_repo_dest }}/conf.sh"
+    prefix: envsetup_
+    lower: true
+
+- name: debug variables
+  when: conf_debug
+  debug:
+    var: ansible_facts
+
+...
diff --git a/roles/fail2ban/defaults/main.yml b/roles/fail2ban/defaults/main.yml
new file mode 100644
index 00000000..747f824b
--- /dev/null
+++ b/roles/fail2ban/defaults/main.yml
@@ -0,0 +1,27 @@
+---
+
+f2b_packages:
+  - fail2ban
+  - rsyslog
+
+f2b_enabled: "{% if envsetup_fail2ban_enabled | bool %}true{% else %}false{% endif %}"
+f2b_ignoreip: 127.0.0.1/8 ::1
+f2b_maxretry: "{{ envsetup_fail2ban_maxretry | default('6', true) }}"
+f2b_bantime: "{{ envsetup_fail2ban_bantime | default('30', true) }}"
+f2b_sender: "{{ envsetup_email_sender | default('root@localhost', true) }}"
+f2b_destemail: "{% if envsetup_fail2ban_dest_email is string %}{{ envsetup_fail2ban_dest_email }}{% else %}{{ envsetup_fail2ban_dest_email | join(',') }}{% endif %}"
+f2b_destemail_admins: "{% if envsetup_email_admins is string %}{{ envsetup_email_admins }}{% else %}{{ envsetup_email_admins | join(',') }}{% endif %}"
+f2b_action: "{% if envsetup_fail2ban_send_email | bool %}action_mwl{% else %}action_{% endif %}"
+
+f2b_filter:
+  name: sshd
+  content:
+
+f2b_jail:
+  name: sshd
+  content: |
+    [sshd]
+    enabled = {{ f2b_enabled }}
+    backend = systemd
+
+...
diff --git a/roles/fail2ban/handlers/main.yml b/roles/fail2ban/handlers/main.yml
new file mode 100644
index 00000000..83588db6
--- /dev/null
+++ b/roles/fail2ban/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+
+- name: restart fail2ban
+  systemd:
+    name: fail2ban
+    state: restarted
+
+...
diff --git a/roles/fail2ban/tasks/main.yml b/roles/fail2ban/tasks/main.yml
new file mode 100644
index 00000000..2ab46ee5
--- /dev/null
+++ b/roles/fail2ban/tasks/main.yml
@@ -0,0 +1,42 @@
+---
+
+- name: packages
+  apt:
+    force_apt_get: true
+    name: "{{ f2b_packages }}"
+    state: present
+
+- name: directories
+  loop:
+    - /etc/fail2ban/filter.d
+    - /etc/fail2ban/jail.d
+    - /etc/fail2ban/action.d
+  file:
+    path: "{{ item }}"
+    state: directory
+
+- name: jail defaults
+  notify: restart fail2ban
+  template:
+    src: jail.local.j2
+    dest: /etc/fail2ban/jail.local
+
+- name: filter
+  notify: restart fail2ban
+  copy:
+    dest: /etc/fail2ban/filter.d/{{ f2b_filter.name }}.local
+    content: "{{ f2b_filter.content }}"
+
+- name: jail
+  notify: restart fail2ban
+  copy:
+    dest: /etc/fail2ban/jail.d/{{ f2b_jail.name }}.local
+    content: "{{ f2b_jail.content }}"
+
+- name: service
+  systemd:
+    name: fail2ban
+    enabled: true
+    state: started
+
+...
diff --git a/roles/fail2ban/templates/jail.local.j2 b/roles/fail2ban/templates/jail.local.j2
new file mode 100644
index 00000000..71d2e303
--- /dev/null
+++ b/roles/fail2ban/templates/jail.local.j2
@@ -0,0 +1,8 @@
+[DEFAULT]
+
+ignoreip = {{ f2b_ignoreip }}
+bantime = {{ f2b_bantime }}
+maxretry = {{ f2b_maxretry }}
+destemail = {{ f2b_destemail | default(f2b_destemail_admins, true) }}
+sender = {{ f2b_sender }}
+action = %({{ f2b_action }})s
diff --git a/roles/ferm/defaults/main.yml b/roles/ferm/defaults/main.yml
new file mode 100644
index 00000000..f594f1ec
--- /dev/null
+++ b/roles/ferm/defaults/main.yml
@@ -0,0 +1,41 @@
+---
+
+# packages to install
+ferm_packages:
+  - ferm
+
+# default filtering and logging policy for input traffic
+ferm_input_policy: DROP
+ferm_input_log: true
+ferm_input_log_prefix: "{{ ferm_input_policy }} INPUT "
+
+# default filtering and logging for output traffic
+ferm_output_policy: ACCEPT
+ferm_output_log: false
+ferm_output_log_prefix: "{{ ferm_output_policy }} OUTPUT "
+
+# default filtering and logging for forward traffic
+ferm_forward_policy: DROP
+ferm_forward_log: true
+ferm_forward_log_prefix: "{{ ferm_forward_policy }} FORWARD "
+
+# filename into which rules will be written
+# /etc/ferm/{ferm|input|output|forward}.d/<filename>.conf
+ferm_rules_filename: default
+
+# enable anti-lockout rule
+ferm_antilockout_enabled: true
+
+# input rule
+ferm_input_rules: []
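+# hypothetical example entry, rendered by this role's tasks roughly as
+# "saddr @ipfilter((192.0.2.0/24)) proto (tcp) dport (443) ACCEPT;":
+# ferm_input_rules:
+#   - saddr:
+#       - 192.0.2.0/24
+#     proto:
+#       - tcp
+#     dport:
+#       - 443
+#     policy: accept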
+
+# output rule
+ferm_output_rules: []
+
+# forward rule
+ferm_forward_rules: []
+
+# global settings to be put in ferm.d directory
+ferm_global_settings:
+
+...
diff --git a/roles/ferm/handlers/main.yml b/roles/ferm/handlers/main.yml
new file mode 100644
index 00000000..396bf92f
--- /dev/null
+++ b/roles/ferm/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+
+- name: reload systemd
+  systemd:
+    daemon_reload: true
+
+- name: restart ferm
+  systemd:
+    name: ferm
+    state: restarted
+
+...
diff --git a/roles/ferm/tasks/main.yml b/roles/ferm/tasks/main.yml
new file mode 100644
index 00000000..9223a58b
--- /dev/null
+++ b/roles/ferm/tasks/main.yml
@@ -0,0 +1,89 @@
+---
+
+- name: packages
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ ferm_packages }}"
+
+- name: configuration
+  notify: restart ferm
+  template:
+    src: ferm.conf.j2
+    dest: /etc/ferm/ferm.conf
+    backup: true
+
+- name: global
+  when: ferm_global_settings | d(false)
+  notify: restart ferm
+  copy:
+    dest: /etc/ferm/ferm.d/{{ ferm_rules_filename }}.conf
+    content: "{{ ferm_global_settings }}"
+
+- name: directories
+  loop:
+    - /etc/ferm/input.d
+    - /etc/ferm/output.d
+    - /etc/ferm/forward.d
+  file:
+    path: "{{ item }}"
+    state: directory
+
+- name: input
+  when: ferm_input_rules | length > 0
+  notify: restart ferm
+  copy:
+    dest: /etc/ferm/input.d/{{ ferm_rules_filename }}.conf
+    content: |
+      {% for rule in ferm_input_rules %}
+      {% if rule.mod is defined and rule.mod %}mod {{ rule.mod }} {% endif %}
+      {% if rule.helper is defined and rule.helper %}helper {{ rule.helper }} {% endif %}
+      {% if rule.saddr is defined and rule.saddr %}saddr @ipfilter(({{ rule.saddr | join(' ') }})) {% endif %}
+      {% if rule.daddr is defined and rule.daddr %}daddr @ipfilter(({{ rule.daddr | join(' ') }})) {% endif %}
+      {% if rule.proto is defined and rule.proto %}proto ({{ rule.proto | join(' ') }}) {% endif %}
+      {% if rule.dport is defined and rule.dport %}dport ({{ rule.dport | join(' ') }}) {% endif %}
+      {% if rule.sport is defined and rule.sport %}sport ({{ rule.sport | join(' ') }}) {% endif %}
+      {% if rule.policy is defined and rule.policy %}{{ rule.policy | upper }}{% else %}ACCEPT{% endif %};
+      {% endfor %}
+
+- name: output
+  when: ferm_output_rules | length > 0
+  notify: restart ferm
+  copy:
+    dest: /etc/ferm/output.d/{{ ferm_rules_filename }}.conf
+    content: |
+      {% for rule in ferm_output_rules %}
+      {% if rule.mod is defined and rule.mod %}mod {{ rule.mod }} {% endif %}
+      {% if rule.helper is defined and rule.helper %}helper {{ rule.helper }} {% endif %}
+      {% if rule.saddr is defined and rule.saddr %}saddr @ipfilter(({{ rule.saddr | join(' ') }})) {% endif %}
+      {% if rule.daddr is defined and rule.daddr %}daddr @ipfilter(({{ rule.daddr | join(' ') }})) {% endif %}
+      {% if rule.proto is defined and rule.proto %}proto ({{ rule.proto | join(' ') }}) {% endif %}
+      {% if rule.dport is defined and rule.dport %}dport ({{ rule.dport | join(' ') }}) {% endif %}
+      {% if rule.sport is defined and rule.sport %}sport ({{ rule.sport | join(' ') }}) {% endif %}
+      {% if rule.policy is defined and rule.policy %}{{ rule.policy | upper }}{% else %}ACCEPT{% endif %};
+      {% endfor %}
+
+- name: forward
+  when: ferm_forward_rules | length > 0
+  notify: restart ferm
+  copy:
+    dest: /etc/ferm/forward.d/{{ ferm_rules_filename }}.conf
+    content: |
+      {% for rule in ferm_forward_rules %}
+      {% if rule.mod is defined and rule.mod %}mod {{ rule.mod }} {% endif %}
+      {% if rule.helper is defined and rule.helper %}helper {{ rule.helper }} {% endif %}
+      {% if rule.saddr is defined and rule.saddr %}saddr @ipfilter(({{ rule.saddr | join(' ') }})) {% endif %}
+      {% if rule.daddr is defined and rule.daddr %}daddr @ipfilter(({{ rule.daddr | join(' ') }})) {% endif %}
+      {% if rule.proto is defined and rule.proto %}proto ({{ rule.proto | join(' ') }}) {% endif %}
+      {% if rule.dport is defined and rule.dport %}dport ({{ rule.dport | join(' ') }}) {% endif %}
+      {% if rule.sport is defined and rule.sport %}sport ({{ rule.sport | join(' ') }}) {% endif %}
+      {% if rule.policy is defined and rule.policy %}{{ rule.policy | upper }}{% else %}ACCEPT{% endif %};
+      {% endfor %}
+
+- name: service
+  systemd:
+    name: ferm
+    enabled: true
+    state: started
+
+...
diff --git a/roles/ferm/templates/ferm.conf.j2 b/roles/ferm/templates/ferm.conf.j2
new file mode 100644
index 00000000..219236db
--- /dev/null
+++ b/roles/ferm/templates/ferm.conf.j2
@@ -0,0 +1,73 @@
+# -*- shell-script -*-
+
+# include global rules
+@include 'ferm.d/';
+
+domain (ip ip6) {
+  table filter {
+    chain INPUT {
+        policy {{ ferm_input_policy | upper }};
+
+        # connection tracking
+        mod state state INVALID DROP;
+        mod state state (ESTABLISHED RELATED) ACCEPT;
+
+        # allow local connections
+        interface lo ACCEPT;
+
+        # allow ping
+        proto icmp ACCEPT;
+    {% if ferm_antilockout_enabled %}
+
+        # allow ssh, anti-lockout rule
+        proto tcp dport 22 ACCEPT;
+    {% endif %}
+
+        # include input rules
+        @include 'input.d/';
+    {% if ferm_input_log %}
+
+        # logging
+        LOG log-level warning log-prefix "{{ ferm_input_log_prefix }}";
+    {% endif %}
+    }
+
+    chain OUTPUT {
+        policy {{ ferm_output_policy | upper }};
+
+        # connection tracking
+        mod state state INVALID DROP;
+        mod state state (ESTABLISHED RELATED) ACCEPT;
+
+        # allow local connections
+        outerface lo ACCEPT;
+
+        # allow ping
+        proto icmp ACCEPT;
+
+        # include output rules
+        @include 'output.d/';
+    {% if ferm_output_log %}
+
+        # logging
+        LOG log-level warning log-prefix "{{ ferm_output_log_prefix }}";
+    {% endif %}
+    }
+
+    chain FORWARD {
+        policy {{ ferm_forward_policy | upper }};
+
+        # connection tracking
+        mod state state INVALID DROP;
+        mod state state (ESTABLISHED RELATED) ACCEPT;
+
+        # include forward rules
+        @include 'forward.d/';
+    {% if ferm_forward_log %}
+
+        # logging
+        LOG log-level warning log-prefix "{{ ferm_forward_log_prefix }}";
+    {% endif %}
+    }
+  }
+}
diff --git a/roles/import/defaults/main.yml b/roles/import/defaults/main.yml
new file mode 100644
index 00000000..e2fd626e
--- /dev/null
+++ b/roles/import/defaults/main.yml
@@ -0,0 +1,57 @@
+---
+
+import_users:
+  - name: "{{ envsetup_mediaimport_user | d() }}"
+    passwd: "{{ envsetup_mediaimport_passwd | d() }}"
+
+import_packages:
+  - clamav
+  - mysecureshell
+  - openssh-server
+  - openssl
+  - pure-ftpd
+  - python3-unidecode
+  - ubicast-mediaimport
+  # required by ansible tasks
+  - python3-openssl
+
+import_pureftpd_config:
+  - key: AllowDotFiles
+    value: "no"
+  - key: CallUploadScript
+    value: "yes"
+  - key: ChrootEveryone
+    value: "yes"
+  - key: DontResolve
+    value: "yes"
+  - key: PAMAuthentication
+    value: "yes"
+  - key: TLS
+    value: "1"
+
+import_virus_scan_on_upload: false
+
+import_ms_api_key: "{{ envsetup_ms_api_key | d() }}"
+import_ms_server_name: "{{ envsetup_ms_server_name | d() }}"
+
+import_fail2ban_enabled: "{{ envsetup_fail2ban_enabled | d(true) }}"
+import_f2b_jail:
+  name: pure-ftpd
+  content: |
+    [pure-ftpd]
+    enabled = {% if import_fail2ban_enabled | bool %}true{% else %}false{% endif %}
+
+import_firewall_enabled: true
+import_ferm_rules_filename: import
+import_ferm_input_rules:
+  - proto:
+      - tcp
+    dport:
+      - 21
+      - 22
+  - mod: helper
+    helper: ftp
+import_ferm_output_rules: []
+import_ferm_global_settings:
+
+...
diff --git a/10.MediaImport/2.Install_FTP_watch_folder/cron.d/mediaimport b/roles/import/files/mediaimport
similarity index 99%
rename from 10.MediaImport/2.Install_FTP_watch_folder/cron.d/mediaimport
rename to roles/import/files/mediaimport
index ddedd260..3294539e 100644
--- a/10.MediaImport/2.Install_FTP_watch_folder/cron.d/mediaimport
+++ b/roles/import/files/mediaimport
@@ -4,4 +4,3 @@
 # purge empty folders
 0 23 * * * root /usr/bin/find /home/ftp/storage -type d -empty -name thumbnails -delete
 0 23 * * * root /usr/bin/find /home/ftp/storage -type d -empty -name "*20*-*" -delete
-
diff --git a/roles/import/files/mediaimport.py b/roles/import/files/mediaimport.py
new file mode 100644
index 00000000..8b771af7
--- /dev/null
+++ b/roles/import/files/mediaimport.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python3
+
+import argparse
+import crypt
+import shutil
+import subprocess
+
+BASE_DIR = "/home/ftp/storage"
+INCOMING_DIR = BASE_DIR + "/incoming"
+WATCH_DIR = BASE_DIR + "/watchfolder"
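+
+# example CLI usage (hypothetical user name):
+#   mediaimport add --user alice --passwd S3cret --yes
+#   mediaimport list
+#   mediaimport delete --user alice --yes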
+
+
+def main():
+    commands = MediaImport()
+
+    parser = argparse.ArgumentParser(prog="mediaimport", description=commands.__doc__)
+    subparsers = parser.add_subparsers(title="available commands", dest="command")
+    subparsers.required = True
+
+    # add command and arguments
+    parser_add = subparsers.add_parser("add", help=commands.add_user.__doc__)
+    parser_add.add_argument(
+        "-u",
+        "--user",
+        help="username",
+        action="store",
+        type=commands._new_user,
+        required=True,
+    )
+    parser_add.add_argument(
+        "-p", "--passwd", help="password", action="store", type=str, required=True
+    )
+    parser_add.add_argument(
+        "-y", "--yes", action="store_true", help="do not prompt for confirmation"
+    )
+    parser_add.set_defaults(func=commands.add_user)
+
+    # delete command and arguments
+    parser_del = subparsers.add_parser("delete", help=commands.del_user.__doc__)
+    parser_del.add_argument(
+        "-u",
+        "--user",
+        help="username",
+        action="store",
+        type=commands._user,
+        required=True,
+    )
+    parser_del.add_argument(
+        "-y", "--yes", action="store_true", help="do not prompt for confirmation"
+    )
+    parser_del.set_defaults(func=commands.del_user)
+
+    # list command and arguments
+    parser_list = subparsers.add_parser("list", help=commands.list_users.__doc__)
+    parser_list.set_defaults(func=commands.list_users)
+
+    # parse and run
+    args = parser.parse_args()
+    args.func(args)
+
+
+class MediaImport:
+    """Manage mediaimport users."""
+
+    def __init__(self):
+        self.users = self._get_users()
+
+    def _get_users(self) -> list:
+        """Get mysecureshell users list."""
+
+        with open("/etc/passwd") as fh:
+            passwd = fh.readlines()
+
+        return sorted(
+            [
+                u.split(":")[0]
+                for u in passwd
+                if u.split(":")[-1].strip() == "/usr/bin/mysecureshell"
+            ]
+        )
+
+    def _confirm(self, message: str = None):
+        """Ask for confirmation."""
+
+        if message:
+            print(message)
+        choice = input("Do you want to continue [y/N]? ").lower()
+
+        if choice not in ["y", "yes"]:
+            print("Exit.")
+            exit(0)
+
+    def _new_user(self, value: str) -> str:
+        """Check that username does not exist."""
+
+        if value in self.users:
+            raise argparse.ArgumentTypeError(f"{value} already exists")
+
+        return value
+
+    def _user(self, value: str) -> str:
+        """Check that username exists."""
+
+        if value not in self.users:
+            raise argparse.ArgumentTypeError(f"{value} does not exists")
+
+        return value
+
+    def add_user(self, args: argparse.Namespace):
+        """add an user"""
+
+        username = args.user
+        password = args.passwd
+
+        if not args.yes:
+            self._confirm(f"MediaImport user '{username}' will be created.")
+
+        # create user and wait for useradd to complete
+        subprocess.run(
+            [
+                "useradd",
+                "-b",
+                INCOMING_DIR,
+                "-m",
+                "-p",
+                crypt.crypt(password),
+                "-s",
+                "/usr/bin/mysecureshell",
+                "-U",
+                username,
+            ],
+            stdout=subprocess.DEVNULL,
+        )
+
+    def del_user(self, args: argparse.Namespace):
+        """delete an user"""
+
+        username = args.user
+        paths = [f"{INCOMING_DIR}/{username}", f"{WATCH_DIR}/{username}"]
+
+        if not args.yes:
+            self._confirm(f"MediaImport user '{username}' data will be deleted.")
+
+        # remove user and wait for userdel to complete before removing folders
+        subprocess.run(
+            ["userdel", "-f", "-r", username],
+            stdout=subprocess.DEVNULL,
+            stderr=subprocess.DEVNULL,
+        )
+
+        # remove user's folders
+        for path in paths:
+            shutil.rmtree(path, ignore_errors=True)
+
+    def list_users(self, args: argparse.Namespace):
+        """list existing users"""
+
+        if len(self.users):
+            print("\n".join(self.users))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/roles/import/files/on-upload b/roles/import/files/on-upload
new file mode 100755
index 00000000..d5c8ff20
--- /dev/null
+++ b/roles/import/files/on-upload
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b89bef126027d77d064e3b39d7b2046a1d3f16067bdc15dad9b5307f668c046
+size 3522177
diff --git a/roles/import/files/on-upload.go b/roles/import/files/on-upload.go
new file mode 100644
index 00000000..b4d27ecf
--- /dev/null
+++ b/roles/import/files/on-upload.go
@@ -0,0 +1,141 @@
+package main
+
+import (
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"unicode"
+
+	"github.com/jessevdk/go-flags"
+	"golang.org/x/text/transform"
+	"golang.org/x/text/unicode/norm"
+)
+
+const (
+	baseDir       = "/home/ftp/storage"
+	incomingDir   = baseDir + "/incoming"
+	watchDir      = baseDir + "/watchfolder"
+	quarantineDir = baseDir + "/quarantine"
+)
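+
+// on-upload is invoked by pure-ftpd (UPLOADSCRIPT) and by mysecureshell
+// (CallbackUpload) with the path of each uploaded file, e.g. (hypothetical path):
+//   /home/ftp/on-upload --scan-virus /home/ftp/storage/incoming/alice/video.mp4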
+
+func setPermissions(path string) error {
+	stat, err := os.Stat(path)
+	if err != nil {
+		return err
+	}
+
+	switch mode := stat.Mode(); {
+	case mode.IsDir():
+		if err := os.Chmod(path, 0755); err != nil {
+			return err
+		}
+	case mode.IsRegular():
+		if err := os.Chmod(path, 0644); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func cleanName(filename string) string {
+	// normalize
+	isMn := func(r rune) bool {
+		return unicode.Is(unicode.Mn, r)
+	}
+	t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+	cleanedName, _, _ := transform.String(t, filename)
+
+	// replace non allowed characters
+	allowedChars := strings.Split("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-.", "")
+	for _, filenameChar := range strings.Split(cleanedName, "") {
+		flagged := false
+		for _, allowedChar := range allowedChars {
+			if filenameChar == allowedChar {
+				flagged = true
+			}
+		}
+		// if not in allowed list replace by underscore
+		if !flagged {
+			cleanedName = strings.Replace(cleanedName, filenameChar, "_", 1)
+		}
+	}
+
+	return cleanedName
+}
+
+func virusScan(path string) error {
+	// will move file into quarantine directory if infected
+	cmd := exec.Command(
+		"/usr/bin/clamscan",
+		"--quiet",
+		"--infected",
+		"--recursive",
+		"--move="+quarantineDir,
+		"--max-scantime=600000", // 10 minutes
+		"--max-filesize=4000M",
+		"--max-scansize=4000M",
+		"--max-files=200",
+		"--max-recursion=6",
+		"--max-dir-recursion=6",
+		path,
+	)
+	err := cmd.Run()
+
+	return err
+}
+
+func main() {
+	var opts struct {
+		Scan bool `short:"s" long:"scan-virus" description:"Scan file for virus"`
+		Args struct {
+			SrcPaths []string `positional-arg-name:"path" required:"yes" description:"Paths of uploaded files"`
+		} `positional-args:"yes"`
+	}
+
+	if _, err := flags.Parse(&opts); err != nil {
+		os.Exit(1)
+	}
+
+	for _, srcPath := range opts.Args.SrcPaths {
+		// check that file is into incoming folder
+		if !strings.HasPrefix(srcPath, baseDir) {
+			log.Fatalln("file not in base dir (" + baseDir + "): " + srcPath)
+		}
+
+		// ensure permissions are correct
+		if err := setPermissions(srcPath); err != nil {
+			log.Fatalln(err)
+		}
+
+		// scan for virus if enabled
+		if opts.Scan {
+			if err := os.MkdirAll(quarantineDir, 0775); err != nil {
+				log.Fatalln(err)
+			}
+			if err := virusScan(srcPath); err != nil {
+				log.Fatalln(err)
+			}
+		}
+
+		// cleanup and set destination path
+		srcDir, srcFile := filepath.Split(srcPath)
+		dstFile := cleanName(srcFile)
+		dstDir := strings.ReplaceAll(srcDir, incomingDir, watchDir)
+		dstPath := dstDir + dstFile
+
+		// create destination directory
+		if err := os.MkdirAll(dstDir, 0775); err != nil {
+			log.Fatalln(err)
+		}
+
+		// move file into watchfolder
+		if err := os.Rename(srcPath, dstPath); err != nil {
+			log.Fatalln(err)
+		}
+
+		log.Println(srcPath + " moved to " + dstPath)
+	}
+}
diff --git a/roles/import/handlers/main.yml b/roles/import/handlers/main.yml
new file mode 100644
index 00000000..d68cd1e7
--- /dev/null
+++ b/roles/import/handlers/main.yml
@@ -0,0 +1,26 @@
+---
+
+- name: reload systemd
+  systemd:
+    daemon_reload: true
+
+- name: restart pure-ftpd
+  systemd:
+    name: pure-ftpd
+    state: restarted
+
+- name: restart mysecureshell
+  systemd:
+    name: mysecureshell
+    state: restarted
+
+- name: restart mediaimport
+  systemd:
+    name: mediaimport
+    state: restarted
+
+- name: sftp-verif
+  command:
+    cmd: timeout 30 sftp-verif
+
+...
diff --git a/roles/import/tasks/main.yml b/roles/import/tasks/main.yml
new file mode 100644
index 00000000..8dbac10c
--- /dev/null
+++ b/roles/import/tasks/main.yml
@@ -0,0 +1,177 @@
+---
+
+- name: install packages
+  apt:
+    force_apt_get: true
+    name: "{{ import_packages }}"
+    state: present
+
+## USERS
+
+- name: create ftp folders
+  loop:
+    - /home/ftp/storage/incoming
+    - /home/ftp/storage/watchfolder
+  file:
+    path: "{{ item }}"
+    state: directory
+
+- name: deploy users management script
+  copy:
+    src: files/mediaimport.py
+    dest: /usr/local/bin/mediaimport
+    mode: 0755
+
+- name: create users
+  loop: "{{ import_users }}"
+  when:
+    - item.name | d(false)
+    - item.passwd | d(false)
+  no_log: true
+  command: mediaimport add --yes --user {{ item.name }} --passwd {{ item.passwd }}
+  args:
+    creates: /home/ftp/storage/incoming/{{ item.name }}
+
+- name: deploy on-upload script with setuid
+  copy:
+    src: files/on-upload
+    dest: /home/ftp/on-upload
+    mode: 04755
+
+## MYSECURESHELL
+
+- name: set the setuid on mysecureshell
+  file:
+    path: /usr/bin/mysecureshell
+    mode: 04755
+
+- name: configure mysecureshell
+  notify:
+    - restart mysecureshell
+    - sftp-verif
+  template:
+    src: sftp_config.j2
+    dest: /etc/ssh/sftp_config
+
+## PURE-FTPD
+
+- name: set pure-ftpd default config
+  notify: restart pure-ftpd
+  copy:
+    dest: /etc/default/pure-ftpd-common
+    content: |
+      STANDALONE_OR_INETD=standalone
+      VIRTUALCHROOT=false
+      UPLOADSCRIPT="/home/ftp/on-upload{% if import_virus_scan_on_upload %} --scan-virus{% endif %}"
+      UPLOADUID=0
+      UPLOADGID=0
+
+- name: configure pure-ftpd
+  notify: restart pure-ftpd
+  loop: "{{ import_pureftpd_config }}"
+  copy:
+    dest: /etc/pure-ftpd/conf/{{ item.key }}
+    content: "{{ item.value }}"
+
+## PURE-FTPD CERTIFICATES
+
+- name: create certificate directory
+  file:
+    path: /etc/ssl/{{ ansible_fqdn }}
+    state: directory
+
+- name: generate a private key
+  register: import_privkey
+  openssl_privatekey:
+    path: /etc/ssl/{{ ansible_fqdn }}/key.pem
+
+- name: generate a csr
+  when: import_privkey is changed
+  register: import_csr
+  openssl_csr:
+    path: /etc/ssl/{{ ansible_fqdn }}/csr.pem
+    privatekey_path: /etc/ssl/{{ ansible_fqdn }}/key.pem
+    common_name: "{{ ansible_fqdn }}"
+
+- name: generate a self-signed certificate
+  when: import_csr is changed
+  register: import_cert
+  openssl_certificate:
+    path: /etc/ssl/{{ ansible_fqdn }}/cert.pem
+    privatekey_path: /etc/ssl/{{ ansible_fqdn }}/key.pem
+    csr_path: /etc/ssl/{{ ansible_fqdn }}/csr.pem
+    provider: selfsigned
+
+- name: concatenate key and certificate
+  when: import_cert is changed
+  notify: restart pure-ftpd
+  shell: >
+    cat /etc/ssl/{{ ansible_fqdn }}/key.pem /etc/ssl/{{ ansible_fqdn }}/cert.pem > /etc/ssl/private/pure-ftpd.pem;
+    chmod 600 /etc/ssl/private/pure-ftpd.pem;
+
+- name: generate dhparams
+  notify: restart pure-ftpd
+  openssl_dhparam:
+    path: /etc/ssl/private/pure-ftpd-dhparams.pem
+    size: 1024
+
+## MEDIAIMPORT
+
+- name: configure mediaimport
+  when:
+    - import_ms_api_key | d(false)
+    - import_ms_server_name | d(false)
+  notify: restart mediaimport
+  template:
+    src: mediaimport.json.j2
+    dest: /etc/mediaserver/mediaimport.json
+    backup: true
+    mode: 0640
+
+- name: mediaimport service
+  systemd:
+    name: mediaimport
+    enabled: true
+    state: started
+
+- name: setup cron job
+  copy:
+    src: files/mediaimport
+    dest: /etc/cron.d/mediaimport
+
+- name: create service override directory
+  file:
+    path: /etc/systemd/system/mediaimport.service.d
+    state: directory
+
+- name: create service override file
+  notify: reload systemd
+  copy:
+    dest: /etc/systemd/system/mediaimport.service.d/override.conf
+    content: |
+      [Service]
+      User=root
+      Group=root
+
+# FAIL2BAN
+
+- name: fail2ban
+  when: import_fail2ban_enabled
+  vars:
+    f2b_jail: "{{ import_f2b_jail }}"
+  include_role:
+    name: fail2ban
+
+# FIREWALL
+
+- name: firewall
+  when: import_firewall_enabled
+  vars:
+    ferm_rules_filename: "{{ import_ferm_rules_filename }}"
+    ferm_input_rules: "{{ import_ferm_input_rules }}"
+    ferm_output_rules: "{{ import_ferm_output_rules }}"
+    ferm_global_settings: "{{ import_ferm_global_settings }}"
+  include_role:
+    name: ferm
+
+...
diff --git a/roles/import/templates/mediaimport.json.j2 b/roles/import/templates/mediaimport.json.j2
new file mode 100644
index 00000000..d7ba4072
--- /dev/null
+++ b/roles/import/templates/mediaimport.json.j2
@@ -0,0 +1,15 @@
+{
+  "email_to": "support-team@ubicast.eu",
+  "users": [{% for user in import_users %}
+    {
+      "enabled": true,
+      "mediaserver_api_key": "{{ import_ms_api_key }}",
+      "mediaserver_url": "https://{{ import_ms_server_name }}",
+      "folders": [
+        {
+          "path": "/home/ftp/storage/watchfolder/{{ user.name }}"
+        }
+      ]
+    }{% if not loop.last %},{% endif %}
+  {% endfor %}]
+}
diff --git a/roles/import/templates/sftp_config.j2 b/roles/import/templates/sftp_config.j2
new file mode 100644
index 00000000..959abf37
--- /dev/null
+++ b/roles/import/templates/sftp_config.j2
@@ -0,0 +1,26 @@
+## MySecureShell Configuration File
+# To get more information on all possible options, please look at the doc:
+# http://mysecureshell.readthedocs.org
+
+#Default rules for everybody
+<Default>
+        GlobalDownload          50k
+        GlobalUpload            0
+        Download                5k
+        Upload                  0
+        StayAtHome              true
+        VirtualChroot           true
+        LimitConnection         100
+        LimitConnectionByUser   2
+        LimitConnectionByIP     10
+        Home                    /home/ftp/storage/incoming/$USER
+        CallbackUpload          "/home/ftp/on-upload{% if import_virus_scan_on_upload %} --scan-virus{% endif %} /home/ftp/storage/incoming/$USER$LAST_FILE_PATH"
+        IdleTimeOut             5m
+        ResolveIP               false
+        HideNoAccess            true
+        DefaultRights           0640 0750
+        ShowLinksAsLinks        false
+        LogFile                 /var/log/sftp-server.log
+        LogLevel                6
+        LogSyslog               true
+</Default>
diff --git a/roles/init/defaults/main.yml b/roles/init/defaults/main.yml
new file mode 100644
index 00000000..784cc1cc
--- /dev/null
+++ b/roles/init/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+
+init_packages:
+  - apt-utils
+  - gnupg
+  - ssh-client
+
+...
diff --git a/roles/init/tasks/main.yml b/roles/init/tasks/main.yml
new file mode 100644
index 00000000..633d01db
--- /dev/null
+++ b/roles/init/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+
+- name: install initial packages
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ init_packages }}"
+
+- name: configure proxy
+  when: proxy_when is not defined or proxy_when != "end"
+  include_role:
+    name: proxy
+    allow_duplicates: true
+
+...
diff --git a/roles/letsencrypt/defaults/main.yml b/roles/letsencrypt/defaults/main.yml
new file mode 100644
index 00000000..5e268b2a
--- /dev/null
+++ b/roles/letsencrypt/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+
+letsencrypt_domains: []
+letsencrypt_webroot: /tmp/letsencrypt
+letsencrypt_email: sysadmin@ubicast.eu
+letsencrypt_testing: false
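+# the domain list defaults to the server_name values found in the nginx
+# configuration; it can also be set explicitly, e.g. with hypothetical values:
+# letsencrypt_domains:
+#   - media.example.org
+#   - manager.example.org
+# letsencrypt_testing: true  # use the Let's Encrypt staging CA for a trial run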
+
+...
diff --git a/roles/letsencrypt/handlers/main.yml b/roles/letsencrypt/handlers/main.yml
new file mode 100644
index 00000000..38fab58a
--- /dev/null
+++ b/roles/letsencrypt/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+
+- name: restart nginx
+  service:
+    name: nginx
+    state: restarted
+
+...
diff --git a/roles/letsencrypt/tasks/main.yml b/roles/letsencrypt/tasks/main.yml
new file mode 100644
index 00000000..a1dedd4a
--- /dev/null
+++ b/roles/letsencrypt/tasks/main.yml
@@ -0,0 +1,123 @@
+---
+
+- name: install certbot
+  package:
+    force_apt_get: true
+    install_recommends: false
+    name: certbot
+    state: present
+
+- name: get all server_name values
+  when: letsencrypt_domains == []
+  register: letsencrypt_nginx_output
+  shell: |
+    set -o pipefail
+    nginx -T 2>&1 | grep -v localhost | grep -P '^\s+server_name\s+.*;$' | sed -r 's/\s+server_name\s+(.*);/\1/' | uniq
+  args:
+    executable: /bin/bash
+  changed_when: false
+
+- name: save result as list
+  when: letsencrypt_domains == []
+  set_fact:
+    letsencrypt_domains: "{{ letsencryt_nginx_output.stdout.split() }}"
+
+- name: save domains list in a file
+  register: letsencrypt_save_list
+  copy:
+    dest: /etc/letsencrypt/domains.txt
+    content: |
+      {% for domain in letsencrypt_domains %}
+      {{ domain }}
+      {% endfor %}
+
+- name: create webroot directory
+  file:
+    path: "{{ letsencrypt_webroot }}"
+    state: directory
+
+- name: create renewal hook directories
+  loop:
+    - /etc/letsencrypt/renewal-hooks/pre
+    - /etc/letsencrypt/renewal-hooks/deploy
+  file:
+    path: "{{ item }}"
+    state: directory
+
+- name: create pre hook script
+  copy:
+    dest: /etc/letsencrypt/renewal-hooks/pre/mkdir
+    mode: 0755
+    content: |
+      #!/usr/bin/env bash
+      CERTBOT_DOCROOT="{{ letsencrypt_webroot }}"
+      mkdir -p "$CERTBOT_DOCROOT"
+      chmod 755 "$CERTBOT_DOCROOT"
+
+- name: create deploy hook script
+  copy:
+    dest: /etc/letsencrypt/renewal-hooks/deploy/nginx
+    mode: 0755
+    content: |
+      #!/usr/bin/env bash
+      nginx -t > /dev/null 2>&1 && systemctl reload nginx
+
+- name: test generate certificates
+  when:
+    - letsencrypt_domains != []
+    - letsencrypt_save_list is changed
+  register: letsencrypt_dry_run
+  ignore_errors: true
+  command: |
+    certbot certonly \
+      --dry-run \
+      -n --agree-tos -m {{ letsencrypt_email }} \
+      --webroot -w {{ letsencrypt_webroot }} \
+      --expand \
+      -d {{ letsencrypt_domains | join(',') }}
+
+- name: remove domains list file in case of failure
+  when: letsencrypt_dry_run is failed
+  file:
+    path: "{{ letsencrypt_save_list.dest }}"
+    state: absent
+
+- name: exit in case of failure
+  when: letsencrypt_dry_run is failed
+  fail:
+
+- name: generate certificates
+  notify: restart nginx
+  when:
+    - letsencrypt_domains != []
+    - letsencrypt_save_list is changed
+    - letsencrypt_dry_run is succeeded
+  command: |
+    certbot certonly \
+      {% if letsencrypt_testing %}--staging{% endif %} \
+      -n --agree-tos -m {{ letsencrypt_email }} \
+      --webroot -w {{ letsencrypt_webroot }} \
+      --expand \
+      -d {{ letsencrypt_domains | join(',') }}
+
+- name: update nginx certificate configuration
+  when:
+    - letsencrypt_domains != []
+    - letsencrypt_save_list is changed
+    - letsencrypt_dry_run is succeeded
+  notify: restart nginx
+  lineinfile:
+    path: /etc/nginx/conf.d/ssl_certificate.conf
+    regexp: 'ssl_certificate\s+([\w/\-\_\.]+);'
+    line: 'ssl_certificate /etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/fullchain.pem;'
+
+- name: update nginx certificate key configuration
+  when:
+    - letsencrypt_domains != []
+    - letsencrypt_save_list is changed
+    - letsencrypt_dry_run is succeeded
+  notify: restart nginx
+  lineinfile:
+    path: /etc/nginx/conf.d/ssl_certificate.conf
+    regexp: 'ssl_certificate_key\s+([\w/\-\_\.]+);'
+    line: 'ssl_certificate_key /etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/privkey.pem;'
+
+...
diff --git a/roles/locale/defaults/main.yml b/roles/locale/defaults/main.yml
new file mode 100644
index 00000000..984fafa8
--- /dev/null
+++ b/roles/locale/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+
+locale_packages:
+  - locales
+  - tzdata
+
+init_locale: "{{ envsetup_locale | d('C.UTF-8', true) }}"
+
+init_timezone: "{{ envsetup_timezone | d('Etc/UTC', true) }}"
+
+...
diff --git a/roles/locale/handlers/main.yml b/roles/locale/handlers/main.yml
new file mode 100644
index 00000000..f8d52515
--- /dev/null
+++ b/roles/locale/handlers/main.yml
@@ -0,0 +1,11 @@
+---
+
+- name: update locale
+  command: locale-gen
+
+- name: restart cron
+  service:
+    name: cron
+    state: restarted
+
+...
diff --git a/roles/locale/tasks/main.yml b/roles/locale/tasks/main.yml
new file mode 100644
index 00000000..e40deb53
--- /dev/null
+++ b/roles/locale/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+
+- name: install locale packages
+  apt:
+    force_apt_get: true
+    name: "{{ locale_packages }}"
+
+- name: generate locale
+  locale_gen:
+    name: "{{ init_locale }}"
+
+- name: set locale
+  notify: update locale
+  copy:
+    dest: /etc/default/locale
+    content: |
+      LANG="{{ init_locale }}"
+      LANGUAGE="{{ init_locale }}"
+      LC_ALL="{{ init_locale }}"
+
+- name: set locale.gen
+  notify: update locale
+  lineinfile:
+    path: /etc/locale.gen
+    regexp: '^(?:# )?({{ init_locale }}.*)$'
+    backrefs: true
+    line: '\1'
+
+- name: set timezone
+  notify: restart cron
+  timezone:
+    name: "{{ init_timezone }}"
+
+...
diff --git a/roles/manager/defaults/main.yml b/roles/manager/defaults/main.yml
new file mode 100644
index 00000000..1bad1e9c
--- /dev/null
+++ b/roles/manager/defaults/main.yml
@@ -0,0 +1,47 @@
+---
+
+manager_packages:
+  - ubicast-skyreach
+  - ubicast-skyreach-runtime
+
+manager_testing: false
+manager_mail: dev-mediaserver@ubicast.eu
+manager_hostname: "{{ envsetup_cm_server_name }}"
+manager_default_email_sender: "noreply@{{ manager_hostname }}"
+manager_email_sender: "{{ envsetup_email_sender | default(manager_default_email_sender, true) }}"
+manager_proxy_http: "{{ envsetup_proxy_http }}"
+
+manager_fail2ban_enabled: "{{ envsetup_fail2ban_enabled | d(true) }}"
+manager_f2b_filter:
+  name: manager
+  content: |
+    [INCLUDES]
+    before = common.conf
+    [Definition]
+    failregex = INFO Wrong credentials given to login\. IP: <HOST>, username: \S+\.$
+                INFO Wrong crendentials given to login\. IP: <HOST>, username: \S+\.$
+    ignoreregex =
+manager_f2b_jail:
+  name: manager
+  content: |
+    [manager]
+    logpath = /home/skyreach/.skyreach/logs/skyreach.log
+    enabled = {% if manager_fail2ban_enabled | bool %}true{% else %}false{% endif %}
+
+manager_firewall_enabled: true
+manager_ferm_rules_filename: manager
+manager_ferm_input_rules:
+  - proto:
+      - tcp
+    dport:
+      - 80
+      - 443
+  - saddr: "{{ groups['all'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list }}"
+    proto:
+      - tcp
+    dport:
+      - 3142
+manager_ferm_output_rules: []
+manager_ferm_global_settings:
+
+...
diff --git a/roles/manager/files/set_site_url.py b/roles/manager/files/set_site_url.py
new file mode 100644
index 00000000..e72283ac
--- /dev/null
+++ b/roles/manager/files/set_site_url.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
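+"""Set the Miris Manager site URL in the database and write a flag file used by Ansible's "creates" check."""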
+
+import argparse
+
+import django
+
+django.setup()
+
+from skyreach_site.base.models import SiteSettings
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("url", help="URL of the Miris Manager", type=str)
+    args = parser.parse_args()
+
+    ss = SiteSettings.get_singleton()
+    ss.url = "https://{}".format(args.url)
+    ss.save()
+
+    path = "/home/skyreach/{}.log".format(args.url)
+    with open(path, "w") as flag:
+        flag.write("ok")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/roles/manager/handlers/main.yml b/roles/manager/handlers/main.yml
new file mode 100644
index 00000000..28b894a3
--- /dev/null
+++ b/roles/manager/handlers/main.yml
@@ -0,0 +1,13 @@
+---
+
+- name: restart nginx
+  service:
+    name: nginx
+    state: restarted
+
+- name: restart apt-cacher-ng
+  service:
+    name: apt-cacher-ng
+    state: restarted
+
+...
diff --git a/roles/manager/tasks/main.yml b/roles/manager/tasks/main.yml
new file mode 100644
index 00000000..4c1973bd
--- /dev/null
+++ b/roles/manager/tasks/main.yml
@@ -0,0 +1,93 @@
+---
+
+- name: mirismanager install
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ manager_packages }}"
+
+- name: configure email sender address
+  notify: restart nginx
+  lineinfile:
+    path: /home/skyreach/htdocs/skyreach_site/settings_override.py
+    regexp: '^#? ?DEFAULT_FROM_EMAIL.*'
+    line: "DEFAULT_FROM_EMAIL = '{{ manager_email_sender }}'"
+    backup: true
+
+- name: configure domain name in nginx conf
+  notify: restart nginx
+  replace:
+    path: /etc/nginx/sites-available/skyreach.conf
+    regexp: '^(\s*server_name).*;$'
+    replace: '\1 {{ manager_hostname }};'
+    backup: true
+
+- name: configure domain name in database
+  become: true
+  become_user: skyreach
+  script: files/set_site_url.py {{ manager_hostname }}
+  environment:
+    PYTHONPATH: "/home/skyreach/htdocs/skyreach_site:/home/skyreach/htdocs:${PYTHONPATH}"
+    DJANGO_SETTINGS_MODULE: settings
+  args:
+    executable: python3
+    creates: /home/skyreach/.{{ manager_hostname }}.log
+
+- name: resolve domain name to localhost ipv4
+  when: not in_docker
+  notify: restart nginx
+  lineinfile:
+    path: /etc/hosts
+    line: '127.0.1.1 {{ manager_hostname }}'
+    backup: true
+
+- name: ensure skyreach is running
+  service:
+    name: skyreach
+    enabled: true
+    state: started
+
+- name: check apt cacher ng config exists
+  register: manager_apt_cacher_conf
+  stat:
+    path: /etc/apt-cacher-ng/acng.conf
+
+- name: configure apt-cacher-ng
+  when:
+    - manager_apt_cacher_conf.stat.exists
+    - manager_proxy_http | d(false)
+  notify: restart apt-cacher-ng
+  lineinfile:
+    path: /etc/apt-cacher-ng/acng.conf
+    regexp: '^Proxy: .*'
+    line: 'Proxy: {{ manager_proxy_http }}'
+
+- name: ensure apt-cacher-ng is running
+  service:
+    name: apt-cacher-ng
+    enabled: true
+    state: started
+
+# FAIL2BAN
+
+- name: fail2ban
+  when: manager_fail2ban_enabled
+  vars:
+    f2b_filter: "{{ manager_f2b_filter }}"
+    f2b_jail: "{{ manager_f2b_jail }}"
+  include_role:
+    name: fail2ban
+
+# FIREWALL
+
+- name: firewall
+  when: manager_firewall_enabled
+  vars:
+    ferm_rules_filename: "{{ manager_ferm_rules_filename }}"
+    ferm_input_rules: "{{ manager_ferm_input_rules }}"
+    ferm_output_rules: "{{ manager_ferm_output_rules }}"
+    ferm_global_settings: "{{ manager_ferm_global_settings }}"
+  include_role:
+    name: ferm
+
+...
diff --git a/roles/monitor/defaults/main.yml b/roles/monitor/defaults/main.yml
new file mode 100644
index 00000000..fb6cb483
--- /dev/null
+++ b/roles/monitor/defaults/main.yml
@@ -0,0 +1,38 @@
+---
+
+monitor_packages:
+  - ubicast-monitor
+  - ubicast-monitor-runtime
+
+monitor_shell_pwd: "{{ envsetup_monitor_shell_pwd }}"
+monitor_hostname: "{{ envsetup_monitor_server_name }}"
+
+monitor_fail2ban_enabled: "{{ envsetup_fail2ban_enabled | d(true) }}"
+monitor_f2b_filter:
+  name: monitor
+  content: |
+    [INCLUDES]
+    before = common.conf
+    [Definition]
+    failregex = INFO Wrong credentials given to login\. IP: <HOST>, username: \S+\.$
+                INFO Wrong crendentials given to login\. IP: <HOST>, username: \S+\.$
+    ignoreregex =
+monitor_f2b_jail:
+  name: monitor
+  content: |
+    [monitor]
+    logpath = /home/msmonitor/msmonitor/logs/site.log
+    enabled = {% if monitor_fail2ban_enabled | bool %}true{% else %}false{% endif %}
+
+monitor_firewall_enabled: true
+monitor_ferm_rules_filename: monitor
+monitor_ferm_input_rules:
+  - proto:
+      - tcp
+    dport:
+      - 80
+      - 443
+monitor_ferm_output_rules: []
+monitor_ferm_global_settings:
+
+...
diff --git a/roles/monitor/handlers/main.yml b/roles/monitor/handlers/main.yml
new file mode 100644
index 00000000..38fab58a
--- /dev/null
+++ b/roles/monitor/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+
+- name: restart nginx
+  service:
+    name: nginx
+    state: restarted
+
+...
diff --git a/roles/monitor/tasks/main.yml b/roles/monitor/tasks/main.yml
new file mode 100644
index 00000000..82b01504
--- /dev/null
+++ b/roles/monitor/tasks/main.yml
@@ -0,0 +1,70 @@
+---
+
+- name: munin install
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: ubicast-config
+
+- name: monitor install
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ monitor_packages }}"
+
+- name: set msmonitor account password
+  user:
+    name: msmonitor
+    password: "{{ monitor_shell_pwd | password_hash('sha512', 'monitor') }}"
+
+- name: configure domain name in nginx conf
+  notify: restart nginx
+  replace:
+    path: /etc/nginx/sites-available/msmonitor.conf
+    regexp: '^(\s*server_name).*;$'
+    replace: '\1 {{ monitor_hostname }};'
+    backup: true
+
+- name: resolve domain name to localhost ipv4
+  when: not in_docker
+  notify: restart nginx
+  lineinfile:
+    path: /etc/hosts
+    line: '127.0.1.1 {{ monitor_hostname }}'
+    backup: true
+
+- name: ensure monitor is running
+  service:
+    name: msmonitor
+    enabled: true
+    state: started
+
+- name: fix directory permissions
+  file:
+    path: /home/msmonitor/msmonitor
+    mode: 0755
+    state: directory
+
+# FAIL2BAN
+
+- name: fail2ban
+  when: monitor_fail2ban_enabled
+  vars:
+    f2b_filter: "{{ monitor_f2b_filter }}"
+    f2b_jail: "{{ monitor_f2b_jail }}"
+  include_role:
+    name: fail2ban
+
+# FIREWALL
+
+- name: firewall
+  when: monitor_firewall_enabled
+  vars:
+    ferm_rules_filename: "{{ monitor_ferm_rules_filename }}"
+    ferm_input_rules: "{{ monitor_ferm_input_rules }}"
+    ferm_output_rules: "{{ monitor_ferm_output_rules }}"
+    ferm_global_settings: "{{ monitor_ferm_global_settings }}"
+  include_role:
+    name: ferm
+
+...
diff --git a/roles/netcapture/defaults/main.yml b/roles/netcapture/defaults/main.yml
new file mode 100644
index 00000000..d2de6931
--- /dev/null
+++ b/roles/netcapture/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+
+netcapture_registry_host: registry.ubicast.eu
+netcapture_registry_login: "{{ envsetup_netcapture_docker_login }}"
+netcapture_registry_password: "{{ envsetup_netcapture_docker_pwd }}"
+netcapture_cm_url: "https://{{ envsetup_cm_server_name | default('mirismanager.ubicast.eu', true) }}"
+netcapture_check_ssl: true
+netcapture_conf_folder: /etc/miris/conf
+netcapture_media_folder: /data/netcapture/media
+netcapture_hw_acceleration: false
+netcapture_miris_user_pwd: "{{ lookup('password', '/tmp/passwordfile length=12 chars=ascii_letters,digits') }}"
+netcapture_miris_auth: true
+
+...
diff --git a/roles/netcapture/tasks/main.yml b/roles/netcapture/tasks/main.yml
new file mode 100644
index 00000000..04adc331
--- /dev/null
+++ b/roles/netcapture/tasks/main.yml
@@ -0,0 +1,73 @@
+---
+
+- name: requirements install
+  apt:
+    force_apt_get: true
+    name:
+      - apt-transport-https
+      - ca-certificates
+      - curl
+      - gnupg-agent
+      - lsb-release
+      - software-properties-common
+    state: present
+
+- name: docker repo key
+  apt_key:
+    url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg
+    state: present
+
+- name: docker repo
+  apt_repository:
+    repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release | lower }} stable
+    state: present
+    filename: docker-ce
+
+- name: docker install
+  apt:
+    force_apt_get: true
+    name: docker-ce
+    state: present
+
+- name: docker service
+  systemd:
+    name: docker
+    enabled: true
+    state: started
+
+- name: netcapture install
+  apt:
+    force_apt_get: true
+    name: python3-miris-netcapture
+    state: present
+
+- name: netcapture config
+  template:
+    src: netcapture.json.j2
+    dest: /etc/miris/netcapture.json
+
+- name: netcapture miris
+  template:
+    src: miris-api.json.j2
+    dest: /etc/miris/conf/api.json
+
+- name: netcapture config dir
+  file:
+    path: "{{ netcapture_conf_folder }}"
+    group: video
+    mode: u=rwX,g=rwX,o=r
+    recurse: true
+    state: directory
+
+- name: netcapture media dir
+  file:
+    path: "{{ netcapture_media_folder }}"
+    group: video
+    mode: u=rwX,g=rwX,o=r
+    recurse: true
+    state: directory
+
+# TODO: add fail2ban ?
+# TODO: add firewall
+
+...
diff --git a/roles/netcapture/templates/miris-api.json.j2 b/roles/netcapture/templates/miris-api.json.j2
new file mode 100644
index 00000000..0f00edfa
--- /dev/null
+++ b/roles/netcapture/templates/miris-api.json.j2
@@ -0,0 +1,4 @@
+{
+  "auth_user_password": "{{ netcapture_miris_user_pwd }}",
+  "auth_enable": {% if netcapture_miris_auth %}true{% else %}false{% endif %}
+}
diff --git a/roles/netcapture/templates/netcapture.json.j2 b/roles/netcapture/templates/netcapture.json.j2
new file mode 100644
index 00000000..4db5c276
--- /dev/null
+++ b/roles/netcapture/templates/netcapture.json.j2
@@ -0,0 +1,10 @@
+{
+  "docker_registry_host": "{{ netcapture_registry_host }}",
+  "docker_registry_login": "{{ netcapture_registry_login }}",
+  "docker_registry_password": "{{ netcapture_registry_password }}",
+  "mirismanager_url": "{{ netcapture_cm_url }}",
+  "mirismanager_check_ssl": {% if netcapture_check_ssl %}true{% else %}false{% endif %},
+  "netcapture_conf_folder": "{{ netcapture_conf_folder }}",
+  "netcapture_media_folder": "{{ netcapture_media_folder }}",
+  "enable_hw_acceleration": {% if netcapture_hw_acceleration %}true{% else %}false{% endif %}
+}
diff --git a/roles/network/defaults/main.yml b/roles/network/defaults/main.yml
new file mode 100644
index 00000000..ff57ed21
--- /dev/null
+++ b/roles/network/defaults/main.yml
@@ -0,0 +1,18 @@
+---
+
+network_apply: false
+
+network_packages:
+  - cockpit
+  - libnm-dev
+  - network-manager
+  - python3-dbus
+
+network_ip: "{{ lookup('env', 'NETWORK_IP') | ipaddr }}"
+network_mask: "{{ lookup('env', 'NETWORK_MASK') }}"
+network_ip_mask: "{{ network_ip }}/{{ network_mask }}"
+network_ip_mask_cidr: "{{ network_ip_mask | ipaddr }}"
+network_gateway: "{{ lookup('env', 'NETWORK_GATEWAY') | ipaddr }}"
+network_dns: "{{ lookup('env', 'NETWORK_DNS').split(',') | ipaddr }}"
+
+...
diff --git a/roles/network/tasks/main.yml b/roles/network/tasks/main.yml
new file mode 100644
index 00000000..8a37b81d
--- /dev/null
+++ b/roles/network/tasks/main.yml
@@ -0,0 +1,63 @@
+---
+
+- name: if network settings are set
+  when:
+    - network_apply | bool
+    - network_ip | d(false)
+    - network_mask | d(false)
+    - network_gateway | d(false)
+    - network_dns | d(false)
+  block:
+
+    # Was needed when using ifupdown but probably not with network-manager
+    # - name: prevent dhclient from erasing dns config
+    #   copy:
+    #     dest: /etc/dhcp/dhclient-enter-hooks.d/nodnsupdate
+    #     mode: 0755
+    #     content: |
+    #       #!/bin/sh
+    #       make_resolv_conf() {
+    #           :
+    #       }
+
+    - name: packages
+      apt:
+        force_apt_get: true
+        name: "{{ network_packages }}"
+        state: present
+
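+    # Reduce /etc/network/interfaces to loopback only so NetworkManager manages the main interface.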
+    - name: cleanup
+      register: network_cleanup_interfaces
+      copy:
+        dest: /etc/network/interfaces
+        backup: true
+        content: |
+          # This file describes the network interfaces available on your system
+          # and how to activate them. For more information, see interfaces(5).
+
+          source /etc/network/interfaces.d/*
+
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+
+    - name: service
+      when: network_cleanup_interfaces is changed
+      systemd:
+        name: network-manager
+        enabled: true
+        state: restarted
+
+    - name: interface
+      nmcli:
+        conn_name: "envsetup-{{ ansible_default_ipv4.interface }}"
+        type: ethernet
+        ifname: "{{ ansible_default_ipv4.interface }}"
+        ip4: "{{ network_ip_mask_cidr | ipv4 }}"
+        gw4: "{{ network_gateway | ipv4 }}"
+        dns4: "{{ network_dns | ipv4 }}"
+        autoconnect: true
+        activate: false
+        state: present
+
+...
diff --git a/roles/nginx/defaults/main.yml b/roles/nginx/defaults/main.yml
new file mode 100644
index 00000000..7946e67a
--- /dev/null
+++ b/roles/nginx/defaults/main.yml
@@ -0,0 +1,13 @@
+---
+
+nginx_packages:
+  - nginx
+  - uwsgi
+  - uwsgi-plugin-python3
+
+nginx_server_name:
+
+nginx_ssl_certificate: /etc/ssl/certs/ssl-cert-snakeoil.pem
+nginx_ssl_certificate_key: /etc/ssl/private/ssl-cert-snakeoil.key
+
+...
diff --git a/roles/nginx/handlers/main.yml b/roles/nginx/handlers/main.yml
new file mode 100644
index 00000000..38fab58a
--- /dev/null
+++ b/roles/nginx/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+
+- name: restart nginx
+  service:
+    name: nginx
+    state: restarted
+
+...
diff --git a/roles/nginx/tasks/_certs.yml b/roles/nginx/tasks/_certs.yml
new file mode 100644
index 00000000..5a734831
--- /dev/null
+++ b/roles/nginx/tasks/_certs.yml
@@ -0,0 +1,41 @@
+---
+
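+# Legacy installs used /etc/nginx/conf.d/ssl.conf; keep only its ssl_certificate lines
+# in ssl_certificate.conf and archive the old file.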
+- name: nginx check old ssl conf exists
+  register: nginx_old_ssl_conf
+  stat:
+    path: /etc/nginx/conf.d/ssl.conf
+
+- name: nginx migrate old ssl certificate conf
+  when: nginx_old_ssl_conf.stat.exists
+  notify: restart nginx
+  loop:
+    - grep ssl_certificate /etc/nginx/conf.d/ssl.conf > /etc/nginx/conf.d/ssl_certificate.conf
+    - mv /etc/nginx/conf.d/ssl.conf /etc/nginx/conf.d/ssl.conf.old
+  command: "{{ item }}"
+
+- name: nginx check ssl cert conf exists
+  register: nginx_ssl_cert_conf
+  stat:
+    path: /etc/nginx/conf.d/ssl_certificate.conf
+
+- name: nginx update ssl certificate conf
+  when:
+    - nginx_ssl_cert_conf.stat.exists
+    - nginx_ssl_certificate != "/etc/ssl/certs/ssl-cert-snakeoil.pem"
+  notify: restart nginx
+  lineinfile:
+    path: /etc/nginx/conf.d/ssl_certificate.conf
+    regexp: 'ssl_certificate\s+([\w/\-\_\.]+);'
+    line: 'ssl_certificate {{ nginx_ssl_certificate }};'
+
+- name: nginx update ssl certificate key conf
+  when:
+    - nginx_ssl_cert_conf.stat.exists
+    - nginx_ssl_certificate_key != "/etc/ssl/private/ssl-cert-snakeoil.key"
+  notify: restart nginx
+  lineinfile:
+    path: /etc/nginx/conf.d/ssl_certificate.conf
+    regexp: 'ssl_certificate_key\s+([\w/\-\_\.]+);'
+    line: 'ssl_certificate_key {{ nginx_ssl_certificate_key }};'
+
+...
diff --git a/roles/nginx/tasks/_config.yml b/roles/nginx/tasks/_config.yml
new file mode 100644
index 00000000..d227f3ca
--- /dev/null
+++ b/roles/nginx/tasks/_config.yml
@@ -0,0 +1,12 @@
+---
+
+- name: nginx remove default vhost
+  notify: restart nginx
+  loop:
+    - /etc/nginx/sites-enabled/default
+    - /etc/nginx/sites-enabled/default.conf
+  file:
+    path: "{{ item }}"
+    state: absent
+
+...
diff --git a/roles/nginx/tasks/_install.yml b/roles/nginx/tasks/_install.yml
new file mode 100644
index 00000000..a251408c
--- /dev/null
+++ b/roles/nginx/tasks/_install.yml
@@ -0,0 +1,16 @@
+---
+
+- name: remove apache
+  apt:
+    force_apt_get: true
+    name: apache2
+    state: absent
+
+- name: nginx install
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ nginx_packages }}"
+    state: present
+
+...
diff --git a/roles/nginx/tasks/main.yml b/roles/nginx/tasks/main.yml
new file mode 100644
index 00000000..39541abd
--- /dev/null
+++ b/roles/nginx/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+
+- include_tasks: _install.yml
+- include_tasks: _config.yml
+- include_tasks: _certs.yml
+
+- name: ensure nginx is running
+  service:
+    name: nginx
+    enabled: true
+    state: started
+
+...
diff --git a/roles/ntp/defaults/main.yml b/roles/ntp/defaults/main.yml
new file mode 100644
index 00000000..0a627dc5
--- /dev/null
+++ b/roles/ntp/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+
+ntp_servers: "{{ envsetup_ntp_server }}"
+
+...
diff --git a/roles/ntp/handlers/main.yml b/roles/ntp/handlers/main.yml
new file mode 100644
index 00000000..fc82115e
--- /dev/null
+++ b/roles/ntp/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+
+- name: systemd daemon reload
+  systemd:
+    daemon_reload: true
+
+- name: restart ntp
+  service:
+    name: ntp
+    state: restarted
+
+...
diff --git a/roles/ntp/tasks/main.yml b/roles/ntp/tasks/main.yml
new file mode 100644
index 00000000..4902049b
--- /dev/null
+++ b/roles/ntp/tasks/main.yml
@@ -0,0 +1,48 @@
+---
+
+- name: create systemd-timesyncd service config directory
+  file:
+    path: /lib/systemd/system/systemd-timesyncd.service.d
+    state: directory
+    mode: 0755
+
+- name: ntp add condition to systemd-timesyncd service
+  notify: systemd daemon reload
+  copy:
+    dest: /lib/systemd/system/systemd-timesyncd.service.d/disable-with-time-daemon.conf
+    content: |
+      [Unit]
+      # don't run timesyncd if we have another NTP daemon installed
+      ConditionFileIsExecutable=!/usr/sbin/ntpd
+      ConditionFileIsExecutable=!/usr/sbin/openntpd
+      ConditionFileIsExecutable=!/usr/sbin/chronyd
+      ConditionFileIsExecutable=!/usr/sbin/VBoxService
+
+- name: ntp disable systemd-timesyncd service
+  notify: restart ntp
+  systemd:
+    name: systemd-timesyncd
+    enabled: false
+    state: stopped
+
+- name: ntp install
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: ntp
+    state: present
+
+- name: ntp config
+  notify: restart ntp
+  template:
+    backup: true
+    src: ntp.conf.j2
+    dest: /etc/ntp.conf
+
+- name: ensure ntp is running
+  service:
+    name: ntp
+    enabled: true
+    state: started
+
+...
diff --git a/roles/ntp/templates/ntp.conf.j2 b/roles/ntp/templates/ntp.conf.j2
new file mode 100644
index 00000000..0aa791d9
--- /dev/null
+++ b/roles/ntp/templates/ntp.conf.j2
@@ -0,0 +1,26 @@
+# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
+
+driftfile /var/lib/ntp/ntp.drift
+
+# Leap seconds definition provided by tzdata
+leapfile /usr/share/zoneinfo/leap-seconds.list
+
+# Specify one or more NTP servers.
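+{# ntp_servers may be a single server (string) or a list; a "pool" line is rendered for each entry #}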
+{% if ntp_servers | type_debug == "AnsibleUnsafeText" %}
+pool {{ ntp_servers }} iburst
+{% else %}
+{% for server in ntp_servers %}
+pool {{ server }} iburst
+{% endfor %}
+{% endif %}
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default kod notrap nomodify nopeer noquery limited
+restrict -6 default kod notrap nomodify nopeer noquery limited
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Needed for adding pool entries
+restrict source notrap nomodify noquery
diff --git a/roles/postfix/defaults/main.yml b/roles/postfix/defaults/main.yml
new file mode 100644
index 00000000..ebef6ec3
--- /dev/null
+++ b/roles/postfix/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+
+postfix_packages:
+  - postfix
+  - bsd-mailx
+
+postfix_mailname: "{{ envsetup_ms_server_name }}"
+postfix_default_email_sender: noreply@{{ postfix_mailname }}
+postfix_email_sender: "{{ envsetup_email_sender | default(postfix_default_email_sender, true) }}"
+postfix_relay_host: "{{ envsetup_email_smtp_server }}"
+postfix_relay_user: "{{ envsetup_email_smtp_user }}"
+postfix_relay_pass: "{{ envsetup_email_smtp_pwd }}"
+postfix_admin: sysadmin@ubicast.eu
+
+...
diff --git a/roles/postfix/handlers/main.yml b/roles/postfix/handlers/main.yml
new file mode 100644
index 00000000..f5519513
--- /dev/null
+++ b/roles/postfix/handlers/main.yml
@@ -0,0 +1,20 @@
+---
+
+- name: postmap sasl
+  command: postmap hash:/etc/postfix/sasl-passwords
+
+- name: postmap generic
+  command: postmap hash:/etc/postfix/generic
+
+- name: postmap virtual
+  command: postmap hash:/etc/postfix/virtual
+
+- name: newaliases
+  command: newaliases
+
+- name: restart postfix
+  service:
+    name: postfix
+    state: restarted
+
+...
diff --git a/roles/postfix/tasks/main.yml b/roles/postfix/tasks/main.yml
new file mode 100644
index 00000000..c6ba15f5
--- /dev/null
+++ b/roles/postfix/tasks/main.yml
@@ -0,0 +1,86 @@
+---
+
+- name: create postfix dir
+  file:
+    path: /etc/postfix
+    state: directory
+
+- name: postfix main config
+  notify: restart postfix
+  template:
+    backup: true
+    src: main.cf.j2
+    dest: /etc/postfix/main.cf
+
+- name: postfix mailname
+  notify: restart postfix
+  copy:
+    backup: true
+    dest: /etc/mailname
+    content: "{{ postfix_mailname }}"
+
+- name: postfix local aliases
+  notify:
+    - newaliases
+    - restart postfix
+  copy:
+    backup: true
+    dest: /etc/aliases
+    content: |
+      devnull: /dev/null
+      clamav: root
+      root: {{ postfix_admin }}
+
+- name: postfix virtual aliases
+  notify:
+    - postmap virtual
+    - restart postfix
+  copy:
+    backup: true
+    dest: /etc/postfix/virtual
+    content: |
+      postmaster@{{ postfix_mailname }} root
+      bounces@{{ postfix_mailname }} root
+      noreply@{{ postfix_mailname }} devnull
+
+- name: postfix generic aliases, sender rewriting
+  notify:
+    - postmap generic
+    - restart postfix
+  copy:
+    backup: true
+    dest: /etc/postfix/generic
+    content: |
+      root@localhost {{ postfix_email_sender }}
+      root@{{ postfix_mailname }} {{ postfix_email_sender }}
+      root@{{ ansible_hostname }} {{ postfix_email_sender }}
+      @{{ postfix_mailname }} {{ postfix_email_sender }}
+      @{{ ansible_hostname }} {{ postfix_email_sender }}
+
+- name: postfix authentication
+  when:
+    - postfix_relay_host | d(false)
+    - postfix_relay_user | d(false)
+    - postfix_relay_pass | d(false)
+  notify:
+    - postmap sasl
+    - restart postfix
+  copy:
+    backup: true
+    dest: /etc/postfix/sasl-passwords
+    content: "{{ postfix_relay_host }} {{ postfix_relay_user }}:{{ postfix_relay_pass }}"
+
+- name: install postfix
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ postfix_packages }}"
+    state: present
+
+- name: ensure postfix is running
+  service:
+    name: postfix
+    enabled: true
+    state: started
+
+...
diff --git a/2.Common_services/1.Postfix/main.cf b/roles/postfix/templates/main.cf.j2
similarity index 66%
rename from 2.Common_services/1.Postfix/main.cf
rename to roles/postfix/templates/main.cf.j2
index e6b1649a..8f3e2463 100644
--- a/2.Common_services/1.Postfix/main.cf
+++ b/roles/postfix/templates/main.cf.j2
@@ -20,13 +20,13 @@ smtp_tls_session_cache_database = btree:${queue_directory}/smtp_scache
 # See /usr/share/doc/postfix/TLS_README.gz in the postfix-doc package for
 # information on enabling SSL in the smtp client.
 
-myhostname = {{ hostname }}
+myhostname = {{ postfix_mailname }}
 alias_maps = hash:/etc/aliases
 alias_database = hash:/etc/aliases
-#virtual_maps = hash:/etc/postfix/virtual
+virtual_maps = hash:/etc/postfix/virtual
 myorigin = /etc/mailname
-mydestination = {{ hostname }}, localdomain, localhost.localdomain, localhost
-relayhost = {{ smtp }}
+mydestination = {{ postfix_mailname }}, {{ ansible_hostname }}, localdomain, localhost.localdomain, localhost
+relayhost = {{ postfix_relay_host }}
 mynetworks = 127.0.0.0/8
 mailbox_size_limit = 0
 recipient_delimiter = +
@@ -35,4 +35,13 @@ inet_protocols = ipv4
 default_transport = smtp
 relay_transport = smtp
 disable_vrfy_command = yes
-#smtp_generic_maps = hash:/etc/postfix/generic
+smtp_generic_maps = hash:/etc/postfix/generic
+notify_classes = bounce
+bounce_notice_recipient = bounces@{{ postfix_mailname }}
+{% if postfix_relay_user %}
+
+# SMTP relay authentication
+smtp_sasl_auth_enable = yes
+smtp_sasl_password_maps = hash:/etc/postfix/sasl-passwords
+smtp_sasl_security_options = noanonymous
+{% endif %}
diff --git a/roles/postgres/defaults/main.yml b/roles/postgres/defaults/main.yml
new file mode 100644
index 00000000..d5716237
--- /dev/null
+++ b/roles/postgres/defaults/main.yml
@@ -0,0 +1,18 @@
+---
+
+postgres_host: "{{ envsetup_db_host }}"
+postgres_port: "{{ envsetup_db_port }}"
+postgres_pwd: "{{ envsetup_db_pg_root_pwd }}"
+
+postgres_firewall_enabled: true
+postgres_ferm_rules_filename: postgres
+postgres_ferm_input_rules:
+  - saddr: "{{ groups['server'] | union(groups['manager']) | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list }}"
+    proto:
+      - tcp
+    dport:
+      - 5432
+postgres_ferm_output_rules: []
+postgres_ferm_global_settings:
+
+...
diff --git a/roles/postgres/tasks/main.yml b/roles/postgres/tasks/main.yml
new file mode 100644
index 00000000..f3e4fd2c
--- /dev/null
+++ b/roles/postgres/tasks/main.yml
@@ -0,0 +1,53 @@
+---
+
+- name: ansible postgresql requirements install
+  when:
+    - postgres_host == "127.0.0.1" or postgres_host == "localhost"
+    - postgres_port == "5432"
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: python3-psycopg2
+    state: present
+
+- name: postgresql install
+  when:
+    - postgres_host == "127.0.0.1" or postgres_host == "localhost"
+    - postgres_port == "5432"
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: postgresql
+    state: present
+
+- name: ensure postgresql is running
+  when:
+    - postgres_host == "127.0.0.1" or postgres_host == "localhost"
+    - postgres_port == "5432"
+  service:
+    name: postgresql
+    state: started
+
+- name: postgresql set superuser password
+  when:
+    - postgres_host == "127.0.0.1" or postgres_host == "localhost"
+    - postgres_port == "5432"
+  become: true
+  become_user: postgres
+  postgresql_user:
+    name: postgres
+    password: "{{ postgres_pwd }}"
+
+# FIREWALL
+
+- name: firewall
+  when: postgres_firewall_enabled
+  vars:
+    ferm_rules_filename: "{{ postgres_ferm_rules_filename }}"
+    ferm_input_rules: "{{ postgres_ferm_input_rules }}"
+    ferm_output_rules: "{{ postgres_ferm_output_rules }}"
+    ferm_global_settings: "{{ postgres_ferm_global_settings }}"
+  include_role:
+    name: ferm
+
+...
diff --git a/roles/proxy/defaults/main.yml b/roles/proxy/defaults/main.yml
new file mode 100644
index 00000000..50957cb1
--- /dev/null
+++ b/roles/proxy/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+
+proxy_http: "{{ envsetup_proxy_http | d() }}"
+proxy_https: "{{ envsetup_proxy_https | d() }}"
+proxy_exclude:
+  - "localhost"
+  - "127.0.0.1"
+  - "::1"
+  - "{{ envsetup_proxy_exclude | d() }}"
+  - "{{ envsetup_ms_server_name | d() }}"
+  - "{{ envsetup_monitor_server_name | d() }}"
+  - "{{ envsetup_cm_server_name | d() }}"
+
+...
diff --git a/roles/proxy/tasks/main.yml b/roles/proxy/tasks/main.yml
new file mode 100644
index 00000000..58885b8c
--- /dev/null
+++ b/roles/proxy/tasks/main.yml
@@ -0,0 +1,57 @@
+---
+
+- name: if proxy settings are set
+  when:
+    - proxy_http | d(false)
+    - proxy_https | d(false)
+  block:
+
+    - name: environment
+      blockinfile:
+        path: /etc/environment
+        create: true
+        marker_begin: BEGIN PROXY
+        marker_end: END PROXY
+        block: |
+          http_proxy={{ proxy_http }}
+          HTTP_PROXY={{ proxy_http }}
+          https_proxy={{ proxy_https }}
+          HTTPS_PROXY={{ proxy_https }}
+          no_proxy={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
+          NO_PROXY={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
+
+    - name: apt
+      copy:
+        dest: /etc/apt/apt.conf.d/proxy
+        content: |
+          Acquire::http::Proxy "{{ proxy_http }}";
+          Acquire::https::Proxy "{{ proxy_https }}";
+
+    - name: wget
+      copy:
+        dest: /etc/wgetrc
+        content: |
+          use_proxy=yes
+          http_proxy={{ proxy_http }}
+          https_proxy={{ proxy_https }}
+          no_proxy={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
+
+    - name: install git
+      apt:
+        force_apt_get: true
+        name: git
+        state: present
+
+    - name: git
+      loop:
+        - name: http.proxy
+          value: "{{ proxy_http }}"
+        - name: https.proxy
+          value: "{{ proxy_https }}"
+      git_config:
+        name: "{{ item.name }}"
+        scope: global
+        value: "{{ item.value }}"
+        state: present
+
+...
diff --git a/roles/python/tasks/main.yml b/roles/python/tasks/main.yml
new file mode 100644
index 00000000..829083ea
--- /dev/null
+++ b/roles/python/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+
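+# Bootstrap Python 3 with raw commands (yum or apt) so regular Ansible modules can run;
+# the "doing"/"pass" echo markers make changed_when report changes accurately.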
+- name: install python3
+  register: python_install
+  changed_when:
+    - "'doing' in python_install.stdout_lines"
+    - "'pass' not in python_install.stdout_lines"
+  loop:
+    - command -v python3 || ( command -v yum && echo doing && yum install -y epel-release && yum install -y python36 ) || echo pass
+    - command -v python3 || ( command -v apt && echo doing && apt update && apt install -y python3-minimal python3-apt ) || echo pass
+  raw: "{{ item }}"
+
+...
diff --git a/roles/repos/defaults/main.yml b/roles/repos/defaults/main.yml
new file mode 100644
index 00000000..cd66c7ee
--- /dev/null
+++ b/roles/repos/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+
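+# repos_prefix defaults to "http://" so the mirror URLs below stay valid when no APT cache URL is set.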
+repos_prefix: "{{ envsetup_apt_cache_url | d('http://', true) }}"
+repos_deb: deb.debian.org
+repos_deb_sec: security.debian.org
+repos_release: "{{ ansible_distribution_release }}"
+
+repos_skyreach_token: "{{ envsetup_skyreach_apt_token }}"
+repos_skyreach_host: "{{ envsetup_skyreach_host }}"
+
+...
diff --git a/roles/repos/handlers/main.yml b/roles/repos/handlers/main.yml
new file mode 100644
index 00000000..376429fa
--- /dev/null
+++ b/roles/repos/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+
+- name: update cache
+  apt:
+    force_apt_get: true
+    update_cache: true
diff --git a/roles/repos/tasks/main.yml b/roles/repos/tasks/main.yml
new file mode 100644
index 00000000..df933606
--- /dev/null
+++ b/roles/repos/tasks/main.yml
@@ -0,0 +1,36 @@
+---
+
+- name: ubuntu apt repo sources list
+  when: ansible_distribution == 'Ubuntu'
+  notify: update cache
+  copy:
+    dest: /etc/apt/sources.list
+    content: |
+      deb {{ repos_prefix }}archive.ubuntu.com/ubuntu/ {{ repos_release }} main restricted universe multiverse
+      deb {{ repos_prefix }}archive.ubuntu.com/ubuntu/ {{ repos_release }}-updates main restricted universe multiverse
+      deb {{ repos_prefix }}archive.ubuntu.com/ubuntu/ {{ repos_release }}-backports main restricted universe multiverse
+      deb {{ repos_prefix }}security.ubuntu.com/ubuntu {{ repos_release }}-security main restricted universe multiverse
+
+- name: debian apt repo sources list
+  when: ansible_distribution == 'Debian'
+  notify: update cache
+  copy:
+    dest: /etc/apt/sources.list
+    content: |
+      deb {{ repos_prefix }}{{ repos_deb }}/debian {{ repos_release }} main contrib non-free
+      deb {{ repos_prefix }}{{ repos_deb }}/debian {{ repos_release }}-updates main contrib non-free
+      deb {{ repos_prefix }}{{ repos_deb_sec }}/debian-security {{ repos_release }}/updates main contrib non-free
+
+- name: add skyreach apt repo key
+  when: repos_skyreach_token | d(false)
+  apt_key:
+    url: https://{{ repos_skyreach_host }}/media/public.gpg
+
+- name: add skyreach apt repo
+  when: repos_skyreach_token | d(false)
+  apt_repository:
+    repo: deb https://{{ repos_skyreach_host }} packaging/apt/{{ repos_skyreach_token }}/
+    filename: skyreach
+    update_cache: true
+
+...
diff --git a/roles/rocketchat/.editorconfig b/roles/rocketchat/.editorconfig
new file mode 100644
index 00000000..3274ec36
--- /dev/null
+++ b/roles/rocketchat/.editorconfig
@@ -0,0 +1,8 @@
+root = true
+
+[*]
+indent_style = space
+indent_size = 2
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
\ No newline at end of file
diff --git a/roles/rocketchat/.gitignore b/roles/rocketchat/.gitignore
new file mode 100644
index 00000000..bee8a64b
--- /dev/null
+++ b/roles/rocketchat/.gitignore
@@ -0,0 +1 @@
+__pycache__
diff --git a/roles/rocketchat/.yamllint b/roles/rocketchat/.yamllint
new file mode 100644
index 00000000..ad0be760
--- /dev/null
+++ b/roles/rocketchat/.yamllint
@@ -0,0 +1,11 @@
+extends: default
+
+rules:
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  line-length: disable
+  truthy: disable
diff --git a/roles/rocketchat/README.md b/roles/rocketchat/README.md
new file mode 100644
index 00000000..91b25ee4
--- /dev/null
+++ b/roles/rocketchat/README.md
@@ -0,0 +1,41 @@
+Rocket.Chat
+===========
+
+Install and update Rocket.Chat (manual installation).
+
+Requirements
+------------
+
+None.
+
+Role Variables
+--------------
+
+See `defaults/main.yml`.
+
+Dependencies
+------------
+
+None.
+
+Example Playbook
+----------------
+
+```yaml
+- hosts: rocketchat_server
+  roles:
+    - role: ubicast.rocketchat
+      rc_root_url: https://chat.example.net
+      rc_version: 1.3.0
+      rc_mail_url: mail.example.net:25
+```
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+@nikaro from @UbicastTeam
diff --git a/roles/rocketchat/defaults/main.yml b/roles/rocketchat/defaults/main.yml
new file mode 100644
index 00000000..36c12edf
--- /dev/null
+++ b/roles/rocketchat/defaults/main.yml
@@ -0,0 +1,60 @@
+---
+
+# package needed by ansible to run the role
+rc_apt_packages_requirements:
+  - apt-transport-https
+  - apt-utils
+  - curl
+  - gnupg2
+
+# mongodb repo
+rc_mongo_repo_key_url: https://www.mongodb.org/static/pgp/server-4.0.asc
+rc_mongo_repo: deb http://repo.mongodb.org/apt/debian stretch/mongodb-org/4.0 main
+rc_mongo_repo_file: mongodb-org-4.0
+
+# nodejs repo
+rc_node_repo_key_url: https://deb.nodesource.com/gpgkey/nodesource.gpg.key
+rc_node_repo: deb https://deb.nodesource.com/node_8.x stretch main
+rc_node_repo_file: nodesource
+
+# apt package requirements
+rc_apt_packages:
+  - build-essential
+  - git
+  - graphicsmagick
+  - mongodb-org
+  - nodejs
+
+# global npm package requirements
+rc_npm_packages:
+  - inherits
+  - n
+
+# nodejs version
+rc_node_version: 12.14.0
+
+# version & archive url
+rc_version: 3.0.4
+rc_url: https://releases.rocket.chat/{{ rc_version }}/download
+
+# where to install
+rc_directory: /usr/local/rocketchat
+
+# user & group
+rc_user: rocketchat
+rc_group: rocketchat
+
+# mongodb settings
+rc_mongo_url: mongodb://localhost:27017/rocketchat?replicaSet=rs01
+rc_mongo_oplog_url: mongodb://localhost:27017/local?replicaSet=rs01
+
+# url by which it is accessible
+rc_root_url: http://localhost:3000
+
+# listen on port
+rc_port: 3000
+
+# email server:port to send notifications
+rc_mail_url: localhost:25
+
+...
diff --git a/roles/rocketchat/handlers/main.yml b/roles/rocketchat/handlers/main.yml
new file mode 100644
index 00000000..f8018206
--- /dev/null
+++ b/roles/rocketchat/handlers/main.yml
@@ -0,0 +1,20 @@
+---
+
+- name: reload systemd
+  systemd:
+    daemon_reload: true
+
+- name: restart mongodb
+  systemd:
+    name: mongod
+    state: restarted
+
+- name: initialize mongodb
+  command: /usr/bin/mongo --eval "printjson(rs.initiate())"
+
+- name: restart rocketchat
+  systemd:
+    name: rocketchat
+    state: restarted
+
+...
diff --git a/roles/rocketchat/meta/.galaxy_install_info b/roles/rocketchat/meta/.galaxy_install_info
new file mode 100644
index 00000000..f8a7385f
--- /dev/null
+++ b/roles/rocketchat/meta/.galaxy_install_info
@@ -0,0 +1,2 @@
+install_date: Thu Aug  8 13:28:09 2019
+version: 1.0.0
diff --git a/roles/rocketchat/meta/main.yml b/roles/rocketchat/meta/main.yml
new file mode 100644
index 00000000..c027d842
--- /dev/null
+++ b/roles/rocketchat/meta/main.yml
@@ -0,0 +1,22 @@
+---
+
+galaxy_info:
+  author: Nicolas Karolak
+  description: Install and update Rocket.Chat (manual installation)
+  company: UbiCast
+
+  license: BSD
+
+  min_ansible_version: 2.4
+
+  platforms:
+    - name: Debian
+      versions:
+        - 9
+
+  galaxy_tags:
+    - system
+
+dependencies: []
+
+...
diff --git a/roles/rocketchat/molecule/default/Dockerfile.j2 b/roles/rocketchat/molecule/default/Dockerfile.j2
new file mode 100644
index 00000000..e6aa95d3
--- /dev/null
+++ b/roles/rocketchat/molecule/default/Dockerfile.j2
@@ -0,0 +1,14 @@
+# Molecule managed
+
+{% if item.registry is defined %}
+FROM {{ item.registry.url }}/{{ item.image }}
+{% else %}
+FROM {{ item.image }}
+{% endif %}
+
+RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \
+    elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash && dnf clean all; \
+    elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
+    elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \
+    elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \
+    elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi
diff --git a/roles/rocketchat/molecule/default/molecule.yml b/roles/rocketchat/molecule/default/molecule.yml
new file mode 100644
index 00000000..638e5ec4
--- /dev/null
+++ b/roles/rocketchat/molecule/default/molecule.yml
@@ -0,0 +1,23 @@
+---
+dependency:
+  name: galaxy
+driver:
+  name: docker
+lint:
+  name: yamllint
+platforms:
+  - name: debian-stretch
+    image: jrei/systemd-debian:9
+    command: /lib/systemd/systemd
+    capabilities:
+      - SYS_ADMIN
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:ro
+provisioner:
+  name: ansible
+  lint:
+    name: ansible-lint
+verifier:
+  name: testinfra
+  lint:
+    name: flake8
diff --git a/roles/rocketchat/molecule/default/playbook.yml b/roles/rocketchat/molecule/default/playbook.yml
new file mode 100644
index 00000000..b2c81c96
--- /dev/null
+++ b/roles/rocketchat/molecule/default/playbook.yml
@@ -0,0 +1,5 @@
+---
+- name: Converge
+  hosts: all
+  roles:
+    - role: ansible-role-rocketchat
diff --git a/roles/rocketchat/molecule/default/tests/test_default.py b/roles/rocketchat/molecule/default/tests/test_default.py
new file mode 100644
index 00000000..0363a9b0
--- /dev/null
+++ b/roles/rocketchat/molecule/default/tests/test_default.py
@@ -0,0 +1,95 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts("all")
+
+
+def test_user(host):
+    u = host.user("rocketchat")
+
+    assert u.exists
+    assert u.name == "rocketchat"
+    assert u.group == "rocketchat"
+    assert u.shell == "/usr/bin/nologin"
+
+
+def test_mongodb_repo(host):
+    f = host.file("/etc/apt/sources.list.d/mongodb-org-4.0.list")
+
+    assert f.exists
+    assert f.contains("repo.mongodb.org")
+
+
+def test_mongodb_pkg(host):
+    p = host.package("mongodb-org")
+
+    assert p.is_installed
+
+
+def test_nodejs_repo(host):
+    f = host.file("/etc/apt/sources.list.d/nodesource.list")
+
+    assert f.exists
+    assert f.contains("deb.nodesource.com")
+
+
+def test_nodejs_pkg(host):
+    p = host.package("nodejs")
+
+    assert p.is_installed
+
+
+def test_mongod_conf(host):
+    f = host.file("/etc/mongod.conf")
+
+    assert f.exists
+    assert f.contains("engine: mmapv1")
+    assert f.contains("replSetName: rs01")
+
+
+def test_mongod_service(host):
+    s = host.service("mongod")
+
+    assert s.is_running
+
+
+def test_mongod_socket(host):
+    s = host.socket("tcp://127.0.0.1:27017")
+
+    assert s.is_listening
+
+
+def test_n_bin(host):
+    f = host.file("/usr/local/bin/n")
+
+    assert f.exists
+
+
+def test_rocketchat_directory(host):
+    f = host.file("/usr/local/rocketchat")
+
+    assert f.exists
+    assert f.is_directory
+    assert f.user == "rocketchat"
+    assert f.group == "rocketchat"
+
+
+def test_rocketchat_service_file(host):
+    f = host.file("/lib/systemd/system/rocketchat.service")
+
+    assert f.exists
+
+
+def test_rocketchat_service(host):
+    s = host.service("rocketchat")
+
+    assert s.is_running
+
+
+def test_rocketchat_socket(host):
+    s = host.socket("tcp://3000")
+
+    assert s.is_listening
diff --git a/roles/rocketchat/requirements.dev.in b/roles/rocketchat/requirements.dev.in
new file mode 100644
index 00000000..d792db47
--- /dev/null
+++ b/roles/rocketchat/requirements.dev.in
@@ -0,0 +1,6 @@
+-r requirements.in
+ansible-lint
+molecule[docker,ec2]
+pip-tools
+pre-commit
+yamllint
diff --git a/roles/rocketchat/requirements.dev.txt b/roles/rocketchat/requirements.dev.txt
new file mode 100644
index 00000000..40279f2b
--- /dev/null
+++ b/roles/rocketchat/requirements.dev.txt
@@ -0,0 +1,109 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+#    pip-compile --output-file=requirements.dev.txt requirements.dev.in
+#
+ansible-lint==4.1.0
+ansible==2.8.2
+anyconfig==0.9.7          # via molecule
+appdirs==1.4.3            # via black
+argparse==1.4.0           # via aws-amicleaner
+arrow==0.14.2             # via jinja2-time
+asn1crypto==0.24.0        # via cryptography
+aspy.yaml==1.3.0          # via pre-commit
+astroid==2.2.5            # via pylint
+atomicwrites==1.3.0       # via pytest
+attrs==19.1.0             # via black, pytest
+aws-amicleaner==0.2.2
+awscli==1.16.199          # via aws-amicleaner
+backcall==0.1.0           # via ipython
+binaryornot==0.4.4        # via cookiecutter
+black==19.3b0
+blessings==1.7            # via aws-amicleaner
+boto3==1.9.189            # via aws-amicleaner, molecule
+boto==2.49.0              # via aws-amicleaner, molecule
+botocore==1.12.189        # via awscli, boto3, s3transfer
+cerberus==1.2             # via molecule
+certifi==2019.6.16        # via requests
+cffi==1.12.3              # via cryptography
+cfgv==2.0.0               # via pre-commit
+chardet==3.0.4            # via binaryornot, requests
+click-completion==0.3.1   # via molecule
+click==6.7                # via black, click-completion, cookiecutter, molecule, python-gilt
+colorama==0.3.9           # via awscli, molecule, python-gilt
+cookiecutter==1.6.0       # via molecule
+cryptography==2.7         # via ansible
+decorator==4.4.0          # via ipython, traitlets
+docker==4.0.2             # via molecule
+docutils==0.14            # via awscli, botocore
+entrypoints==0.3          # via flake8
+fasteners==0.15           # via python-gilt
+flake8==3.7.8             # via molecule
+future==0.17.1            # via cookiecutter
+git-url-parse==1.2.2      # via python-gilt
+identify==1.4.5           # via pre-commit
+idna==2.7                 # via molecule, requests
+importlib-metadata==0.18  # via pluggy, pre-commit, pytest
+ipython-genutils==0.2.0   # via traitlets
+ipython==7.6.1
+isort==4.3.21             # via pylint
+jedi==0.14.1              # via ipython
+jinja2-time==0.2.0        # via cookiecutter
+jinja2==2.10              # via ansible, click-completion, cookiecutter, jinja2-time, molecule
+jmespath==0.9.4           # via boto3, botocore
+lazy-object-proxy==1.4.1  # via astroid
+markupsafe==1.1.1         # via jinja2
+mccabe==0.6.1             # via flake8, pylint
+molecule[docker,ec2]==2.20.2
+monotonic==1.5            # via fasteners
+more-itertools==7.1.0     # via pytest
+nodeenv==1.3.3            # via pre-commit
+packaging==19.0           # via pytest
+parso==0.5.1              # via jedi
+pathspec==0.5.9           # via yamllint
+pbr==5.1.1                # via git-url-parse, molecule, python-gilt
+pexpect==4.6.0            # via ipython, molecule
+pickleshare==0.7.5        # via ipython
+pluggy==0.12.0            # via pytest
+poyo==0.4.2               # via cookiecutter
+pre-commit==1.17.0
+prettytable==0.7.2        # via aws-amicleaner
+prompt-toolkit==2.0.9     # via ipython
+psutil==5.4.6             # via molecule
+ptyprocess==0.6.0         # via pexpect
+py==1.8.0                 # via pytest
+pyasn1==0.4.5             # via rsa
+pycodestyle==2.5.0        # via flake8
+pycparser==2.19           # via cffi
+pyflakes==2.1.1           # via flake8
+pygments==2.4.2           # via ipython
+pylint==2.3.1
+pyparsing==2.4.0          # via packaging
+pytest==5.0.1             # via testinfra
+python-dateutil==2.8.0    # via arrow, botocore
+python-gilt==1.2.1        # via molecule
+pyyaml==3.13              # via ansible, ansible-lint, aspy.yaml, awscli, molecule, pre-commit, python-gilt, yamllint
+requests==2.22.0          # via cookiecutter, docker
+rsa==3.4.2                # via awscli
+ruamel.yaml==0.15.100     # via ansible-lint
+s3transfer==0.2.1         # via awscli, boto3
+sh==1.12.14               # via molecule, python-gilt
+six==1.11.0               # via ansible-lint, astroid, blessings, cfgv, click-completion, cryptography, docker, fasteners, molecule, packaging, pre-commit, prompt-toolkit, python-dateutil, testinfra, traitlets, websocket-client
+tabulate==0.8.2           # via molecule
+testinfra==3.0.5          # via molecule
+toml==0.10.0              # via black, pre-commit
+traitlets==4.3.2          # via ipython
+tree-format==0.1.2        # via molecule
+typed-ast==1.4.0          # via astroid
+urllib3==1.25.3           # via botocore, requests
+virtualenv==16.6.2        # via pre-commit
+wcwidth==0.1.7            # via prompt-toolkit, pytest
+websocket-client==0.56.0  # via docker
+whichcraft==0.6.0         # via cookiecutter
+wrapt==1.11.2             # via astroid
+yamllint==1.16.0
+zipp==0.5.2               # via importlib-metadata
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools==41.0.1        # via ipython
diff --git a/roles/rocketchat/requirements.in b/roles/rocketchat/requirements.in
new file mode 100644
index 00000000..5bc8cb22
--- /dev/null
+++ b/roles/rocketchat/requirements.in
@@ -0,0 +1 @@
+ansible ~= 2.8.0
diff --git a/roles/rocketchat/requirements.txt b/roles/rocketchat/requirements.txt
new file mode 100644
index 00000000..f10e7ead
--- /dev/null
+++ b/roles/rocketchat/requirements.txt
@@ -0,0 +1,15 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+#    pip-compile --output-file=requirements.txt requirements.in
+#
+ansible==2.8.2
+asn1crypto==0.24.0        # via cryptography
+cffi==1.12.3              # via cryptography
+cryptography==2.7         # via ansible
+jinja2==2.10.1            # via ansible
+markupsafe==1.1.1         # via jinja2
+pycparser==2.19           # via cffi
+pyyaml==5.1.1             # via ansible
+six==1.12.0               # via cryptography
diff --git a/roles/rocketchat/tasks/main.yml b/roles/rocketchat/tasks/main.yml
new file mode 100644
index 00000000..0002bf8b
--- /dev/null
+++ b/roles/rocketchat/tasks/main.yml
@@ -0,0 +1,162 @@
+---
+
+- name: add role requirement
+  apt:
+    force_apt_get: true
+    name: "{{ rc_apt_packages_requirements }}"
+    update_cache: true
+    cache_valid_time: 3600
+    state: present
+
+- name: add group
+  group:
+    name: "{{ rc_group }}"
+    system: true
+    state: present
+
+- name: add user
+  user:
+    name: "{{ rc_user }}"
+    group: "{{ rc_group }}"
+    home: /var/run/{{ rc_user }}
+    shell: /usr/bin/nologin
+    system: true
+    state: present
+
+- name: add mongodb repository key
+  apt_key:
+    url: "{{ rc_mongo_repo_key_url }}"
+    state: present
+
+- name: add mongodb repository
+  apt_repository:
+    repo: "{{ rc_mongo_repo }}"
+    filename: "{{ rc_mongo_repo_file }}"
+    state: present
+
+- name: add nodesource repository key
+  apt_key:
+    url: "{{ rc_node_repo_key_url }}"
+    state: present
+
+- name: add nodesource repository
+  apt_repository:
+    repo: "{{ rc_node_repo }}"
+    filename: "{{ rc_node_repo_file }}"
+    state: present
+
+- name: install system requirements
+  apt:
+    force_apt_get: true
+    name: "{{ rc_apt_packages }}"
+    update_cache: true
+    cache_valid_time: 3600
+    state: present
+
+- name: set mongodb engine
+  notify: restart mongodb
+  lineinfile:
+    path: /etc/mongod.conf
+    regexp: '^#?  engine:'
+    line: '  engine: mmapv1'
+
+- name: set mongodb replset name
+  notify:
+    - restart mongodb
+    - initialize mongodb
+  blockinfile:
+    path: /etc/mongod.conf
+    block: |
+      replication:
+        replSetName: rs01
+
+- name: install global npm requirements
+  changed_when: false  # npm module is not idempotent
+  loop: "{{ rc_npm_packages }}"
+  npm:
+    name: "{{ item }}"
+    global: true
+    state: present
+
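+# The nodesource package provides npm; "n" then pins the Node.js version used to run Rocket.Chat.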
+- name: fix nodejs version
+  command: n {{ rc_node_version }}
+  args:
+    creates: /usr/local/n/versions/node/{{ rc_node_version }}
+
+- name: download archive
+  register: rc_dl_archive
+  get_url:
+    url: "{{ rc_url }}"
+    dest: /var/cache/rocketchat_{{ rc_version }}.tgz
+
+- name: create temp extract directory
+  when: rc_dl_archive is changed
+  file:
+    path: /var/cache/rc_{{ rc_version }}
+    state: directory
+
+- name: extract archive content
+  when: rc_dl_archive is changed
+  unarchive:
+    src: /var/cache/rocketchat_{{ rc_version }}.tgz
+    dest: /var/cache/rc_{{ rc_version }}
+    remote_src: true
+
+- name: install npm requirements
+  when: rc_dl_archive is changed
+  npm:
+    path: /var/cache/rc_{{ rc_version }}/bundle/programs/server
+    state: present
+
+- name: check if service exists
+  register: rc_service
+  stat:
+    path: /lib/systemd/system/rocketchat.service
+
+- name: stop rocketchat
+  when:
+    - rc_service.stat.exists
+    - rc_dl_archive is changed
+  systemd:
+    name: rocketchat
+    state: stopped
+
+- name: remove current application files
+  when: rc_dl_archive is changed
+  file:
+    path: "{{ rc_directory }}"
+    state: absent
+
+- name: copy application files
+  when: rc_dl_archive is changed
+  notify: restart rocketchat
+  copy:
+    remote_src: true
+    src: /var/cache/rc_{{ rc_version }}/bundle/
+    dest: "{{ rc_directory }}"
+    owner: "{{ rc_user }}"
+    group: "{{ rc_group }}"
+
+- name: remove cache files
+  when: rc_dl_archive is changed
+  file:
+    path: /var/cache/rc_{{ rc_version }}
+    state: absent
+
+- name: deploy service file
+  notify:
+    - reload systemd
+    - restart rocketchat
+  template:
+    src: rocketchat.service.j2
+    dest: /lib/systemd/system/rocketchat.service
+
+- meta: flush_handlers
+
+- name: enable rocketchat service
+  systemd:
+    name: rocketchat
+    enabled: true
+    state: started
+
+...
diff --git a/roles/rocketchat/templates/rocketchat.service.j2 b/roles/rocketchat/templates/rocketchat.service.j2
new file mode 100644
index 00000000..850201db
--- /dev/null
+++ b/roles/rocketchat/templates/rocketchat.service.j2
@@ -0,0 +1,15 @@
+[Unit]
+Description=The Rocket.Chat server
+After=network.target remote-fs.target nss-lookup.target nginx.target mongod.target
+
+[Service]
+ExecStartPre=/bin/sleep 5
+ExecStart=/usr/local/bin/node {{ rc_directory }}/main.js
+StandardOutput=syslog
+StandardError=syslog
+SyslogIdentifier=rocketchat
+User={{ rc_user }}
+Environment=MONGO_URL={{ rc_mongo_url }} MONGO_OPLOG_URL={{ rc_mongo_oplog_url }} ROOT_URL={{ rc_root_url }} PORT={{ rc_port }} MAIL_URL={{ rc_mail_url }}
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/server/defaults/main.yml b/roles/server/defaults/main.yml
new file mode 100644
index 00000000..67821f7b
--- /dev/null
+++ b/roles/server/defaults/main.yml
@@ -0,0 +1,60 @@
+---
+
+server_packages:
+  - ubicast-mediaserver
+  - ubicast-mediaserver-runtime
+  - celerity-utils
+
+server_mail: dev-mediaserver@ubicast.eu
+server_default_email_sender: "noreply@{{ server_hostname }}"
+server_email_sender: "{{ envsetup_email_sender | default(server_default_email_sender, true) }}"
+
+server_id: "{{ envsetup_ms_id }}"
+server_instance_name: "{{ server_id | regex_replace('^f\\d+_(.*)$', '\\1') }}"
+server_hostname: "{{ envsetup_ms_server_name }}"
+server_campusmanager: "{{ envsetup_cm_server_name | d('mirismanager.' + server_hostname) }}"
+server_api_key: "{{ envsetup_ms_api_key }}"
+server_superuser_passwd: "{{ envsetup_ms_superuser_pwd }}"
+server_admin_passwd: "{{ envsetup_ms_admin_pwd }}"
+server_instances:
+  - name: "{{ server_instance_name }}"
+    ms_server_name: "{{ server_hostname }}"
+    ms_id: "{{ server_id }}"
+    ms_api_key: "{{ server_api_key }}"
+    cm_server_name: "{{ server_campusmanager }}"
+    ms_superuser_pwd: "{{ server_superuser_passwd }}"
+    ms_admin_pwd: "{{ server_admin_passwd }}"
+
+server_celerity_signing_key: "{{ envsetup_celerity_signing_key }}"
+
+server_wowza_live_pwd: "{{ envsetup_wowza_live_pwd | d() }}"
+
+server_fail2ban_enabled: "{{ envsetup_fail2ban_enabled | d(true) }}"
+server_f2b_filter:
+  name: server
+  content: |
+    [INCLUDES]
+    before = common.conf
+    [Definition]
+    failregex = INFO Wrong credentials given to login\. IP: <HOST>, username: \S+\.$
+                INFO Wrong crendentials given to login\. IP: <HOST>, username: \S+\.$
+    ignoreregex =
+server_f2b_jail:
+  name: server
+  content: |
+    [server]
+    logpath = /home/*/mstmp/mediaserver.log
+    enabled = {% if server_fail2ban_enabled | bool %}true{% else %}false{% endif %}
+
+server_firewall_enabled: true
+server_ferm_rules_filename: server
+server_ferm_input_rules:
+  - proto:
+      - tcp
+    dport:
+      - 80
+      - 443
+server_ferm_output_rules: []
+server_ferm_global_settings:
+
+...
diff --git a/roles/server/handlers/main.yml b/roles/server/handlers/main.yml
new file mode 100644
index 00000000..a52d49a0
--- /dev/null
+++ b/roles/server/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+
+- name: mscontroller restart
+  command:
+    cmd: mscontroller.py restart
+
+- name: restart nginx
+  systemd:
+    name: nginx
+    state: restarted
+
+...
diff --git a/roles/server/tasks/main.yml b/roles/server/tasks/main.yml
new file mode 100644
index 00000000..6d8d96a7
--- /dev/null
+++ b/roles/server/tasks/main.yml
@@ -0,0 +1,128 @@
+---
+
+- name: mediaserver install
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ server_packages }}"
+
+- name: resolve domain name to localhost
+  when: not in_docker
+  notify: restart nginx
+  loop: "{{ server_instances }}"
+  lineinfile:
+    path: /etc/hosts
+    line: '127.0.1.1 {{ item.ms_server_name }}'
+    backup: true
+
+- name: create instances
+  loop: "{{ server_instances }}"
+  environment:
+    MS_ID: "{{ item.ms_id }}"
+    MS_SERVER_NAME: "{{ item.ms_server_name }}"
+    MS_API_KEY: "{{ item.ms_api_key }}"
+    CM_SERVER_NAME: "{{ item.cm_server_name }}"
+    MS_SUPERUSER_PWD: "{{ item.ms_superuser_pwd }}"
+    MS_ADMIN_PWD: "{{ item.ms_admin_pwd }}"
+  command:
+    cmd: msinstaller.py {{ item.name }} --no-input
+    creates: /etc/nginx/sites-available/mediaserver-{{ item.name }}.conf
+
+- name: configure email sender address
+  notify: mscontroller restart
+  lineinfile:
+    path: /etc/mediaserver/msconf.py
+    backup: true
+    regexp: '^#? ?DEFAULT_FROM_EMAIL.*'
+    line: "DEFAULT_FROM_EMAIL = '{{ server_email_sender }}'"
+    validate: python3 -m py_compile %s
+
+- name: configure domain name in nginx conf
+  notify: restart nginx
+  loop: "{{ server_instances }}"
+  replace:
+    path: /etc/nginx/sites-available/mediaserver-{{ item.name }}.conf
+    regexp: '^(\s*server_name).*;$'
+    replace: '\1 {{ item.ms_server_name }};'
+    backup: true
+
+- name: configure domain name in database
+  loop: "{{ server_instances }}"
+  shell:
+    cmd: |
+      python3 /usr/lib/python3/dist-packages/mediaserver/scripts/mssiteconfig.py {{ item.name }} site_url=https://{{ item.ms_server_name }} ;
+      mscontroller.py restart -u {{ item.name }} ;
+      touch /etc/mediaserver/.{{ item.ms_server_name }}.mssiteconfig.log ;
+    creates: /etc/mediaserver/.{{ item.ms_server_name }}.mssiteconfig.log
+
+- name: reset service resources
+  loop: "{{ server_instances }}"
+  shell:
+    cmd: |
+      python3 /usr/lib/python3/dist-packages/mediaserver/scripts/reset_service_resources.py {{ item.name }} local ;
+      mscontroller.py restart -u {{ item.name }} ;
+      touch /etc/mediaserver/.{{ item.ms_server_name }}.reset_service_resources.log ;
+    creates: /etc/mediaserver/.{{ item.ms_server_name }}.reset_service_resources.log
+
+- name: live password configuration
+  when: server_wowza_live_pwd | d(false)
+  lineinfile:
+    path: /etc/mediaserver/lives_conf.py
+    create: true
+    backup: true
+    regexp: '^RTMP_PWD =.*$'
+    line: "RTMP_PWD = '{{ server_wowza_live_pwd }}'"
+    validate: python3 -m py_compile %s
+
+- name: ensure mediaserver is running
+  service:
+    name: mediaserver
+    enabled: true
+    state: started
+
+# SYNCHRONIZE
+
+- name: sync all mediaservers
+  when: groups['server'] | length > 1
+  block:
+
+    - name: save config of first mediaserver
+      when: inventory_hostname == groups['server'][0]
+      register: server_primary_config
+      loop:
+        - /etc/passwd
+        - /etc/shadow
+        - /etc/group
+      slurp:
+        path: "{{ item }}"
+
+    - name: deploy saved config
+      when: inventory_hostname != groups['server'][0]
+      loop: "{{ hostvars[groups['server'][0]].c.results }}"
+      copy:
+        dest: "{{ item.source }}"
+        content: "{{ item.content | b64decode }}"
+
+# FAIL2BAN
+
+- name: fail2ban
+  when: server_fail2ban_enabled
+  vars:
+    f2b_filter: "{{ server_f2b_filter }}"
+    f2b_jail: "{{ server_f2b_jail }}"
+  include_role:
+    name: fail2ban
+
+# FIREWALL
+
+- name: firewall
+  when: server_firewall_enabled
+  vars:
+    ferm_rules_filename: "{{ server_ferm_rules_filename }}"
+    ferm_input_rules: "{{ server_ferm_input_rules }}"
+    ferm_output_rules: "{{ server_ferm_output_rules }}"
+    ferm_global_settings: "{{ server_ferm_global_settings }}"
+  include_role:
+    name: ferm
+
+...
diff --git a/roles/server/templates/celerity-config.py.j2 b/roles/server/templates/celerity-config.py.j2
new file mode 100644
index 00000000..6f18f85c
--- /dev/null
+++ b/roles/server/templates/celerity-config.py.j2
@@ -0,0 +1,12 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+SIGNING_KEY = '{{ server_celerity_signing_key }}'
+SERVER_URL = 'https://{{ server_hostname }}:6200'
+
+# WORKERS_COUNT = 2
+
+# MediaServer interactions
+MEDIASERVERS = {
+    '{{ server_id }}': {'url': 'https://{{ server_hostname }}', 'api_key': '{{ server_api_key }}'},
+}
diff --git a/roles/sysutils/defaults/main.yml b/roles/sysutils/defaults/main.yml
new file mode 100644
index 00000000..b49c31b9
--- /dev/null
+++ b/roles/sysutils/defaults/main.yml
@@ -0,0 +1,50 @@
+---
+
+sysutils_packages:
+  - bash-completion
+  - bmon
+  - cockpit
+  - curl
+  - git
+  - host
+  - htop
+  - ifupdown
+  - iotop
+  - ipython3
+  - lm-sensors
+  - make
+  - net-tools
+  - netcat
+  - nfs-client
+  - openssh-server
+  - pciutils
+  - python3-psutil
+  - python3-openssl
+  - python3-requests
+  - python3-spf
+  - pwgen
+  - rsync
+  - smartmontools
+  - sudo
+  - ubicast-config
+  - unattended-upgrades
+  - vim
+
+sysutils_firewall_enabled: true
+sysutils_ferm_rules_filename: sysutils
+sysutils_ferm_input_rules:
+  # munin
+  - saddr: "{{ groups['monitor'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list }}"
+    proto:
+      - tcp
+    dport:
+      - 4949
+  # cockpit
+  - proto:
+      - tcp
+    dport:
+      - 9090
+sysutils_ferm_output_rules: []
+sysutils_ferm_global_settings:
+
+...
diff --git a/roles/sysutils/tasks/main.yml b/roles/sysutils/tasks/main.yml
new file mode 100644
index 00000000..508a77c1
--- /dev/null
+++ b/roles/sysutils/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+
+- name: install system utilities
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ sysutils_packages }}"
+
+# FIREWALL
+
+- name: firewall
+  when: sysutils_firewall_enabled
+  vars:
+    ferm_rules_filename: "{{ sysutils_ferm_rules_filename }}"
+    ferm_input_rules: "{{ sysutils_ferm_input_rules }}"
+    ferm_output_rules: "{{ sysutils_ferm_output_rules }}"
+    ferm_global_settings: "{{ sysutils_ferm_global_settings }}"
+  include_role:
+    name: ferm
+
+...
diff --git a/roles/users/defaults/main.yml b/roles/users/defaults/main.yml
new file mode 100644
index 00000000..73b625ba
--- /dev/null
+++ b/roles/users/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+
+users:
+  - name: ubicast
+    passwd: "{{ envsetup_shell_ubicast_pwd | password_hash('sha512', 'envsetup') }}"
+  - name: admin
+    passwd: "{{ envsetup_shell_admin_pwd | password_hash('sha512', 'envsetup') }}"
+
+users_ssh_authorized_keys:
+  - "{{ lookup('file', 'files/ubicast_support.pub') }}"
+  - "{{ envsetup_ssh_allowed_keys }}"
+
+users_root_change: true
+
+...
diff --git a/1.Base/1.Utilities/bashrc b/roles/users/files/.bashrc
similarity index 100%
rename from 1.Base/1.Utilities/bashrc
rename to roles/users/files/.bashrc
diff --git a/1.Base/1.Utilities/vimrc b/roles/users/files/.vimrc
similarity index 100%
rename from 1.Base/1.Utilities/vimrc
rename to roles/users/files/.vimrc
diff --git a/1.Base/2.ubicast_shell_access/ubicast_support.pub b/roles/users/files/ubicast_support.pub
similarity index 100%
rename from 1.Base/2.ubicast_shell_access/ubicast_support.pub
rename to roles/users/files/ubicast_support.pub
diff --git a/roles/users/handlers/main.yml b/roles/users/handlers/main.yml
new file mode 100644
index 00000000..fa217d14
--- /dev/null
+++ b/roles/users/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+
+- name: restart sshd
+  service:
+    name: sshd
+    state: restarted
+
+...
diff --git a/roles/users/tasks/main.yml b/roles/users/tasks/main.yml
new file mode 100644
index 00000000..6c214dfa
--- /dev/null
+++ b/roles/users/tasks/main.yml
@@ -0,0 +1,66 @@
+---
+
+- name: create users groups
+  loop: "{{ users }}"
+  group:
+    name: "{{ item.name }}"
+    state: present
+
+- name: create users
+  loop: "{{ users }}"
+  user:
+    name: "{{ item.name }}"
+    group: "{{ item.name }}"
+    shell: /bin/bash
+    generate_ssh_key: true
+    ssh_key_type: ed25519
+    ssh_key_file: .ssh/id_ed25519
+    append: true
+    groups:
+      - sudo
+    state: present
+
+- name: set users passwords
+  loop: "{{ users }}"
+  user:
+    name: "{{ item.name }}"
+    password: "{{ item.passwd }}"
+    update_password: always
+
+- name: copy .bashrc
+  loop: "{{ users }}"
+  copy:
+    src: .bashrc
+    dest: ~{{ item.name }}/.bashrc
+
+- name: copy .vimrc
+  loop: "{{ users }}"
+  copy:
+    src: .vimrc
+    dest: ~{{ item.name }}/.vimrc
+
+- name: copy .bashrc for root
+  when: users_root_change
+  copy:
+    src: .bashrc
+    dest: ~root/.bashrc
+
+- name: copy .vimrc for root
+  when: users_root_change
+  copy:
+    src: .vimrc
+    dest: ~root/.vimrc
+
+- name: set users allowed ssh keys
+  loop: "{{ users | product(users_ssh_authorized_keys) | list }}"
+  authorized_key:
+    user: "{{ item[0].name }}"
+    key: "{{ item[1] }}"
+
+- name: set root allowed ssh keys
+  loop: "{{ users_ssh_authorized_keys }}"
+  authorized_key:
+    user: root
+    key: "{{ item }}"
+
+...
diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml
new file mode 100644
index 00000000..00acdfc7
--- /dev/null
+++ b/roles/vault/defaults/main.yml
@@ -0,0 +1,58 @@
+---
+
+mv_packages:
+  - git
+  - rsync
+
+mv_repo_url: https://mirismanager.ubicast.eu/github.com/laurent22/rsync-time-backup
+mv_repo_path: /usr/local/share/rsync-time-backup
+mv_repo_update: false
+
+mv_script_path: /usr/local/sbin/rsync_tmbackup
+
+mv_mailer_enabled: true
+mv_mailer_script_path: /usr/local/sbin/systemd-mailer
+mv_mailer_from: backup <backup@{{ ansible_fqdn }}>
+mv_mailer_to: sysadmin+backup@ubicast.eu
+mv_mailer_service_name: status-email-admin
+mv_mailer_service_path: /etc/systemd/system/{{ mv_mailer_service_name }}@.service
+
+mv_base_dir: /backup
+
+# mv_backup defaults
+mv_backup_name: self
+mv_backup_timer_calendar: "*-*-* 22:00:00"
+mv_rsync_flags: -D --numeric-ids --links --hard-links --one-file-system --itemize-changes --times --recursive --perms --owner --group --stats --human-readable --timeout 30
+mv_retention_strategy: "1:1 30:0"
+mv_src_dir: /etc
+mv_dest_dir: "{{ mv_base_dir }}/{{ mv_backup_name }}"
+mv_exclude_list_name: excluded_patterns
+mv_exclude_list_items:
+  - "- .zfs/"
+  - "- *.log"
+  - "- *.pyc"
+  - "- *.swp"
+  - "- *.pid"
+  - "- *chunked_*/"
+  - "- __pycache__/"
+  - "- apt-cacher-ng/"
+  - "- */msinstance-disabled/"
+  - "- *.lock"
+  - "- .nfs*"
+  - "- *.m3u8"
+  - "- *.ts"
+
+# default backup list
+mv_backup:
+  - name: "{{ mv_backup_name }}"
+    timer_calendar: "{{ mv_backup_timer_calendar }}"
+    rsync_flags: "{{ mv_rsync_flags }}"
+    retention_strategy: "{{ mv_retention_strategy }}"
+    src_host:
+    src_dir: "{{ mv_src_dir }}"
+    dest_host:
+    dest_dir: "{{ mv_dest_dir }}"
+    exclude_list_name: "{{ mv_exclude_list_name }}"
+    exclude_list_items: "{{ mv_exclude_list_items }}"
+
+...
diff --git a/roles/vault/handlers/main.yml b/roles/vault/handlers/main.yml
new file mode 100644
index 00000000..bd9ee1c5
--- /dev/null
+++ b/roles/vault/handlers/main.yml
@@ -0,0 +1,7 @@
+---
+
+- name: systemd daemon reload
+  systemd:
+    daemon_reload: true
+
+...
diff --git a/roles/vault/tasks/main.yml b/roles/vault/tasks/main.yml
new file mode 100644
index 00000000..cd9eeabe
--- /dev/null
+++ b/roles/vault/tasks/main.yml
@@ -0,0 +1,104 @@
+---
+
+- name: install packages
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: "{{ mv_packages }}"
+    state: present
+
+- name: generate ssh key pair
+  user:
+    name: root
+    generate_ssh_key: true
+    ssh_key_type: ed25519
+    ssh_key_file: .ssh/id_ed25519
+
+- name: clone repo
+  when: not ansible_check_mode
+  git:
+    repo: "{{ mv_repo_url }}"
+    dest: "{{ mv_repo_path }}"
+    update: "{{ mv_repo_update }}"
+    version: master
+
+- name: symlink script
+  when: not ansible_check_mode
+  file:
+    src: "{{ mv_repo_path }}/rsync_tmbackup.sh"
+    path: "{{ mv_script_path }}"
+    state: link
+
+- name: create mailer script
+  when: mv_mailer_enabled
+  template:
+    src: systemd-mailer-script.j2
+    dest: "{{ mv_mailer_script_path }}"
+    mode: 0755
+
+- name: create mailer service
+  when: mv_mailer_enabled
+  notify: systemd daemon reload
+  template:
+    src: systemd-mailer-service.j2
+    dest: "{{ mv_mailer_service_path }}"
+
+- name: check base directory exists
+  register: mv_base_dir_check
+  stat:
+    path: "{{ mv_base_dir }}"
+
+- name: create backup base directory
+  when: not mv_base_dir_check.stat.exists
+  file:
+    path: "{{ mv_base_dir }}"
+    state: directory
+
+- name: create exclude list file
+  loop: "{{ mv_backup }}"
+  copy:
+    dest: "{{ mv_base_dir }}/{{ item['exclude_list_name'] | default(mv_exclude_list_name) }}.txt"
+    content: |
+      {% for exclude_item in item['exclude_list_items'] | default(mv_exclude_list_items) %}
+      {{ exclude_item }}
+      {% endfor %}
+
+- name: create backup destination directory
+  loop: "{{ mv_backup }}"
+  file:
+    path: "{{ item['dest_dir'] | default(mv_base_dir + '/' + item['name']) }}"
+    mode: 0750
+    state: directory
+
+- name: create backup marker
+  loop: "{{ mv_backup }}"
+  file:
+    path: "{{ item['dest_dir'] | default(mv_base_dir + '/' + item['name']) }}/backup.marker"
+    access_time: preserve
+    modification_time: preserve
+    state: touch
+
+- name: add backup service
+  notify: systemd daemon reload
+  loop: "{{ mv_backup }}"
+  template:
+    src: systemd-backup-service.j2
+    dest: /etc/systemd/system/backup-{{ item['name'] }}.service
+
+- name: add backup timer
+  notify: systemd daemon reload
+  loop: "{{ mv_backup }}"
+  template:
+    src: systemd-backup-timer.j2
+    dest: /etc/systemd/system/backup-{{ item['name'] }}.timer
+
+- name: enable backup timer
+  loop: "{{ mv_backup }}"
+  systemd:
+    name: backup-{{ item['name'] }}.timer
+    daemon_reload: true
+    enabled: true
+    masked: false
+    state: started
+
+...
diff --git a/roles/vault/templates/systemd-backup-service.j2 b/roles/vault/templates/systemd-backup-service.j2
new file mode 100644
index 00000000..ae3b88e1
--- /dev/null
+++ b/roles/vault/templates/systemd-backup-service.j2
@@ -0,0 +1,9 @@
+[Unit]
+Description=backup-{{ item['name'] }}
+
+[Service]
+Type=oneshot
+ExecStart={{ mv_script_path }} --rsync-set-flags "{% if item['rsync_flags'] is defined %}{{ item['rsync_flags'] }}{% else %}{{ mv_rsync_flags }}{% endif %}" --strategy "{% if item['retention_strategy'] is defined %}{{ item['retention_strategy'] }}{% else %}{{ mv_retention_strategy }}{% endif %}" {% if item['src_host'] is defined and item['src_host'] %}{{ item['src_host'] }}:{% endif %}{% if item['src_dir'] is defined %}{{ item['src_dir'] }}{% else %}{{ mv_src_dir }}{% endif %} {% if item['dest_host'] is defined and item['dest_host'] %}{{ item['dest_host'] }}:{% endif %}{% if item['dest_dir'] is defined %}{{ item['dest_dir'] }}{% else %}{{ mv_base_dir }}/{{ item['name'] }}{% endif %} {{ mv_base_dir }}/{% if item['exclude_list_name'] is defined %}{{ item['exclude_list_name'] }}{% else %}{{ mv_exclude_list_name }}{% endif %}.txt
+{% if mv_mailer_enabled %}
+OnFailure={{ mv_mailer_service_name }}@%n.service
+{% endif %}
diff --git a/roles/vault/templates/systemd-backup-timer.j2 b/roles/vault/templates/systemd-backup-timer.j2
new file mode 100644
index 00000000..9ee746a1
--- /dev/null
+++ b/roles/vault/templates/systemd-backup-timer.j2
@@ -0,0 +1,9 @@
+[Unit]
+Description=backup-{{ item['name'] }}-timer
+
+[Timer]
+OnCalendar={% if item['timer_calendar'] is defined %}{{ item['timer_calendar'] }}{% else %}{{ mv_backup_timer_calendar }}{% endif %}
+
+
+[Install]
+WantedBy=timers.target
diff --git a/roles/vault/templates/systemd-mailer-script.j2 b/roles/vault/templates/systemd-mailer-script.j2
new file mode 100644
index 00000000..c14375d2
--- /dev/null
+++ b/roles/vault/templates/systemd-mailer-script.j2
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+sendmail -t <<ERRMAIL
+To: $1
+From: {{ mv_mailer_from }}
+Subject: $2
+Content-Transfer-Encoding: 8bit
+Content-Type: text/plain; charset=UTF-8
+
+$(systemctl status --full "$2")
+ERRMAIL
diff --git a/roles/vault/templates/systemd-mailer-service.j2 b/roles/vault/templates/systemd-mailer-service.j2
new file mode 100644
index 00000000..4feb61d5
--- /dev/null
+++ b/roles/vault/templates/systemd-mailer-service.j2
@@ -0,0 +1,8 @@
+[Unit]
+Description=status email for %i to {{ mv_mailer_to }}
+
+[Service]
+Type=oneshot
+ExecStart={{ mv_mailer_script_path }} {{ mv_mailer_to }} %i
+User=nobody
+Group=systemd-journal
diff --git a/roles/worker/defaults/main.yml b/roles/worker/defaults/main.yml
new file mode 100644
index 00000000..9c9796db
--- /dev/null
+++ b/roles/worker/defaults/main.yml
@@ -0,0 +1,31 @@
+---
+
+worker_celerity_signing_key: "{{ envsetup_celerity_signing_key }}"
+worker_celerity_server: "{{ envsetup_celerity_server | d(envsetup_ms_server_name, true) }}"
+
+worker_workers_count: 2
+
+worker_ms_id: "{{ envsetup_ms_id }}"
+worker_ms_api_key: "{{ envsetup_ms_api_key }}"
+worker_ms_hostname: "{{ envsetup_ms_server_name }}"
+worker_ms_instances:
+  - ms_id: "{{ worker_ms_id }}"
+    ms_api_key: "{{ worker_ms_api_key }}"
+    ms_server_name: "{{ worker_ms_hostname }}"
+
+worker_firewall_enabled: true
+worker_ferm_rules_filename: worker
+worker_ferm_input_rules: []
+worker_ferm_output_rules:
+  - proto:
+      - tcp
+    dport:
+      - 80
+      - 443
+  - proto:
+      - tcp
+    dport:
+      - 6200
+worker_ferm_global_settings:
+
+...
diff --git a/roles/worker/handlers/main.yml b/roles/worker/handlers/main.yml
new file mode 100644
index 00000000..d06d284e
--- /dev/null
+++ b/roles/worker/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+
+- name: restart celerity-workers
+  service:
+    name: celerity-workers
+    state: restarted
+
+...
diff --git a/roles/worker/tasks/main.yml b/roles/worker/tasks/main.yml
new file mode 100644
index 00000000..b23cfcf1
--- /dev/null
+++ b/roles/worker/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+
+- name: install celerity worker
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    name: celerity-workers
+
+- name: config celerity worker
+  notify: restart celerity-workers
+  template:
+    src: celerity-config.py.j2
+    dest: /etc/celerity/config.py
+
+- name: ensure celerity worker is running
+  service:
+    name: celerity-workers
+    enabled: true
+    state: started
+
+# FIREWALL
+
+- name: firewall
+  when: worker_firewall_enabled
+  vars:
+    ferm_rules_filename: "{{ worker_ferm_rules_filename }}"
+    ferm_input_rules: "{{ worker_ferm_input_rules }}"
+    ferm_output_rules: "{{ worker_ferm_output_rules }}"
+    ferm_global_settings: "{{ worker_ferm_global_settings }}"
+  include_role:
+    name: ferm
+
+...
diff --git a/roles/worker/templates/celerity-config.py.j2 b/roles/worker/templates/celerity-config.py.j2
new file mode 100644
index 00000000..bad0f9c6
--- /dev/null
+++ b/roles/worker/templates/celerity-config.py.j2
@@ -0,0 +1,14 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+SIGNING_KEY = '{{ worker_celerity_signing_key }}'
+SERVER_URL = 'https://{{ worker_celerity_server }}:6200'
+
+WORKERS_COUNT = {{ worker_workers_count }}
+
+# MediaServer interactions
+MEDIASERVERS = {
+{% for instance in worker_ms_instances %}
+    '{{ instance.ms_id }}': {'url': 'https://{{ instance.ms_server_name }}', 'api_key': '{{ instance.ms_api_key }}'},
+{% endfor %}
+}
diff --git a/roles/wowza/defaults/main.yml b/roles/wowza/defaults/main.yml
new file mode 100644
index 00000000..76eddb5d
--- /dev/null
+++ b/roles/wowza/defaults/main.yml
@@ -0,0 +1,72 @@
+---
+
+wowza_requirements:
+  - openjdk-11-jre-headless
+  - xz-utils
+
+wowza_version: 4.7.7
+wowza_package_url: https://panel.ubicast.eu/media/storage/WowzaStreamingEngine-{{ wowza_version }}-linux-x64-installer.deb
+wowza_license: "{{ envsetup_wowza_license }}"
+wowza_manager_pwd: "{{ envsetup_wowza_manager_pwd }}"
+wowza_live_pwd: "{{ envsetup_wowza_live_pwd }}"
+wowza_logs_retention: 7
+
+# server
+
+wowza_rest_enabled: false
+wowza_rest_address: localhost
+wowza_rest_port: 8087
+
+wowza_command_enabled: false
+wowza_command_address: localhost
+wowza_command_port: 8083
+
+wowza_stats_enabled: false
+
+wowza_jmx_enabled: false
+wowza_jmx_address: localhost
+wowza_jmx_rmi_address: localhost
+wowza_jmx_rmi_conn_port: 8084
+wowza_jmx_rmi_registry_port: 8085
+
+# vhost
+
+wowza_rtmp_enabled: true
+wowza_rtmp_address: "*"
+wowza_rtmp_port: 1935
+
+wowza_rtmps_enabled: false
+wowza_rtmps_address: "*"
+wowza_rtmps_port: 443
+
+wowza_admin_enabled: false
+wowza_admin_address: localhost
+wowza_admin_port: 8086
+
+# manager
+
+wowza_manager_enabled: false
+wowza_manager_address: localhost
+wowza_manager_port: 8088
+
+# proxy
+
+wowza_proxy: "{{ envsetup_proxy_http | d() }}"
+wowza_proxy_host: "{{ wowza_proxy | regex_replace('https?://(?:[\\w_\\-]+:[\\w_\\-]+@)?([\\w_\\-\\.]+)(?::[\\d]+)?/?', '\\1') }}"
+wowza_proxy_port: "{{ wowza_proxy | regex_replace('https?://(?:[\\w_\\-]+:[\\w_\\-]+@)?[\\w_\\-\\.]+(?::([\\d]+))?/?', '\\1') }}"
+wowza_proxy_user: "{{ wowza_proxy | regex_replace('https?://(?:([\\w_\\-]+):[\\w_\\-]+@)?[\\w_\\-\\.]+(?::[\\d]+)?/?', '\\1') }}"
+wowza_proxy_pass: "{{ wowza_proxy | regex_replace('https?://(?:[\\w_\\-]+:([\\w_\\-]+)@)?[\\w_\\-\\.]+(?::[\\d]+)?/?', '\\1') }}"
+
+# firewall
+
+wowza_firewall_enabled: true
+wowza_ferm_rules_filename: wowza
+wowza_ferm_input_rules:
+  - proto:
+      - tcp
+    dport:
+      - 1935
+wowza_ferm_output_rules: []
+wowza_ferm_global_settings:
+
+...
diff --git a/roles/wowza/handlers/main.yml b/roles/wowza/handlers/main.yml
new file mode 100644
index 00000000..b9e545cb
--- /dev/null
+++ b/roles/wowza/handlers/main.yml
@@ -0,0 +1,13 @@
+---
+
+- name: restart wowza
+  systemd:
+    name: WowzaStreamingEngine
+    state: restarted
+
+- name: restart wowza manager
+  systemd:
+    name: WowzaStreamingEngineManager
+    state: restarted
+
+...
diff --git a/roles/wowza/tasks/main.yml b/roles/wowza/tasks/main.yml
new file mode 100644
index 00000000..e01a168e
--- /dev/null
+++ b/roles/wowza/tasks/main.yml
@@ -0,0 +1,122 @@
+---
+
+- name: install wowza requirements
+  apt:
+    force_apt_get: true
+    name: "{{ wowza_requirements }}"
+    state: "present"
+
+- name: install wowza
+  apt:
+    force_apt_get: true
+    install_recommends: false
+    deb: "{{ wowza_package_url }}"
+    state: present
+
+- name: fix wowza init scripts
+  loop:
+    - /etc/init.d/WowzaStreamingEngine
+    - /etc/init.d/WowzaStreamingEngineManager
+  replace:
+    path: "{{ item }}"
+    regexp: '^#### BEGIN INIT INFO$'
+    replace: '### BEGIN INIT INFO'
+
+- name: enable wowza service
+  systemd:
+    name: WowzaStreamingEngine
+    enabled: true
+
+- name: disable wowza manager service
+  when: not wowza_manager_enabled
+  systemd:
+    name: WowzaStreamingEngineManager
+    enabled: "{{ wowza_manager_enabled }}"
+    state: stopped
+
+- name: configure wowza license
+  notify: restart wowza
+  copy:
+    dest: /usr/local/WowzaStreamingEngine/conf/Server.license
+    content: "{{ wowza_license }}"
+
+- name: configure wowza manager password
+  when: wowza_manager_enabled
+  notify: restart wowza manager
+  copy:
+    dest: /usr/local/WowzaStreamingEngine/conf/admin.password
+    content: ubicast {{ wowza_manager_pwd }} admin
+
+- name: fix permissions on wowza logs dir
+  notify: restart wowza
+  file:
+    path: /usr/local/WowzaStreamingEngine/logs
+    mode: 0755
+    state: directory
+
+- name: ensure live application directory exists
+  file:
+    path: /usr/local/WowzaStreamingEngine/applications/live
+    state: directory
+
+- name: configure wowza live application
+  notify: restart wowza
+  template:
+    src: live-application.xml.j2
+    dest: /usr/local/WowzaStreamingEngine/conf/live/Application.xml
+
+- name: configure wowza tuning
+  notify: restart wowza
+  template:
+    src: Tune.xml.j2
+    dest: /usr/local/WowzaStreamingEngine/conf/Tune.xml
+
+- name: configure wowza server
+  notify: restart wowza
+  template:
+    src: Server.xml.j2
+    dest: /usr/local/WowzaStreamingEngine/conf/Server.xml
+
+- name: configure wowza vhost
+  notify: restart wowza
+  template:
+    src: VHost.xml.j2
+    dest: /usr/local/WowzaStreamingEngine/conf/VHost.xml
+
+- name: set wowza manager listening address
+  when: wowza_manager_enabled
+  notify: restart wowza manager
+  replace:
+    path: /usr/local/WowzaStreamingEngine/manager/bin/startmgr.sh
+    regexp: 'war --httpPort'
+    replace: 'war --httpListenAddress={{ wowza_manager_address }} --httpPort'
+
+- name: set wowza manager listening port
+  when: wowza_manager_enabled
+  notify: restart wowza manager
+  replace:
+    path: /usr/local/WowzaStreamingEngine/manager/bin/startmgr.sh
+    regexp: '--httpPort=\d+'
+    replace: '--httpPort={{ wowza_manager_port }}'
+
+- name: logs cleanup cron
+  copy:
+    dest: /etc/cron.daily/wowza-logs-cleanup
+    mode: 0755
+    content: |
+      #!/bin/bash
+      find /usr/local/WowzaStreamingEngine/logs/ -type f -mtime +{{ wowza_logs_retention }} -delete
+
+# FIREWALL
+
+- name: firewall
+  when: wowza_firewall_enabled
+  vars:
+    ferm_rules_filename: "{{ wowza_ferm_rules_filename }}"
+    ferm_input_rules: "{{ wowza_ferm_input_rules }}"
+    ferm_output_rules: "{{ wowza_ferm_output_rules }}"
+    ferm_global_settings: "{{ wowza_ferm_global_settings }}"
+  include_role:
+    name: ferm
+
+...
diff --git a/roles/wowza/templates/Server.xml.j2 b/roles/wowza/templates/Server.xml.j2
new file mode 100644
index 00000000..d61e197a
--- /dev/null
+++ b/roles/wowza/templates/Server.xml.j2
@@ -0,0 +1,113 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Root version="2">
+  <Server>
+    <Name>Wowza Streaming Engine</Name>
+    <Description>Wowza Streaming Engine is robust, customizable, and scalable server software that powers reliable streaming of high-quality video and audio to any device, anywhere.</Description>
+    {% if wowza_rest_enabled %}
+    <RESTInterface>
+      <Enable>{{ wowza_rest_enabled }}</Enable>
+      <IPAddress>{{ wowza_rest_address }}</IPAddress>
+      <Port>{{ wowza_rest_port }}</Port>
+      <!-- none, basic, digest, remotehttp, digestfile -->
+      <AuthenticationMethod>digest</AuthenticationMethod>
+      <DiagnosticURLEnable>true</DiagnosticURLEnable>
+      <IPWhiteList>127.0.0.1</IPWhiteList>
+      <IPBlackList></IPBlackList>
+      <EnableXMLFile>false</EnableXMLFile>
+      <DocumentationServerEnable>false</DocumentationServerEnable>
+      <DocumentationServerPort>8089</DocumentationServerPort>
+      <!-- none, basic, digest, remotehttp, digestfile -->
+      <DocumentationServerAuthenticationMethod>digest</DocumentationServerAuthenticationMethod>
+      <Properties>
+      </Properties>
+    </RESTInterface>
+    {% endif %}
+    {% if wowza_command_enabled %}
+    <CommandInterface>
+      <HostPort>
+        <ProcessorCount>${com.wowza.wms.TuningAuto}</ProcessorCount>
+        <IpAddress>{{ wowza_command_address }}</IpAddress>
+        <Port>{{ wowza_command_port }}</Port>
+      </HostPort>
+    </CommandInterface>
+    {% endif %}
+    <Stats>
+      <Enable>{{ wowza_stats_enabled }}</Enable>
+    </Stats>
+    {% if wowza_jmx_enabled %}
+    <AdminInterface>
+      <!-- Objects exposed through JMX interface: Server, VHost, VHostItem, Application, ApplicationInstance, MediaCaster, Module, Client, MediaStream, SharedObject, Acceptor, IdleWorker -->
+      <ObjectList>Server,VHost,VHostItem,Application,ApplicationInstance,MediaCaster,Module,IdleWorker</ObjectList>
+    </AdminInterface>
+    <!-- JMXUrl: service:jmx:rmi://localhost:8084/jndi/rmi://localhost:8085/jmxrmi -->
+    <JMXRemoteConfiguration>
+      <Enable>{{ wowza_jmx_enabled }}</Enable>
+      <IpAddress>{{ wowza_jms_address }}</IpAddress> <!-- set to localhost or internal ip address if behind NAT -->
+      <RMIServerHostName>{{ wowza_jmx_rmi_hostname }}</RMIServerHostName> <!-- set to external ip address or domain name if behind NAT -->
+      <RMIConnectionPort>{{ wowza_jmx_rmi_conn_port }}</RMIConnectionPort>
+      <RMIRegistryPort>{{ wowza_jmx_rmi_registry_port }}</RMIRegistryPort>
+      <Authenticate>true</Authenticate>
+      <PasswordFile>${com.wowza.wms.ConfigHome}/conf/jmxremote.password</PasswordFile>
+      <AccessFile>${com.wowza.wms.ConfigHome}/conf/jmxremote.access</AccessFile>
+      <SSLSecure>false</SSLSecure>
+    </JMXRemoteConfiguration>
+    {% endif %}
+    <UserAgents>Shockwave Flash|CFNetwork|MacNetwork/1.0 (Macintosh)</UserAgents>
+    <Streams>
+      <DefaultStreamPrefix>mp4</DefaultStreamPrefix>
+    </Streams>
+    <ServerListeners>
+      <ServerListener>
+        <BaseClass>com.wowza.wms.mediacache.impl.MediaCacheServerListener</BaseClass>
+      </ServerListener>
+    </ServerListeners>
+    <VHostListeners>
+    </VHostListeners>
+    <HandlerThreadPool>
+      <PoolSize>${com.wowza.wms.TuningAuto}</PoolSize>
+    </HandlerThreadPool>
+    <TransportThreadPool>
+      <PoolSize>${com.wowza.wms.TuningAuto}</PoolSize>
+    </TransportThreadPool>
+    <RTP>
+      <DatagramStartingPort>6970</DatagramStartingPort>
+      <DatagramPortSharing>false</DatagramPortSharing>
+    </RTP>
+    <Manager>
+      <!-- Properties defined are used by the Manager -->
+      <Properties>
+      </Properties>
+    </Manager>
+    <Transcoder>
+      <PluginPaths>
+        <QuickSync></QuickSync>
+      </PluginPaths>
+    </Transcoder>
+    <!-- Properties defined here will be added to the IServer.getProperties() collection -->
+    <Properties>
+      {% if wowza_proxy != "" %}
+      <Property>
+        <Name>licenseServerProxyAddress</Name>
+        <Value>{{ wowza_proxy_host }}</Value>
+      </Property>
+      <Property>
+        <Name>licenseServerProxyPort</Name>
+        <Value>{{ wowza_proxy_port }}</Value>
+        <Type>Integer</Type>
+      </Property>
+      {% if wowza_proxy_user != "" %}
+      <Property>
+        <Name>licenseServerProxyUsername</Name>
+        <Value>{{ wowza_proxy_user }}</Value>
+      </Property>
+      {% endif %}
+      {% if wowza_proxy_pass != "" %}
+      <Property>
+        <Name>licenseServerProxyPassword</Name>
+        <Value>{{ wowza_proxy_pass }}</Value>
+      </Property>
+      {% endif %}
+      {% endif %}
+    </Properties>
+  </Server>
+</Root>
diff --git a/roles/wowza/templates/Tune.xml.j2 b/roles/wowza/templates/Tune.xml.j2
new file mode 100644
index 00000000..330810c2
--- /dev/null
+++ b/roles/wowza/templates/Tune.xml.j2
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Root>
+	<Tune>
+		<HeapSize>2000M</HeapSize>
+		<GarbageCollector>${com.wowza.wms.TuningGarbageCollectorG1Default}</GarbageCollector>
+		<VMOptions>
+			<VMOption>-server</VMOption>
+			<VMOption>-Djava.net.preferIPv4Stack=false</VMOption>
+		</VMOptions>
+	</Tune>
+</Root>
diff --git a/roles/wowza/templates/VHost.xml.j2 b/roles/wowza/templates/VHost.xml.j2
new file mode 100644
index 00000000..0f1bd197
--- /dev/null
+++ b/roles/wowza/templates/VHost.xml.j2
@@ -0,0 +1,338 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Root version="2">
+	<VHost>
+		<Description></Description>
+		<HostPortList>
+
+      {% if wowza_rtmp_enabled %}
+			<!-- RTMP HostPort -->
+			<HostPort>
+				<Name>Default Streaming</Name>
+				<Type>Streaming</Type>
+				<ProcessorCount>${com.wowza.wms.TuningAuto}</ProcessorCount>
+        <IpAddress>{{ wowza_rtmp_address }}</IpAddress>
+        <Port>{{ wowza_rtmp_port }}</Port>
+				<HTTPIdent2Response></HTTPIdent2Response>
+				<SocketConfiguration>
+					<ReuseAddress>true</ReuseAddress>
+					<ReceiveBufferSize>65000</ReceiveBufferSize>
+					<ReadBufferSize>65000</ReadBufferSize>
+					<SendBufferSize>65000</SendBufferSize>
+					<KeepAlive>true</KeepAlive>
+					<AcceptorBackLog>100</AcceptorBackLog>
+				</SocketConfiguration>
+				<HTTPStreamerAdapterIDs>cupertinostreaming,smoothstreaming,sanjosestreaming,dvrchunkstreaming,mpegdashstreaming</HTTPStreamerAdapterIDs>
+				<HTTPProviders>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPCrossdomain</BaseClass>
+						<RequestFilters>*crossdomain.xml</RequestFilters>
+						<AuthenticationMethod>none</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPClientAccessPolicy</BaseClass>
+						<RequestFilters>*clientaccesspolicy.xml</RequestFilters>
+						<AuthenticationMethod>none</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPProviderMediaList</BaseClass>
+						<RequestFilters>*jwplayer.rss|*jwplayer.smil|*medialist.smil|*manifest-rtmp.f4m</RequestFilters>
+						<AuthenticationMethod>none</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPServerVersion</BaseClass>
+						<RequestFilters>*</RequestFilters>
+						<AuthenticationMethod>none</AuthenticationMethod>
+					</HTTPProvider>
+				</HTTPProviders>
+			</HostPort>
+      {% endif %}
+
+      {% if wowza_rtmps_enabled %}
+			<!-- 443 with SSL -->
+			<HostPort>
+				<Name>Default SSL Streaming</Name>
+				<Type>Streaming</Type>
+				<ProcessorCount>${com.wowza.wms.TuningAuto}</ProcessorCount>
+        <IpAddress>{{ wowza_rtmps_address }}</IpAddress>
+        <Port>{{ wowza_rtmps_port }}</Port>
+				<HTTPIdent2Response></HTTPIdent2Response>
+				<SSLConfig>
+					<KeyStorePath>${com.wowza.wms.context.VHostConfigHome}/conf/keystore.jks</KeyStorePath>
+					<KeyStorePassword>[password]</KeyStorePassword>
+					<KeyStoreType>JKS</KeyStoreType>
+					<DomainToKeyStoreMapPath></DomainToKeyStoreMapPath>
+					<SSLProtocol>TLS</SSLProtocol>
+					<Algorithm>SunX509</Algorithm>
+					<CipherSuites></CipherSuites>
+					<Protocols></Protocols>
+				</SSLConfig>
+				<SocketConfiguration>
+					<ReuseAddress>true</ReuseAddress>
+					<ReceiveBufferSize>65000</ReceiveBufferSize>
+					<ReadBufferSize>65000</ReadBufferSize>
+					<SendBufferSize>65000</SendBufferSize>
+					<KeepAlive>true</KeepAlive>
+					<AcceptorBackLog>100</AcceptorBackLog>
+				</SocketConfiguration>
+				<HTTPStreamerAdapterIDs>cupertinostreaming,smoothstreaming,sanjosestreaming,dvrchunkstreaming,mpegdashstreaming</HTTPStreamerAdapterIDs>
+				<HTTPProviders>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPCrossdomain</BaseClass>
+						<RequestFilters>*crossdomain.xml</RequestFilters>
+						<AuthenticationMethod>none</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPClientAccessPolicy</BaseClass>
+						<RequestFilters>*clientaccesspolicy.xml</RequestFilters>
+						<AuthenticationMethod>none</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPProviderMediaList</BaseClass>
+						<RequestFilters>*jwplayer.rss|*jwplayer.smil|*medialist.smil|*manifest-rtmp.f4m</RequestFilters>
+						<AuthenticationMethod>none</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPServerVersion</BaseClass>
+						<RequestFilters>*</RequestFilters>
+						<AuthenticationMethod>none</AuthenticationMethod>
+					</HTTPProvider>
+				</HTTPProviders>
+			</HostPort>
+      {% endif %}
+
+      {% if wowza_admin_enabled %}
+			<!-- Admin HostPort -->
+			<HostPort>
+				<Name>Default Admin</Name>
+				<Type>Admin</Type>
+				<ProcessorCount>${com.wowza.wms.TuningAuto}</ProcessorCount>
+        <IpAddress>{{ wowza_admin_address }}</IpAddress>
+        <Port>{{ wowza_admin_port }}</Port>
+				<HTTPIdent2Response></HTTPIdent2Response>
+				<SocketConfiguration>
+					<ReuseAddress>true</ReuseAddress>
+					<ReceiveBufferSize>16000</ReceiveBufferSize>
+					<ReadBufferSize>16000</ReadBufferSize>
+					<SendBufferSize>16000</SendBufferSize>
+					<KeepAlive>true</KeepAlive>
+					<AcceptorBackLog>100</AcceptorBackLog>
+				</SocketConfiguration>
+				<HTTPStreamerAdapterIDs></HTTPStreamerAdapterIDs>
+				<HTTPProviders>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.streammanager.HTTPStreamManager</BaseClass>
+						<RequestFilters>streammanager*</RequestFilters>
+						<AuthenticationMethod>admin-digest</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPServerInfoXML</BaseClass>
+						<RequestFilters>serverinfo*</RequestFilters>
+						<AuthenticationMethod>admin-digest</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPConnectionInfo</BaseClass>
+						<RequestFilters>connectioninfo*</RequestFilters>
+						<AuthenticationMethod>admin-digest</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPConnectionCountsXML</BaseClass>
+						<RequestFilters>connectioncounts*</RequestFilters>
+						<AuthenticationMethod>admin-digest</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.transcoder.httpprovider.HTTPTranscoderThumbnail</BaseClass>
+						<RequestFilters>transcoderthumbnail*</RequestFilters>
+						<AuthenticationMethod>admin-digest</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPProviderMediaList</BaseClass>
+						<RequestFilters>medialist*</RequestFilters>
+						<AuthenticationMethod>admin-digest</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.livestreamrecord.http.HTTPLiveStreamRecord</BaseClass>
+						<RequestFilters>livestreamrecord*</RequestFilters>
+						<AuthenticationMethod>admin-digest</AuthenticationMethod>
+					</HTTPProvider>
+					<HTTPProvider>
+						<BaseClass>com.wowza.wms.http.HTTPServerVersion</BaseClass>
+						<RequestFilters>*</RequestFilters>
+						<AuthenticationMethod>none</AuthenticationMethod>
+					</HTTPProvider>
+				</HTTPProviders>
+			</HostPort>
+      {% endif %}
+
+		</HostPortList>
+
+		<HTTPStreamerAdapters>
+			<HTTPStreamerAdapter>
+				<ID>smoothstreaming</ID>
+				<Name>smoothstreaming</Name>
+				<Properties>
+				</Properties>
+			</HTTPStreamerAdapter>
+			<HTTPStreamerAdapter>
+				<ID>cupertinostreaming</ID>
+				<Name>cupertinostreaming</Name>
+				<Properties>
+				</Properties>
+			</HTTPStreamerAdapter>
+			<HTTPStreamerAdapter>
+				<ID>sanjosestreaming</ID>
+				<Name>sanjosestreaming</Name>
+				<Properties>
+				</Properties>
+			</HTTPStreamerAdapter>
+			<HTTPStreamerAdapter>
+				<ID>dvrchunkstreaming</ID>
+				<Name>dvrchunkstreaming</Name>
+				<Properties>
+				</Properties>
+			</HTTPStreamerAdapter>
+			<HTTPStreamerAdapter>
+				<ID>mpegdashstreaming</ID>
+				<Name>mpegdashstreaming</Name>
+				<Properties>
+				</Properties>
+			</HTTPStreamerAdapter>
+			<HTTPStreamerAdapter>
+				<ID>tsstreaming</ID>
+				<Name>tsstreaming</Name>
+				<Properties>
+				</Properties>
+			</HTTPStreamerAdapter>
+			<HTTPStreamerAdapter>
+				<ID>webmstreaming</ID>
+				<Name>webmstreaming</Name>
+				<Properties>
+				</Properties>
+			</HTTPStreamerAdapter>
+		</HTTPStreamerAdapters>
+
+		<!-- When set to zero, thread pool configuration is done in Server.xml -->
+		<HandlerThreadPool>
+			<PoolSize>0</PoolSize>
+		</HandlerThreadPool>
+		<TransportThreadPool>
+			<PoolSize>0</PoolSize>
+		</TransportThreadPool>
+		<IdleWorkers>
+			<WorkerCount>${com.wowza.wms.TuningAuto}</WorkerCount>
+			<CheckFrequency>50</CheckFrequency>
+			<MinimumWaitTime>5</MinimumWaitTime>
+		</IdleWorkers>
+		<NetConnections>
+			<ProcessorCount>${com.wowza.wms.TuningAuto}</ProcessorCount>
+			<IdleFrequency>250</IdleFrequency>
+			<SocketConfiguration>
+				<ReuseAddress>true</ReuseAddress>
+				<ReceiveBufferSize>65000</ReceiveBufferSize>
+				<ReadBufferSize>65000</ReadBufferSize>
+				<SendBufferSize>65000</SendBufferSize>
+				<KeepAlive>true</KeepAlive>
+				<AcceptorBackLog>100</AcceptorBackLog>
+			</SocketConfiguration>
+		</NetConnections>
+		<MediaCasters>
+			<ProcessorCount>${com.wowza.wms.TuningAuto}</ProcessorCount>
+			<SocketConfiguration>
+				<ReuseAddress>true</ReuseAddress>
+				<ReceiveBufferSize>65000</ReceiveBufferSize>
+				<ReadBufferSize>65000</ReadBufferSize>
+				<SendBufferSize>65000</SendBufferSize>
+				<KeepAlive>true</KeepAlive>
+				<ConnectionTimeout>10000</ConnectionTimeout>
+			</SocketConfiguration>
+		</MediaCasters>
+		<LiveStreamTranscoders>
+			<MaximumConcurrentTranscodes>0</MaximumConcurrentTranscodes>
+		</LiveStreamTranscoders>
+		<HTTPTunnel>
+			<KeepAliveTimeout>2000</KeepAliveTimeout>
+		</HTTPTunnel>
+		<Client>
+			<ClientTimeout>90000</ClientTimeout>
+			<IdleFrequency>250</IdleFrequency>
+		</Client>
+		<!-- RTP/Authentication/Methods defined in Authentication.xml. Default setup includes; none, basic, digest -->
+		<RTP>
+			<IdleFrequency>75</IdleFrequency>
+			<DatagramConfiguration>
+				<Incoming>
+					<ReuseAddress>true</ReuseAddress>
+					<ReceiveBufferSize>2048000</ReceiveBufferSize>
+					<SendBufferSize>65000</SendBufferSize>
+					<!-- <MulticastBindToAddress>true</MulticastBindToAddress> -->
+					<!-- <MulticastInterfaceAddress>192.168.1.22</MulticastInterfaceAddress> -->
+					<!-- <TrafficClass>0</TrafficClass> -->
+					<MulticastTimeout>50</MulticastTimeout>
+					<DatagramMaximumPacketSize>4096</DatagramMaximumPacketSize>
+				</Incoming>
+				<Outgoing>
+					<ReuseAddress>true</ReuseAddress>
+					<ReceiveBufferSize>65000</ReceiveBufferSize>
+					<SendBufferSize>256000</SendBufferSize>
+					<!-- <MulticastBindToAddress>true</MulticastBindToAddress> -->
+					<!-- <MulticastInterfaceAddress>192.168.1.22</MulticastInterfaceAddress> -->
+					<!-- <TrafficClass>0</TrafficClass> -->
+					<MulticastTimeout>50</MulticastTimeout>
+					<DatagramMaximumPacketSize>4096</DatagramMaximumPacketSize>
+					<SendIGMPJoinMsgOnPublish>false</SendIGMPJoinMsgOnPublish>
+				</Outgoing>
+			</DatagramConfiguration>
+			<UnicastIncoming>
+				<ProcessorCount>${com.wowza.wms.TuningAuto}</ProcessorCount>
+			</UnicastIncoming>
+			<UnicastOutgoing>
+				<ProcessorCount>${com.wowza.wms.TuningAuto}</ProcessorCount>
+			</UnicastOutgoing>
+			<MulticastIncoming>
+				<ProcessorCount>${com.wowza.wms.TuningAuto}</ProcessorCount>
+			</MulticastIncoming>
+			<MulticastOutgoing>
+				<ProcessorCount>${com.wowza.wms.TuningAuto}</ProcessorCount>
+			</MulticastOutgoing>
+		</RTP>
+		<HTTPProvider>
+			<KeepAliveTimeout>2000</KeepAliveTimeout>
+			<KillConnectionTimeout>10000</KillConnectionTimeout>
+			<SlowConnectionBitrate>64000</SlowConnectionBitrate>
+			<IdleFrequency>250</IdleFrequency>
+		</HTTPProvider>
+		<WebSocket>
+			<MaximumMessageSize>512k</MaximumMessageSize>
+			<PacketFragmentationSize>0</PacketFragmentationSize>
+			<MaskOutgoingMessages>false</MaskOutgoingMessages>
+			<IdleFrequency>250</IdleFrequency>
+			<ValidationFrequency>20000</ValidationFrequency>
+			<MaximumPendingWriteBytes>0</MaximumPendingWriteBytes>
+			<PingTimeout>12000</PingTimeout>
+		</WebSocket>
+		<Application>
+			<ApplicationTimeout>60000</ApplicationTimeout>
+			<PingTimeout>12000</PingTimeout>
+			<UnidentifiedSessionTimeout>30000</UnidentifiedSessionTimeout>
+			<ValidationFrequency>20000</ValidationFrequency>
+			<MaximumPendingWriteBytes>0</MaximumPendingWriteBytes>
+			<MaximumSetBufferTime>60000</MaximumSetBufferTime>
+		</Application>
+		<StartStartupStreams>true</StartStartupStreams>
+
+		<Manager>
+			<TestPlayer>
+				<IpAddress>${com.wowza.wms.HostPort.IpAddress}</IpAddress>
+				<Port>${com.wowza.wms.HostPort.FirstStreamingPort}</Port>
+				<SSLEnable>${com.wowza.wms.HostPort.SSLEnable}</SSLEnable>
+			</TestPlayer>
+			<!-- Properties defined are used by the Manager -->
+			<Properties>
+			</Properties>
+		</Manager>
+
+		<!-- Properties defined here will be added to the IVHost.getProperties() collection -->
+		<Properties>
+		</Properties>
+	</VHost>
+</Root>
+
diff --git a/2.Common_services/4.Wowza/live-application.xml b/roles/wowza/templates/live-application.xml.j2
similarity index 61%
rename from 2.Common_services/4.Wowza/live-application.xml
rename to roles/wowza/templates/live-application.xml.j2
index 4ff41aa9..8036b089 100644
--- a/2.Common_services/4.Wowza/live-application.xml
+++ b/roles/wowza/templates/live-application.xml.j2
@@ -4,43 +4,20 @@
 		<Name>live</Name>
 		<AppType>LiveHTTPOrigin</AppType>
 		<Description></Description>
-		<!-- Uncomment to set application level timeout values
-		<ApplicationTimeout>60000</ApplicationTimeout>
-		<PingTimeout>12000</PingTimeout>
-		<ValidationFrequency>8000</ValidationFrequency>
-		<MaximumPendingWriteBytes>0</MaximumPendingWriteBytes>
-		<MaximumSetBufferTime>60000</MaximumSetBufferTime>
-		<MaximumStorageDirDepth>25</MaximumStorageDirDepth>
-		-->
 		<Connections>
 			<AutoAccept>true</AutoAccept>
 			<AllowDomains></AllowDomains>
 		</Connections>
-		<!--
-			StorageDir path variables
-			
-			${com.wowza.wms.AppHome} - Application home directory
-			${com.wowza.wms.ConfigHome} - Configuration home directory
-			${com.wowza.wms.context.VHost} - Virtual host name
-			${com.wowza.wms.context.VHostConfigHome} - Virtual host home directory
-			${com.wowza.wms.context.Application} - Application name
-			${com.wowza.wms.context.ApplicationInstance} - Application instance name
-			
-		-->
 		<Streams>
 			<StreamType>live</StreamType>
 			<StorageDir>${com.wowza.wms.context.VHostConfigHome}/content</StorageDir>
 			<KeyDir>${com.wowza.wms.context.VHostConfigHome}/keys</KeyDir>
-			<!-- LiveStreamPacketizers (separate with commas): cupertinostreamingpacketizer, smoothstreamingpacketizer, sanjosestreamingpacketizer, mpegdashstreamingpacketizer, cupertinostreamingrepeater, smoothstreamingrepeater, sanjosestreamingrepeater, mpegdashstreamingrepeater, dvrstreamingpacketizer, dvrstreamingrepeater -->
 			<LiveStreamPacketizers>cupertinostreamingpacketizer, mpegdashstreamingpacketizer, sanjosestreamingpacketizer, smoothstreamingpacketizer</LiveStreamPacketizers>
-			<!-- Properties defined here will override any properties defined in conf/Streams.xml for any streams types loaded by this application -->
 			<Properties>
 			</Properties>
 		</Streams>
 		<Transcoder>
-			<!-- To turn on transcoder set to: transcoder -->
 			<LiveStreamTranscoder></LiveStreamTranscoder>
-			<!-- [templatename].xml or ${SourceStreamName}.xml -->
 			<Templates>${SourceStreamName}.xml,transrate.xml</Templates>
 			<ProfileDir>${com.wowza.wms.context.VHostConfigHome}/transcoder/profiles</ProfileDir>
 			<TemplateDir>${com.wowza.wms.context.VHostConfigHome}/transcoder/templates</TemplateDir>
@@ -48,33 +25,19 @@
 			</Properties>
 		</Transcoder>
 		<DVR>
-			<!-- As a single server or as an origin, use dvrstreamingpacketizer in LiveStreamPacketizers above -->
-			<!-- Or, in an origin-edge configuration, edges use dvrstreamingrepeater in LiveStreamPacketizers above -->
-			<!-- As an origin, also add dvrchunkstreaming to HTTPStreamers below -->
-			<!-- If this is a dvrstreamingrepeater, define Application/Repeater/OriginURL to point back to the origin -->
-			<!-- To turn on DVR recording set Recorders to dvrrecorder.  This works with dvrstreamingpacketizer  -->
 			<Recorders></Recorders>
-			<!-- As a single server or as an origin, set the Store to dvrfilestorage-->
-			<!-- edges should have this empty -->
 			<Store></Store>
-			<!--  Window Duration is length of live DVR window in seconds.  0 means the window is never trimmed. -->
 			<WindowDuration>0</WindowDuration>
-			<!-- Storage Directory is top level location where dvr is stored.  e.g. c:/temp/dvr -->
 			<StorageDir>${com.wowza.wms.context.VHostConfigHome}/dvr</StorageDir>
-			<!-- valid ArchiveStrategy values are append, version, delete -->
 			<ArchiveStrategy>append</ArchiveStrategy>
-			<!-- Properties for DVR -->
 			<Properties>
 			</Properties>
 		</DVR>
 		<TimedText>
-			<!-- VOD caption providers (separate with commas): vodcaptionprovidermp4_3gpp, vodcaptionproviderttml, vodcaptionproviderwebvtt,  vodcaptionprovidersrt, vodcaptionproviderscc -->
 			<VODTimedTextProviders>vodcaptionprovidermp4_3gpp</VODTimedTextProviders>
-			<!-- Properties for TimedText -->
 			<Properties>
 			</Properties>
 		</TimedText>
-		<!-- HTTPStreamers (separate with commas): cupertinostreaming, smoothstreaming, sanjosestreaming, mpegdashstreaming, dvrchunkstreaming -->
 		<HTTPStreamers>sanjosestreaming, cupertinostreaming, smoothstreaming, mpegdashstreaming</HTTPStreamers>
 		<MediaCache>
 			<MediaCacheSourceList></MediaCacheSourceList>
@@ -94,12 +57,10 @@
 			</Access>
 		</Client>
 		<RTP>
-			<!-- RTP/Authentication/[type]Methods defined in Authentication.xml. Default setup includes; none, basic, digest -->
 			<Authentication>
 				<PublishMethod>digest</PublishMethod>
 				<PlayMethod>none</PlayMethod>
 			</Authentication>
-			<!-- RTP/AVSyncMethod. Valid values are: senderreport, systemclock, rtptimecode -->
 			<AVSyncMethod>senderreport</AVSyncMethod>
 			<MaxRTCPWaitTime>12000</MaxRTCPWaitTime>
 			<IdleFrequency>75</IdleFrequency>
@@ -109,34 +70,28 @@
 			<RTSPConnectionIpAddress>0.0.0.0</RTSPConnectionIpAddress>
 			<RTSPOriginIpAddress>127.0.0.1</RTSPOriginIpAddress>
 			<IncomingDatagramPortRanges>*</IncomingDatagramPortRanges>
-			<!-- Properties defined here will override any properties defined in conf/RTP.xml for any depacketizers loaded by this application -->
 			<Properties>
 			</Properties>
 		</RTP>
 		<MediaCaster>
 			<RTP>
 				<RTSP>
-					<!-- udp, interleave -->
 					<RTPTransportMode>interleave</RTPTransportMode>
 				</RTSP>
 			</RTP>
 			<StreamValidator></StreamValidator>
-			<!-- Properties defined here will override any properties defined in conf/MediaCasters.xml for any MediaCasters loaded by this applications -->
 			<Properties>
 			</Properties>
 		</MediaCaster>
 		<MediaReader>
-			<!-- Properties defined here will override any properties defined in conf/MediaReaders.xml for any MediaReaders loaded by this applications -->
 			<Properties>
 			</Properties>
 		</MediaReader>
 		<MediaWriter>
-			<!-- Properties defined here will override any properties defined in conf/MediaWriter.xml for any MediaWriter loaded by this applications -->
 			<Properties>
 			</Properties>
 		</MediaWriter>
 		<LiveStreamPacketizer>
-			<!-- Properties defined here will override any properties defined in conf/LiveStreamPacketizers.xml for any LiveStreamPacketizers loaded by this applications -->
 			<Properties>
 				<Property>
 					<Name>httpRandomizeMediaName</Name>
@@ -151,7 +106,6 @@
 			</Properties>
 		</LiveStreamPacketizer>
 		<HTTPStreamer>
-			<!-- Properties defined here will override any properties defined in conf/HTTPStreamers.xml for any HTTPStreamer loaded by this applications -->
 			<Properties>
 				<Property>
 					<Name>httpOriginMode</Name>
@@ -211,7 +165,6 @@
 			</Properties>
 		</HTTPStreamer>
 		<Manager>
-			<!-- Properties defined are used by the Manager -->
 			<Properties>
 			</Properties>
 		</Manager>
@@ -250,7 +203,6 @@
 				<Class>com.wowza.wms.security.ModuleSecureURLParams</Class>
 			</Module>
 		</Modules>
-		<!-- Properties defined here will be added to the IApplication.getProperties() and IApplicationInstance.getProperties() collections -->
 		<Properties>
 			<Property>
 				<Name>securityPublishRequirePassword</Name>
@@ -259,7 +211,7 @@
 			</Property>
 			<Property>
 				<Name>secureurlparams.publish</Name>
-				<Value>{{ live_pwd }}.doPublish</Value>
+				<Value>{{ wowza_live_pwd }}.doPublish</Value>
 				<Type>String</Type>
 			</Property>
 		</Properties>
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 64cd4dfa..00000000
--- a/setup.cfg
+++ /dev/null
@@ -1,34 +0,0 @@
-[metadata]
-name = envsetup
-long_description = file: README.md
-long_description_content_type = text/markdown
-
-[options]
-setup_requires=
-    setuptools
-    wheel
-
-[options.extras_require]
-dev =
-    black
-    flake8
-    pylint
-    pysnooper
-
-[flake8]
-exclude =
-    .venv/
-ignore =
-    E501
-    W503
-    W505
-max-line-length = 88
-
-[pylint]
-ignore=
-disable=
-    bad-continuation,
-    invalid-name,
-    missing-docstring,
-    too-few-public-methods,
-    too-many-locals,
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 80e673e6..00000000
--- a/setup.py
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env python3
-
-import setuptools
-
-
-setuptools.setup()
diff --git a/site.yml b/site.yml
new file mode 100755
index 00000000..4979459e
--- /dev/null
+++ b/site.yml
@@ -0,0 +1,68 @@
+#!/usr/bin/env ansible-playbook
+---
+
+- import_playbook: playbooks/includes/python.yml
+  tags:
+    - always
+- import_playbook: playbooks/includes/check_docker.yml
+  tags:
+    - always
+- import_playbook: playbooks/includes/conf.yml
+  tags:
+    - always
+    - conf
+- import_playbook: playbooks/includes/init.yml
+  tags:
+    - init
+- import_playbook: playbooks/includes/base.yml
+  tags:
+    - base
+- import_playbook: playbooks/includes/cluster.yml
+  tags:
+    - cluster
+
+- import_playbook: playbooks/includes/postgres.yml
+  tags:
+    - postgres
+    - monitor
+    - manager
+    - server
+- import_playbook: playbooks/includes/monitor.yml
+  tags:
+    - monitor
+- import_playbook: playbooks/includes/manager.yml
+  tags:
+    - manager
+- import_playbook: playbooks/includes/wowza.yml
+  tags:
+    - wowza
+- import_playbook: playbooks/includes/celerity.yml
+  tags:
+    - celerity
+- import_playbook: playbooks/includes/worker.yml
+  tags:
+    - worker
+- import_playbook: playbooks/includes/server.yml
+  tags:
+    - server
+- import_playbook: playbooks/includes/vault.yml
+  tags:
+    - vault
+- import_playbook: playbooks/includes/import.yml
+  tags:
+    - import
+- import_playbook: playbooks/includes/netcapture.yml
+  tags:
+    - netcapture
+
+- import_playbook: playbooks/includes/certificates.yml
+  tags:
+    - certificates
+    - monitor
+    - manager
+    - server
+- import_playbook: playbooks/includes/network.yml
+  tags:
+    - network
+
+...
diff --git a/tests/test_ssl.py b/tests/test_ssl.py
index 74f1ae04..dc652780 100755
--- a/tests/test_ssl.py
+++ b/tests/test_ssl.py
@@ -74,7 +74,12 @@ def main():
             # if mediaserver (the only cert that is mandatory)
             if setting == conf_servers[0]:
                 failure = True
-        elif remaining < datetime.timedelta(days=14):
+        elif remaining < datetime.timedelta(days=7):
+            u.error("{}: expire in {}".format(server_name, str(remaining)))
+            # if mediaserver (the only cert that is mandatory)
+            if setting == conf_servers[0]:
+                failure = True
+        elif remaining < datetime.timedelta(days=30):
             u.warning("{}: expire in {}".format(server_name, str(remaining)))
             # if mediaserver (the only cert that is mandatory)
             if setting == conf_servers[0]:
-- 
GitLab