diff --git a/.docker/Dockerfile b/.docker/Dockerfile
deleted file mode 100644
index c448f13cb848e701a449285d9dd2a4d1e1d4966e..0000000000000000000000000000000000000000
--- a/.docker/Dockerfile
+++ /dev/null
@@ -1,30 +0,0 @@
-FROM registry.ubicast.net/docker/debian-dev:latest
-
-# avoid warnings by switching to noninteractive
-ENV DEBIAN_FRONTEND noninteractive
-# local pyvenv to avoid conflicts with system
-ENV PYVENV ${HOME}/pyvenv
-# add pyvenv to path
-ENV PATH ${PYVENV}/bin:${PATH}
-
-# copy requirement file
-COPY ansible/requirements.dev.txt .
-
-RUN \
-    # install required tools
-    sudo apt-get update && \
-    sudo apt-get install -y libffi-dev libncurses5 libncursesw5 libssl-dev python3-netaddr && \
-    # create pyvenv + install ansible tools
-    python3 -m venv --system-site-packages ${PYVENV} && \
-    pip install -U pip wheel && \
-    pip install -r requirements.dev.txt && \
-    # clean up
-    sudo apt-get autoremove -y && \
-    sudo apt-get clean -y && \
-    sudo rm -rf /var/lib/apt/lists/* &&  \
-    sudo rm requirements.dev.txt 
-
-# switch back to dialog for any ad-hoc use of apt-get
-ENV DEBIAN_FRONTEND dialog
-ENV HOME /root
-USER root
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..ec7d6ea61bbab144065fea2547daae86f8cab205
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,21 @@
+# Run flake8 (pycodestyle + pyflakes) check.
+# https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
+# Ignored errors:
+# - E501: line too long
+# - E265: block comment should start with '# '
+# - W503: line break before binary operator (deprecated rule)
+# - W505: doc line too long
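+#
+# Directory-specific overrides remain possible, e.g. (hypothetical, not enabled):
+# per-file-ignores =
+#     submodules/*:E713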
+
+[flake8]
+
+ignore =
+    E501
+    E265
+    W503
+    W505
+
+exclude = .git,submodules
diff --git a/.githooks/pre-commit b/.githooks/pre-commit
deleted file mode 100755
index b3d9c7b3afd4411e57fd16cc41f27600448b5082..0000000000000000000000000000000000000000
--- a/.githooks/pre-commit
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-
-if git rev-parse --verify HEAD >/dev/null 2>&1; then
-        against=HEAD
-else
-        # Initial commit: diff against an empty tree object
-        against=$(git hash-object -t tree /dev/null)
-fi
-
-# Redirect output to stderr.
-exec 1>&2
-
-# Get changed files
-changed_files=$(git diff-index --name-only ${against})
-
-# Verify files content
-for file in ${changed_files}; do
-
-    if grep -qiE 'skyreach_(system|activation|api)_key' "${file}"; then
-    
-        # verify key
-        key=$(grep -iE 'skyreach_(system|activation|api)_key' "${file}" | grep -woiE '[a-z0-9]{32}')
-        if [ -n "${key}" ]; then
-            echo "Error: you are about to commit a secret key in file: ${file}"
-            echo "Please remove it before committing."
-            echo -
-            grep -iE 'skyreach_(system|activation|api)_key' "${file}" | grep -iE '[a-z0-9]{32}'
-            echo -
-            exit 1
-        fi
-    
-    fi
-
-done
-
-# vim:ft=sh
diff --git a/.gitignore b/.gitignore
index 8e7c09ffd7fdb3a539813ae4ecc40a21d4be75fd..2ce16c59cc286a73bc71b7704a028c9269c21983 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,42 +1,33 @@
-# virtualenv
-.venv/
+*.py[cod]
 
-# python
-__pycache__/
-*.pyc
+# C extensions
+*.so
 
-# ansible
-ansible/inventories/_*
-ansible/inventories/local*/host_vars/localhost.yml
-ansible/inventories/offline*/host_vars/localhost.yml
-ansible/playbooks/_*
-ansible/roles/_*
-ansible/roles/elastic.elasticsearch
+# Packages
+*.egg
+*.egg-info
+dist
+build
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+lib
+lib64
+__pycache__
 
-# logs
-*.log
+# Installer logs
+pip-log.txt
 
-# packer
-packer_cache/
-output/
-packer/*.json
-!packer/example.json
+# Unit test / coverage reports
+.coverage
+.tox
+nosetests.xml
 
-# ide
-.vscode/
-*.code-workspace
-.idea/
-*.sublime-workspace
-*.sublime-project
-
-# secrets
-.env/*
-!.env/_reset
-!.env/_config
-!.env/*example
-!.env/example
-
-# envsetup
-conf*.sh
-auto-generated-conf.sh*
-tests/scripts/ms-testing-suite
+# Mr Developer
+.mr.developer.cfg
+.project
+.pydevproject
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 30d50de340082457ad45725e04ae1bd4f467b36e..f8606bc1492ed8309fe59eb66a4cfdd23868da62 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,69 +1,10 @@
----
-
-default:
-  image: registry.ubicast.net/mediaserver/envsetup
-
-stages:
-  - lint
-  - docker
-  - test-pf-std
-  - test-pf-ha
-  - test-pgsql
-
-# * * * * * * * * * * * * *
-
-lint:verify:
-  stage: lint
-  script:
-    - make lint
-
-# * * * * * * * * * * * * *
-
-# Docker envsetup image build job
-docker:image:
-  image: docker:stable
-  stage: docker
-  rules:
-    # Build docker image for schedule pipelines only
-    - if: '$DOCKER_BUILD == "True"'
+flake8:
+  image: python:3-alpine
+  tags:
+    - docker
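+  # Install flake8 and lint the whole repository; ignore rules live in .flake8.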
   before_script:
-    - apk add bash make
-    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN registry.ubicast.net
-  script:
-    - make docker-build
-    - make docker-push
-
-# * * * * * * * * * * * * *
-
-.test-template:
-  retry: 2
-  timeout: 4h
-  rules:
-    # Run deployment for schedule pipelines
-    - if: '$CI_PIPELINE_SOURCE == "schedule" && $PF_DEPLOY_TEST == "True"'
-      when: always
-    # Run deployment for manual pipelines
-    - if: '$CI_PIPELINE_SOURCE == "web"'
-      when: always
-    - when: never
-
-test:pf-std:
-  extends: .test-template
-  stage: test-pf-std
-  script:
-    - echo $CI_PIPELINE_SOURCE
-    - make test pf-std=1
-
-test:pf-ha:
-  extends: .test-template
-  stage: test-pf-ha
-  script:
-    - make test pf-ha=1
-
-test:pgsql-ha:
-  extends: .test-template
-  stage: test-pgsql
+    - python -m pip install --upgrade pip
+    - python -m pip install flake8
   script:
-    - make test pgsql-ha=1 debug=1
-
-...
+    - flake8 .
diff --git a/.lint/ansible-apt-block-check.sh b/.lint/ansible-apt-block-check.sh
deleted file mode 100755
index 3782cf0e73824183cad314444e00248dedeb0bcd..0000000000000000000000000000000000000000
--- a/.lint/ansible-apt-block-check.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-
-# config
-exclude_pattern=()
-exclude_pattern+=('^./ansible/roles/elastic.elasticsearch')
-
-apt_regex='^[^#]*apt:'
-until_regex='^[^#]*until: apt_status is success'
-
-# * * * 
-
-# go to repository root dir
-cd "$(readlink -f "$(dirname "${0}")")"/..
-
-# join function
-join_by() { local IFS="$1"; shift; echo "$*"; }
-
-# set all *.yml files to an array
-mapfile -t yml_files < <(find . -type f -iname '*.yml' | grep -vE "$(join_by '|' "${exclude_pattern[@]}")")
-
-# check every file
-errors_count=0
-for f in "${yml_files[@]}"; do
-
-    # count apt block
-    apt_block_count=$(grep -c "${apt_regex}" "${f}")
-
-    # test if file contains apt block
-    if (( apt_block_count > 0 )); then
-    
-        # get apt block, count apt: and until:
-        apt_blocks="$(awk -v RS='' "/${apt_regex}/" "${f}")"
-        apt_nb="$(echo "${apt_blocks}" | grep -c "${apt_regex}")"
-        until_nb="$(echo "${apt_blocks}" | grep -c "${until_regex}")"
-
-        # test if apt: and until: count differ
-        if (( apt_nb != until_nb )); then
-            echo "- ${f}"
-            (( errors_count++ ))
-        fi
-    fi
-
-done
-
-if (( errors_count != 0 )); then
-    echo "Files listed below contain incomplete apt blocks"
-    echo "Please refer to this documentation: https://docs.google.com/document/d/1B31l4v6VV_3r_ePPiugI8I_D_oRsUKFVIMIjerV_KvM/edit#heading=h.lm0b49ccpi46"
-    echo
-    exit 1
-else
-    exit 0
-fi
-
diff --git a/.lint/ansible-lint.conf b/.lint/ansible-lint.conf
deleted file mode 100644
index 176d2f7b632a0c3afcb98cc2fb54975c9680da64..0000000000000000000000000000000000000000
--- a/.lint/ansible-lint.conf
+++ /dev/null
@@ -1,14 +0,0 @@
----
-
-exclude_paths:
-  - ansible/playbooks/_*.yml
-  - ansible/roles/_*/
-  - ansible/roles/elastic.elasticsearch/
-
-skip_list:
-  - '701'
-  - '403'
-  - '208' 
-  - '106' 
-
-...
diff --git a/.lint/flake8.conf b/.lint/flake8.conf
deleted file mode 100644
index c3b2ebce8eb299238cb33dbf4b7b2597b2c9cebf..0000000000000000000000000000000000000000
--- a/.lint/flake8.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-[flake8]
-
-ignore =
-    E501
-    E265
-    W503
-    W505
-
-per-file-ignores =
-    ansible/roles/elastic.elasticsearch/*:E713
diff --git a/.lint/yamllint.conf b/.lint/yamllint.conf
deleted file mode 100644
index d707c1d080d3743c2eda6c0e5279c7d1750a8f0c..0000000000000000000000000000000000000000
--- a/.lint/yamllint.conf
+++ /dev/null
@@ -1,29 +0,0 @@
----
-
-extends: default
-
-ignore: |
-  .venv/
-  ansible/roles/elastic.elasticsearch/
-
-rules:
-  braces:
-    min-spaces-inside-empty: 0
-    max-spaces-inside-empty: 0
-    min-spaces-inside: 1
-    max-spaces-inside: 1
-    level: error
-  brackets:
-    min-spaces-inside-empty: 0
-    max-spaces-inside-empty: 0
-    min-spaces-inside: 1
-    max-spaces-inside: 1
-    level: error
-  truthy:
-    level: error
-  line-length: disable
-  indentation:
-    spaces: consistent
-    indent-sequences: true
-    check-multi-line-strings: false
-...
diff --git a/Makefile b/Makefile
deleted file mode 100644
index d3e089d18a5db199625e7c1d2dbfc6fd5984e65d..0000000000000000000000000000000000000000
--- a/Makefile
+++ /dev/null
@@ -1,116 +0,0 @@
-SHELL := /bin/bash
-DOCKER_IMAGE_NAME := registry.ubicast.net/mediaserver/envsetup
-VENV := /tmp/pyvensetup
-ANSIBLE_CONFIG := ansible/ansible.cfg
-PIP_BIN = $(shell command -v $(VENV)/bin/pip3 || command -v pip3 || echo pip3)
-PIP_COMPILE_BIN = $(shell command -v $(VENV)/bin/pip-compile || command -v pip-compile)
-ANSIBLE_BIN = $(shell command -v ansible || command -v $(VENV)/bin/ansible)
-ANSIBLE_PLAYBOOK_BIN = $(shell command -v ansible-playbook || command -v $(VENV)/bin/ansible-playbook)
-ANSIBLE_LINT_BIN = $(shell command -v ansible-lint || command -v $(VENV)/bin/ansible-lint)
-ANSIBLE_GALAXY_BIN = $(shell command -v ansible-galaxy || command -v $(VENV)/bin/ansible-galaxy || echo ansible-galaxy)
-YAMLLINT_BIN = $(shell command -v yamllint || command -v $(VENV)/bin/yamllint)
-FLAKE8_BIN = $(shell command -v flake8 || command -v $(VENV)/bin/flake8)
-
-# molecule tests flags
-ifdef debug
-	MOLECULE_FLAGS += --debug
-endif
-ifdef keep
-	MOLECULE_TEST_FLAGS += --destroy=never
-endif
-ifdef pf-std
-	MOLECULE_TEST_FLAGS += --scenario-name pf-std
-endif
-ifdef pf-ha
-	MOLECULE_TEST_FLAGS += --scenario-name pf-ha
-endif
-ifdef pgsql-ha
-	MOLECULE_TEST_FLAGS += --scenario-name pgsql-ha
-endif
-
-.PHONY: all
-## TARGET: DESCRIPTION: ARGS
-all: help
-
-.PHONY: venv
-## venv: Install python3-venv and create a temporary virtualenv
-venv:
-	-@command -v apt-get >/dev/null && apt-get update && apt-get install -y python3-venv
-	python3 -m venv $(VENV)
-
-## ansible/requirements.txt: Update requirements and their dependencies
-## ansible/requirements.dev.txt: Update development requirements and their dependencies
-%.txt: %.in
-	$(PIP_COMPILE_BIN) -U $^ -o $@
-	chmod 644 $@
-
-.PHONY: install
-## install: Install requirements
-install: venv
-	$(PIP_BIN) install -U pip wheel
-	$(PIP_BIN) install -r ansible/requirements.txt
-
-.PHONY: install-galaxy
-install-galaxy:
-	ANSIBLE_CONFIG=$(ANSIBLE_CONFIG) $(ANSIBLE_GALAXY_BIN) install -r ansible/requirements.yml
-
-.PHONY: install-dev
-## install-dev: Install development requirements
-install-dev: install
-	$(PIP_BIN) install -r ansible/requirements.dev.txt
-	[ -d .git/hooks ] || mkdir .git/hooks
-	ln -sfv ../../.githooks/pre-commit .git/hooks/ || echo "Failed to create pre-commit link"
-
-.PHONY: lint
-## lint: Run linters on the project
-lint: 
-	$(FLAKE8_BIN) --config .lint/flake8.conf
-	$(YAMLLINT_BIN) --config-file .lint/yamllint.conf .
-	ANSIBLE_CONFIG=$(ANSIBLE_CONFIG) $(ANSIBLE_LINT_BIN) -c .lint/ansible-lint.conf ansible/playbooks/site.yml
-	.lint/ansible-apt-block-check.sh
-
-.PHONY: test
-## test: Run development tests on the project : SKYREACH_SYSTEM_KEY=<xxx>, debug=1, keep=1, pf-std=1, pf-ha=1, pgsql-ha=1
-test:
-	cd ansible; molecule $(MOLECULE_FLAGS) test $(MOLECULE_TEST_FLAGS)
-
-.PHONY: deploy
-## deploy: Run deployment playbooks : i=<inventory-path>, l=<host-or-group>, t=<tag>
-deploy:
-ifndef i
-	$(error i is undefined)
-endif
-ifndef l
-	$(eval l=all)
-endif
-ifndef t
-	$(eval t=all)
-endif
-	ANSIBLE_CONFIG=$(ANSIBLE_CONFIG) $(ANSIBLE_BIN) -i $(i) -l $(l) -m ping all
-	ANSIBLE_CONFIG=$(ANSIBLE_CONFIG) $(ANSIBLE_PLAYBOOK_BIN) -i $(i) ansible/playbooks/site.yml -e conf_update=true -l $(l) -t $(t)
-
-.PHONY: docker-build
-## docker-build: Run docker image build for CI
-docker-build: docker-pull
-	docker build -t $(DOCKER_IMAGE_NAME) -f .docker/Dockerfile .
-
-.PHONY: docker-rebuild
-## docker-rebuild: Force docker image rebuild
-docker-rebuild:
-	docker build --pull --no-cache -t $(DOCKER_IMAGE_NAME) -f .docker/Dockerfile .
-
-.PHONY: docker-pull
-## docker-pull: Pull Docker image from registry
-docker-pull:
-	-docker pull $(DOCKER_IMAGE_NAME)
-
-.PHONY: docker-push
-## docker-push: Push Docker image to registry
-docker-push:
-	docker push $(DOCKER_IMAGE_NAME)
-
-.PHONY: help
-## help: Print this help message
-help:
-	@echo -e "Usage: \n"
-	@sed -n 's/^##//p' ${MAKEFILE_LIST} | column -t -s ':' | sed -e 's/^/ /'
diff --git a/README.md b/README.md
deleted file mode 100644
index 61575590edf0130a8a1676b9de0513f2f4a09110..0000000000000000000000000000000000000000
--- a/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# EnvSetup
-
-## Usage
-
-To deploy UbiCast products:
-
-- [Install required tools](/doc/requirements.md)
-- [Configure hosts inventory](/doc/config.md)
-- [Deploy UbiCast softwares](/doc/deploy.md)
-
-To benchmark the solution:
-- [MediaServer Benchmark](/doc/bench.md)
-
-## Development
-
-To contribute:
-
-- [EnvSetup contributing guide](/doc/contrib.md)
-
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
deleted file mode 100644
index bf5567329f04d3bd48393a7aecb1d3c9c28e3f65..0000000000000000000000000000000000000000
--- a/ansible/ansible.cfg
+++ /dev/null
@@ -1,40 +0,0 @@
-[defaults]
-
-# logging
-log_path = ansible.log
-
-# use python3 by default
-interpreter_python = /usr/bin/python3
-
-# disable output for skipped hosts and tasks
-display_skipped_hosts = false
-# skip ssh host key checking
-host_key_checking = false
-# disable creation of *.retry files when playbook fails
-retry_files_enabled = false
-
-# connect as root on hosts
-remote_user = root
-
-# custom path for roles
-roles_path = roles
-# custom path for modules
-library = library
-# custom path for action plugins
-action_plugins = plugins/action
-
-# improve output format (with line return)
-stdout_callback = debug
-
-# ignore files directory
-inventory_ignore_patterns = files
-
-[ssh_connection]
-# enable pipelining to speed up ansible execution
-pipelining = True
-
-# add custom ssh options
-ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
-
-# use scp instead of sftp
-scp_if_ssh = true
diff --git a/ansible/inventories/example-ha/group_vars/all.yml b/ansible/inventories/example-ha/group_vars/all.yml
deleted file mode 100644
index a771f99c707c4c1ff9afd08f301e97adecb8b28a..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/group_vars/all.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-
-# customer name
-customer_short_name: customer
-
-# enable letsencrypt certificate
-letsencrypt_enabled: false
-
-# update conf.sh
-conf_update: false
-
-# repmgr configuration
-repmgr_password: my top secret repmgr default password
-repmgr_primary_node: "{{ hostvars['pg1']['ansible_default_ipv4']['address'] }}"
-
-# ha proxy configuration
-hap_config_listen:
-  - name: pgsql-primary
-    content: |2
-      bind localhost:54321
-      default-server inter 2s fall 3 rise 2 on-marked-down shutdown-sessions
-      option tcp-check
-      tcp-check expect string primary
-      maxconn 500
-      server pg1 192.168.122.1:5432 maxconn 500 check port 8543
-      server pg2 192.168.122.2:5432 maxconn 500 check port 8543 backup
-
-...
diff --git a/ansible/inventories/example-ha/group_vars/mediavault.yml b/ansible/inventories/example-ha/group_vars/mediavault.yml
deleted file mode 100644
index 8b1432adaf983602e7a56f14bbcf806eb729c295..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/group_vars/mediavault.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-
-# mediavault backup deployment
-# used by mediavault/add_backup.yml
-
-#mvt_base_dir: /backup
-#mvt_backups:
-#  - name: self-etc
-#    source: "/etc"
-#    dest: "{{ mvt_base_dir }}/self-etc"
-#  - name: data
-#    source: /data
-#    dest: "{{ mvt_base_dir }}/data"
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/cs1.yml b/ansible/inventories/example-ha/host_vars/cs1.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/cs1.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/mi1.yml b/ansible/inventories/example-ha/host_vars/mi1.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/mi1.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/mm1.yml b/ansible/inventories/example-ha/host_vars/mm1.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/mm1.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/mo1.yml b/ansible/inventories/example-ha/host_vars/mo1.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/mo1.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/ms1.yml b/ansible/inventories/example-ha/host_vars/ms1.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/ms1.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/ms2.yml b/ansible/inventories/example-ha/host_vars/ms2.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/ms2.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/mv1.yml b/ansible/inventories/example-ha/host_vars/mv1.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/mv1.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/mw1.yml b/ansible/inventories/example-ha/host_vars/mw1.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/mw1.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/mw2.yml b/ansible/inventories/example-ha/host_vars/mw2.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/mw2.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/pg1.yml b/ansible/inventories/example-ha/host_vars/pg1.yml
deleted file mode 100644
index 99e0f90874b1e01a9f179d19aa668c6f2bb07a9b..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/pg1.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-db_role: primary
-repmgr_node_id: 1
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/pg2.yml b/ansible/inventories/example-ha/host_vars/pg2.yml
deleted file mode 100644
index 240d32eaf861708cf8e7b7c3eafa030b8a5e3033..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/pg2.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-db_role: standby
-repmgr_node_id: 2
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/pg3.yml b/ansible/inventories/example-ha/host_vars/pg3.yml
deleted file mode 100644
index f703de213c54ce12c2b76e8be6b03b926e19aeee..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/pg3.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-db_role: witness
-repmgr_node_id: 3
-
-...
diff --git a/ansible/inventories/example-ha/host_vars/ws1.yml b/ansible/inventories/example-ha/host_vars/ws1.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/host_vars/ws1.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example-ha/hosts b/ansible/inventories/example-ha/hosts
deleted file mode 100644
index fafead6a6b726b0ef55a2ee43bc4636f8514e67e..0000000000000000000000000000000000000000
--- a/ansible/inventories/example-ha/hosts
+++ /dev/null
@@ -1,65 +0,0 @@
-; For hosts parameters see:
-; https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#connecting-to-hosts-behavioral-inventory-parameters
-
-; hosts list
-
-ms1 ansible_host=10.0.0.1
-ms2 ansible_host=10.0.0.2
-mw1 ansible_host=10.0.0.3
-mw2 ansible_host=10.0.0.4
-pg1 ansible_host=192.168.122.1
-pg2 ansible_host=192.168.122.2
-pg3 ansible_host=192.168.122.3
-
-; groups list and their members
-
-[postgres]
-pg1
-pg2
-pg3
-
-[mirismanager]
-ms1
-
-[mediaserver]
-ms1
-ms2
-
-[live]
-ms1
-ms2
-
-[celerity]
-ms1
-
-[mediaworker]
-mw1
-mw2
-
-[mediaimport]
-ms1
-
-[msmonitor]
-ms1
-
-[munin_server]
-ms1
-
-[munin_node]
-ms1
-ms2
-mw1
-mw2
-pg1
-pg2
-pg3
-
-[mediavault]
-
-[netcapture]
-
-[bench_server]
-
-[bench_worker]
-
-; vim:ft=dosini
diff --git a/ansible/inventories/example/group_vars/all.yml b/ansible/inventories/example/group_vars/all.yml
deleted file mode 100644
index fd64295010da14e414f90e362424e61cd2dba0de..0000000000000000000000000000000000000000
--- a/ansible/inventories/example/group_vars/all.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-
-# customer name
-customer_short_name: customer
-
-# enable letsencrypt certificate
-letsencrypt_enabled: false
-
-# update conf.sh
-conf_update: false
-
-...
diff --git a/ansible/inventories/example/group_vars/mediavault.yml b/ansible/inventories/example/group_vars/mediavault.yml
deleted file mode 100644
index 8b1432adaf983602e7a56f14bbcf806eb729c295..0000000000000000000000000000000000000000
--- a/ansible/inventories/example/group_vars/mediavault.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-
-# mediavault backup deployment
-# used by mediavault/add_backup.yml
-
-#mvt_base_dir: /backup
-#mvt_backups:
-#  - name: self-etc
-#    source: "/etc"
-#    dest: "{{ mvt_base_dir }}/self-etc"
-#  - name: data
-#    source: /data
-#    dest: "{{ mvt_base_dir }}/data"
-
-...
diff --git a/ansible/inventories/example/host_vars/mymediaserver.yml b/ansible/inventories/example/host_vars/mymediaserver.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example/host_vars/mymediaserver.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example/host_vars/mymediavault.yml b/ansible/inventories/example/host_vars/mymediavault.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example/host_vars/mymediavault.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example/host_vars/mymediaworker.yml b/ansible/inventories/example/host_vars/mymediaworker.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example/host_vars/mymediaworker.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example/host_vars/mynetcapture.yml b/ansible/inventories/example/host_vars/mynetcapture.yml
deleted file mode 100644
index 607026068ab1e5851f9a163629fae52ad5a53787..0000000000000000000000000000000000000000
--- a/ansible/inventories/example/host_vars/mynetcapture.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-skyreach_system_key: changeme
-
-...
diff --git a/ansible/inventories/example/hosts b/ansible/inventories/example/hosts
deleted file mode 100644
index 929a7b322b6f712164b21aff258a4efdb0c5e254..0000000000000000000000000000000000000000
--- a/ansible/inventories/example/hosts
+++ /dev/null
@@ -1,50 +0,0 @@
-; For hosts parameters see:
-; https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#connecting-to-hosts-behavioral-inventory-parameters
-
-; hosts list
-
-mymediaserver ansible_host=10.0.0.1
-mymediaworker ansible_host=10.0.0.2
-
-; groups list and their members
-
-[postgres]
-mymediaserver
-
-[mirismanager]
-mymediaserver
-
-[mediaserver]
-mymediaserver
-
-[live]
-mymediaserver
-
-[celerity]
-mymediaserver
-
-[mediaworker]
-mymediaworker
-
-[mediaimport]
-mymediaserver
-
-[msmonitor]
-mymediaserver
-
-[munin_server]
-mymediaserver
-
-[munin_node]
-mymediaserver
-mymediaworker
-
-[mediavault]
-
-[netcapture]
-
-[bench_server]
-
-[bench_worker]
-
-; vim:ft=dosini
diff --git a/ansible/inventories/local-full/host_vars/localhost.dist.yml b/ansible/inventories/local-full/host_vars/localhost.dist.yml
deleted file mode 100644
index a2fceb242fae619a3f8b0b4ce40a8360e568eaab..0000000000000000000000000000000000000000
--- a/ansible/inventories/local-full/host_vars/localhost.dist.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-
-# customer name
-customer_short_name: customer
-
-# enable letsencrypt certificate
-letsencrypt_enabled: false
-
-# auto update conf.sh
-conf_update: false
-
-# activation keys
-skyreach_system_key:
-skyreach_activation_key:
-
-...
diff --git a/ansible/inventories/local-full/hosts b/ansible/inventories/local-full/hosts
deleted file mode 100644
index 2eadb1159ecf06213c3f3b3c015ef36fc3b479dc..0000000000000000000000000000000000000000
--- a/ansible/inventories/local-full/hosts
+++ /dev/null
@@ -1,33 +0,0 @@
-localhost ansible_connection=local
-
-[postgres]
-localhost
-
-[mirismanager]
-localhost
-
-[mediaserver]
-localhost
-
-[live]
-localhost
-
-[celerity]
-localhost
-
-[mediaworker]
-localhost
-
-[mediaimport]
-localhost
-
-[msmonitor]
-localhost
-
-[munin_server]
-localhost
-
-[munin_node]
-localhost
-
-; vim:ft=dosini
diff --git a/ansible/inventories/local-mediaimport/host_vars/localhost.dist.yml b/ansible/inventories/local-mediaimport/host_vars/localhost.dist.yml
deleted file mode 100644
index acc1ed0a4725fb9b4d82ffc6bb48471499d52646..0000000000000000000000000000000000000000
--- a/ansible/inventories/local-mediaimport/host_vars/localhost.dist.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-# activation keys
-skyreach_system_key:
-skyreach_activation_key:
-
-...
diff --git a/ansible/inventories/local-mediaimport/hosts b/ansible/inventories/local-mediaimport/hosts
deleted file mode 100644
index 91de9635c10c8181b5e109ccefefdc12b82c7ad1..0000000000000000000000000000000000000000
--- a/ansible/inventories/local-mediaimport/hosts
+++ /dev/null
@@ -1,6 +0,0 @@
-localhost ansible_connection=local
-
-[mediaimport]
-localhost
-
-; vim:ft=dosini
diff --git a/ansible/inventories/local-mediaserver/host_vars/localhost.dist.yml b/ansible/inventories/local-mediaserver/host_vars/localhost.dist.yml
deleted file mode 100644
index a2fceb242fae619a3f8b0b4ce40a8360e568eaab..0000000000000000000000000000000000000000
--- a/ansible/inventories/local-mediaserver/host_vars/localhost.dist.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-
-# customer name
-customer_short_name: customer
-
-# enable letsencrypt certificate
-letsencrypt_enabled: false
-
-# auto update conf.sh
-conf_update: false
-
-# activation keys
-skyreach_system_key:
-skyreach_activation_key:
-
-...
diff --git a/ansible/inventories/local-mediaserver/hosts b/ansible/inventories/local-mediaserver/hosts
deleted file mode 100644
index a04974401f23b5b313f88bcb4f97b537208746d3..0000000000000000000000000000000000000000
--- a/ansible/inventories/local-mediaserver/hosts
+++ /dev/null
@@ -1,30 +0,0 @@
-localhost ansible_connection=local
-
-[postgres]
-localhost
-
-[mirismanager]
-localhost
-
-[mediaserver]
-localhost
-
-[live]
-localhost
-
-[celerity]
-localhost
-
-[mediaimport]
-localhost
-
-[msmonitor]
-localhost
-
-[munin_server]
-localhost
-
-[munin_node]
-localhost
-
-; vim:ft=dosini
diff --git a/ansible/inventories/local-mediavault/host_vars/localhost.dist.yml b/ansible/inventories/local-mediavault/host_vars/localhost.dist.yml
deleted file mode 100644
index d3e5920e6d785a0b78e30060d02dca673653439c..0000000000000000000000000000000000000000
--- a/ansible/inventories/local-mediavault/host_vars/localhost.dist.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-skyreach_system_key:
-skyreach_activation_key:
-
-...
diff --git a/ansible/inventories/local-mediavault/hosts b/ansible/inventories/local-mediavault/hosts
deleted file mode 100644
index 6dfe3095fa42adfea48681a0ff68a008aeba0bad..0000000000000000000000000000000000000000
--- a/ansible/inventories/local-mediavault/hosts
+++ /dev/null
@@ -1,6 +0,0 @@
-localhost ansible_connection=local
-
-[mediavault]
-localhost
-
-; vim:ft=dosini
diff --git a/ansible/inventories/local-mediaworker/host_vars/localhost.dist.yml b/ansible/inventories/local-mediaworker/host_vars/localhost.dist.yml
deleted file mode 100644
index d3e5920e6d785a0b78e30060d02dca673653439c..0000000000000000000000000000000000000000
--- a/ansible/inventories/local-mediaworker/host_vars/localhost.dist.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-skyreach_system_key:
-skyreach_activation_key:
-
-...
diff --git a/ansible/inventories/local-mediaworker/hosts b/ansible/inventories/local-mediaworker/hosts
deleted file mode 100644
index 4b3a22ad384791913472451b37df48fcacba6d7b..0000000000000000000000000000000000000000
--- a/ansible/inventories/local-mediaworker/hosts
+++ /dev/null
@@ -1,6 +0,0 @@
-localhost ansible_connection=local
-
-[mediaworker]
-localhost
-
-; vim:ft=dosini
diff --git a/ansible/inventories/offline-mediaserver/host_vars/localhost.dist.yml b/ansible/inventories/offline-mediaserver/host_vars/localhost.dist.yml
deleted file mode 100644
index 8e7d14d73903a6cebbef5b274a5ee2233e5f8c9d..0000000000000000000000000000000000000000
--- a/ansible/inventories/offline-mediaserver/host_vars/localhost.dist.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-
-# customer name
-customer_short_name: customer
-
-# install in offline environment
-offline_mode: true
-
-...
diff --git a/ansible/inventories/offline-mediaserver/hosts b/ansible/inventories/offline-mediaserver/hosts
deleted file mode 100644
index bfbca5e0ca8cce7cd3947dddda4bc0b5e4d1c68e..0000000000000000000000000000000000000000
--- a/ansible/inventories/offline-mediaserver/hosts
+++ /dev/null
@@ -1,31 +0,0 @@
-localhost ansible_connection=local
-
-[postgres]
-localhost
-
-[mirismanager]
-localhost
-
-[mediaserver]
-localhost
-
-;[live]
-;localhost
-
-[celerity]
-localhost
-
-[mediaimport]
-localhost
-
-[msmonitor]
-localhost
-
-[munin_server]
-localhost
-
-[munin_node]
-localhost
-
-
-; vim:ft=dosini
diff --git a/ansible/inventories/offline-mediaworker/host_vars/localhost.dist.yml b/ansible/inventories/offline-mediaworker/host_vars/localhost.dist.yml
deleted file mode 100644
index f54433c0b32d94b04392fec656542b6fe02446d6..0000000000000000000000000000000000000000
--- a/ansible/inventories/offline-mediaworker/host_vars/localhost.dist.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-# install in offline environment
-offline_mode: true
-
-...
diff --git a/ansible/inventories/offline-mediaworker/hosts b/ansible/inventories/offline-mediaworker/hosts
deleted file mode 100644
index 4b3a22ad384791913472451b37df48fcacba6d7b..0000000000000000000000000000000000000000
--- a/ansible/inventories/offline-mediaworker/hosts
+++ /dev/null
@@ -1,6 +0,0 @@
-localhost ansible_connection=local
-
-[mediaworker]
-localhost
-
-; vim:ft=dosini
diff --git a/ansible/library/nmcli.py b/ansible/library/nmcli.py
deleted file mode 100644
index 01f285ac45250f44fbe2713f95ab6c0515dcadb4..0000000000000000000000000000000000000000
--- a/ansible/library/nmcli.py
+++ /dev/null
@@ -1,1572 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Chris Long <alcamie@gmail.com> <chlong@redhat.com>
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-import traceback
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
-    'metadata_version': '1.1',
-    'status': ['preview'],
-    'supported_by': 'community'
-}
-
-DOCUMENTATION = '''
----
-module: nmcli
-author:
-- Chris Long (@alcamie101)
-short_description: Manage Networking
-requirements:
-- dbus
-- NetworkManager-libnm (or NetworkManager-glib on older systems)
-- nmcli
-version_added: "2.0"
-description:
-    - Manage the network devices. Create, modify, and manage various connection and device types, e.g. ethernet, team, bond, vlan, etc.
-    - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager-libnm,
-      libsemanage-python, policycoreutils-python.'
-    - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-glib,
-      libnm-qt-devel.x86_64, nm-connection-editor.x86_64, libsemanage-python, policycoreutils-python.'
-    - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager,
-      python-dbus (or python3-dbus, depending on the Python version in use), libnm-dev.'
-    - 'On older Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager,
-      python-dbus (or python3-dbus, depending on the Python version in use), libnm-glib-dev.'
-options:
-    state:
-        description:
-            - Whether the device should exist or not, taking action if the state is different from what is stated.
-        type: str
-        required: true
-        choices: [ absent, present ]
-    autoconnect:
-        description:
-            - Whether the connection should start on boot.
-            - Whether the connection profile can be automatically activated
-        type: bool
-        default: yes
-    activate:
-        description:
-            - Whether the connection should be activated.
-        type: bool
-        default: yes
-    conn_name:
-        description:
-            - 'Where conn_name will be the name used to call the connection. When not provided, a default name is generated: <type>[-<ifname>][-<num>]'
-        type: str
-        required: true
-    ifname:
-        description:
-            - The interface to bind the connection to.
-            - The connection will only be applicable to this interface name.
-            - A special value of C('*') can be used for interface-independent connections.
-            - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
-            - This parameter defaults to C(conn_name) when left unset.
-        type: str
-    type:
-        description:
-            - This is the type of device or network connection that you wish to create or modify.
-            - Type C(generic) is added in Ansible 2.5.
-        type: str
-        choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, ipip, sit, team, team-slave, vlan, vxlan ]
-    mode:
-        description:
-            - This is the type of device or network connection that you wish to create for a bond, team or bridge.
-        type: str
-        choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ]
-        default: balance-rr
-    master:
-        description:
-            - Master (ifname, connection UUID or conn_name) of the bridge, team or bond master connection profile.
-        type: str
-    ip4:
-        description:
-            - The IPv4 address of this interface.
-            - Use the format C(192.0.2.24/24).
-        type: str
-    gw4:
-        description:
-            - The IPv4 gateway for this interface.
-            - Use the format C(192.0.2.1).
-        type: str
-    dns4:
-        description:
-            - A list of up to 3 dns servers.
-            - IPv4 format e.g. to add two IPv4 DNS server addresses, use C(192.0.2.53 198.51.100.53).
-        type: list
-    dns4_search:
-        description:
-            - A list of DNS search domains.
-        type: list
-        version_added: '2.5'
-    ip6:
-        description:
-            - The IPv6 address of this interface.
-            - Use the format C(abbe::cafe).
-        type: str
-    gw6:
-        description:
-            - The IPv6 gateway for this interface.
-            - Use the format C(2001:db8::1).
-        type: str
-    dns6:
-        description:
-            - A list of up to 3 dns servers.
-            - IPv6 format e.g. to add two IPv6 DNS server addresses, use C(2001:4860:4860::8888 2001:4860:4860::8844).
-        type: list
-    dns6_search:
-        description:
-            - A list of DNS search domains.
-        type: list
-        version_added: '2.5'
-    mtu:
-        description:
-            - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
-            - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
-            - This parameter defaults to C(1500) when unset.
-        type: int
-    dhcp_client_id:
-        description:
-            - DHCP Client Identifier sent to the DHCP server.
-        type: str
-        version_added: "2.5"
-    primary:
-        description:
-            - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
-        type: str
-    miimon:
-        description:
-            - This is only used with bond - miimon.
-            - This parameter defaults to C(100) when unset.
-        type: int
-    downdelay:
-        description:
-            - This is only used with bond - downdelay.
-        type: int
-    updelay:
-        description:
-            - This is only used with bond - updelay.
-        type: int
-    arp_interval:
-        description:
-            - This is only used with bond - ARP interval.
-        type: int
-    arp_ip_target:
-        description:
-            - This is only used with bond - ARP IP target.
-        type: str
-    stp:
-        description:
-            - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge.
-        type: bool
-        default: yes
-    priority:
-        description:
-            - This is only used with 'bridge' - sets STP priority.
-        type: int
-        default: 128
-    forwarddelay:
-        description:
-            - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds.
-        type: int
-        default: 15
-    hellotime:
-        description:
-            - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds.
-        type: int
-        default: 2
-    maxage:
-        description:
-            - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds.
-        type: int
-        default: 20
-    ageingtime:
-        description:
-            - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds.
-        type: int
-        default: 300
-    mac:
-        description:
-            - This is only used with bridge - MAC address of the bridge.
-            - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel.
-    slavepriority:
-        description:
-            - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave.
-        type: int
-        default: 32
-    path_cost:
-        description:
-            - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave.
-        type: int
-        default: 100
-    hairpin:
-        description:
-            - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
-              frame was received on.
-        type: bool
-        default: yes
-    vlanid:
-        description:
-            - This is only used with VLAN - VLAN ID in range <0-4095>.
-        type: int
-    vlandev:
-        description:
-            - This is only used with VLAN - parent device this VLAN is on, can use ifname.
-        type: str
-    flags:
-        description:
-            - This is only used with VLAN - flags.
-        type: str
-    ingress:
-        description:
-            - This is only used with VLAN - VLAN ingress priority mapping.
-        type: str
-    egress:
-        description:
-            - This is only used with VLAN - VLAN egress priority mapping.
-        type: str
-    vxlan_id:
-        description:
-            - This is only used with VXLAN - VXLAN ID.
-        type: int
-        version_added: "2.8"
-    vxlan_remote:
-       description:
-            - This is only used with VXLAN - VXLAN destination IP address.
-       type: str
-       version_added: "2.8"
-    vxlan_local:
-       description:
-            - This is only used with VXLAN - VXLAN local IP address.
-       type: str
-       version_added: "2.8"
-    ip_tunnel_dev:
-        description:
-            - This is used with IPIP/SIT - parent device of this IPIP/SIT tunnel, can use ifname.
-        type: str
-        version_added: "2.8"
-    ip_tunnel_remote:
-       description:
-            - This is used with IPIP/SIT - IPIP/SIT destination IP address.
-       type: str
-       version_added: "2.8"
-    ip_tunnel_local:
-       description:
-            - This is used with IPIP/SIT - IPIP/SIT local IP address.
-       type: str
-       version_added: "2.8"
-'''
-
-EXAMPLES = '''
-# These examples are using the following inventory:
-#
-# ## Directory layout:
-#
-# |_/inventory/cloud-hosts
-# |           /group_vars/openstack-stage.yml
-# |           /host_vars/controller-01.openstack.host.com
-# |           /host_vars/controller-02.openstack.host.com
-# |_/playbook/library/nmcli.py
-# |          /playbook-add.yml
-# |          /playbook-del.yml
-# ```
-#
-# ## inventory examples
-# ### groups_vars
-# ```yml
-# ---
-# #devops_os_define_network
-# storage_gw: "192.0.2.254"
-# external_gw: "198.51.100.254"
-# tenant_gw: "203.0.113.254"
-#
-# #Team vars
-# nmcli_team:
-#   - conn_name: tenant
-#     ip4: '{{ tenant_ip }}'
-#     gw4: '{{ tenant_gw }}'
-#   - conn_name: external
-#     ip4: '{{ external_ip }}'
-#     gw4: '{{ external_gw }}'
-#   - conn_name: storage
-#     ip4: '{{ storage_ip }}'
-#     gw4: '{{ storage_gw }}'
-# nmcli_team_slave:
-#   - conn_name: em1
-#     ifname: em1
-#     master: tenant
-#   - conn_name: em2
-#     ifname: em2
-#     master: tenant
-#   - conn_name: p2p1
-#     ifname: p2p1
-#     master: storage
-#   - conn_name: p2p2
-#     ifname: p2p2
-#     master: external
-#
-# #bond vars
-# nmcli_bond:
-#   - conn_name: tenant
-#     ip4: '{{ tenant_ip }}'
-#     gw4: ''
-#     mode: balance-rr
-#   - conn_name: external
-#     ip4: '{{ external_ip }}'
-#     gw4: ''
-#     mode: balance-rr
-#   - conn_name: storage
-#     ip4: '{{ storage_ip }}'
-#     gw4: '{{ storage_gw }}'
-#     mode: balance-rr
-# nmcli_bond_slave:
-#   - conn_name: em1
-#     ifname: em1
-#     master: tenant
-#   - conn_name: em2
-#     ifname: em2
-#     master: tenant
-#   - conn_name: p2p1
-#     ifname: p2p1
-#     master: storage
-#   - conn_name: p2p2
-#     ifname: p2p2
-#     master: external
-#
-# #ethernet vars
-# nmcli_ethernet:
-#   - conn_name: em1
-#     ifname: em1
-#     ip4: '{{ tenant_ip }}'
-#     gw4: '{{ tenant_gw }}'
-#   - conn_name: em2
-#     ifname: em2
-#     ip4: '{{ tenant_ip1 }}'
-#     gw4: '{{ tenant_gw }}'
-#   - conn_name: p2p1
-#     ifname: p2p1
-#     ip4: '{{ storage_ip }}'
-#     gw4: '{{ storage_gw }}'
-#   - conn_name: p2p2
-#     ifname: p2p2
-#     ip4: '{{ external_ip }}'
-#     gw4: '{{ external_gw }}'
-# ```
-#
-# ### host_vars
-# ```yml
-# ---
-# storage_ip: "192.0.2.91/23"
-# external_ip: "198.51.100.23/21"
-# tenant_ip: "203.0.113.77/23"
-# ```
-
-
-
-## playbook-add.yml example
-
----
-- hosts: openstack-stage
-  remote_user: root
-  tasks:
-
-  - name: install needed network manager libs
-    package:
-      name:
-        - NetworkManager-libnm
-        - nm-connection-editor
-        - libsemanage-python
-        - policycoreutils-python
-      state: present
-
-##### Working with all cloud nodes - Teaming
-  - name: Try nmcli add team - conn_name only & ip4 gw4
-    nmcli:
-      type: team
-      conn_name: '{{ item.conn_name }}'
-      ip4: '{{ item.ip4 }}'
-      gw4: '{{ item.gw4 }}'
-      state: present
-    with_items:
-      - '{{ nmcli_team }}'
-
-  - name: Try nmcli add teams-slave
-    nmcli:
-      type: team-slave
-      conn_name: '{{ item.conn_name }}'
-      ifname: '{{ item.ifname }}'
-      master: '{{ item.master }}'
-      state: present
-    with_items:
-      - '{{ nmcli_team_slave }}'
-
-###### Working with all cloud nodes - Bonding
-  - name: Try nmcli add bond - conn_name only & ip4 gw4 mode
-    nmcli:
-      type: bond
-      conn_name: '{{ item.conn_name }}'
-      ip4: '{{ item.ip4 }}'
-      gw4: '{{ item.gw4 }}'
-      mode: '{{ item.mode }}'
-      state: present
-    with_items:
-      - '{{ nmcli_bond }}'
-
-  - name: Try nmcli add bond-slave
-    nmcli:
-      type: bond-slave
-      conn_name: '{{ item.conn_name }}'
-      ifname: '{{ item.ifname }}'
-      master: '{{ item.master }}'
-      state: present
-    with_items:
-      - '{{ nmcli_bond_slave }}'
-
-##### Working with all cloud nodes - Ethernet
-  - name: Try nmcli add Ethernet - conn_name only & ip4 gw4
-    nmcli:
-      type: ethernet
-      conn_name: '{{ item.conn_name }}'
-      ip4: '{{ item.ip4 }}'
-      gw4: '{{ item.gw4 }}'
-      state: present
-    with_items:
-      - '{{ nmcli_ethernet }}'
-
-## playbook-del.yml example
-- hosts: openstack-stage
-  remote_user: root
-  tasks:
-
-  - name: Try nmcli del team - multiple
-    nmcli:
-      conn_name: '{{ item.conn_name }}'
-      state: absent
-    with_items:
-      - conn_name: em1
-      - conn_name: em2
-      - conn_name: p1p1
-      - conn_name: p1p2
-      - conn_name: p2p1
-      - conn_name: p2p2
-      - conn_name: tenant
-      - conn_name: storage
-      - conn_name: external
-      - conn_name: team-em1
-      - conn_name: team-em2
-      - conn_name: team-p1p1
-      - conn_name: team-p1p2
-      - conn_name: team-p2p1
-      - conn_name: team-p2p2
-
-  - name: Add an Ethernet connection with static IP configuration
-    nmcli:
-      conn_name: my-eth1
-      ifname: eth1
-      type: ethernet
-      ip4: 192.0.2.100/24
-      gw4: 192.0.2.1
-      state: present
-
-  - name: Add a Team connection with static IP configuration
-    nmcli:
-      conn_name: my-team1
-      ifname: my-team1
-      type: team
-      ip4: 192.0.2.100/24
-      gw4: 192.0.2.1
-      state: present
-      autoconnect: yes
-
-  - name: Optionally, at the same time specify IPv6 addresses for the device
-    nmcli:
-      conn_name: my-eth1
-      ifname: eth1
-      type: ethernet
-      ip4: 192.0.2.100/24
-      gw4: 192.0.2.1
-      ip6: 2001:db8::cafe
-      gw6: 2001:db8::1
-      state: present
-
-  - name: Add two IPv4 DNS server addresses
-    nmcli:
-      conn_name: my-eth1
-      type: ethernet
-      dns4:
-      - 192.0.2.53
-      - 198.51.100.53
-      state: present
-
-  - name: Make a profile usable for all compatible Ethernet interfaces
-    nmcli:
-      type: ethernet
-      conn_name: my-eth1
-      ifname: '*'
-      state: present
-
-  - name: Change the property of a setting e.g. MTU
-    nmcli:
-      conn_name: my-eth1
-      mtu: 9000
-      type: ethernet
-      state: present
-
-  - name: Add VxLan
-    nmcli:
-      type: vxlan
-      conn_name: vxlan_test1
-      vxlan_id: 16
-      vxlan_local: 192.168.1.2
-      vxlan_remote: 192.168.1.5
-
-  - name: Add ipip
-    nmcli:
-      type: ipip
-      conn_name: ipip_test1
-      ip_tunnel_dev: eth0
-      ip_tunnel_local: 192.168.1.2
-      ip_tunnel_remote: 192.168.1.5
-
-  - name: Add sit
-    nmcli:
-      type: sit
-      conn_name: sit_test1
-      ip_tunnel_dev: eth0
-      ip_tunnel_local: 192.168.1.2
-      ip_tunnel_remote: 192.168.1.5
-
-# nmcli exits with status 0 if it succeeds and exits with a status greater
-# than zero when there is a failure. The following list of status codes may be
-# returned:
-#
-#     - 0 Success - indicates the operation succeeded
-#     - 1 Unknown or unspecified error
-#     - 2 Invalid user input, wrong nmcli invocation
-#     - 3 Timeout expired (see --wait option)
-#     - 4 Connection activation failed
-#     - 5 Connection deactivation failed
-#     - 6 Disconnecting device failed
-#     - 7 Connection deletion failed
-#     - 8 NetworkManager is not running
-#     - 9 nmcli and NetworkManager versions mismatch
-#     - 10 Connection, device, or access point does not exist.
-'''
-
-RETURN = r"""#
-"""
-
-DBUS_IMP_ERR = None
-try:
-    import dbus
-    HAVE_DBUS = True
-except ImportError:
-    DBUS_IMP_ERR = traceback.format_exc()
-    HAVE_DBUS = False
-
-NM_CLIENT_IMP_ERR = None
-HAVE_NM_CLIENT = True
-try:
-    import gi
-    gi.require_version('NM', '1.0')
-    # from gi.repository import NM
-except (ImportError, ValueError):
-    try:
-        import gi
-        gi.require_version('NMClient', '1.0')
-        gi.require_version('NetworkManager', '1.0')
-        # from gi.repository import NetworkManager, NMClient
-    except (ImportError, ValueError):
-        NM_CLIENT_IMP_ERR = traceback.format_exc()
-        HAVE_NM_CLIENT = False
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib  # noqa: E402
-from ansible.module_utils._text import to_native  # noqa: E402
-
-
-class Nmcli(object):
-    """
-    This is the generic nmcli manipulation class that is subclassed based on platform.
-    A subclass may wish to override the following action methods:-
-            - create_connection()
-            - delete_connection()
-            - modify_connection()
-            - show_connection()
-            - up_connection()
-            - down_connection()
-    All subclasses MUST define platform and distribution (which may be None).
-    """
-
-    platform = 'Generic'
-    distribution = None
-    if HAVE_DBUS:
-        bus = dbus.SystemBus()
-    # The following is going to be used in dbus code
-    DEVTYPES = {
-        1: "Ethernet",
-        2: "Wi-Fi",
-        5: "Bluetooth",
-        6: "OLPC",
-        7: "WiMAX",
-        8: "Modem",
-        9: "InfiniBand",
-        10: "Bond",
-        11: "VLAN",
-        12: "ADSL",
-        13: "Bridge",
-        14: "Generic",
-        15: "Team",
-        16: "VxLan",
-        17: "ipip",
-        18: "sit",
-    }
-    STATES = {
-        0: "Unknown",
-        10: "Unmanaged",
-        20: "Unavailable",
-        30: "Disconnected",
-        40: "Prepare",
-        50: "Config",
-        60: "Need Auth",
-        70: "IP Config",
-        80: "IP Check",
-        90: "Secondaries",
-        100: "Activated",
-        110: "Deactivating",
-        120: "Failed"
-    }
-
-    def __init__(self, module):
-        self.module = module
-        self.state = module.params['state']
-        self.autoconnect = module.params['autoconnect']
-        self.activate = module.params['activate']
-        self.conn_name = module.params['conn_name']
-        self.master = module.params['master']
-        self.ifname = module.params['ifname']
-        self.type = module.params['type']
-        self.ip4 = module.params['ip4']
-        self.gw4 = module.params['gw4']
-        self.dns4 = ' '.join(module.params['dns4']) if module.params.get('dns4') else None
-        self.dns4_search = ' '.join(module.params['dns4_search']) if module.params.get('dns4_search') else None
-        self.ip6 = module.params['ip6']
-        self.gw6 = module.params['gw6']
-        self.dns6 = ' '.join(module.params['dns6']) if module.params.get('dns6') else None
-        self.dns6_search = ' '.join(module.params['dns6_search']) if module.params.get('dns6_search') else None
-        self.mtu = module.params['mtu']
-        self.stp = module.params['stp']
-        self.priority = module.params['priority']
-        self.mode = module.params['mode']
-        self.miimon = module.params['miimon']
-        self.primary = module.params['primary']
-        self.downdelay = module.params['downdelay']
-        self.updelay = module.params['updelay']
-        self.arp_interval = module.params['arp_interval']
-        self.arp_ip_target = module.params['arp_ip_target']
-        self.slavepriority = module.params['slavepriority']
-        self.forwarddelay = module.params['forwarddelay']
-        self.hellotime = module.params['hellotime']
-        self.maxage = module.params['maxage']
-        self.ageingtime = module.params['ageingtime']
-        self.hairpin = module.params['hairpin']
-        self.path_cost = module.params['path_cost']
-        self.mac = module.params['mac']
-        self.vlanid = module.params['vlanid']
-        self.vlandev = module.params['vlandev']
-        self.flags = module.params['flags']
-        self.ingress = module.params['ingress']
-        self.egress = module.params['egress']
-        self.vxlan_id = module.params['vxlan_id']
-        self.vxlan_local = module.params['vxlan_local']
-        self.vxlan_remote = module.params['vxlan_remote']
-        self.ip_tunnel_dev = module.params['ip_tunnel_dev']
-        self.ip_tunnel_local = module.params['ip_tunnel_local']
-        self.ip_tunnel_remote = module.params['ip_tunnel_remote']
-        self.nmcli_bin = self.module.get_bin_path('nmcli', True)
-        self.dhcp_client_id = module.params['dhcp_client_id']
-
-    def execute_command(self, cmd, use_unsafe_shell=False, data=None):
-        return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
-
-    def merge_secrets(self, proxy, config, setting_name):
-        try:
-            # returns a dict of dicts mapping name::setting, where setting is a dict
-            # mapping key::value.  Each member of the 'setting' dict is a secret
-            secrets = proxy.GetSecrets(setting_name)
-
-            # Copy the secrets into our connection config
-            for setting in secrets:
-                for key in secrets[setting]:
-                    config[setting_name][key] = secrets[setting][key]
-        except Exception:
-            pass
-
-    def dict_to_string(self, d):
-        # Try to trivially translate a dictionary's elements into nice string
-        # formatting.
-        dstr = ""
-        for key in d:
-            val = d[key]
-            str_val = ""
-            add_string = True
-            if isinstance(val, dbus.Array):
-                for elt in val:
-                    if isinstance(elt, dbus.Byte):
-                        str_val += "%s " % int(elt)
-                    elif isinstance(elt, dbus.String):
-                        str_val += "%s" % elt
-            elif isinstance(val, dbus.Dictionary):
-                dstr += self.dict_to_string(val)
-                add_string = False
-            else:
-                str_val = val
-            if add_string:
-                dstr += "%s: %s\n" % (key, str_val)
-        return dstr
-
-    def connection_to_string(self, config):
-        # dump a connection configuration to use in list_connection_info
-        setting_list = []
-        for setting_name in config:
-            setting_list.append(self.dict_to_string(config[setting_name]))
-        return setting_list
-
-    @staticmethod
-    def bool_to_string(boolean):
-        if boolean:
-            return "yes"
-        else:
-            return "no"
-
-    def list_connection_info(self):
-        # Ask the settings service for the list of connections it provides
-        bus = dbus.SystemBus()
-
-        service_name = "org.freedesktop.NetworkManager"
-        settings = None
-        try:
-            proxy = bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings")
-            settings = dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings")
-        except dbus.exceptions.DBusException as e:
-            self.module.fail_json(msg="Unable to read Network Manager settings from DBus system bus: %s" % to_native(e),
-                                  details="Please check if NetworkManager is installed and"
-                                          " service network-manager is started.")
-        connection_paths = settings.ListConnections()
-        connection_list = []
-        # List each connection's name, UUID, and type
-        for path in connection_paths:
-            con_proxy = bus.get_object(service_name, path)
-            settings_connection = dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection")
-            config = settings_connection.GetSettings()
-
-            # Now get secrets too; we grab the secrets for each type of connection
-            # (since there isn't a "get all secrets" call because most of the time
-            # you only need 'wifi' secrets or '802.1x' secrets, not everything) and
-            # merge them into the configuration data, for use at a later stage
-            self.merge_secrets(settings_connection, config, '802-11-wireless')
-            self.merge_secrets(settings_connection, config, '802-11-wireless-security')
-            self.merge_secrets(settings_connection, config, '802-1x')
-            self.merge_secrets(settings_connection, config, 'gsm')
-            self.merge_secrets(settings_connection, config, 'cdma')
-            self.merge_secrets(settings_connection, config, 'ppp')
-
-            # Get the details of the 'connection' setting
-            s_con = config['connection']
-            connection_list.append(s_con['id'])
-            connection_list.append(s_con['uuid'])
-            connection_list.append(s_con['type'])
-            connection_list.append(self.connection_to_string(config))
-        return connection_list
-
-    def connection_exists(self):
-        # look for the connection name in the list of known connections
-        connections = self.list_connection_info()
-
-        for con_item in connections:
-            if self.conn_name == con_item:
-                return True
-        return False
-
-    def down_connection(self):
-        cmd = [self.nmcli_bin, 'con', 'down', self.conn_name]
-        return self.execute_command(cmd)
-
-    def up_connection(self):
-        cmd = [self.nmcli_bin, 'con', 'up', self.conn_name]
-        return self.execute_command(cmd)
-
-    def create_connection_team(self):
-        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'team', 'con-name']
-        # format for creating team interface
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        cmd.append('ifname')
-        if self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.conn_name is not None:
-            cmd.append(self.conn_name)
-
-        options = {
-            'ipv4.address': self.ip4,
-            'ipv4.gateway': self.gw4,
-            'ipv6.address': self.ip6,
-            'ipv6.gateway': self.gw6,
-            'autoconnect': self.bool_to_string(self.autoconnect),
-            'ipv4.dns-search': self.dns4_search,
-            'ipv6.dns-search': self.dns6_search,
-            'ipv4.dhcp-client-id': self.dhcp_client_id,
-        }
-
-        for key, value in options.items():
-            if value is not None:
-                cmd.extend([key, value])
-
-        return cmd
-
-    def modify_connection_team(self):
-        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
-        options = {
-            'ipv4.address': self.ip4,
-            'ipv4.gateway': self.gw4,
-            'ipv4.dns': self.dns4,
-            'ipv6.address': self.ip6,
-            'ipv6.gateway': self.gw6,
-            'ipv6.dns': self.dns6,
-            'autoconnect': self.bool_to_string(self.autoconnect),
-            'ipv4.dns-search': self.dns4_search,
-            'ipv6.dns-search': self.dns6_search,
-            'ipv4.dhcp-client-id': self.dhcp_client_id,
-        }
-
-        for key, value in options.items():
-            if value is not None:
-                cmd.extend([key, value])
-
-        return cmd
-
-    def create_connection_team_slave(self):
-        cmd = [self.nmcli_bin, 'connection', 'add', 'type', self.type, 'con-name']
-        # format for creating team-slave interface
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        cmd.append('ifname')
-        if self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.conn_name is not None:
-            cmd.append(self.conn_name)
-        if self.master is not None:
-            cmd.append('master')
-            cmd.append(self.master)
-        return cmd
-
-    def modify_connection_team_slave(self):
-        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name, 'connection.master', self.master]
-        # format for modifying team-slave interface
-        if self.mtu is not None:
-            cmd.append('802-3-ethernet.mtu')
-            cmd.append(self.mtu)
-        return cmd
-
-    def create_connection_bond(self):
-        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'bond', 'con-name']
-        # format for creating bond interface
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        cmd.append('ifname')
-        if self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.conn_name is not None:
-            cmd.append(self.conn_name)
-        options = {
-            'mode': self.mode,
-            'ipv4.address': self.ip4,
-            'ipv4.gateway': self.gw4,
-            'ipv6.address': self.ip6,
-            'ipv6.gateway': self.gw6,
-            'autoconnect': self.bool_to_string(self.autoconnect),
-            'ipv4.dns-search': self.dns4_search,
-            'ipv6.dns-search': self.dns6_search,
-            'miimon': self.miimon,
-            'downdelay': self.downdelay,
-            'updelay': self.updelay,
-            'arp-interval': self.arp_interval,
-            'arp-ip-target': self.arp_ip_target,
-            'primary': self.primary,
-            'ipv4.dhcp-client-id': self.dhcp_client_id,
-        }
-
-        for key, value in options.items():
-            if value is not None:
-                cmd.extend([key, value])
-        return cmd
-
-    def modify_connection_bond(self):
-        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
-        # format for modifying bond interface
-
-        options = {
-            'ipv4.address': self.ip4,
-            'ipv4.gateway': self.gw4,
-            'ipv4.dns': self.dns4,
-            'ipv6.address': self.ip6,
-            'ipv6.gateway': self.gw6,
-            'ipv6.dns': self.dns6,
-            'autoconnect': self.bool_to_string(self.autoconnect),
-            'ipv4.dns-search': self.dns4_search,
-            'ipv6.dns-search': self.dns6_search,
-            'miimon': self.miimon,
-            'downdelay': self.downdelay,
-            'updelay': self.updelay,
-            'arp-interval': self.arp_interval,
-            'arp-ip-target': self.arp_ip_target,
-            'ipv4.dhcp-client-id': self.dhcp_client_id,
-        }
-
-        for key, value in options.items():
-            if value is not None:
-                cmd.extend([key, value])
-
-        return cmd
-
-    def create_connection_bond_slave(self):
-        cmd = [self.nmcli_bin, 'connection', 'add', 'type', 'bond-slave', 'con-name']
-        # format for creating bond-slave interface
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        cmd.append('ifname')
-        if self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.conn_name is not None:
-            cmd.append(self.conn_name)
-        if self.master is not None:
-            cmd.append('master')
-            cmd.append(self.master)
-        return cmd
-
-    def modify_connection_bond_slave(self):
-        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name, 'connection.master', self.master]
-        # format for modifying bond-slave interface
-        return cmd
-
-    def create_connection_ethernet(self, conn_type='ethernet'):
-        # format for creating ethernet interface
-        # To add an Ethernet connection with static IP configuration, issue a command as follows
-        # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
-        # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
-        cmd = [self.nmcli_bin, 'con', 'add', 'type']
-        if conn_type == 'ethernet':
-            cmd.append('ethernet')
-        elif conn_type == 'generic':
-            cmd.append('generic')
-        cmd.append('con-name')
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        cmd.append('ifname')
-        if self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.conn_name is not None:
-            cmd.append(self.conn_name)
-
-        options = {
-            'ipv4.address': self.ip4,
-            'ipv4.gateway': self.gw4,
-            'ipv6.address': self.ip6,
-            'ipv6.gateway': self.gw6,
-            'autoconnect': self.bool_to_string(self.autoconnect),
-            'ipv4.dns-search': self.dns4_search,
-            'ipv6.dns-search': self.dns6_search,
-            'ipv4.dhcp-client-id': self.dhcp_client_id,
-        }
-
-        for key, value in options.items():
-            if value is not None:
-                cmd.extend([key, value])
-
-        return cmd
-
-    def modify_connection_ethernet(self, conn_type='ethernet'):
-        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
-        # format for modifying ethernet interface
-        # To modify an Ethernet connection with static IP configuration, issue a command as follows
-        # - nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
-        # nmcli con mod con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
-        options = {
-            'ipv4.address': self.ip4,
-            'ipv4.gateway': self.gw4,
-            'ipv4.dns': self.dns4,
-            'ipv6.address': self.ip6,
-            'ipv6.gateway': self.gw6,
-            'ipv6.dns': self.dns6,
-            'autoconnect': self.bool_to_string(self.autoconnect),
-            'ipv4.dns-search': self.dns4_search,
-            'ipv6.dns-search': self.dns6_search,
-            '802-3-ethernet.mtu': self.mtu,
-            'ipv4.dhcp-client-id': self.dhcp_client_id,
-        }
-
-        for key, value in options.items():
-            if value is not None:
-                if key == '802-3-ethernet.mtu' and conn_type != 'ethernet':
-                    continue
-                cmd.extend([key, value])
-
-        return cmd
-
-    def create_connection_bridge(self):
-        # format for creating bridge interface
-        # To add a bridge connection with static IP configuration, issue a command as follows
-        # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=bridge ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
-        # nmcli con add con-name my-eth1 ifname eth1 type bridge ip4 192.0.2.100/24 gw4 192.0.2.1
-        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'bridge', 'con-name']
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        cmd.append('ifname')
-        if self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.conn_name is not None:
-            cmd.append(self.conn_name)
-
-        options = {
-            'ip4': self.ip4,
-            'gw4': self.gw4,
-            'ip6': self.ip6,
-            'gw6': self.gw6,
-            'autoconnect': self.bool_to_string(self.autoconnect),
-            'bridge.ageing-time': self.ageingtime,
-            'bridge.forward-delay': self.forwarddelay,
-            'bridge.hello-time': self.hellotime,
-            'bridge.mac-address': self.mac,
-            'bridge.max-age': self.maxage,
-            'bridge.priority': self.priority,
-            'bridge.stp': self.bool_to_string(self.stp)
-        }
-
-        for key, value in options.items():
-            if value is not None:
-                cmd.extend([key, value])
-
-        return cmd
-
-    def modify_connection_bridge(self):
-        # format for modifying bridge interface
-        # To modify a bridge connection with static IP configuration, issue a command as follows
-        # - nmcli: name=mod conn_name=my-eth1 ifname=eth1 type=bridge ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
-        # nmcli con mod my-eth1 ifname eth1 type bridge ip4 192.0.2.100/24 gw4 192.0.2.1
-        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
-
-        options = {
-            'ip4': self.ip4,
-            'gw4': self.gw4,
-            'ip6': self.ip6,
-            'gw6': self.gw6,
-            'autoconnect': self.bool_to_string(self.autoconnect),
-            'bridge.ageing-time': self.ageingtime,
-            'bridge.forward-delay': self.forwarddelay,
-            'bridge.hello-time': self.hellotime,
-            'bridge.mac-address': self.mac,
-            'bridge.max-age': self.maxage,
-            'bridge.priority': self.priority,
-            'bridge.stp': self.bool_to_string(self.stp)
-        }
-
-        for key, value in options.items():
-            if value is not None:
-                cmd.extend([key, value])
-
-        return cmd
-
-    def create_connection_bridge_slave(self):
-        # format for creating bridge-slave interface
-        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'bridge-slave', 'con-name']
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        cmd.append('ifname')
-        if self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.conn_name is not None:
-            cmd.append(self.conn_name)
-
-        options = {
-            'master': self.master,
-            'bridge-port.path-cost': self.path_cost,
-            'bridge-port.hairpin': self.bool_to_string(self.hairpin),
-            'bridge-port.priority': self.slavepriority,
-        }
-
-        for key, value in options.items():
-            if value is not None:
-                cmd.extend([key, value])
-
-        return cmd
-
-    def modify_connection_bridge_slave(self):
-        # format for modifying bridge-slave interface
-        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
-        options = {
-            'master': self.master,
-            'bridge-port.path-cost': self.path_cost,
-            'bridge-port.hairpin': self.bool_to_string(self.hairpin),
-            'bridge-port.priority': self.slavepriority,
-        }
-
-        for key, value in options.items():
-            if value is not None:
-                cmd.extend([key, value])
-
-        return cmd
-
-    def create_connection_vlan(self):
-        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'vlan', 'con-name']
-
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        else:
-            cmd.append('vlan%s' % self.vlanid)
-
-        cmd.append('ifname')
-        if self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.conn_name is not None:
-            cmd.append(self.conn_name)
-        else:
-            cmd.append('vlan%s' % self.vlanid)
-
-        params = {'dev': self.vlandev,
-                  'id': self.vlanid,
-                  'ip4': self.ip4 or '',
-                  'gw4': self.gw4 or '',
-                  'ip6': self.ip6 or '',
-                  'gw6': self.gw6 or '',
-                  'autoconnect': self.bool_to_string(self.autoconnect)
-                  }
-        for k, v in params.items():
-            cmd.extend([k, v])
-
-        return cmd
-
-    def modify_connection_vlan(self):
-        cmd = [self.nmcli_bin, 'con', 'mod']
-
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        else:
-            cmd.append('vlan%s' % self.vlanid)
-
-        params = {'vlan.parent': self.vlandev,
-                  'vlan.id': self.vlanid,
-                  'ipv4.address': self.ip4 or '',
-                  'ipv4.gateway': self.gw4 or '',
-                  'ipv4.dns': self.dns4 or '',
-                  'ipv6.address': self.ip6 or '',
-                  'ipv6.gateway': self.gw6 or '',
-                  'ipv6.dns': self.dns6 or '',
-                  'autoconnect': self.bool_to_string(self.autoconnect)
-                  }
-
-        for k, v in params.items():
-            cmd.extend([k, v])
-
-        return cmd
-
-    def create_connection_vxlan(self):
-        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'vxlan', 'con-name']
-
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        else:
-            cmd.append('vxlan%s' % self.vxlan_id)
-
-        cmd.append('ifname')
-        if self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.conn_name is not None:
-            cmd.append(self.conn_name)
-        else:
-            cmd.append('vxlan%s' % self.vxlan_id)
-
-        params = {'vxlan.id': self.vxlan_id,
-                  'vxlan.local': self.vxlan_local,
-                  'vxlan.remote': self.vxlan_remote,
-                  'autoconnect': self.bool_to_string(self.autoconnect)
-                  }
-        for k, v in params.items():
-            cmd.extend([k, v])
-
-        return cmd
-
-    def modify_connection_vxlan(self):
-        cmd = [self.nmcli_bin, 'con', 'mod']
-
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        else:
-            cmd.append('vxlan%s' % self.vxlan_id)
-
-        params = {'vxlan.id': self.vxlan_id,
-                  'vxlan.local': self.vxlan_local,
-                  'vxlan.remote': self.vxlan_remote,
-                  'autoconnect': self.bool_to_string(self.autoconnect)
-                  }
-        for k, v in params.items():
-            cmd.extend([k, v])
-        return cmd
-
-    def create_connection_ipip(self):
-        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'ip-tunnel', 'mode', 'ipip', 'con-name']
-
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.ip_tunnel_dev is not None:
-            cmd.append('ipip%s' % self.ip_tunnel_dev)
-
-        cmd.append('ifname')
-        if self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.conn_name is not None:
-            cmd.append(self.conn_name)
-        else:
-            cmd.append('ipip%s' % self.ip_tunnel_dev)
-
-        if self.ip_tunnel_dev is not None:
-            cmd.append('dev')
-            cmd.append(self.ip_tunnel_dev)
-
-        params = {'ip-tunnel.local': self.ip_tunnel_local,
-                  'ip-tunnel.remote': self.ip_tunnel_remote,
-                  'autoconnect': self.bool_to_string(self.autoconnect)
-                  }
-        for k, v in params.items():
-            cmd.extend([k, v])
-
-        return cmd
-
-    def modify_connection_ipip(self):
-        cmd = [self.nmcli_bin, 'con', 'mod']
-
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.ip_tunnel_dev is not None:
-            cmd.append('ipip%s' % self.ip_tunnel_dev)
-
-        params = {'ip-tunnel.local': self.ip_tunnel_local,
-                  'ip-tunnel.remote': self.ip_tunnel_remote,
-                  'autoconnect': self.bool_to_string(self.autoconnect)
-                  }
-        for k, v in params.items():
-            cmd.extend([k, v])
-        return cmd
-
-    def create_connection_sit(self):
-        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'ip-tunnel', 'mode', 'sit', 'con-name']
-
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.ip_tunnel_dev is not None:
-            cmd.append('sit%s' % self.ip_tunnel_dev)
-
-        cmd.append('ifname')
-        if self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.conn_name is not None:
-            cmd.append(self.conn_name)
-        else:
-            cmd.append('sit%s' % self.ip_tunnel_dev)
-
-        if self.ip_tunnel_dev is not None:
-            cmd.append('dev')
-            cmd.append(self.ip_tunnel_dev)
-
-        params = {'ip-tunnel.local': self.ip_tunnel_local,
-                  'ip-tunnel.remote': self.ip_tunnel_remote,
-                  'autoconnect': self.bool_to_string(self.autoconnect)
-                  }
-        for k, v in params.items():
-            cmd.extend([k, v])
-
-        return cmd
-
-    def modify_connection_sit(self):
-        cmd = [self.nmcli_bin, 'con', 'mod']
-
-        if self.conn_name is not None:
-            cmd.append(self.conn_name)
-        elif self.ifname is not None:
-            cmd.append(self.ifname)
-        elif self.ip_tunnel_dev is not None:
-            cmd.append('sit%s' % self.ip_tunnel_dev)
-
-        params = {'ip-tunnel.local': self.ip_tunnel_local,
-                  'ip-tunnel.remote': self.ip_tunnel_remote,
-                  'autoconnect': self.bool_to_string(self.autoconnect)
-                  }
-        for k, v in params.items():
-            cmd.extend([k, v])
-        return cmd
-
-    def create_connection(self):
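-        # Note: the create_connection_* commands below do not carry dns or mtu
-        # options, so when those parameters are set the connection is created
-        # first and then updated with a follow-up 'con mod'.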
-        cmd = []
-        if self.type == 'team':
-            if (self.dns4 is not None) or (self.dns6 is not None):
-                cmd = self.create_connection_team()
-                self.execute_command(cmd)
-                cmd = self.modify_connection_team()
-                if self.activate:
-                    self.execute_command(cmd)
-                    return self.up_connection()
-                else:
-                    return self.execute_command(cmd)
-            else:
-                cmd = self.create_connection_team()
-        elif self.type == 'team-slave':
-            if self.mtu is not None:
-                cmd = self.create_connection_team_slave()
-                self.execute_command(cmd)
-                cmd = self.modify_connection_team_slave()
-                return self.execute_command(cmd)
-            else:
-                cmd = self.create_connection_team_slave()
-        elif self.type == 'bond':
-            if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
-                cmd = self.create_connection_bond()
-                self.execute_command(cmd)
-                cmd = self.modify_connection_bond()
-                if self.activate:
-                    self.execute_command(cmd)
-                    return self.up_connection()
-                else:
-                    return self.execute_command(cmd)
-            else:
-                cmd = self.create_connection_bond()
-        elif self.type == 'bond-slave':
-            cmd = self.create_connection_bond_slave()
-        elif self.type == 'ethernet':
-            if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
-                cmd = self.create_connection_ethernet()
-                self.execute_command(cmd)
-                cmd = self.modify_connection_ethernet()
-                if self.activate:
-                    self.execute_command(cmd)
-                    return self.up_connection()
-                else:
-                    return self.execute_command(cmd)
-            else:
-                cmd = self.create_connection_ethernet()
-        elif self.type == 'bridge':
-            cmd = self.create_connection_bridge()
-        elif self.type == 'bridge-slave':
-            cmd = self.create_connection_bridge_slave()
-        elif self.type == 'vlan':
-            cmd = self.create_connection_vlan()
-        elif self.type == 'vxlan':
-            cmd = self.create_connection_vxlan()
-        elif self.type == 'ipip':
-            cmd = self.create_connection_ipip()
-        elif self.type == 'sit':
-            cmd = self.create_connection_sit()
-        elif self.type == 'generic':
-            cmd = self.create_connection_ethernet(conn_type='generic')
-
-        if cmd:
-            return self.execute_command(cmd)
-        else:
-            self.module.fail_json(msg="Type of device or network connection is required "
-                                      "while performing 'create' operation. Please specify 'type' as an argument.")
-
-    def remove_connection(self):
-        # self.down_connection()
-        cmd = [self.nmcli_bin, 'con', 'del', self.conn_name]
-        return self.execute_command(cmd)
-
-    def modify_connection(self):
-        cmd = []
-        if self.type == 'team':
-            cmd = self.modify_connection_team()
-        elif self.type == 'team-slave':
-            cmd = self.modify_connection_team_slave()
-        elif self.type == 'bond':
-            cmd = self.modify_connection_bond()
-        elif self.type == 'bond-slave':
-            cmd = self.modify_connection_bond_slave()
-        elif self.type == 'ethernet':
-            cmd = self.modify_connection_ethernet()
-        elif self.type == 'bridge':
-            cmd = self.modify_connection_bridge()
-        elif self.type == 'bridge-slave':
-            cmd = self.modify_connection_bridge_slave()
-        elif self.type == 'vlan':
-            cmd = self.modify_connection_vlan()
-        elif self.type == 'vxlan':
-            cmd = self.modify_connection_vxlan()
-        elif self.type == 'ipip':
-            cmd = self.modify_connection_ipip()
-        elif self.type == 'sit':
-            cmd = self.modify_connection_sit()
-        elif self.type == 'generic':
-            cmd = self.modify_connection_ethernet(conn_type='generic')
-        if cmd:
-            return self.execute_command(cmd)
-        else:
-            self.module.fail_json(msg="Type of device or network connection is required "
-                                      "while performing 'modify' operation. Please specify 'type' as an argument.")
-
-
-def main():
-    # Parse module arguments
-    module = AnsibleModule(
-        argument_spec=dict(
-            autoconnect=dict(type='bool', default=True),
-            activate=dict(type='bool', default=True),
-            state=dict(type='str', required=True, choices=['absent', 'present']),
-            conn_name=dict(type='str', required=True),
-            master=dict(type='str'),
-            ifname=dict(type='str'),
-            type=dict(type='str',
-                      choices=['bond', 'bond-slave', 'bridge', 'bridge-slave', 'ethernet', 'generic', 'ipip', 'sit', 'team', 'team-slave', 'vlan', 'vxlan']),
-            ip4=dict(type='str'),
-            gw4=dict(type='str'),
-            dns4=dict(type='list'),
-            dns4_search=dict(type='list'),
-            dhcp_client_id=dict(type='str'),
-            ip6=dict(type='str'),
-            gw6=dict(type='str'),
-            dns6=dict(type='list'),
-            dns6_search=dict(type='list'),
-            # Bond Specific vars
-            mode=dict(type='str', default='balance-rr',
-                      choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']),
-            miimon=dict(type='int'),
-            downdelay=dict(type='int'),
-            updelay=dict(type='int'),
-            arp_interval=dict(type='int'),
-            arp_ip_target=dict(type='str'),
-            primary=dict(type='str'),
-            # general usage
-            mtu=dict(type='int'),
-            mac=dict(type='str'),
-            # bridge specific vars
-            stp=dict(type='bool', default=True),
-            priority=dict(type='int', default=128),
-            slavepriority=dict(type='int', default=32),
-            forwarddelay=dict(type='int', default=15),
-            hellotime=dict(type='int', default=2),
-            maxage=dict(type='int', default=20),
-            ageingtime=dict(type='int', default=300),
-            hairpin=dict(type='bool', default=True),
-            path_cost=dict(type='int', default=100),
-            # vlan specific vars
-            vlanid=dict(type='int'),
-            vlandev=dict(type='str'),
-            flags=dict(type='str'),
-            ingress=dict(type='str'),
-            egress=dict(type='str'),
-            # vxlan specific vars
-            vxlan_id=dict(type='int'),
-            vxlan_local=dict(type='str'),
-            vxlan_remote=dict(type='str'),
-            # ip-tunnel specific vars
-            ip_tunnel_dev=dict(type='str'),
-            ip_tunnel_local=dict(type='str'),
-            ip_tunnel_remote=dict(type='str'),
-        ),
-        supports_check_mode=True,
-    )
-
-    if not HAVE_DBUS:
-        module.fail_json(msg=missing_required_lib('dbus'), exception=DBUS_IMP_ERR)
-
-    if not HAVE_NM_CLIENT:
-        module.fail_json(msg=missing_required_lib('NetworkManager glib API'), exception=NM_CLIENT_IMP_ERR)
-
-    nmcli = Nmcli(module)
-
-    (rc, out, err) = (None, '', '')
-    result = {'conn_name': nmcli.conn_name, 'state': nmcli.state}
-
-    # check for issues
-    if nmcli.conn_name is None:
-        nmcli.module.fail_json(msg="Please specify a name for the connection")
-    # team-slave checks
-    if nmcli.type == 'team-slave' and nmcli.master is None:
-        nmcli.module.fail_json(msg="Please specify a name for the master")
-    if nmcli.type == 'team-slave' and nmcli.ifname is None:
-        nmcli.module.fail_json(msg="Please specify an interface name for the connection")
-
-    if nmcli.state == 'absent':
-        if nmcli.connection_exists():
-            if module.check_mode:
-                module.exit_json(changed=True)
-            (rc, out, err) = nmcli.down_connection()
-            (rc, out, err) = nmcli.remove_connection()
-            if rc != 0:
-                module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
-
-    elif nmcli.state == 'present':
-        if nmcli.connection_exists():
-            # modify connection (note: this function is check mode aware)
-            # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
-            result['Exists'] = 'Connections do exist so we are modifying them'
-            if module.check_mode:
-                module.exit_json(changed=True)
-            (rc, out, err) = nmcli.modify_connection()
-        if not nmcli.connection_exists():
-            result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
-            if module.check_mode:
-                module.exit_json(changed=True)
-            (rc, out, err) = nmcli.create_connection()
-        if rc is not None and rc != 0:
-            module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
-
-    if rc is None:
-        result['changed'] = False
-    else:
-        result['changed'] = True
-    if out:
-        result['stdout'] = out
-    if err:
-        result['stderr'] = err
-
-    module.exit_json(**result)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible/library/source_file.py b/ansible/library/source_file.py
deleted file mode 100644
index 54e2600706a4a8ccd54b636b7aa945935fdc7387..0000000000000000000000000000000000000000
--- a/ansible/library/source_file.py
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2019, Nicolas Karolak <nicolas.karolak@ubicast.eu>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
-    "metadata_version": "1.1",
-    "status": ["preview"],
-    "supported_by": "community",
-}
-
-
-DOCUMENTATION = """
-module: source_file
-short_description: Source remote bash/dotenv file
-description:
-    - This module is used to register host variables from a remote bash/dotenv-like file.
-    - It handles boolean values (`MY_VAR=1`) and has basic handling of lists (`MY_VAR=one,two,three`) and dictionaries (`MY_VAR=a=1;b=2;c=3`).
-version_added: "2.8"
-author: "Nicolas Karolak (@nikaro)"
-options:
-    path:
-        description:
-            - Path to the file to source.
-        required: true
-        type: path
-    prefix:
-        description:
-            - Prefix to add to the registered variable name.
-        required: false
-        default: ""
-        type: str
-    lower:
-        description:
-            - Whether to lowercase the variable name.
-        required: false
-        default: false
-        type: bool
-notes:
-    - The `check_mode` is supported.
-"""
-
-EXAMPLES = """
-- name: source envsetup file
-  source_file:
-    prefix: envsetup_
-    path: /root/envsetup/conf.sh
-    lower: true
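-
-# Illustrative: given a conf.sh containing
-#   DEBUG=1
-#   SERVERS=one,two,three
-# the task above registers (after prefixing and lowering):
-#   envsetup_debug: true
-#   envsetup_servers: [one, two, three]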
-"""
-
-RETURN = """
-ansible_facts:
-    description: Registered variables.
-    returned: on success
-    type: dict
-    sample:
-        key: value
-"""
-
-import os  # noqa: E402
-import re  # noqa: E402
-
-from ansible.module_utils.basic import AnsibleModule  # noqa: E402
-from ansible.module_utils.parsing.convert_bool import BOOLEANS, boolean  # noqa: E402
-from ansible.module_utils.six import string_types  # noqa: E402
-
-
-def run_module():
-    module_args = {
-        "path": {"type": "path", "required": True},
-        "prefix": {"type": "str", "required": False, "default": ""},
-        "lower": {"type": "bool", "required": False, "default": False},
-    }
-
-    result = {"changed": False}
-
-    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
-
-    path = module.params["path"]
-    prefix = module.params["prefix"]
-    lower = boolean(module.params["lower"])
-    variables = {}
-    regex_valid_name = re.compile(r"^[a-zA-Z][a-zA-Z0-9_-]*$")
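-    # regex_key_value captures one KEY=value pair per line, with optional
-    # single or double quotes around the value; '#'-prefixed lines are
-    # skipped, e.g. 'FOO="bar"' -> ('FOO', 'bar')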
-    regex_key_value = re.compile(
-        r"^(?!#)(?P<key>[a-zA-Z][a-zA-Z0-9_-]*)=(?:[\'\"])?(?P<value>(?:[^\'\"\n])*)(?:[\'\"])?$",
-        re.MULTILINE
-    )
-
-    if not os.path.isfile(path):
-        module.fail_json(msg="'%s' does not exist or is not a file" % path, **result)
-
-    if prefix and not regex_valid_name.match(prefix):
-        module.fail_json(
-            msg="'%s' is not a valid prefix it must starts with a letter or underscore"
-            " character, and contains only letters, numbers and underscores" % prefix,
-            **result
-        )
-
-    with open(path) as path_fh:
-        # load file content and get all "key=value"
-        content = path_fh.read()
-        content_match = regex_key_value.findall(content)
-
-        for key, value in content_match:
-            # merge prefix + key
-            if prefix:
-                key = "%s%s" % (prefix, key)
-
-            # lower key
-            if lower:
-                key = key.lower()
-
-            # check key validity
-            if not regex_valid_name.match(key):
-                module.fail_json(
-                    msg="'%s' is not a valid variable name it must starts with a letter or "
-                    "underscore character, and contains only letters, numbers and underscores"
-                    % key,
-                    **result
-                )
-
-            # handle list value
-            if "," in value:
-                value = re.split("[,\n]", value)
-
-            # handle dict value
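-            # e.g. "a=1;b=2;c=3" -> {"a": "1", "b": "2", "c": "3"}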
-            if ";" in value and "=" in value:
-                value = {i.split("=")[0]: i.split("=")[1] for i in value.split(";")}
-
-            # handle bool value
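-            # e.g. "yes" -> True, "0" -> False (via ansible's boolean())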
-            if isinstance(value, string_types) and value.lower() in BOOLEANS:
-                value = boolean(value)
-
-            # build variables dict
-            variables[key] = value
-
-            result["changed"] = True
-
-            if not module.check_mode:
-                result["ansible_facts"] = variables
-
-    module.exit_json(**result)
-
-
-def main():
-    run_module()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/ansible/molecule/default/converge.yml b/ansible/molecule/default/converge.yml
deleted file mode 100644
index 80309d7064993a44f9cfe87f1d30af40e28a2ce7..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/converge.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: PYTHON
-  hosts: all
-  gather_facts: false
-  tasks:
-    - name: ensure python3 is installed
-      register: python_install
-      changed_when: "'es_pyinstall' in python_install.stdout_lines"
-      raw: command -v python3 || echo es_pyinstall && apt update && apt install -y python3-minimal python3-apt
-
-- name: Converge
-  hosts: all
-  environment:
-    HOSTALIASES: /etc/hosts.aliases
-
-  pre_tasks:
-    - name: check if the hosts aliases file exists
-      stat: "path=/etc/hosts.aliases"
-      register: etc_hosts_debian
-
-    - debug:
-        msg: "{{ etc_hosts_debian }}"
-
-    - name: add all hosts to hostaliases
-      loop: "{{ query('inventory_hostnames', 'all:!localhost') }}"
-      lineinfile:
-        path: /etc/hosts.aliases
-        line: "{{ hostvars[item]['ansible_default_ipv4']['address'] }} {{ item }}"
-        backup: true
-        create: true
-      tags: always
-      when: not etc_hosts_debian.stat.exists
-
-    - name: add all hosts to hostfile
-      shell: "/bin/cat /etc/hosts.aliases >> /etc/hosts"
-      when: not etc_hosts_debian.stat.exists
-      tags: always
-
-    - name: check running in a docker container
-      register: check_if_docker
-      stat:
-        path: /.dockerenv
-
-    - name: set docker flag variable
-      set_fact:
-        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
-
-  post_tasks:
-    - name: deploy letsencrypt certificate
-      when: letsencrypt_enabled | d(false)
-      include_role:
-        name: letsencrypt
-
-    - name: configure network
-      when: network_apply | d(false)
-      include_role:
-        name: network
-
-    - name: configure proxy
-      when: proxy_apply | d(false)
-      include_role:
-        name: proxy
-
-- import_playbook: ../../playbooks/site.yml
-
-...
diff --git a/ansible/molecule/default/molecule.yml b/ansible/molecule/default/molecule.yml
deleted file mode 100644
index f68c1bc00ea1ef80ddbc862c4e4605c02154b9c9..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/molecule.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-
-driver:
-  name: docker
-
-platforms:
-  - name: ms-prep-${CI_PIPELINE_ID:-default}
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - mediaserver
-      - mediaworker
-      - mirismanager
-      - live
-      - celerity
-      - postgres
-      - mediaimport
-      - mediavault
-      - netcapture
-      - msmonitor
-      - munin_server
-      - munin_node
-
-provisioner:
-  name: ansible
-  options:
-    D: true
-  env:
-    ANSIBLE_ROLES_PATH: ../../roles
-    ANSIBLE_LIBRARY: ../../library
-    ANSIBLE_ACTION_PLUGINS: ../../plugins/action
-    ANSIBLE_PYTHON_INTERPRETER: /usr/bin/python3
-    SKYREACH_SYSTEM_KEY: ${SKYREACH_SYSTEM_KEY_STD}
-  inventory:
-    group_vars:
-      all:
-        customer_short_name: customer
-
-
-verifier:
-  name: testinfra
-  options:
-    verbose: true
-
-
-...
diff --git a/ansible/molecule/default/tests/test_celerity.py b/ansible/molecule/default/tests/test_celerity.py
deleted file mode 100644
index 5bbb2d768a44304dfa785f8302ab4b14bde1e5a3..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_celerity.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_celerity_is_installed(host):
-    p = host.package("celerity-server")
-
-    assert p.is_installed
-
-
-def test_celerity_config(host):
-    f = host.file("/etc/celerity/config.py")
-
-    assert f.exists
-    assert f.contains("SIGNING_KEY =")
-    assert f.contains("MEDIASERVERS =")
-
-
-def test_celerity_service(host):
-    s = host.service("celerity-server")
-
-    assert s.is_running
-    assert s.is_enabled
-
-
-def test_celerity_socket(host):
-    s = host.socket("tcp://0.0.0.0:6200")
-
-    assert s.is_listening
diff --git a/ansible/molecule/default/tests/test_conf.py b/ansible/molecule/default/tests/test_conf.py
deleted file mode 100644
index 313b6b2df7064ec40ef0cc7e657e229d766c753e..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_conf.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_git_is_installed(host):
-    p = host.package("git")
-
-    assert p.is_installed
-
-
-def test_envsetup_repo(host):
-    f = host.file("/root/envsetup")
-
-    assert f.exists
-    assert f.is_directory
-    assert f.user == "root"
-    assert f.group == "root"
-
-
-def test_root_privkey(host):
-    f = host.file("/root/.ssh/id_ed25519")
-
-    assert f.exists
-    assert f.is_file
-    assert f.user == "root"
-    assert f.group == "root"
-    assert f.mode == 0o600
-
-
-def test_root_pubkey(host):
-    f = host.file("/root/.ssh/id_ed25519.pub")
-
-    assert f.exists
-    assert f.is_file
-    assert f.user == "root"
-    assert f.group == "root"
-    assert f.mode == 0o644
-
-
-def test_generated_conf_sh_file(host):
-    f = host.file("/root/envsetup/auto-generated-conf.sh")
-
-    assert f.exists
-    assert f.is_file
-    assert f.user == "root"
-    assert f.group == "root"
-    assert f.contains("Generated by")
-    assert f.contains("SKYREACH_API_KEY=")
-
-
-def test_conf_sh_file(host):
-    f = host.file("/root/envsetup/conf.sh")
-
-    assert f.exists
-    assert f.is_file
-    assert f.user == "root"
-    assert f.group == "root"
diff --git a/ansible/molecule/default/tests/test_init.py b/ansible/molecule/default/tests/test_init.py
deleted file mode 100644
index 4941f6b5d24685dcae96953fbaa7b892e22ece26..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_init.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_apt_source_skyreach_file(host):
-    f = host.file("/etc/apt/sources.list.d/skyreach.list")
-
-    assert f.exists
-    assert f.is_file
-    assert f.user == "root"
-    assert f.group == "root"
-    assert f.contains("deb http")
-
-
-def test_requests_is_installed(host):
-    p = host.package("python3-requests")
-
-    assert p.is_installed
-    assert p.version.startswith("2.")
-
-
-def test_locale_file(host):
-    f = host.file("/etc/default/locale")
-
-    assert f.exists
-    assert f.is_file
-    assert f.user == "root"
-    assert f.group == "root"
-    assert f.contains("LANGUAGE=")
-
-
-def test_ubicast_user(host):
-    u = host.user("ubicast")
-
-    assert u.name == "ubicast"
-    assert u.home == "/home/ubicast"
-    assert "sudo" in u.groups
-    assert u.expiration_date is None
-
-
-def test_bashrc_file(host):
-    f = host.file("/root/.bashrc")
-
-    assert f.exists
-
-
-def test_vimrc_file(host):
-    f = host.file("/root/.vimrc")
-
-    assert f.exists
-
-
-def test_authorized_keys_file(host):
-    f = host.file("/root/.ssh/authorized_keys")
-
-    assert f.exists
-    assert f.is_file
-    assert f.user == "root"
-    assert f.group == "root"
-    assert f.contains(
-        "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCr2IJlzvLlLxa2PyGhydAlz/PAOj240g8anQmY5"
-        "8X+llirLHIOlkdJXBqf94jAeZkweWpoE41RdmKPUQEz4pCO09dGJaZD4lv1NtDhrhNwTmoOnyFcko"
-        "PimR6DX6+UMM9wUmfti/ytljbVEVVo/pRacXmczeumDaci3uYTURyliuAR9h3zbIMQ6D2COESXjpt"
-        "WmEwawE9grsTfJi84Q+XIBPvXRHjjceB5hejUMWuf7xc6GH9WIo5REh3qTUvgtxHtIGLQ3ImOzrbC"
-        "sEhENrBWds0qH0pIuH0lykWGR6pumpPxLzXcVho+e/UJgUrEg5u6/58aizqJTkxFJMa8ciYz "
-        "support@ubicast"
-    )
-
-
-def test_journal_file(host):
-    f = host.file("/var/log/journal")
-
-    assert f.exists
-    assert f.is_directory
diff --git a/ansible/molecule/default/tests/test_mediaimport.py b/ansible/molecule/default/tests/test_mediaimport.py
deleted file mode 100644
index de51b74ca6a9dcced7c394be94a8f3e5abb5c28d..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_mediaimport.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-# TODO: ubicast-mediaimport when released
-def test_import_is_installed(host):
-    p = host.package("ubicast-mediaimport")
-
-    assert p.is_installed
-
-
-def test_ftp_is_installed(host):
-    p = host.package("pure-ftpd")
-
-    assert p.is_installed
-
-
-def test_ssh_is_installed(host):
-    p = host.package("openssh-server")
-
-    assert p.is_installed
-
-
-def test_sftp_is_installed(host):
-    p = host.package("mysecureshell")
-
-    assert p.is_installed
-
-
-def test_mediaimport_conf(host):
-    f = host.file("/etc/mediaserver/mediaimport.json")
-
-    assert f.exists
-
-
-def test_mediaimport_service(host):
-    s = host.service("mediaimport")
-
-    assert s.is_running
-    assert s.is_enabled
-
-
-def test_ftp_service(host):
-    s = host.service("pure-ftpd")
-
-    assert s.is_running
-    assert s.is_enabled
-
-
-def test_sftp_service(host):
-    s = host.service("mysecureshell")
-
-    assert s.is_running
-    assert s.is_enabled
-
-
-def test_ftp_socket(host):
-    s = host.socket("tcp://0.0.0.0:21")
-
-    assert s.is_listening
-
-
-def test_sftp_socket(host):
-    s = host.socket("tcp://0.0.0.0:22")
-
-    assert s.is_listening
-
-
-def test_fail2ban_conf(host):
-    f = host.file("/etc/fail2ban/jail.d/pure-ftpd.conf")
-
-    assert f.exists
-
-
-def test_fail2ban_service(host):
-    s = host.service("fail2ban")
-
-    assert s.is_running
diff --git a/ansible/molecule/default/tests/test_mediaserver.py b/ansible/molecule/default/tests/test_mediaserver.py
deleted file mode 100644
index cd7f8f29cc7fafd6317e2182e421a7941c0a1c37..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_mediaserver.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_server_is_installed(host):
-    p = host.package("ubicast-mediaserver")
-
-    assert p.is_installed
-
-
-def test_server_user(host):
-    u = host.user("msuser")
-
-    assert u.name == "msuser"
-
-
-def test_server_nginx(host):
-    f = host.file("/etc/nginx/sites-available/mediaserver-msuser.conf")
-
-    assert f.exists
-
-
-def test_server_service(host):
-    s = host.service("mediaserver")
-
-    assert s.is_running
-    assert s.is_enabled
-
-
-def test_server_socket(host):
-    s = host.socket("tcp://0.0.0.0:443")
-
-    assert s.is_listening
-
-
-def test_fail2ban_conf(host):
-    f = host.file("/etc/fail2ban/jail.d/mediaserver.conf")
-
-    assert f.exists
-
-
-def test_fail2ban_service(host):
-    s = host.service("fail2ban")
-
-    assert s.is_running
-
-
-def test_postfix_service(host):
-    s = host.service("postfix")
-
-    assert s.is_running
diff --git a/ansible/molecule/default/tests/test_mediavault.py b/ansible/molecule/default/tests/test_mediavault.py
deleted file mode 100644
index 88b6b6d548796ea92a883107e498ef407e8ca48e..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_mediavault.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_rsync_is_installed(host):
-    p = host.package("rsync")
-
-    assert p.is_installed
-
-
-def test_rsync_tmbackup_is_installed(host):
-    r = host.file("/usr/bin/rsync_tmbackup")
-
-    assert r.exists
diff --git a/ansible/molecule/default/tests/test_mediaworker.py b/ansible/molecule/default/tests/test_mediaworker.py
deleted file mode 100644
index 3c81e796bf57c83d33a476676f441914a70d5b42..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_mediaworker.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_celerity_is_installed(host):
-    p = host.package("celerity-workers")
-
-    assert p.is_installed
-
-
-def test_celerity_config(host):
-    f = host.file("/etc/celerity/config.py")
-
-    assert f.exists
-    assert f.contains("SIGNING_KEY =")
-    assert f.contains("SERVER_URL =")
-    assert f.contains("QUEUES_PER_WORKER =")
-
-
-def test_celerity_service(host):
-    s = host.service("celerity-workers")
-
-    assert s.is_running
-    assert s.is_enabled
diff --git a/ansible/molecule/default/tests/test_mirismanager.py b/ansible/molecule/default/tests/test_mirismanager.py
deleted file mode 100644
index 5eadb7687b919a5db0e67d0968f71cf8d4d5b318..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_mirismanager.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_manager_is_installed(host):
-    p = host.package("ubicast-skyreach")
-
-    assert p.is_installed
-
-
-def test_manager_user(host):
-    u = host.user("skyreach")
-
-    assert u.name == "skyreach"
-
-
-def test_manager_nginx(host):
-    f = host.file("/etc/nginx/sites-available/skyreach.conf")
-
-    assert f.exists
-
-
-def test_manager_service(host):
-    s = host.service("skyreach")
-
-    assert s.is_running
-    assert s.is_enabled
-
-
-def test_manager_socket(host):
-    s = host.socket("tcp://0.0.0.0:443")
-
-    assert s.is_listening
-
-
-def test_fail2ban_conf(host):
-    f = host.file("/etc/fail2ban/jail.d/skyreach.conf")
-
-    assert f.exists
-
-
-def test_fail2ban_service(host):
-    s = host.service("fail2ban")
-
-    assert s.is_running
-
-
-def test_postfix_service(host):
-    s = host.service("postfix")
-
-    assert s.is_running
diff --git a/ansible/molecule/default/tests/test_msmonitor.py b/ansible/molecule/default/tests/test_msmonitor.py
deleted file mode 100644
index 8f30248a222847628c502794bec6b1eeb0a94184..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_msmonitor.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_munin_is_installed(host):
-    p = host.package("munin-node")
-
-    assert p.is_installed
-
-
-def test_monitor_is_installed(host):
-    p = host.package("ubicast-monitor")
-
-    assert p.is_installed
-
-
-def test_monitor_runtime_is_installed(host):
-    p = host.package("ubicast-monitor-runtime")
-
-    assert p.is_installed
-
-
-def test_monitor_user(host):
-    u = host.user("msmonitor")
-
-    assert u.name == "msmonitor"
-
-
-def test_monitor_nginx(host):
-    f = host.file("/etc/nginx/sites-available/msmonitor.conf")
-
-    assert f.exists
-
-
-def test_monitor_service(host):
-    s = host.service("msmonitor")
-
-    assert s.is_running
-    assert s.is_enabled
-
-
-def test_monitor_socket(host):
-    s = host.socket("tcp://0.0.0.0:443")
-
-    assert s.is_listening
-
-
-def test_fail2ban_conf(host):
-    f = host.file("/etc/fail2ban/jail.d/monitor.conf")
-
-    assert f.exists
-
-
-def test_fail2ban_service(host):
-    s = host.service("fail2ban")
-
-    assert s.is_running
diff --git a/ansible/molecule/default/tests/test_netcapture.py b/ansible/molecule/default/tests/test_netcapture.py
deleted file mode 100644
index 45fc1ceb88721fcb55150071014df6b6b9a94398..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_netcapture.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_netcapture_is_installed(host):
-    p = host.package("python3-miris-netcapture")
-
-    assert p.is_installed
-
-
-def test_docker_is_installed(host):
-    p = host.package("docker-ce")
-
-    assert p.is_installed
-
-
-def test_netcapture_conf(host):
-    f = host.file("/etc/miris/netcapture.json")
-
-    assert f.exists
-
-
-def test_miris_api_conf(host):
-    f = host.file("/etc/miris/conf/api.json")
-
-    assert f.exists
-
-
-def test_docker_service(host):
-    s = host.service("docker")
-
-    assert s.is_running
-    assert s.is_enabled
diff --git a/ansible/molecule/default/tests/test_nginx.py b/ansible/molecule/default/tests/test_nginx.py
deleted file mode 100644
index 6ece959c2c0e8133f854542b5b893a5b74634d2f..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_nginx.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_apache2_is_installed(host):
-    p = host.package("apache2")
-
-    assert not p.is_installed
-
-
-def test_nginx_is_installed(host):
-    p = host.package("nginx")
-
-    assert p.is_installed
-
-
-def test_nginx_removed_default(host):
-    f = host.file("/etc/nginx/sites-enabled/default.conf")
-
-    assert not f.exists
-
-
-def test_nginx_removed_old_ssl(host):
-    f = host.file("/etc/nginx/conf.d/ssl.conf")
-
-    assert not f.exists
diff --git a/ansible/molecule/default/tests/test_ntp.py b/ansible/molecule/default/tests/test_ntp.py
deleted file mode 100644
index 30700392e52e6e62b7a974f3aa0ca1b9b3285283..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_ntp.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_ntp_is_installed(host):
-    p = host.package("ntp")
-
-    assert p.is_installed
-
-
-def test_systemd_timesyncd_override(host):
-    f = host.file(
-        "/lib/systemd/system/systemd-timesyncd.service.d/disable-with-time-daemon.conf"
-    )
-
-    assert f.exists
-    assert f.contains("[Unit]")
-    assert f.contains("ConditionFileIsExecutable=!")
-
-
-def test_systemd_timesyncd_disabled(host):
-    s = host.service("systemd-timesyncd")
-
-    assert not s.is_running
-    assert not s.is_enabled
-
-
-def test_ntp_service(host):
-    s = host.service("ntp")
-
-    assert s.is_running
-    assert s.is_enabled
diff --git a/ansible/molecule/default/tests/test_postfix.py b/ansible/molecule/default/tests/test_postfix.py
deleted file mode 100644
index 59be2872a1a090c4094872f205fa935c13564257..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_postfix.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_postfix_is_installed(host):
-    p = host.package("postfix")
-
-    assert p.is_installed
-
-
-def test_postfix_main(host):
-    f = host.file("/etc/postfix/main.cf")
-
-    assert f.exists
-
-
-def test_mailname(host):
-    f = host.file("/etc/mailname")
-
-    assert f.exists
-
-
-def test_aliases(host):
-    f = host.file("/etc/aliases")
-
-    assert f.exists
-    assert f.contains("devnull:")
-    assert f.contains("root:")
-
-
-def test_postfix_virtual(host):
-    f = host.file("/etc/postfix/virtual")
-
-    assert f.exists
-    assert f.contains("postmaster@")
-    assert f.contains("bounces@")
-    assert f.contains("noreply@")
-
-
-def test_postfix_generic(host):
-    f = host.file("/etc/postfix/generic")
-
-    assert f.exists
-    assert f.contains("root@")
-
-
-def test_postfix_service(host):
-    s = host.service("postfix")
-
-    assert s.is_running
-    assert s.is_enabled
-
-
-def test_postfix_listen(host):
-    s = host.socket("tcp://127.0.0.1:25")
-
-    assert s.is_listening
diff --git a/ansible/molecule/default/tests/test_postgres.py b/ansible/molecule/default/tests/test_postgres.py
deleted file mode 100644
index 75324c8400ac9f3485411df1b052064732be960e..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_postgres.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_psycopg2_is_installed(host):
-    p = host.package("python3-psycopg2")
-
-    assert p.is_installed
-
-
-def test_postgres_is_installed(host):
-    p = host.package("postgresql")
-
-    assert p.is_installed
-
-
-def test_postgres_user(host):
-    u = host.user("postgres")
-
-    assert u.name == "postgres"
-
-
-def test_postgres_service(host):
-    s = host.service("postgresql@11-main")
-
-    assert s.is_running
-
-
-def test_postgres_socket(host):
-    s = host.socket("tcp://127.0.0.1:5432")
-
-    assert s.is_listening
diff --git a/ansible/molecule/default/tests/test_python3.py b/ansible/molecule/default/tests/test_python3.py
deleted file mode 100644
index edfcc5a7eee2902584b6a4e0775fd8ffc6659bf2..0000000000000000000000000000000000000000
--- a/ansible/molecule/default/tests/test_python3.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("all")
-
-
-def test_python3_is_installed(host):
-    p = host.package("python3")
-
-    assert p.is_installed
-    assert p.version.startswith("3.")
diff --git a/ansible/molecule/pf-ha/converge.yml b/ansible/molecule/pf-ha/converge.yml
deleted file mode 120000
index 73cb8dadb7909e1d88c761560ee0a0e3489ae5cf..0000000000000000000000000000000000000000
--- a/ansible/molecule/pf-ha/converge.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default/converge.yml
\ No newline at end of file
diff --git a/ansible/molecule/pf-ha/molecule.yml b/ansible/molecule/pf-ha/molecule.yml
deleted file mode 100644
index 6bf83f0ef1e0e5b95414221e50eedcd85501e4ff..0000000000000000000000000000000000000000
--- a/ansible/molecule/pf-ha/molecule.yml
+++ /dev/null
@@ -1,106 +0,0 @@
----
-
-driver:
-  name: docker
-
-platforms:
-  - name: ms0-${CI_PIPELINE_ID:-default}
-    environment:
-      HOSTALIASES: /etc/hosts.aliases
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-      - data-${CI_PIPELINE_ID:-ha}:/data:rw
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - mediaserver
-      - live
-      - celerity
-  - name: ms1-${CI_PIPELINE_ID:-default}
-    environment:
-      HOSTALIASES: /etc/hosts.aliases
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-      - data-${CI_PIPELINE_ID:-ha}:/data:rw
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - mediaserver
-      - live
-  - name: ms2-${CI_PIPELINE_ID:-default}
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    environment:
-      HOSTALIASES: /etc/hosts.aliases
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-      - data-${CI_PIPELINE_ID:-ha}:/data:rw
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - mediaserver
-      - live
-  - name: db0-${CI_PIPELINE_ID:-default}
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - postgres
-  - name: db1-${CI_PIPELINE_ID:-default}
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - postgres
-  - name: db2-${CI_PIPELINE_ID:-default}
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - postgres
-provisioner:
-  name: ansible
-  options:
-    D: true
-  env:
-    ANSIBLE_ROLES_PATH: ../../roles
-    ANSIBLE_LIBRARY: ../../library
-    ANSIBLE_ACTION_PLUGINS: ../../plugins/action
-    ANSIBLE_PYTHON_INTERPRETER: /usr/bin/python3
-    SKYREACH_SYSTEM_KEY: ${SKYREACH_SYSTEM_KEY_HA}
-  inventory:
-    group_vars:
-      postgres:
-        repmgr_password: "testrepmgr"
-
-verifier:
-  name: testinfra
-  options:
-    verbose: true
-
-...
diff --git a/ansible/molecule/pf-ha/tests/commons.py b/ansible/molecule/pf-ha/tests/commons.py
deleted file mode 100644
index e7318b13789ba836201d1f683dc0aed4f4fc9937..0000000000000000000000000000000000000000
--- a/ansible/molecule/pf-ha/tests/commons.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import socket
-
-
-def get_status(host):
-    ip = host.interface('eth0').addresses[0]
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.connect((ip, 8543))
-    data = s.recv(1024)
-    return data.rstrip().decode('utf-8')
diff --git a/ansible/molecule/pf-ha/tests/test_haproxy.py b/ansible/molecule/pf-ha/tests/test_haproxy.py
deleted file mode 100644
index bca50da2009bb182e9b2a75c47ce00bce7c539b6..0000000000000000000000000000000000000000
--- a/ansible/molecule/pf-ha/tests/test_haproxy.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("mediaserver")
-
-
-def test_haproxy_is_installed(host):
-    p = host.package("haproxy")
-
-    assert p.is_installed
-
-
-def test_haproxy_service(host):
-    s = host.service("haproxy")
-
-    assert s.is_running
-
-
-def test_haproxy_socket(host):
-    s = host.socket("tcp://0.0.0.0:54321")
-
-    assert s.is_listening
diff --git a/ansible/molecule/pf-ha/tests/test_mediaserver.py b/ansible/molecule/pf-ha/tests/test_mediaserver.py
deleted file mode 100644
index 6927fc93e69b23786db99d8cb25d1d7de6f95af2..0000000000000000000000000000000000000000
--- a/ansible/molecule/pf-ha/tests/test_mediaserver.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("mediaserver")
-
-
-def test_server_is_installed(host):
-    p = host.package("ubicast-mediaserver")
-
-    assert p.is_installed
-
-
-def test_server_user(host):
-    u = host.user("msuser")
-
-    assert u.name == "msuser"
-
-
-def test_server_nginx(host):
-    f = host.file("/etc/nginx/sites-available/mediaserver-msuser.conf")
-
-    assert f.exists
-
-
-def test_server_service(host):
-    s = host.service("mediaserver")
-
-    assert s.is_running
-    assert s.is_enabled
-
-
-def test_server_socket(host):
-    s = host.socket("tcp://0.0.0.0:443")
-
-    assert s.is_listening
-
-
-def test_fail2ban_conf(host):
-    f = host.file("/etc/fail2ban/jail.d/mediaserver.conf")
-
-    assert f.exists
-
-
-def test_fail2ban_service(host):
-    s = host.service("fail2ban")
-
-    assert s.is_running
diff --git a/ansible/molecule/pf-ha/tests/test_postgres.py b/ansible/molecule/pf-ha/tests/test_postgres.py
deleted file mode 100644
index b8a8516ef6ceb42a496d64cf727ee22dcd72377b..0000000000000000000000000000000000000000
--- a/ansible/molecule/pf-ha/tests/test_postgres.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("postgres")
-
-
-def test_psycopg2_is_installed(host):
-    p = host.package("python3-psycopg2")
-
-    assert p.is_installed
-
-
-def test_postgres_is_installed(host):
-    p = host.package("postgresql")
-
-    assert p.is_installed
-
-
-def test_postgres_user(host):
-    u = host.user("postgres")
-
-    assert u.name == "postgres"
-
-
-def test_postgres_service(host):
-    s = host.service("postgresql@11-main")
-
-    assert s.is_running
-
-
-def test_postgres_socket(host):
-    s = host.socket("tcp://127.0.0.1:5432")
-
-    assert s.is_listening
diff --git a/ansible/molecule/pf-std/converge.yml b/ansible/molecule/pf-std/converge.yml
deleted file mode 120000
index 73cb8dadb7909e1d88c761560ee0a0e3489ae5cf..0000000000000000000000000000000000000000
--- a/ansible/molecule/pf-std/converge.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default/converge.yml
\ No newline at end of file
diff --git a/ansible/molecule/pf-std/molecule.yml b/ansible/molecule/pf-std/molecule.yml
deleted file mode 100644
index 3501a3fc9851e485bf2e20215af7bee48e5ca6ba..0000000000000000000000000000000000000000
--- a/ansible/molecule/pf-std/molecule.yml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-
-driver:
-  name: docker
-
-platforms:
-  - name: ms-${CI_PIPELINE_ID:-default}
-    environment:
-      HOSTALIASES: /etc/hosts.aliases
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - mediaserver
-      - postgres
-      - celerity
-      - live
-      - mirismanager
-      - mediaimport
-      - msmonitor
-  - name: mw-${CI_PIPELINE_ID:-default}
-    environment:
-      HOSTALIASES: /etc/hosts.aliases
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - mediaworker
-
-provisioner:
-  name: ansible
-  options:
-    D: true
-  env:
-    ANSIBLE_ROLES_PATH: ../../roles
-    ANSIBLE_LIBRARY: ../../library
-    ANSIBLE_ACTION_PLUGINS: ../../plugins/action
-    ANSIBLE_PYTHON_INTERPRETER: /usr/bin/python3
-    SKYREACH_SYSTEM_KEY: ${SKYREACH_SYSTEM_KEY_STD}
-
-verifier:
-  name: testinfra
-  options:
-    verbose: true
-
-...
diff --git a/ansible/molecule/pf-std/tests/commons.py b/ansible/molecule/pf-std/tests/commons.py
deleted file mode 100644
index e7318b13789ba836201d1f683dc0aed4f4fc9937..0000000000000000000000000000000000000000
--- a/ansible/molecule/pf-std/tests/commons.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import socket
-
-
-def get_status(host):
-    ip = host.interface('eth0').addresses[0]
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.connect((ip, 8543))
-    data = s.recv(1024)
-    return data.rstrip().decode('utf-8')
diff --git a/ansible/molecule/pf-std/tests/test_mediaserver.py b/ansible/molecule/pf-std/tests/test_mediaserver.py
deleted file mode 100644
index 6927fc93e69b23786db99d8cb25d1d7de6f95af2..0000000000000000000000000000000000000000
--- a/ansible/molecule/pf-std/tests/test_mediaserver.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("mediaserver")
-
-
-def test_server_is_installed(host):
-    p = host.package("ubicast-mediaserver")
-
-    assert p.is_installed
-
-
-def test_server_user(host):
-    u = host.user("msuser")
-
-    assert u.name == "msuser"
-
-
-def test_server_nginx(host):
-    f = host.file("/etc/nginx/sites-available/mediaserver-msuser.conf")
-
-    assert f.exists
-
-
-def test_server_service(host):
-    s = host.service("mediaserver")
-
-    assert s.is_running
-    assert s.is_enabled
-
-
-def test_server_socket(host):
-    s = host.socket("tcp://0.0.0.0:443")
-
-    assert s.is_listening
-
-
-def test_fail2ban_conf(host):
-    f = host.file("/etc/fail2ban/jail.d/mediaserver.conf")
-
-    assert f.exists
-
-
-def test_fail2ban_service(host):
-    s = host.service("fail2ban")
-
-    assert s.is_running
diff --git a/ansible/molecule/pf-std/tests/test_postgres.py b/ansible/molecule/pf-std/tests/test_postgres.py
deleted file mode 100644
index b8a8516ef6ceb42a496d64cf727ee22dcd72377b..0000000000000000000000000000000000000000
--- a/ansible/molecule/pf-std/tests/test_postgres.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("postgres")
-
-
-def test_psycopg2_is_installed(host):
-    p = host.package("python3-psycopg2")
-
-    assert p.is_installed
-
-
-def test_postgres_is_installed(host):
-    p = host.package("postgresql")
-
-    assert p.is_installed
-
-
-def test_postgres_user(host):
-    u = host.user("postgres")
-
-    assert u.name == "postgres"
-
-
-def test_postgres_service(host):
-    s = host.service("postgresql@11-main")
-
-    assert s.is_running
-
-
-def test_postgres_socket(host):
-    s = host.socket("tcp://127.0.0.1:5432")
-
-    assert s.is_listening
diff --git a/ansible/molecule/pgsql-ha/converge.yml b/ansible/molecule/pgsql-ha/converge.yml
deleted file mode 100644
index 58ea8718716da92a4a5e63aa39d7ef016c1bc3bd..0000000000000000000000000000000000000000
--- a/ansible/molecule/pgsql-ha/converge.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: PYTHON
-  hosts: all
-  gather_facts: false
-  tasks:
-    - name: ensure python3 is installed
-      register: python_install
-      changed_when: "'es_pyinstall' in python_install.stdout_lines"
-      raw: command -v python3 || echo es_pyinstall && apt update && apt install -y python3-minimal python3-apt
-
-- name: Converge
-  hosts: postgres
-  pre_tasks:
-    - name: check running in a docker container
-      register: check_if_docker
-      stat:
-        path: /.dockerenv
-    - name: set docker flag variable
-      set_fact:
-        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
-  roles:
-    - base
-    - postgres-ha
-  post_tasks:
-    - name: deploy letsencrypt certificate
-      when: letsencrypt_enabled | d(false)
-      include_role:
-        name: letsencrypt
-    - name: configure network
-      when: network_apply | d(false)
-      include_role:
-        name: network
-    - name: configure proxy
-      when: proxy_apply | d(false)
-      include_role:
-        name: proxy
-
-...
diff --git a/ansible/molecule/pgsql-ha/molecule.yml b/ansible/molecule/pgsql-ha/molecule.yml
deleted file mode 100644
index 6106312a2af0179f7624f794e1f01ac8d14b9bd6..0000000000000000000000000000000000000000
--- a/ansible/molecule/pgsql-ha/molecule.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-
-driver:
-  name: docker
-
-platforms:
-  - name: db0-${CI_PIPELINE_ID:-default}
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - postgres
-  - name: db1-${CI_PIPELINE_ID:-default}
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - postgres
-  - name: db2-${CI_PIPELINE_ID:-default}
-    image: registry.ubicast.net/docker/debian-systemd:buster
-    command: /lib/systemd/systemd
-    privileged: true
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup:ro
-    tmpfs:
-      - /tmp
-      - /run
-    groups:
-      - postgres
-
-provisioner:
-  name: ansible
-  options:
-    D: true
-  env:
-    ANSIBLE_ROLES_PATH: ../../roles
-    ANSIBLE_LIBRARY: ../../library
-    ANSIBLE_ACTION_PLUGINS: ../../plugins/action
-    ANSIBLE_PYTHON_INTERPRETER: /usr/bin/python3
-    SKYREACH_SYSTEM_KEY: ${SKYREACH_SYSTEM_KEY_HA}
-  inventory:
-    group_vars:
-      postgres:
-        repmgr_password: "testrepmgr"
-  playbooks:
-    side_effect: side_effect.yml
-
-verifier:
-  name: testinfra
-  options:
-    verbose: true
-
-...
diff --git a/ansible/molecule/pgsql-ha/side_effect.yml b/ansible/molecule/pgsql-ha/side_effect.yml
deleted file mode 100644
index b66b60b2f4648862d0e5a33e9dc04af655c8ffc6..0000000000000000000000000000000000000000
--- a/ansible/molecule/pgsql-ha/side_effect.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: first failover
-  import_playbook: side_effect_destroy_master.yml
-
-# Refresh inventory to have a clean inventory (each server in the initial group)
-- name: refresh inventory
-  hosts: all
-  tasks:
-  - name: refresh inventory
-    meta: refresh_inventory
-
-- name: second failover
-  import_playbook: side_effect_destroy_master.yml
diff --git a/ansible/molecule/pgsql-ha/side_effect_destroy_master.yml b/ansible/molecule/pgsql-ha/side_effect_destroy_master.yml
deleted file mode 100644
index 5332953fe58bba9bc012f18b8b84401e5e0f692f..0000000000000000000000000000000000000000
--- a/ansible/molecule/pgsql-ha/side_effect_destroy_master.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: stop master node
-  hosts: postgres[0]
-  tasks:
-  - name: stop postgresql
-    service:
-      name: postgresql@11-main
-      state: stopped
-
-- name: pause for 45 seconds
-  hosts: localhost
-  tasks:
-    - pause:
-        seconds: 45
-
-- name: assign hosts to the correct groups
-  hosts: postgres
-  tasks:
-    - name: add db0 to fenced group
-      add_host:
-        name: "{{ groups['postgres'][0] }}"
-        groups: [ 'postgres_fenced', 'postgres_standby' ]
-
-    - name: add db1 to primary group
-      add_host:
-        name: "{{ groups['postgres'][1] }}"
-        groups: postgres_primary
-
-# db0 is now fenced, import playbook to switch it back to standby
-- import_playbook: ../../playbooks/postgres-maintenance/rephacheck_status.yml
-- import_playbook: ../../playbooks/postgres-maintenance/fenced_to_standby.yml
-
-- name: pause for 45 seconds
-  hosts: localhost
-  tasks:
-    - pause:
-        seconds: 45
-
-# db0 is now standby, import playbook to switch it back as primary
-- import_playbook: ../../playbooks/postgres-maintenance/rephacheck_status.yml
-- import_playbook: ../../playbooks/postgres-maintenance/standby_to_primary.yml
-
-- name: pause for 45 seconds
-  hosts: localhost
-  tasks:
-    - pause:
-        seconds: 45
-
-# db0 is now primary, restart repmgrd
-- import_playbook: ../../playbooks/postgres-maintenance/rephacheck_status.yml
-- import_playbook: ../../playbooks/postgres-maintenance/restart_repmgrd.yml
-
-...
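The fixed 45-second pauses above give repmgr time to converge between failover steps. A sketch of an alternative poll-until-converged helper against the rephacheck status port used by the `commons.py` helper below (the timeout and interval values are illustrative):

```python
import socket
import time


def wait_for_role(ip, expected, port=8543, timeout=120, interval=5):
    """Poll the rephacheck status port until the node reports the
    expected role ("primary", "standby" or "witness") or time out."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with socket.create_connection((ip, port), timeout=5) as s:
                role = s.recv(1024).rstrip().decode("utf-8")
            if role == expected:
                return role
        except OSError:
            pass  # node may still be restarting
        time.sleep(interval)
    raise TimeoutError(f"node {ip} did not reach role {expected!r}")
```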
diff --git a/ansible/molecule/pgsql-ha/tests/commons.py b/ansible/molecule/pgsql-ha/tests/commons.py
deleted file mode 100644
index e7318b13789ba836201d1f683dc0aed4f4fc9937..0000000000000000000000000000000000000000
--- a/ansible/molecule/pgsql-ha/tests/commons.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import socket
-
-
-def get_status(host):
-    ip = host.interface('eth0').addresses[0]
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.connect((ip, 8543))
-    data = s.recv(1024)
-    return data.rstrip().decode('utf-8')
diff --git a/ansible/molecule/pgsql-ha/tests/test_postgres_a_setup.py b/ansible/molecule/pgsql-ha/tests/test_postgres_a_setup.py
deleted file mode 100644
index 3a2adcbbbac8685d89bbce661941943250336bb6..0000000000000000000000000000000000000000
--- a/ansible/molecule/pgsql-ha/tests/test_postgres_a_setup.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-# /!\ This test runs across all servers
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
-    os.environ["MOLECULE_INVENTORY_FILE"]
-).get_hosts("postgres")
-
-
-def test_psycopg2_is_installed(host):
-    p = host.package("python3-psycopg2")
-
-    assert p.is_installed
-
-
-def test_postgres_is_installed(host):
-    p = host.package("postgresql-11")
-
-    assert p.is_installed
-    assert p.version.startswith("11")
-
-
-def test_postgres_user(host):
-    u = host.user("postgres")
-
-    assert u.name == "postgres"
-
-
-def test_postgres_service(host):
-    s = host.service("postgresql@11-main")
-
-    assert s.is_running
-
-
-def test_postgresql_socket(host):
-    s = host.socket("tcp://127.0.0.1:5432")
-
-    assert s.is_listening
diff --git a/ansible/molecule/pgsql-ha/tests/test_postgres_b_cluster_status.py b/ansible/molecule/pgsql-ha/tests/test_postgres_b_cluster_status.py
deleted file mode 100644
index 4bbb6f2d90acf49a97e81c56fb2f4f3268a7bf7d..0000000000000000000000000000000000000000
--- a/ansible/molecule/pgsql-ha/tests/test_postgres_b_cluster_status.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-import commons
-
-# This test runs across all servers
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("postgres")
-
-
-def test_postgresql_check_repmgr_status(host):
-    ''' check if repmgr is working correctly on each node '''
-
-    if host.ansible.get_variables()["inventory_hostname"].startswith("db0"):
-        data = commons.get_status(host)
-        assert data == "primary"
-    if host.ansible.get_variables()["inventory_hostname"].startswith("db1"):
-        data = commons.get_status(host)
-        assert data == "standby"
-    if host.ansible.get_variables()["inventory_hostname"].startswith("db2"):
-        data = commons.get_status(host)
-        assert data == "witness"
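An equivalent, table-driven formulation of the same checks (a sketch; it keeps the `commons.get_status` helper and the role strings above):

```python
import pytest

import commons

# Map each node name prefix to the repmgr role that rephacheck
# is expected to report on that node.
EXPECTED_ROLES = {"db0": "primary", "db1": "standby", "db2": "witness"}


def test_postgresql_check_repmgr_status(host):
    hostname = host.ansible.get_variables()["inventory_hostname"]
    for prefix, role in EXPECTED_ROLES.items():
        if hostname.startswith(prefix):
            assert commons.get_status(host) == role
            return
    pytest.fail("unexpected host: %s" % hostname)
```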
diff --git a/ansible/molecule/pgsql-ha/tests/test_postgres_c_test_cluster_db0_primary.py b/ansible/molecule/pgsql-ha/tests/test_postgres_c_test_cluster_db0_primary.py
deleted file mode 100644
index 0849b3e3a47bbd9d0ffae264f0f58762d5b53d87..0000000000000000000000000000000000000000
--- a/ansible/molecule/pgsql-ha/tests/test_postgres_c_test_cluster_db0_primary.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-# This test module runs only on the db0 node
-hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("postgres")
-testinfra_hosts = [i for i in hosts if i.startswith('db0')]
-
-
-def test_postgresql_create_db(host):
-    ''' check that we can create a db on the primary node of the cluster '''
-
-    s = host.ansible("postgresql_db", "name=test", become=True, check=False, become_user='postgres')
-    assert s["changed"]
-
-
-def test_postgresql_create_table(host):
-    ''' check that we can create a table on the primary node of the cluster '''
-
-    s = host.ansible("postgresql_query", "db=test query='CREATE TABLE test_ha (id SERIAL PRIMARY KEY, name VARCHAR(100) );'", become=True, check=False, become_user='postgres')
-    assert s["changed"]
-
-
-def test_postgresql_insert(host):
-    ''' check that we can write to the primary node of the cluster '''
-
-    s = host.ansible("postgresql_query", "db=test query='INSERT INTO test_ha (name) VALUES (\'test\');'", become=True, check=False, become_user='postgres')
-    assert s["changed"]
diff --git a/ansible/molecule/pgsql-ha/tests/test_postgres_d_test_cluster_db1_secondary.py b/ansible/molecule/pgsql-ha/tests/test_postgres_d_test_cluster_db1_secondary.py
deleted file mode 100644
index 683304aebbd33d67be6da4eb20ce1e757f053c36..0000000000000000000000000000000000000000
--- a/ansible/molecule/pgsql-ha/tests/test_postgres_d_test_cluster_db1_secondary.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-# This test module runs only on the db1 node
-hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("postgres")
-testinfra_hosts = [i for i in hosts if i.startswith('db1')]
-
-
-def test_postgresql_create_db(host):
-    ''' check that we cannot create a db on a standby node of the cluster '''
-
-    s = host.ansible("postgresql_db", "name=test", become=True, check=False, become_user='postgres')
-    assert not s["changed"]
-
-
-def test_postgresql_create_table(host):
-    ''' check that we cannot create a table on a standby node of the cluster '''
-
-    s = host.ansible("postgresql_query", "db=test query='CREATE TABLE test_ha (id SERIAL PRIMARY KEY, name VARCHAR(100) );'", become=True, check=False, become_user='postgres')
-    assert not s["changed"]
-
-
-def test_postgresql_insert(host):
-    ''' check that we cannot write to a standby node of the cluster '''
-
-    s = host.ansible("postgresql_query", "db=test query='INSERT INTO test_ha (name) VALUES (\'test\');'", become=True, check=False, become_user='postgres')
-    assert not s["changed"]
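The write tests in these two modules infer the node role from whether a change is reported. A complementary sketch that asks PostgreSQL directly, assuming the `postgresql_query` module's `query_result` return shape; `pg_is_in_recovery()` is false on a primary and true on a standby:

```python
def test_db1_is_in_recovery(host):
    # On a standby such as db1, pg_is_in_recovery() returns true;
    # on the primary (db0) the same query would return false.
    result = host.ansible(
        "postgresql_query",
        "db=test query='SELECT pg_is_in_recovery()'",
        become=True,
        check=False,
        become_user="postgres",
    )
    assert result["query_result"][0]["pg_is_in_recovery"] is True
```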
diff --git a/ansible/playbooks/base.yml b/ansible/playbooks/base.yml
deleted file mode 100755
index fdb0ed4c0528cdaf66cd63cfbb9c195cd4fd71f7..0000000000000000000000000000000000000000
--- a/ansible/playbooks/base.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: BASE
-  hosts: all
-  tags: all
-  roles:
-    - base
-
-...
diff --git a/ansible/playbooks/bench-monitoring.yml b/ansible/playbooks/bench-monitoring.yml
deleted file mode 100755
index ccf66150335c03795cd7b563bcbdf383ad5d8638..0000000000000000000000000000000000000000
--- a/ansible/playbooks/bench-monitoring.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: DEPLOY ELASTIC KIBANA SERVER
-  hosts: elastic
-  tags: [ 'never', 'monbench' ]
-  vars:
-    - es_heap_size: 2g
-    - es_config:
-        network.host: "{{ hostvars[groups['elastic'][0]]['ansible_default_ipv4']['address'] }}"
-        node.data: true
-        node.master: true
-        cluster.initial_master_nodes: "{{ hostvars[groups['elastic'][0]].ansible_hostname }}"
-    - kibana_server_host: "{{ hostvars[groups['elastic'][0]]['ansible_default_ipv4']['address'] }}"
-    - elastic_host: "{{ es_config['network.host'] }}"
-    - es_api_host: "{{ es_config['network.host'] }}"
-  roles:
-    - elastic.elasticsearch
-    - elastic
-
-- name: DEPLOY METRICBEAT WORKERS
-  hosts: mediaserver,postgres
-  tags: [ 'never', 'monbench' ]
-  vars:
-    - kibana_server_host: "{{ hostvars[groups['elastic'][0]]['ansible_default_ipv4']['address'] }}"
-    - elastic_host: "{{ es_config['network.host'] }}"
-    - es_api_host: "{{ es_config['network.host'] }}"
-  roles:
-    - metricbeat
-
-...
diff --git a/ansible/playbooks/bench.yml b/ansible/playbooks/bench.yml
deleted file mode 100755
index 301f29b492c52317063c69f9e67d99e78406592c..0000000000000000000000000000000000000000
--- a/ansible/playbooks/bench.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: DEPLOY BENCHMARK SERVER
-  hosts: bench_server
-  pre_tasks:
-    - name: "Fail is benchmark server is not unique"
-      fail:
-        msg: "Benchmark server must be unique"
-      when: groups['bench_server'] | length > 1
-  tags: bench_server
-  roles:
-    - bench-server
-  tasks:
-    - name: restart bench-server
-      service:
-        name: bench-server
-        state: restarted
-      tags: [ 'never', 'prepare-bench' ]
-
-- name: DEPLOY BENCHMARK WORKERS
-  hosts: bench_worker
-  tags: bench_worker
-  roles:
-    - bench-worker
-  tasks:
-    - name: restart bench-worker
-      service:
-        name: bench-worker
-        state: restarted
-      tags: [ 'never', 'prepare-bench' ]
-
-...
diff --git a/ansible/playbooks/celerity.yml b/ansible/playbooks/celerity.yml
deleted file mode 100755
index 69751262ef76f16880879502fd0117b173578f73..0000000000000000000000000000000000000000
--- a/ansible/playbooks/celerity.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: CELERITY SERVER
-  hosts: celerity
-  tags: celerity
-  pre_tasks:
-    - name: check running in a docker container
-      register: check_if_docker
-      stat:
-        path: /.dockerenv
-    - name: set docker flag variable
-      set_fact:
-        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
-  roles:
-    - celerity
-  post_tasks:
-    - name: configure network
-      when: network_apply | d(false)
-      include_role:
-        name: network
-    - name: configure proxy
-      when: proxy_apply | d(false)
-      include_role:
-        name: proxy
-
-...
diff --git a/ansible/playbooks/letsencrypt.yml b/ansible/playbooks/letsencrypt.yml
deleted file mode 100755
index 571d59282b9a0bc534ad935fa899d4d4cf98938f..0000000000000000000000000000000000000000
--- a/ansible/playbooks/letsencrypt.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: Let's encrypt
-  hosts: all
-  tags: all
-  roles:
-    - letsencrypt
-
-...
diff --git a/ansible/playbooks/live/README.md b/ansible/playbooks/live/README.md
deleted file mode 100644
index 0c5e322836bca13d06261f052ab4c52873d50e3f..0000000000000000000000000000000000000000
--- a/ansible/playbooks/live/README.md
+++ /dev/null
@@ -1,76 +0,0 @@
-# Description
-
-The envsetup `live` group includes all the hosts that will process live streams.
-In a standard Ubicast case, the live host is the same as the mediaserver.
-In an HA Ubicast case, the live service usually runs on a cluster of two dedicated servers behind a load balancer VIP configured in active/backup mode.
-
-# Prerequisites
-
-The `live` group playbooks **must** be played after the `mediaserver` playbook actions, otherwise the `live` playbook will throw an error when configuring nginx on the `mediaserver` hosts.
-
-# Usage
-
-## Description
-
-You can set up a standard case by setting the `ip_live` value to `127.0.0.1` in your inventory (or leave it blank, as that is the default value).
-For an HA case, you have to set the live cluster VIP address in the `ip_live` variable.
-
-# Ansible
-
-## Inventory variables
-
-| Type | Name        | Default      | Description                             |
-|------|-------------|--------------|-----------------------------------------|
-| Base | ip_live     | 127.0.0.1    | IP/DNS to reach the live server/cluster |
-| HA   | live_domain | live.live.fr | Domain name of the live cluster         |
-| HA   | tmpfs_size  | 2048m        | Size of the tmpfs storing the live chunks (unit g or m) |
-
-**Note**: See the [ubicast prerequisites](https://docs.google.com/document/d/1vAfLq1hgPMYoTlcCs9-yGHfaKdwKirSmpfic1DwfnXo/edit#heading=h.6txdj6tamlvd) for details on the tmpfs size (although 2048m should cover most non-intensive cases)
-
-## Usage examples
-
-### Standard case
-
-* To deploy the live cluster with a global deployment
-
-```
-cd <envsetup dir>/ansible
-ansible-playbook -i inventory/<inventory name> playbooks/site.yml
-```
-
-* To deploy only the live cluster and the Ubicast global tools
-
-```
-cd <envsetup dir>/ansible
-ansible-playbook -i inventory/<inventory name> playbooks/live/deploy-standalone.yml
-```
-
-* To deploy only the live cluster without the Ubicast global tools
-
-```
-cd <envsetup dir>/ansible
-ansible-playbook -i inventory/<inventory name> playbooks/live/deploy-minimal.yml
-```
-
-### HA case
-
-* To deploy the live cluster with a global deployment
-
-```
-cd <envsetup dir>/ansible
-ansible-playbook -i inventory/<inventory name> playbooks/site.yml
-```
-
-* To deploy only the live cluster and the Ubicast global tools
-
-```
-cd <envsetup dir>/ansible
-ansible-playbook -i inventory/<inventory name> playbooks/live/deploy-standalone.yml
-```
-
-* To deploy only the live cluster without the Ubicast global tools
-
-```
-cd <envsetup dir>/ansible
-ansible-playbook -i inventory/<inventory name> playbooks/live/deploy-minimal.yml
-```
diff --git a/ansible/playbooks/live/deploy-minimal.yml b/ansible/playbooks/live/deploy-minimal.yml
deleted file mode 100644
index d2b73cdb586e182bae7af594903262265dc44e3d..0000000000000000000000000000000000000000
--- a/ansible/playbooks/live/deploy-minimal.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-
-- name: LIVE
-  hosts: live
-  gather_facts: false
-  tags: live
-  roles:
-    - live
-
-- import_playbook: subplays/standard-case.yml
-  when: groups['live'] | d('') | length >= 1
-        and (
-          hostvars[groups['live'][0]].ip_live is undefined
-          or hostvars[groups['live'][0]].ip_live == "127.0.0.1"
-        )
-
-- import_playbook: subplays/ha-case.yml
-  when: groups['live'] | d('') | length >= 1
-        and (
-          hostvars[groups['live'][0]].ip_live is defined
-          and hostvars[groups['live'][0]].ip_live != "127.0.0.1"
-        )
-
-...
diff --git a/ansible/playbooks/live/deploy-standalone.yml b/ansible/playbooks/live/deploy-standalone.yml
deleted file mode 100644
index 440ebd3173de47635f8dd0b214020b4ec4d10a9c..0000000000000000000000000000000000000000
--- a/ansible/playbooks/live/deploy-standalone.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-
-- name: LIVE
-  hosts: live
-  tags: live
-  roles:
-    - base
-
-# FIREWALL
-
-- hosts: live
-  tags: live
-  vars:
-    server_firewall_enabled: true
-    server_ferm_rules_filename: live
-    server_ferm_input_rules:
-      - proto:
-          - tcp
-        dport:
-          - 80
-          - 443
-          - 1935
-    server_ferm_output_rules: []
-    server_ferm_global_settings:
-  tasks:
-    - name: firewall
-      when: server_firewall_enabled
-      vars:
-        ferm_rules_filename: "{{ server_ferm_rules_filename }}"
-        ferm_input_rules: "{{ server_ferm_input_rules }}"
-        ferm_output_rules: "{{ server_ferm_output_rules }}"
-        ferm_global_settings: "{{ server_ferm_global_settings }}"
-      include_role:
-        name: ferm-configure
-
-- import_playbook: deploy-minimal.yml
-  tags: live
-
-...
diff --git a/ansible/playbooks/live/functions/create-live-app.yml b/ansible/playbooks/live/functions/create-live-app.yml
deleted file mode 100644
index 053a4a608cf698430300e7d19734671555c81531..0000000000000000000000000000000000000000
--- a/ansible/playbooks/live/functions/create-live-app.yml
+++ /dev/null
@@ -1,196 +0,0 @@
----
-
-- name: Checking the live server(s) live configuration state
-  hosts: live
-  gather_facts: false
-  tags: live
-  tasks:
-    - name: Check the existence of the live configuration
-      stat:
-        path: /etc/nginx/rtmp.d/{{ live_app_name }}.conf
-      register: live_conf_live
-
-    - name: Getting the live configuration content
-      shell: grep -oP '^application \K[A-Za-z0-9]+' /etc/nginx/rtmp.d/{{ live_app_name }}.conf
-      when: live_conf_live.stat.exists
-      register: live_conf_secret
-      changed_when: false
-
-    - name: Extracting the application secret
-      set_fact:
-        live_secret: "{{ live_conf_secret.stdout }}"
-      when: live_conf_live.stat.exists
-
-    - name: Declaring the application secret
-      set_fact:
-        live_secret: ""
-      when: not live_conf_live.stat.exists
-
-- name: Checking the MediaServer(s) live configuration state
-  hosts: mediaserver
-  gather_facts: false
-  tags: live
-  tasks:
-    - name: Check the existence of the live configuration
-      stat:
-        path: /home/{{ live_app_name }}/msinstance/conf/lives.json
-      register: ms_conf_live
-
-    - name: Retrieve the live configuration
-      slurp:
-        src: /home/{{ live_app_name }}/msinstance/conf/lives.json
-      register: ms_live_config
-      when: ms_conf_live.stat.exists
-
-    - name: Extracting the application secret
-      set_fact:
-        live_secret: "{{ ms_live_config.content|b64decode|from_json | json_query('RTMP_APP') }}"
-      when: ms_conf_live.stat.exists
-
-    - name: Declaring the application secret
-      set_fact:
-        live_secret: ""
-      when: not ms_conf_live.stat.exists
-
-- name: Computing the {{ live_app_name }} application secret
-  hosts: localhost
-  gather_facts: false
-  tags: live
-  tasks:
-    - name: Retrieving the first live host's configured app secret as reference
-      set_fact:
-        base_live_secret: "{{ hostvars[groups['live'][0]].live_secret }}"
-        app_secret_diff: false
-      when: hostvars[groups['live'][0]].live_secret | length > 0
-
-    - name: Comparing the app secrets from MS and live servers with the reference
-      set_fact:
-        app_secret_diff: true
-      when: base_live_secret is defined
-            and hostvars[item].live_secret != base_live_secret
-      with_items:
-        - "{{ groups['live'] }}"
-        - "{{ groups['mediaserver'] }}"
-
-    - name: Generating an application secret on localhost with /dev/urandom
-      shell: >
-        set -o pipefail && \
-        cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 30 | head -n 1
-      register: secret
-      args:
-        executable: /bin/bash
-      failed_when: false  # Ansible-lint requires pipefail, but the return code is then non-zero so we have to force this
-      when: base_live_secret is not defined
-            or app_secret_diff
-
-    - name: Deciding the application secret to use
-      set_fact:
-        live_app_secret: "{{ secret.stdout | d(base_live_secret) }}"
-
-- name: Live server(s) - "{{ live_app_name }}" live application configuration
-  hosts: live
-  gather_facts: false
-  tags: live
-  tasks:
-    - name: Check the existence of the RTMP app
-      command: ubicast-livectl get {{ live_app_name }} {{ hostvars['localhost'].live_app_secret }}
-      register: app_status
-      changed_when: false
-      failed_when: false
-
-    - name: (Re)create the RTMP app configuration
-      notify: Reload nginx
-      command: ubicast-livectl add {{ live_app_name }} {{ hostvars['localhost'].live_app_secret }}
-      when: app_status.rc == 1
-
-    - name: Prepare the nginx RTMP temporary directory
-      notify: Reload nginx
-      file:
-        path: /var/tmp/nginx-rtmp/{{ live_app_name }}
-        owner: nginx
-        group: root
-        state: directory
-        mode: '0700'
-
-    - name: Create the nginx RTMP web directory symlink
-      notify: Reload nginx
-      file:
-        src: /var/tmp/nginx-rtmp/{{ live_app_name }}
-        dest: /var/www/{{ live_app_name }}/streaming-rtmp
-        state: link
-        force: true
-      when: deploy_case == "standard"
-
-  handlers:
-    - name: Reload nginx
-      systemd:
-        name: nginx
-        state: reloaded
-
-- name: MediaServer(s) - "{{ live_app_name }}" live application configuration
-  hosts: mediaserver
-  gather_facts: false
-  tags: live
-  tasks:
-    - name: Getting the current lives configuration
-      slurp:
-        src: /home/{{ live_app_name }}/msinstance/conf/lives.json
-      register: lives_config
-      when: ms_conf_live.stat.exists
-
-    # The "W10K" string is decoded to an empty json file => "[]"
-    - name: Store the lives configuration in a variable
-      set_fact:
-        lives_config: "{{ lives_config.content | default('W10K') | b64decode | from_json }}"
-
-    - name: Set the live application secret in lives configuration
-      vars:
-        rtmp_app_line:
-          RTMP_APP: "{{ hostvars['localhost'].live_app_secret }}"
-      set_fact:
-        lives_config: "{{ lives_config | combine(rtmp_app_line) }}"
-
-    - name: Set the RTMP_NAME in lives configuration
-      vars:
-        rtmp_name_line:
-          RTMP_NAME: "{{ live_app_name }}"
-      set_fact:
-        lives_config: "{{ lives_config | combine(rtmp_name_line) }}"
-
-    - name: Set the RTMP_HLS_PLAYBACK_URL in lives configuration
-      vars:
-        rtmp_hls_line:
-          RTMP_HLS_PLAYBACK_URL: "{{ rtmp_hls_url }}"
-      set_fact:
-        lives_config: "{{ lives_config | combine(rtmp_hls_line) }}"
-
-    - name: Set the RTMP_PLAYBACK_URL in lives configuration
-      vars:
-        rtmp_playback_line:
-          RTMP_PLAYBACK_URL: null
-      set_fact:
-        lives_config: "{{ lives_config | combine(rtmp_playback_line) }}"
-
-    - name: Set the RTMP_PUBLISH_URL in lives configuration
-      vars:
-        rtmp_publish_line:
-          RTMP_PUBLISH_URL: "{{ rtmp_pub_url }}"
-      set_fact:
-        lives_config: "{{ lives_config | combine(rtmp_publish_line) }}"
-
-    - name: Update mediaserver lives configuration
-      notify: Restart mediaserver
-      copy:
-        content: "{{ lives_config | to_nice_json }}"
-        dest: "/home/{{ live_app_name }}/msinstance/conf/lives.json"
-        owner: "{{ live_app_name }}"
-        group: "{{ live_app_name }}"
-        mode: '0600'
-
-  handlers:
-    - name: Restart mediaserver
-      systemd:
-        name: mediaserver
-        state: restarted
-
-...
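For reference, the `/dev/urandom | tr | fold | head` pipeline above produces a 30-character alphanumeric secret. A minimal Python equivalent using the standard `secrets` module, shown only to document what the one-liner generates:

```python
import secrets
import string

# 30 alphanumeric characters, like `tr -dc 'a-zA-Z0-9' | fold -w 30 | head -n 1`
ALPHABET = string.ascii_letters + string.digits


def generate_live_app_secret(length: int = 30) -> str:
    return "".join(secrets.choice(ALPHABET) for _ in range(length))


print(generate_live_app_secret())
```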
diff --git a/ansible/playbooks/live/subplays/ha-case.yml b/ansible/playbooks/live/subplays/ha-case.yml
deleted file mode 100644
index b2fc625a6b691771066615d210f413380bba1a1a..0000000000000000000000000000000000000000
--- a/ansible/playbooks/live/subplays/ha-case.yml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-
-- name: Live vhost setup
-  hosts: live
-  tags: live
-  gather_facts: false
-  tasks:
-    - name: resolve domain name to localhost
-      lineinfile:
-        path: /etc/hosts
-        line: '127.0.1.1 {{ live_domain }}'
-        backup: true
-
-    - name: fill the vhost file
-      notify: Restart nginx
-      replace:
-        path: /etc/nginx/sites-available/live-rtmp.conf
-        regexp: '^(\s+server_name)\s+.*(;)$'
-        replace: '\1 {{ live_domain }}\2'
-
-    - name: Activating the live vhost configuration
-      notify: Restart nginx
-      file:
-        src: /etc/nginx/sites-available/live-rtmp.conf
-        dest: /etc/nginx/sites-enabled/live-rtmp.conf
-        state: link
-
-  handlers:
-    - name: Restart nginx
-      systemd:
-        name: nginx
-        state: restarted
-
-- import_playbook: ../functions/create-live-app.yml
-  vars:
-    live_app_name: msuser
-    rtmp_hls_url: "https://{{ hostvars[groups['live'][0]].live_domain }}/streaming-rtmp/%(rtmp_name)s/%(stream_id)s.m3u8"
-    rtmp_pub_url: "rtmp://{{ hostvars[groups['live'][0]].live_domain }}/%(rtmp_app)s/%(stream_id)s"
-    deploy_case: ha
-
-- hosts: mediaserver
-  tags: live
-  gather_facts: false
-  tasks:
-    - name: Check the existence of the rtmp configuration folder
-      stat:
-        path: /etc/nginx/rtmp.d
-      register: rtmp_conf_dir
-
-    - name: Remove unused MediaServer(s) rtmp configurations
-      shell: /bin/rm -f /etc/nginx/rtmp.d/*
-      args:
-        warn: false
-      when: rtmp_conf_dir.stat.exists
-
-...
diff --git a/ansible/playbooks/live/subplays/standard-case.yml b/ansible/playbooks/live/subplays/standard-case.yml
deleted file mode 100644
index 00623dc40acca499c07b6c36340bf2ebd86340ce..0000000000000000000000000000000000000000
--- a/ansible/playbooks/live/subplays/standard-case.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-
-- import_playbook: ../functions/create-live-app.yml
-  vars:
-    live_app_name: msuser
-    rtmp_hls_url: "https://%(ms_host)s/streaming-rtmp/%(stream_id)s.m3u8"
-    rtmp_pub_url: "rtmp://%(ms_host)s/%(rtmp_app)s/%(stream_id)s"
-    deploy_case: standard
-
-...
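The `rtmp_hls_url` and `rtmp_pub_url` values use `%(name)s` placeholders rather than Jinja2, so Ansible passes them through verbatim; presumably the MediaServer expands them later with Python %-style mapping substitution (an assumption based on the placeholder syntax). An illustration with made-up host and stream values:

```python
# Illustrative expansion of the %(name)s placeholders used above;
# the host and stream values are made up for the example.
rtmp_pub_url = "rtmp://%(ms_host)s/%(rtmp_app)s/%(stream_id)s"
print(rtmp_pub_url % {
    "ms_host": "media.example.com",
    "rtmp_app": "msuser",
    "stream_id": "abcdef123456",
})
# -> rtmp://media.example.com/msuser/abcdef123456
```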
diff --git a/ansible/playbooks/mediacache.yml b/ansible/playbooks/mediacache.yml
deleted file mode 100755
index 49dd46fa3c02a8024316ed808770f09c1e255557..0000000000000000000000000000000000000000
--- a/ansible/playbooks/mediacache.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: MEDIACACHE
-  hosts: mediacache
-  tags: mediacache
-  pre_tasks:
-    - name: check running in a docker container
-      register: check_if_docker
-      stat:
-        path: /.dockerenv
-    - name: set docker flag variable
-      set_fact:
-        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
-  roles:
-    - mediacache
-  post_tasks:
-    - name: deploy letsencrypt certificate
-      when: letsencrypt_enabled | d(false)
-      include_role:
-        name: letsencrypt
-    - name: configure network
-      when: network_apply | d(false)
-      include_role:
-        name: network
-    - name: configure proxy
-      when: proxy_apply | d(false)
-      include_role:
-        name: proxy
-
-...
diff --git a/ansible/playbooks/mediacache/README.md b/ansible/playbooks/mediacache/README.md
deleted file mode 100644
index c0beee5cf47c952c04b59d4322a9d79f1f850e7d..0000000000000000000000000000000000000000
--- a/ansible/playbooks/mediacache/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Description
-
-The envsetup `mediacache` group includes all the hosts that will be installed as cache servers for the Ubicast solution media.
-These hosts should be dedicated to the MediaCache solution.
-
-# Prerequisites
-
-The `mediacache` playbooks **must** be played after the `mediaserver` and `live` playbooks actions.
-
-# Ansible
-
-## Inventory variables
-
-| Mandatory | Name                   | Default                          | Description                                    |
-|-----------|------------------------|----------------------------------|------------------------------------------------|
-| Yes       | mediacache_url         |                                  | Domain name of the cache server                |
-| Yes       | ms_url                 |                                  | Domain name of the mediaserver cluster/server  |
-| Yes       | mediacache_vod_folder  | /var/cache/nginx/mediacache-vod  | Folder for the VOD cache storage               |
-| Yes       | mediacache_vod_size    |                                  | Size of the VOD cache storage in GB            |
-| No        | live_url               |                                  | Domain name of the live cluster/server         |
-| No        | mediacache_live_folder | /var/cache/nginx/mediacache-live | Folder for the live cache storage              |
-| No        | mediacache_live_size   | 1                                | Size of the live cache storage in GB           |
-
-**Note**: The `live_url`, `mediacache_live_folder` and `mediacache_live_size` variables become mandatory if you want to configure a cache for live media
-
-## Usage examples
-
-* To deploy MediaCache with a global deployment
-
-```
-cd <envsetup dir>/ansible
-ansible-playbook -i inventory/<inventory name> playbooks/site.yml
-```
-
-* To deploy only MediaCache and the Ubicast global tools
-
-```
-cd <envsetup dir>/ansible
-ansible-playbook -i inventory/<inventory name> playbooks/mediacache.yml
-```
diff --git a/ansible/playbooks/mediacache/deploy-minimal.yml b/ansible/playbooks/mediacache/deploy-minimal.yml
deleted file mode 100644
index 8140c7e2af99845c2aef3df67c3b0d1932103267..0000000000000000000000000000000000000000
--- a/ansible/playbooks/mediacache/deploy-minimal.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-
-- name: MEDIACACHE
-  hosts: mediacache
-  tags: mediacache
-  roles:
-    - mediacache
-  tasks:
-    - name: Getting the IP to trust in terms of securelink
-      set_fact:
-        securelink_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
-
-    - name: authorize mediacache on mediaserver
-      notify: restart nginx on mediaservers
-      lineinfile:
-        path: /etc/nginx/conf.d/mediaserver-securelink.conf
-        line: "{{'\t'}}{{ securelink_ip }} 1;"
-        insertafter: '^geo'
-      delegate_to: "{{ item }}"
-      delegate_facts: true
-      loop: "{{ groups['mediaserver'] }}"
-
-  handlers:
-    - name: restart nginx on mediaservers
-      systemd:
-        name: nginx
-        state: restarted
-      delegate_to: "{{ item }}"
-      delegate_facts: true
-      loop: "{{ groups['mediaserver'] }}"
-
-...
diff --git a/ansible/playbooks/mediacache/deploy-standalone.yml b/ansible/playbooks/mediacache/deploy-standalone.yml
deleted file mode 100644
index 0190c3f0d48ec590383e4144e89c547fec8daf8f..0000000000000000000000000000000000000000
--- a/ansible/playbooks/mediacache/deploy-standalone.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: MEDIACACHE
-  hosts: mediacache
-  tags: mediacache
-  roles:
-    - base
-
-# FIREWALL
-
-- hosts: mediacache
-  tags: mediacache
-  vars:
-    server_firewall_enabled: true
-    server_ferm_rules_filename: mediacache
-    server_ferm_input_rules:
-      - proto:
-          - tcp
-        dport:
-          - 80
-          - 443
-    server_ferm_output_rules: []
-    server_ferm_global_settings:
-  tasks:
-    - name: firewall
-      when: server_firewall_enabled
-      vars:
-        ferm_rules_filename: "{{ server_ferm_rules_filename }}"
-        ferm_input_rules: "{{ server_ferm_input_rules }}"
-        ferm_output_rules: "{{ server_ferm_output_rules }}"
-        ferm_global_settings: "{{ server_ferm_global_settings }}"
-      include_role:
-        name: ferm-configure
-
-- import_playbook: deploy-minimal.yml
-  tags: mediacache
-
-...
diff --git a/ansible/playbooks/mediaimport.yml b/ansible/playbooks/mediaimport.yml
deleted file mode 100755
index 637c84740f61af83600974398b1eff9095f5c800..0000000000000000000000000000000000000000
--- a/ansible/playbooks/mediaimport.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: MEDIAIMPORT
-  hosts: mediaimport
-  tags: mediaimport
-  pre_tasks:
-    - name: check running in a docker container
-      register: check_if_docker
-      stat:
-        path: /.dockerenv
-    - name: set docker flag variable
-      set_fact:
-        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
-  roles:
-    - mediaimport
-  post_tasks:
-    - name: configure network
-      when: network_apply | d(false)
-      include_role:
-        name: network
-    - name: configure proxy
-      when: proxy_apply | d(false)
-      include_role:
-        name: proxy
-
-...
diff --git a/ansible/playbooks/mediaserver.yml b/ansible/playbooks/mediaserver.yml
deleted file mode 100755
index 052ed623a563b8e2a4c4e33b692b1798f2238272..0000000000000000000000000000000000000000
--- a/ansible/playbooks/mediaserver.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: MEDIASERVER
-  hosts: mediaserver
-  tags: mediaserver
-  pre_tasks:
-    - name: check running in a docker container
-      register: check_if_docker
-      stat:
-        path: /.dockerenv
-    - name: set docker flag variable
-      set_fact:
-        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
-  roles:
-    - mediaserver
-  post_tasks:
-    - name: deploy letsencrypt certificate
-      when: letsencrypt_enabled | d(false)
-      include_role:
-        name: letsencrypt
-    - name: configure network
-      when: network_apply | d(false)
-      include_role:
-        name: network
-    - name: configure proxy
-      when: proxy_apply | d(false)
-      include_role:
-        name: proxy
-
-...
diff --git a/ansible/playbooks/mediavault/add_backup.yml b/ansible/playbooks/mediavault/add_backup.yml
deleted file mode 100755
index 62e2ed43d65211a7309ad5dd96284aa0da0ebf60..0000000000000000000000000000000000000000
--- a/ansible/playbooks/mediavault/add_backup.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: MEDIAVAULT
-  hosts: mediavault
-  tags: mediavault
-  tasks:
-  - include_tasks: ressources/add_backup_task.yml
-    loop: "{{ mvt_backups }}"
-
-...
diff --git a/ansible/playbooks/mediavault/deploy.yml b/ansible/playbooks/mediavault/deploy.yml
deleted file mode 100755
index a45939fa684fc80f98a1577e43f043585a10358d..0000000000000000000000000000000000000000
--- a/ansible/playbooks/mediavault/deploy.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: MEDIAVAULT
-  hosts: mediavault
-  tags: mediavault
-  pre_tasks:
-    - name: check running in a docker container
-      register: check_if_docker
-      stat:
-        path: /.dockerenv
-    - name: set docker flag variable
-      set_fact:
-        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
-  roles:
-    - mediavault
-  post_tasks:
-    - name: configure network
-      when: network_apply | d(false)
-      include_role:
-        name: network
-    - name: configure proxy
-      when: proxy_apply | d(false)
-      include_role:
-        name: proxy
-
-...
diff --git a/ansible/playbooks/mediavault/ressources/add_backup_task.yml b/ansible/playbooks/mediavault/ressources/add_backup_task.yml
deleted file mode 100644
index d20110e6fc11ec6efd8a780bf7e15ef6cecaa99d..0000000000000000000000000000000000000000
--- a/ansible/playbooks/mediavault/ressources/add_backup_task.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-
-- name: get {{ item.name }} backup information
-  stat: path={{ item.dest }}/backup.marker
-  register: backup_marker
-
-- name: create {{ item.name }} backup
-  shell: mediavaultctl add --backup-name "{{ item.name }}" --source-folder "{{ item.source }}" --dest-folder "{{ item.dest }}"
-  when: not backup_marker.stat.exists
-
-...
diff --git a/ansible/playbooks/mediaworker.yml b/ansible/playbooks/mediaworker.yml
deleted file mode 100755
index 1f0464c3bccdb8bff3a0c0bedd30c4d63788c4ee..0000000000000000000000000000000000000000
--- a/ansible/playbooks/mediaworker.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: MEDIAWORKER
-  hosts: mediaworker
-  tags: mediaworker
-  pre_tasks:
-    - name: check running in a docker container
-      register: check_if_docker
-      stat:
-        path: /.dockerenv
-    - name: set docker flag variable
-      set_fact:
-        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
-  roles:
-    - mediaworker
-  post_tasks:
-    - name: configure network
-      when: network_apply | d(false)
-      include_role:
-        name: network
-    - name: configure proxy
-      when: proxy_apply | d(false)
-      include_role:
-        name: proxy
-
-...
diff --git a/ansible/playbooks/migrate-debian.yml b/ansible/playbooks/migrate-debian.yml
deleted file mode 100755
index c714dbf6ddfdd05314eb527c005997e0d0be07a9..0000000000000000000000000000000000000000
--- a/ansible/playbooks/migrate-debian.yml
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: MIGRATE TO DEBIAN 10
-  hosts: all
-  tasks:
-
-    - name: check / space
-      shell:
-        cmd: '[ $(df --output="avail" / | tail -n 1) -gt 4000000 ]'
-
-    - name: check /boot space
-      shell:
-        cmd: '[ $(df --output="avail" /boot | tail -n 1) -gt 300000 ]'
-
-    - name: dist-upgrade current ubuntu
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        update_cache: true
-        dpkg_options: force-confnew
-        upgrade: dist
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: autoremove current ubuntu
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        autoclean: true
-        autoremove: true
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: list ubicast packages
-      shell:
-        cmd: |
-          rm -f /root/ubicast-installed;
-          for pkg in 'ubicast-mediaserver' 'ubicast-mediaserver-runtime' 'ubicast-monitor' 'ubicast-monitor-runtime' 'ubicast-skyreach' 'ubicast-skyreach-runtime' 'celerity-server' 'celerity-workers'; do
-            dpkg -s "$pkg" >/dev/null 2>&1 && echo -n "$pkg " | tee -a '/root/ubicast-installed';
-            echo '';
-          done
-
-    - name: dump mediaserver database
-      shell:
-        cmd: /usr/bin/mscontroller.py dump
-
-    - name: dump skyreach database
-      shell:
-        cmd: /home/skyreach/htdocs/skyreach_site/scripts/control.sh dump
-
-    - name: stop services
-      loop:
-        - nginx
-        - msmonitor
-        - mediaserver
-        - skyreach
-      systemd:
-        name: "{{ item }}"
-        state: stopped
-
-    - name: add debian keys
-      loop:
-        - https://ftp-master.debian.org/keys/archive-key-10.asc
-        - https://ftp-master.debian.org/keys/archive-key-10-security.asc
-      apt_key:
-        url: "{{ item }}"
-
-    - name: disable skyreach repository
-      shell:
-        cmd: mv -f /etc/apt/sources.list.d/skyreach.list /etc/apt/sources.list.d/skyreach.list.migrate
-
-    - name: update sources list
-      copy:
-        dest: /etc/apt/sources.list
-        content: |
-          deb http://ftp.debian.org/debian buster main contrib non-free
-          deb http://ftp.debian.org/debian buster-updates main contrib non-free
-          deb http://security.debian.org buster/updates main contrib non-free
-
-    - name: install debian keyring
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        update_cache: true
-        name: debian-archive-keyring
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: upgrade to debian
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        update_cache: true
-        dpkg_options: force-confnew
-        upgrade: dist
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: autoremove debian
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        autoclean: true
-        autoremove: true
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: install apt-show-versions
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        name: apt-show-versions
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: install debian version of packages
-      shell:
-        cmd: "apt-get install $(apt-show-versions | grep -P 'newer than version in archive' | awk -F: '{print $1\"/buster\"}')"
-
-    - name: upgrade
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        update_cache: true
-        dpkg_options: force-confnew
-        upgrade: dist
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: autoremove
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        autoclean: true
-        autoremove: true
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-...
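For reference, a minimal invocation sketch for the migration playbook above; the `inventories/<inventory name>` layout mirrors the tests README and is an assumption here, and `--limit` is the standard ansible-playbook flag for restricting a destructive run to a single host:

```
cd <envsetup dir>/ansible
ansible-playbook -i inventories/<inventory name> playbooks/migrate-debian.yml --limit <host>
```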
diff --git a/ansible/playbooks/mirismanager.yml b/ansible/playbooks/mirismanager.yml
deleted file mode 100755
index a67d22104d707dd443e623ffd33cef0baca6b44e..0000000000000000000000000000000000000000
--- a/ansible/playbooks/mirismanager.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: MIRIS MANAGER
-  hosts: mirismanager
-  tags: mirismanager
-  pre_tasks:
-    - name: check running in a docker container
-      register: check_if_docker
-      stat:
-        path: /.dockerenv
-    - name: set docker flag variable
-      set_fact:
-        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
-  roles:
-    - mirismanager
-  post_tasks:
-    - name: deploy letsencrypt certificate
-      when: letsencrypt_enabled | d(false)
-      include_role:
-        name: letsencrypt
-    - name: configure network
-      when: network_apply | d(false)
-      include_role:
-        name: network
-    - name: configure proxy
-      when: proxy_apply | d(false)
-      include_role:
-        name: proxy
-
-...
diff --git a/ansible/playbooks/munin/all.yml b/ansible/playbooks/munin/all.yml
deleted file mode 100644
index 0e46d98d456f9d9a2476705ca2575bffa0c418c3..0000000000000000000000000000000000000000
--- a/ansible/playbooks/munin/all.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- import_playbook: msmonitor.yml
-  tags: monitor
-- import_playbook: munin-node.yml
-  tags: monitor
-- import_playbook: munin-server.yml
-  tags: monitor
-
-...
diff --git a/ansible/playbooks/munin/msmonitor.yml b/ansible/playbooks/munin/msmonitor.yml
deleted file mode 100644
index 7b066cd2a18517ae5303a7a3296447c8533153a7..0000000000000000000000000000000000000000
--- a/ansible/playbooks/munin/msmonitor.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: msmonitor
-  hosts: msmonitor
-  tags: munin
-  roles:
-    - munin/msmonitor
-
-...
diff --git a/ansible/playbooks/munin/munin-node.yml b/ansible/playbooks/munin/munin-node.yml
deleted file mode 100644
index fd3de0664613d652bf616a59ec68bb9f8addf6e4..0000000000000000000000000000000000000000
--- a/ansible/playbooks/munin/munin-node.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: Munin node
-  hosts: munin_node
-  tags: munin
-  roles:
-    - munin/munin-node
-
-...
diff --git a/ansible/playbooks/munin/munin-server.yml b/ansible/playbooks/munin/munin-server.yml
deleted file mode 100644
index cebc054e0c78d283740c0f641564206936f9a4b7..0000000000000000000000000000000000000000
--- a/ansible/playbooks/munin/munin-server.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: Munin server
-  hosts: munin_server
-  tags: munin
-  pre_tasks:
-    - name: gather munin_node group facts
-      setup:
-      delegate_to: "{{ item }}"
-      delegate_facts: true
-      with_items: "{{ groups['munin_node'] }}"
-  roles:
-    - munin/munin-server
-
-...
diff --git a/ansible/playbooks/netcapture.yml b/ansible/playbooks/netcapture.yml
deleted file mode 100755
index aa1b337227ba3238687bb3f7e42e5ed25a947ffc..0000000000000000000000000000000000000000
--- a/ansible/playbooks/netcapture.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: NETCAPTURE
-  hosts: netcapture
-  tags: netcapture
-  pre_tasks:
-    - name: check running in a docker container
-      register: check_if_docker
-      stat:
-        path: /.dockerenv
-    - name: set docker flag variable
-      set_fact:
-        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
-  roles:
-    - netcapture
-  post_tasks:
-    - name: configure network
-      when: network_apply | d(false)
-      include_role:
-        name: network
-    - name: configure proxy
-      when: proxy_apply | d(false)
-      include_role:
-        name: proxy
-
-...
diff --git a/ansible/playbooks/postfix.yml b/ansible/playbooks/postfix.yml
deleted file mode 100755
index 719fba051f559046933342f416bab88ee0a94377..0000000000000000000000000000000000000000
--- a/ansible/playbooks/postfix.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: POSTFIX
-  hosts: all
-  tags: all
-  roles:
-    - conf
-    - postfix
-
-...
diff --git a/ansible/playbooks/postgres-ha.yml b/ansible/playbooks/postgres-ha.yml
deleted file mode 100755
index 8e986d5a518369a1eb8bd37b7e7137e21d63426c..0000000000000000000000000000000000000000
--- a/ansible/playbooks/postgres-ha.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: POSTGRES HA
-  hosts: postgres
-  tags: postgres
-  pre_tasks:
-    - name: check that repmgr_node_id is set
-      assert:
-        that: repmgr_node_id != ""
-        quiet: true
-    - name: check that repmgr_primary_node is set
-      assert:
-        that: repmgr_primary_node != ""
-        quiet: true
-    - name: install psycopg2
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        name: python3-psycopg2
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-  roles:
-    - postgres-ha
-
-- name: POSTGRES HA CLIENTS
-  hosts: mediaserver
-  tags: [ 'postgres', 'mediaserver' ]
-  pre_tasks:
-    - name: check that haproxy is configured
-      assert:
-        that: hap_config_listen != ""
-        quiet: true
-  roles:
-    - haproxy
-
-...
diff --git a/ansible/playbooks/postgres-maintenance.yml b/ansible/playbooks/postgres-maintenance.yml
deleted file mode 100755
index b4774ee9f567a90d95605b2175061c2308f02a55..0000000000000000000000000000000000000000
--- a/ansible/playbooks/postgres-maintenance.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- import_playbook: postgres-maintenance/rephacheck_status.yml
-  tags: [ 'always' ]
-
-- import_playbook: postgres-maintenance/fenced_to_standby.yml
-  tags: [ 'never', 'fenced-to-standby' ]
-
-- import_playbook: postgres-maintenance/standby_to_primary.yml
-  tags: [ 'never', 'standby-to-primary' ]
-
-- import_playbook: postgres-maintenance/restart_repmgrd.yml
-  tags: [ 'never', 'restart-repmgrd', 'standby-to-primary' ]
-
-...
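The `never` tag above keeps the switchover plays out of default runs: they execute only when their tag is requested explicitly, while the status check (tagged `always`) runs every time. A usage sketch, with the inventory path assumed:

```
cd <envsetup dir>/ansible
# status check only (tagged 'always')
ansible-playbook -i inventories/<inventory name> playbooks/postgres-maintenance.yml
# promote a standby explicitly (gated by the 'never' tag)
ansible-playbook -i inventories/<inventory name> playbooks/postgres-maintenance.yml --tags standby-to-primary
```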
diff --git a/ansible/playbooks/postgres-maintenance/fenced_to_standby.yml b/ansible/playbooks/postgres-maintenance/fenced_to_standby.yml
deleted file mode 100644
index f4b19d1a56e592fd0401037f43998f4698da30f8..0000000000000000000000000000000000000000
--- a/ansible/playbooks/postgres-maintenance/fenced_to_standby.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-- name: POSTGRESQL SWITCH CURRENT FENCED TO STANDBY
-  hosts: postgres_fenced
-  tasks:
-    - name: fail if node status is not fenced
-      fail:
-        msg: "Current status {{ rephacheck['stdout'] }} must be fenced."
-      when: rephacheck['stdout'] != "fenced"
-    - name: stop postgresql
-      systemd:
-        name: postgresql
-        state: stopped
-    - name: delete postgresql data directory
-      file:
-        path: /var/lib/postgresql/11/main/
-        state: absent
-        force: true
-    - name: copy data from primary
-      command: "repmgr -f /etc/postgresql/11/main/repmgr.conf --force --verbose standby clone -h {{ hostvars[groups['postgres_primary'][0]]['ansible_default_ipv4']['address'] }} -d repmgr -U repmgr -c"
-      become: true
-      become_user: postgres
-      register: copy_from_primary
-    - name: start postgresql
-      systemd:
-        name: postgresql
-        state: started
-      when: copy_from_primary is succeeded
-    - name: register node as standby
-      command: "repmgr -f /etc/postgresql/11/main/repmgr.conf --force --verbose standby register"
-      become: true
-      become_user: postgres
-      when: copy_from_primary is succeeded
-
-...
diff --git a/ansible/playbooks/postgres-maintenance/rephacheck_status.yml b/ansible/playbooks/postgres-maintenance/rephacheck_status.yml
deleted file mode 100644
index b22dec19a7b53300534c6903868e553b52695490..0000000000000000000000000000000000000000
--- a/ansible/playbooks/postgres-maintenance/rephacheck_status.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-- name: REPHACHECK STATUS
-  hosts: postgres_primary:postgres_standby:postgres_fenced
-  tasks:
-    - name: get cluster state
-      command: "rephacheck"
-      register: rephacheck
-    - name: show status for each node
-      debug:
-        msg: "Current node {{ ansible_hostname }} status {{ rephacheck['stdout'] }}"
-      when: rephacheck['stdout'] != ""
-
-...
diff --git a/ansible/playbooks/postgres-maintenance/restart_repmgrd.yml b/ansible/playbooks/postgres-maintenance/restart_repmgrd.yml
deleted file mode 100644
index 4753a56a824df950b92233d98e9ec6e9977e5edc..0000000000000000000000000000000000000000
--- a/ansible/playbooks/postgres-maintenance/restart_repmgrd.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-- name: REPMGRD RESTART
-  hosts: postgres
-  tasks:
-    - name: kill repmgrd
-      command: "pkill repmgrd"
-      ignore_errors: true
-    - name: restart repmgrd
-      systemd:
-        name: repmgrd
-        state: restarted
-
-...
diff --git a/ansible/playbooks/postgres-maintenance/standby_to_primary.yml b/ansible/playbooks/postgres-maintenance/standby_to_primary.yml
deleted file mode 100644
index bfce1c64b61e4bf28131f2a5dcb4907c2b35e3b0..0000000000000000000000000000000000000000
--- a/ansible/playbooks/postgres-maintenance/standby_to_primary.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-- name: POSTGRESQL SWITCH CURRENT STANDBY TO PRIMARY
-  hosts: postgres_standby
-  tasks:
-    - name: fail if node status is not standby
-      fail:
-        msg: "Current status {{ rephacheck['stdout'] }} must be standby."
-      when: rephacheck['stdout'] != "standby"
-    - name: dry-run the switchover while the node is in standby
-      command: "repmgr standby switchover -f /etc/postgresql/11/main/repmgr.conf --siblings-follow --dry-run"
-      become: true
-      become_user: postgres
-      when: rephacheck['stdout'] == "standby"
-      register: standby_dry_run
-    - name: switch standby node to primary
-      command: "repmgr standby switchover -f /etc/postgresql/11/main/repmgr.conf --siblings-follow"
-      become: true
-      become_user: postgres
-      when:
-        - standby_dry_run is succeeded
-        - rephacheck['stdout'] == "standby"
-
-...
diff --git a/ansible/playbooks/postgres.yml b/ansible/playbooks/postgres.yml
deleted file mode 100755
index 85b3916fe37a32fb822d6ef6dfc74a850aece751..0000000000000000000000000000000000000000
--- a/ansible/playbooks/postgres.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: POSTGRESQL
-  hosts: postgres
-  tags: postgres
-  pre_tasks:
-    - name: check running in a docker container
-      register: check_if_docker
-      stat:
-        path: /.dockerenv
-    - name: set docker flag variable
-      set_fact:
-        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
-  roles:
-    - postgres
-  post_tasks:
-    - name: configure network
-      when: network_apply | d(false)
-      include_role:
-        name: network
-    - name: configure proxy
-      when: proxy_apply | d(false)
-      include_role:
-        name: proxy
-
-...
diff --git a/ansible/playbooks/repos.yml b/ansible/playbooks/repos.yml
deleted file mode 100755
index 13cff05b96ac184bef1fd22753fd736ab18e6c37..0000000000000000000000000000000000000000
--- a/ansible/playbooks/repos.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: REPOSITORY
-  hosts: all
-  tags: all
-  roles:
-    - conf
-    - repos
-
-...
diff --git a/ansible/playbooks/site.yml b/ansible/playbooks/site.yml
deleted file mode 100755
index 8af1d970be6300b5b1a9d0d6cc5c36d1b491c799..0000000000000000000000000000000000000000
--- a/ansible/playbooks/site.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: PYTHON
-  hosts: all
-  gather_facts: false
-
-  tasks:
-    - name: ensure python3 is installed
-      register: python_install
-      changed_when: "'es_pyinstall' in python_install.stdout_lines"
-      raw: command -v python3 || echo es_pyinstall && apt update && apt install -y python3-minimal python3-apt iproute2
-      tags: always
-
-- import_playbook: "{{ 'postgres-ha' if groups['postgres']|d('') | length > 1 else 'postgres' }}.yml"
-  tags: postgres
-
-- import_playbook: mirismanager.yml
-  tags: manager
-
-- import_playbook: celerity.yml
-  tags: celerity
-
-- import_playbook: mediaworker.yml
-  tags: worker
-
-- import_playbook: mediaserver.yml
-  tags: server
-
-- import_playbook: live/deploy-standalone.yml
-  tags: live
-
-- import_playbook: mediacache/deploy-standalone.yml
-  tags: mediacache
-
-- import_playbook: mediavault/deploy.yml
-  tags: vault
-
-- import_playbook: mediaimport.yml
-  tags: import
-
-- import_playbook: netcapture.yml
-  tags: netcapture
-
-- import_playbook: bench.yml
-  tags: bench
-
-- import_playbook: munin/all.yml
-  tags: monitor
-
-...
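Each import above carries its own tag, so this site playbook can deploy everything or a single component. A usage sketch (inventory path assumed):

```
cd <envsetup dir>/ansible
# full deployment
ansible-playbook -i inventories/<inventory name> playbooks/site.yml
# only the MediaServer play
ansible-playbook -i inventories/<inventory name> playbooks/site.yml --tags server
```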
diff --git a/ansible/playbooks/site_docker.yml b/ansible/playbooks/site_docker.yml
deleted file mode 100755
index 07da26f7540a16a57712cd7afb356c10c92f903f..0000000000000000000000000000000000000000
--- a/ansible/playbooks/site_docker.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: DOCKER CONTAINERS PROVISIONING
-  hosts: localhost
-  connection: local
-  tags: always
-  tasks:
-    - name: Create docker containers from inventory
-      docker_container:
-        name: "{{ item }}"
-        image: registry.ubicast.net/docker/debian-systemd:buster
-        privileged: true
-        command: /lib/systemd/systemd
-        state: started
-        volumes:
-          - /sys/fs/cgroup:/sys/fs/cgroup:ro
-        tmpfs:
-          - /tmp
-          - /run
-      with_inventory_hostnames:
-        - all:!localhost
-
-    - name: add host to inventory
-      add_host:
-        name: "{{ item }}"
-        ansible_host: "{{ item }}"
-        ansible_connection: docker
-        ansible_python_interpreter: /usr/bin/python3
-        in_docker: true
-      with_inventory_hostnames:
-        - all:!localhost
-
-- import_playbook: site.yml
-
-...
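This playbook first creates one privileged systemd container per inventory host (everything except localhost), registers each with the docker connection plugin, then chains into site.yml. A usage sketch, assuming a local Docker daemon and access to the referenced registry image:

```
cd <envsetup dir>/ansible
ansible-playbook -i inventories/<inventory name> playbooks/site_docker.yml
```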
diff --git a/ansible/playbooks/tests/README.md b/ansible/playbooks/tests/README.md
deleted file mode 100644
index ac92de167f2d6bfa42a71b22a290893c23275d57..0000000000000000000000000000000000000000
--- a/ansible/playbooks/tests/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Description
-
-These playbooks are used to check the prerequisites before deployment.
-
-# Prerequisites
-
-- A complete inventory of the planned deployment (mediaserver, mediaworker, ...)
-- The proxy URL, if applicable
-
-# Ansible
-
-## Usage examples
-
-* To test firewall rules
-
-```
-cd <envsetup dir>/ansible
-ANSIBLE_DISPLAY_SKIPPED_HOSTS=false ansible-playbook -i inventories/<inventory name> playbooks/tests/firewall-rules.yml
-```
-
-* To test the /data partition
-
-```
-cd <envsetup dir>/ansible
-ANSIBLE_DISPLAY_SKIPPED_HOSTS=false ansible-playbook -i inventories/<inventory name> playbooks/tests/data-partition.yml
-```
-
diff --git a/ansible/playbooks/tests/data-partition.yml b/ansible/playbooks/tests/data-partition.yml
deleted file mode 100755
index 49bf0c4130296c68f9498f167ffa49c0b75717be..0000000000000000000000000000000000000000
--- a/ansible/playbooks/tests/data-partition.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: TEST DATA PARTITION
-  hosts: mediaserver
-  gather_facts: false
-  tasks:
-
-    - name: verify /data partition existence
-      shell: findmnt /data
-      register: data_exist
-      failed_when: false
-      changed_when: false
-
-    # /data exist
-    - block:
-
-        - name: get /data size
-          shell: df -BG /data --output=size | tail -n1 | grep -o '[0-9]*'
-          register: data_size
-          failed_when: false
-          changed_when: false
-
-        - name: print size
-          debug:
-            msg: "/data size is {{ data_size.stdout }}G"
-
-        - name: create a test directory in /data
-          file:
-            path: /data/test
-            state: directory
-            mode: '0755'
-            owner: nobody
-            group: nogroup
-          ignore_errors: true
-          changed_when: false
-
-        - name: create a test file in /data
-          file:
-            state: touch
-            path: /data/test/file
-            mode: '0644'
-            owner: nobody
-            group: nogroup
-          ignore_errors: true
-          become: true
-          become_method: sudo
-          become_user: nobody
-          changed_when: false
-
-      when: data_exist.rc == 0
-
-    # /data missing
-    - block:
-
-        - name: get /home size
-          shell: df -BG /home --output=size | tail -n1 | grep -o '[0-9]*'
-          register: home_size
-          failed_when: false
-          changed_when: false
-
-        - name: verify size
-          debug:
-            msg: "/home size is too short ({{ home_size.stdout }}G < 200G)"
-          when: home_size.stdout | int < 200
-          ignore_errors: true
-          failed_when: true
-
-      when: data_exist.rc != 0
-
-...
diff --git a/ansible/playbooks/tests/exec-tester.yml b/ansible/playbooks/tests/exec-tester.yml
deleted file mode 100755
index 1165646d4d72af900b82ed183cb95c6fef6b7651..0000000000000000000000000000000000000000
--- a/ansible/playbooks/tests/exec-tester.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: RUN TESTER
-  hosts: all
-  tags: tester
-  gather_facts: false
-  vars:
-    tester_reset_log: false
-  tasks:
-    - name: remove envsetup tester log
-      when: tester_reset_log
-      file:
-        path: /root/envsetup/tests/logs/tester_pb.log
-        state: absent
-    - name: envsetup tester
-      shell:
-        cmd: |
-          set -o pipefail
-          python3 /root/envsetup/tests/tester.py 2>&1 | tee /root/envsetup/tests/logs/tester_pb.log
-        creates: /root/envsetup/tests/logs/tester_pb.log
-        executable: /bin/bash
-
-...
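Because the shell task above uses `creates:`, a second run is skipped while the log file exists; setting `tester_reset_log` removes the log first and forces a fresh run. A usage sketch (inventory path assumed):

```
cd <envsetup dir>/ansible
ansible-playbook -i inventories/<inventory name> playbooks/tests/exec-tester.yml -e tester_reset_log=true
```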
diff --git a/ansible/playbooks/tests/firewall-rules.yml b/ansible/playbooks/tests/firewall-rules.yml
deleted file mode 100755
index 605ddf34db3f43bbb520b8a721aadf153d7d6ee7..0000000000000000000000000000000000000000
--- a/ansible/playbooks/tests/firewall-rules.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: GATHER ALL FACTS
-  hosts: all
-  tasks:
-    - name: ensure python3 is installed
-      register: python_install
-      changed_when: "'es_pyinstall' in python_install.stdout_lines"
-      raw: command -v python3 || echo es_pyinstall && apt update && apt install -y python3-minimal python3-apt iproute2
-      tags: always
-
-- name: SET PORTS TO LISTEN
-  hosts: all
-  gather_facts: false
-  tasks:
-
-    - include_vars:
-        file: ressources/firewall/rules.yml
-
-    - include: ressources/firewall/listen.yml
-      # loop over listen vars (named outer_item) and call firewall-listen
-      loop: "{{ listen }}"
-      loop_control:
-        loop_var: outer_item
-      # execute loop only when group exists and host is in listen.groupname
-      when: (outer_item.groupname in groups) and (inventory_hostname in groups[outer_item.groupname])
-
-- name: TEST FIREWALL RULES
-  hosts: all
-  gather_facts: false
-  tasks:
-
-    - include_vars:
-        file: ressources/firewall/rules.yml
-
-    - name: test rules
-      include: ressources/firewall/test-rule.yml
-      # loop over test vars (named outer_item) and call test-rule
-      loop: "{{ test }}"
-      loop_control:
-        loop_var: outer_item
-      # execute loop only when group exists and host is in test.groupname_src
-      when: (outer_item.groupname_src in groups) and (inventory_hostname in groups[outer_item.groupname_src])
-
-...
diff --git a/ansible/playbooks/tests/ressources/firewall/listen.yml b/ansible/playbooks/tests/ressources/firewall/listen.yml
deleted file mode 100644
index f1c97b6056dc5cc959c413f5daa11fab134d117e..0000000000000000000000000000000000000000
--- a/ansible/playbooks/tests/ressources/firewall/listen.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-
-- debug:
-    msg: "On {{ outer_item.groupname }} server(s) put {{ outer_item.ports }} port(s) in listen mode"
-
-- shell: "nohup timeout 300 nc -lp {{ item }} >/dev/null 2>&1 &"
-  ignore_errors: true
-  loop: "{{ outer_item.ports }}"
-  changed_when: false
-
-...
diff --git a/ansible/playbooks/tests/ressources/firewall/rules.yml b/ansible/playbooks/tests/ressources/firewall/rules.yml
deleted file mode 100644
index 0ea422c325cff6d0831c97374317be1c0bd43de2..0000000000000000000000000000000000000000
--- a/ansible/playbooks/tests/ressources/firewall/rules.yml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-
-listen:
-
-  - groupname: mediaserver
-    ports: [ "80", "443" ]
-
-  - groupname: celerity
-    ports: [ "6200" ]
-
-  - groupname: wowza
-    ports: [ "1935" ]
-
-  - groupname: mirismanager
-    ports: [ "22", "443" ]
-
-  - groupname: mediaimport
-    ports: [ "20", "22" ]
-
-  - groupname: all
-    ports: [ "4949" ]
-
-  - groupname: postgres
-    ports: [ "5432", "22" ]
-
-
-test:
-
-  - groupname_src: mediaworker
-    groupname_dst: mediaserver
-    ports: [ "80", "443" ]
-
-  - groupname_src: mediaworker
-    groupname_dst: celerity
-    ports: [ "6200" ]
-
-  - groupname_src: mediaserver
-    groupname_dst: celerity
-    ports: [ "6200" ]
-
-  - groupname_src: mediaserver
-    groupname_dst: mediacache
-    ports: [ "22", "443" ]
-
-  - groupname_src: mediacache
-    groupname_dst: mediaserver
-    ports: [ "80", "443" ]
-
-  - groupname_src: mediaserver
-    hosts_dst: [ "mirismanager.ubicast.eu" ]
-    ports: [ "80", "443" ]
-
-  - groupname_src: mediaserver
-    hosts_dst: [ 'wowzalicense1.wowzamedia.com', 'wowzalicense2.wowzamedia.com', 'wowzalicense3.wowzamedia.com', 'wowzalicense4.wowzamedia.com' ]
-    ports: [ "80" ]
-
-  - groupname_src: mediaserver
-    groupname_dst: netcapture
-    ports: [ "22" ]
-
-  - groupname_src: netcapture
-    groupname_dst: mediaserver
-    ports: [ "443", "1935" ]
-
-  - groupname_src: mediaserver
-    hosts_dst: [ "git.ubicast.net" ]
-    ports: [ "22" ]
-
-  - groupname_src: localhost
-    groupname_dst: mediaserver
-    ports: [ "80", "443" ]
-
-...
diff --git a/ansible/playbooks/tests/ressources/firewall/test-rule.yml b/ansible/playbooks/tests/ressources/firewall/test-rule.yml
deleted file mode 100644
index 7ca6414c7c2bc9aebe6d3686436e827252800a2b..0000000000000000000000000000000000000000
--- a/ansible/playbooks/tests/ressources/firewall/test-rule.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-
-# test rules with direct hosts destination
-- block:
-    - debug:
-        msg: "Test rule from {{ outer_item.groupname_src }} to {{ outer_item.hosts_dst }} on {{ outer_item.ports }} port(s)"
-
-    - shell: "nc -zv {{ item.0 }} {{ item.1 }}"
-      ignore_errors: true
-      loop: "{{ outer_item.hosts_dst | product(outer_item.ports) | list }}"
-      when: proxy is not defined
-      changed_when: false
-
-    - shell: "nc -x {{ proxy }} -X Connect -zv {{ item.0 }} {{ item.1 }}"
-      ignore_errors: true
-      loop: "{{ outer_item.hosts_dst | product(outer_item.ports) | list }}"
-      when: proxy is defined
-      changed_when: false
-  when: outer_item.hosts_dst is defined
-
-# test rules with ansible group destination
-- block:
-    - debug:
-        msg: "Test rule from {{ outer_item.groupname_src }} to {{ outer_item.groupname_dst }} on {{ outer_item.ports }} port(s)"
-
-    - shell: "nc -zv {{ item.0 }} {{ item.1 }}"
-      ignore_errors: true
-      loop: "{{ groups[outer_item.groupname_dst] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list
-                | product(outer_item.ports) | list }}"
-      when: proxy is not defined
-      changed_when: false
-
-    - shell: "nc -x {{ proxy }} -X Connect -zv {{ item.0 }} {{ item.1 }}"
-      ignore_errors: true
-      loop: "{{ groups[outer_item.groupname_dst] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list
-                | product(outer_item.ports) | list }}"
-      when: proxy is defined
-      changed_when: false
-  when: outer_item.groupname_dst is defined
-
-...
diff --git a/ansible/playbooks/upgrade.yml b/ansible/playbooks/upgrade.yml
deleted file mode 100755
index 8040b497504688d373c73654e518457feb15280b..0000000000000000000000000000000000000000
--- a/ansible/playbooks/upgrade.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: UPGRADE SERVERS
-  hosts: all
-  tasks:
-
-    - name: apt-get dist-upgrade
-      when: ansible_os_family == "Debian"
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        cache_valid_time: 3600
-        upgrade: dist
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: yum upgrade
-      when: ansible_os_family == "RedHat"
-      yum:
-        name: "*"
-        state: latest
-
-...
diff --git a/ansible/playbooks/users.yml b/ansible/playbooks/users.yml
deleted file mode 100755
index 0972b6100f02f87dbb85a54c6669bc9c4b2e2e29..0000000000000000000000000000000000000000
--- a/ansible/playbooks/users.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-
-- name: USERS
-  hosts: all
-  tags: all
-  roles:
-    - conf
-    - users
-
-...
diff --git a/ansible/plugins/action/source_file.py b/ansible/plugins/action/source_file.py
deleted file mode 100644
index d6088a2037ed3df08dca62952d34f84e00f2b6fe..0000000000000000000000000000000000000000
--- a/ansible/plugins/action/source_file.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/python
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-from ansible.plugins.action import ActionBase
-
-
-class ActionModule(ActionBase):
-    """Plugin to set facts from variables sourced in `source_file` module."""
-
-    TRANSFERS_FILES = False
-
-    def run(self, tmp=None, task_vars=None):
-        self._supports_check_mode = True
-
-        result = super(ActionModule, self).run(tmp, task_vars)
-        del tmp  # tmp no longer has any effect
-
-        if self._task.args.get("path", None) is None:
-            result["failed"] = True
-            result["msg"] = "path arg needs to be provided"
-            return result
-
-        result.update(self._execute_module(task_vars=task_vars))
-
-        return result
diff --git a/ansible/requirements.dev.in b/ansible/requirements.dev.in
deleted file mode 100644
index b7dd382af49381d5e626f8ae874a8d6cb430ccbb..0000000000000000000000000000000000000000
--- a/ansible/requirements.dev.in
+++ /dev/null
@@ -1,10 +0,0 @@
--r requirements.in
-ansible-lint
-flake8
-git+git://github.com/atmaniak/molecule@e03437923b302fca1bd7b4f6030c6956ad00367a#egg=molecule[docker]
-#molecule[docker]
-pip-tools
-pytest-testinfra
-yamllint
-openstacksdk
-ovh
diff --git a/ansible/requirements.dev.txt b/ansible/requirements.dev.txt
deleted file mode 100644
index 9abecaaaf301c394501dd711cb602be429fa08a6..0000000000000000000000000000000000000000
--- a/ansible/requirements.dev.txt
+++ /dev/null
@@ -1,244 +0,0 @@
-#
-# This file is autogenerated by pip-compile
-# To update, run:
-#
-#    pip-compile --output-file=requirements.dev.txt requirements.dev.in
-#
-ansible-lint==4.2.0
-    # via -r requirements.dev.in
-ansible==2.9.18
-    # via
-    #   -r requirements.in
-    #   ansible-lint
-    #   molecule
-appdirs==1.4.4
-    # via openstacksdk
-arrow==0.17.0
-    # via jinja2-time
-attrs==20.3.0
-    # via pytest
-bcrypt==3.2.0
-    # via paramiko
-binaryornot==0.4.4
-    # via cookiecutter
-cerberus==1.3.2
-    # via molecule
-certifi==2020.12.5
-    # via requests
-cffi==1.14.4
-    # via
-    #   bcrypt
-    #   cryptography
-    #   pynacl
-chardet==4.0.0
-    # via
-    #   binaryornot
-    #   requests
-click-completion==0.5.2
-    # via molecule
-click-help-colors==0.9
-    # via molecule
-click==7.1.2
-    # via
-    #   click-completion
-    #   click-help-colors
-    #   cookiecutter
-    #   molecule
-    #   pip-tools
-    #   python-gilt
-colorama==0.4.4
-    # via
-    #   molecule
-    #   python-gilt
-    #   rich
-commonmark==0.9.1
-    # via rich
-cookiecutter==1.7.2
-    # via molecule
-cryptography==3.3.1
-    # via
-    #   ansible
-    #   openstacksdk
-    #   paramiko
-decorator==4.4.2
-    # via
-    #   dogpile.cache
-    #   openstacksdk
-distro==1.5.0
-    # via selinux
-docker==4.4.1
-    # via molecule
-dogpile.cache==1.1.1
-    # via openstacksdk
-fasteners==0.16
-    # via python-gilt
-flake8==3.8.4
-    # via -r requirements.dev.in
-idna==2.10
-    # via requests
-iniconfig==1.1.1
-    # via pytest
-iso8601==0.1.13
-    # via
-    #   keystoneauth1
-    #   openstacksdk
-jinja2-time==0.2.0
-    # via cookiecutter
-jinja2==2.11.2
-    # via
-    #   ansible
-    #   click-completion
-    #   cookiecutter
-    #   jinja2-time
-    #   molecule
-jmespath==0.10.0
-    # via openstacksdk
-jsonpatch==1.28
-    # via openstacksdk
-jsonpointer==2.0
-    # via jsonpatch
-keystoneauth1==4.3.0
-    # via openstacksdk
-markupsafe==1.1.1
-    # via
-    #   cookiecutter
-    #   jinja2
-mccabe==0.6.1
-    # via flake8
-git+git://github.com/atmaniak/molecule@e03437923b302fca1bd7b4f6030c6956ad00367a#egg=molecule[docker]
-    # via -r requirements.dev.in
-munch==2.5.0
-    # via openstacksdk
-netaddr==0.8.0
-    # via -r requirements.in
-netifaces==0.10.9
-    # via openstacksdk
-openstacksdk==0.52.0
-    # via
-    #   -r requirements.dev.in
-    #   -r requirements.in
-os-service-types==1.7.0
-    # via
-    #   keystoneauth1
-    #   openstacksdk
-ovh==0.5.0
-    # via
-    #   -r requirements.dev.in
-    #   -r requirements.in
-packaging==20.8
-    # via pytest
-paramiko==2.7.2
-    # via molecule
-pathspec==0.8.1
-    # via yamllint
-pbr==5.5.1
-    # via
-    #   keystoneauth1
-    #   openstacksdk
-    #   os-service-types
-    #   stevedore
-pexpect==4.8.0
-    # via molecule
-pip-tools==5.5.0
-    # via -r requirements.dev.in
-pluggy==0.13.1
-    # via
-    #   molecule
-    #   pytest
-poyo==0.5.0
-    # via cookiecutter
-ptyprocess==0.7.0
-    # via pexpect
-py==1.10.0
-    # via pytest
-pycodestyle==2.6.0
-    # via flake8
-pycparser==2.20
-    # via cffi
-pyflakes==2.2.0
-    # via flake8
-pygments==2.7.3
-    # via rich
-pynacl==1.4.0
-    # via paramiko
-pyparsing==2.4.7
-    # via packaging
-pytest-testinfra==6.1.0
-    # via -r requirements.dev.in
-pytest==6.2.1
-    # via pytest-testinfra
-python-dateutil==2.8.1
-    # via arrow
-python-gilt==1.2.3
-    # via molecule
-python-slugify==4.0.1
-    # via cookiecutter
-pyyaml==5.3.1
-    # via
-    #   -r requirements.in
-    #   ansible
-    #   ansible-lint
-    #   molecule
-    #   openstacksdk
-    #   python-gilt
-    #   yamllint
-requests==2.25.1
-    # via
-    #   cookiecutter
-    #   docker
-    #   keystoneauth1
-requestsexceptions==1.4.0
-    # via openstacksdk
-rich==9.6.1
-    # via ansible-lint
-ruamel.yaml.clib==0.2.2
-    # via ruamel.yaml
-ruamel.yaml==0.16.12
-    # via ansible-lint
-selinux==0.2.1
-    # via molecule
-sh==1.13.1
-    # via
-    #   molecule
-    #   python-gilt
-shellingham==1.3.2
-    # via click-completion
-six==1.15.0
-    # via
-    #   bcrypt
-    #   click-completion
-    #   cookiecutter
-    #   cryptography
-    #   docker
-    #   fasteners
-    #   keystoneauth1
-    #   munch
-    #   pynacl
-    #   python-dateutil
-    #   websocket-client
-stevedore==3.3.0
-    # via
-    #   dogpile.cache
-    #   keystoneauth1
-tabulate==0.8.7
-    # via molecule
-text-unidecode==1.3
-    # via python-slugify
-toml==0.10.2
-    # via pytest
-tree-format==0.1.2
-    # via molecule
-typing-extensions==3.7.4.3
-    # via rich
-urllib3==1.26.2
-    # via requests
-websocket-client==0.57.0
-    # via docker
-yamllint==1.25.0
-    # via
-    #   -r requirements.dev.in
-    #   molecule
-
-# The following packages are considered to be unsafe in a requirements file:
-# pip
-# setuptools
diff --git a/ansible/requirements.in b/ansible/requirements.in
deleted file mode 100644
index 89d375f50dc49fe355d0db04f085e2a1437e78d9..0000000000000000000000000000000000000000
--- a/ansible/requirements.in
+++ /dev/null
@@ -1,5 +0,0 @@
-ansible<2.10
-netaddr
-pyyaml
-openstacksdk
-ovh
diff --git a/ansible/requirements.txt b/ansible/requirements.txt
deleted file mode 100644
index 3c60fe163a4785c9de906c07e0c410f4089b287f..0000000000000000000000000000000000000000
--- a/ansible/requirements.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# This file is autogenerated by pip-compile
-# To update, run:
-#
-#    pip-compile --output-file=requirements.txt requirements.in
-#
-ansible==2.9.18            # via -r requirements.in
-cffi==1.14.0              # via cryptography
-cryptography==2.9         # via ansible
-jinja2==2.11.2            # via ansible
-jmespath==0.10.0          # via openstacksdk
-jsonpatch==1.28           # via openstacksdk
-jsonpointer==2.0          # via jsonpatch
-keystoneauth1==4.3.0      # via openstacksdk
-markupsafe==1.1.1         # via jinja2
-munch==2.5.0              # via openstacksdk
-netaddr==0.8.0            # via -r requirements.in
-netifaces==0.10.9         # via openstacksdk
-openstacksdk==0.52.0      # via -r requirements.in
-os-service-types==1.7.0   # via keystoneauth1, openstacksdk
-ovh==0.5.0                # via -r requirements.in
-pbr==5.5.1                # via keystoneauth1, openstacksdk, os-service-types, stevedore
-pycparser==2.20           # via cffi
-pyyaml==5.3.1             # via -r requirements.in, ansible, openstacksdk
-requests==2.25.1          # via keystoneauth1
-requestsexceptions==1.4.0  # via openstacksdk
-six==1.15.0               # via cryptography, keystoneauth1, munch
-stevedore==3.3.0          # via dogpile.cache, keystoneauth1
-urllib3==1.26.2           # via requests
diff --git a/ansible/requirements.yml b/ansible/requirements.yml
deleted file mode 100644
index 6b1fe0558ef539bd67aa2ea23d2c4a539ffd9dc1..0000000000000000000000000000000000000000
--- a/ansible/requirements.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- src: elastic.elasticsearch
-  version: 7.9.0
-
-...
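This Galaxy requirements file pins the elastic.elasticsearch role. A minimal install sketch; the `-p roles` target is an assumption about the expected checkout location:

```
cd <envsetup dir>/ansible
ansible-galaxy install -r requirements.yml -p roles
```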
diff --git a/ansible/roles/base/meta/main.yml b/ansible/roles/base/meta/main.yml
deleted file mode 100644
index ea5745729fa6af9e0c1b02bc804a60545d1e8657..0000000000000000000000000000000000000000
--- a/ansible/roles/base/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-
-dependencies:
-  - role: conf
-  - role: init
-  - role: sysconfig
-  - role: users
-  - role: postfix
-  - role: ferm-install
-  - role: ferm-configure
-  - role: fail2ban
-
-...
diff --git a/ansible/roles/bench-server/defaults/main.yml b/ansible/roles/bench-server/defaults/main.yml
deleted file mode 100644
index feccdd5a25d91ced913e75b888c8dca74ec4ac2f..0000000000000000000000000000000000000000
--- a/ansible/roles/bench-server/defaults/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-bench_server_packages:
-  - ubicast-benchmark
-
-bench_server: "{{ hostvars[groups['bench_server'][0]]['ansible_default_ipv4']['address'] }}"
-bench_time_stat: 15000
-
-bench_host: "{{ envsetup_ms_server_name | d() }}"
-bench_user: admin
-bench_password: "{{ envsetup_ms_admin_pwd | d() }}"
-bench_oid:
-bench_dl_streams: false
-
-bench_stream_repo: https://git.ubicast.net/mediaserver/ms-testing-suite.git
-bench_host_api_key: "{{ envsetup_ms_api_key | d() }}"
-
-...
diff --git a/ansible/roles/bench-server/meta/main.yml b/ansible/roles/bench-server/meta/main.yml
deleted file mode 100644
index 91d0a5d794147e734592ab2aab6005487dfd9bbc..0000000000000000000000000000000000000000
--- a/ansible/roles/bench-server/meta/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-dependencies:
-  - role: conf
-  - role: init
-  - role: sysconfig
-
-...
diff --git a/ansible/roles/bench-server/tasks/main.yml b/ansible/roles/bench-server/tasks/main.yml
deleted file mode 100644
index a4049b687dcd763dac76be80f14136681f89e108..0000000000000000000000000000000000000000
--- a/ansible/roles/bench-server/tasks/main.yml
+++ /dev/null
@@ -1,101 +0,0 @@
----
-
-- name: install bench-server packages
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    update_cache: true
-    name: "{{ bench_server_packages }}"
-    state: latest
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: ensure configuration directory exists
-  file:
-    path: /etc/mediaserver
-    state: directory
-
-- name: benchmark configuration settings
-  copy:
-    dest: /etc/mediaserver/bench-conf.json
-    content: |
-      {
-      "LOCUST_HOST":"{{ bench_server }}",
-      "MS_HOST":"{{ bench_host }}",
-      "MS_USERNAME":"{{ bench_user }}",
-      "MS_PASSWORD":"{{ bench_password }}",
-      "MEDIA_OID":"{{ bench_oid }}",
-      "DL_STREAMS":{{ bench_dl_streams }},
-      "TIME_STATS":{{ bench_time_stat }}
-      }
-
-- name: reload systemd daemon
-  systemd:
-    daemon_reload: true
-
-- name: restart bench-server
-  systemd:
-    name: bench-server
-    state: restarted
-
-- name: streaming configuration settings
-  template:
-    src: bench-streaming.conf.j2
-    dest: /etc/mediaserver/bench-streaming.conf
-
-- name: clone ms-testing-suite repository
-  git:
-    repo: "{{ bench_stream_repo }}"
-    version: stable
-    dest: /usr/share/ms-testing-suite
-    update: true
-    force: true
-
-- name: copy configuration for testing tools
-  copy:
-    src: /etc/mediaserver/bench-streaming.conf
-    dest: /usr/share/ms-testing-suite/config.json
-    remote_src: true
-
-- name: add docker key
-  when:
-    - not offline_mode | d(false)
-    - not in_docker | d(false)
-  apt_key:
-    url: https://download.docker.com/linux/debian/gpg
-    state: present
-
-- name: add docker debian repository
-  when:
-    - not offline_mode | d(false)
-    - not in_docker | d(false)
-  apt_repository:
-    repo: "deb https://download.docker.com/linux/debian buster stable"
-    state: present
-    update_cache: true
-
-- name: install docker
-  when:
-    - not offline_mode | d(false)
-    - not in_docker | d(false)
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    update_cache: true
-    name: docker-ce
-    state: latest
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: generate docker image
-  when:
-    - not offline_mode | d(false)
-    - not in_docker | d(false)
-  command:
-    cmd: make build_docker_img
-    chdir: /usr/share/ms-testing-suite
-  run_once: true
-
-...
diff --git a/ansible/roles/bench-server/templates/bench-streaming.conf.j2 b/ansible/roles/bench-server/templates/bench-streaming.conf.j2
deleted file mode 100644
index 1c8f90598917a2e7e0341b71e5c8fdc41b1dcc10..0000000000000000000000000000000000000000
--- a/ansible/roles/bench-server/templates/bench-streaming.conf.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-    "SERVER_URL": "https://{{ bench_host }}",
-    "API_KEY": "{{ bench_host_api_key }}",
-    "OID": "{{ bench_oid }}",
-    "PROXIES": {
-        "http": "",
-        "https": ""
-    },
-    "VERIFY_SSL": false,
-    "EXTRA": {
-        "IGNORED_ROUTES": []
-    }
-}
diff --git a/ansible/roles/bench-worker/defaults/main.yml b/ansible/roles/bench-worker/defaults/main.yml
deleted file mode 100644
index 8e51cf19d09a0ab404db26c568c3a268de6a92f6..0000000000000000000000000000000000000000
--- a/ansible/roles/bench-worker/defaults/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-
-bench_worker_packages:
-  - ubicast-benchmark
-
-bench_server: "{{ hostvars[groups['bench_server'][0]]['ansible_default_ipv4']['address'] }}"
-bench_time_stat: 15000
-
-bench_host: "{{ envsetup_ms_server_name | d() }}"
-bench_user: admin
-bench_password: "{{ envsetup_ms_admin_pwd | d() }}"
-bench_oid:
-bench_dl_streams: false
-
-...
diff --git a/ansible/roles/bench-worker/meta/main.yml b/ansible/roles/bench-worker/meta/main.yml
deleted file mode 100644
index 5f2a4de7e5fcd35ce77b6aac53633d3871e95335..0000000000000000000000000000000000000000
--- a/ansible/roles/bench-worker/meta/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-dependencies:
-  - role: conf
-  - role: init
-  - role: sysconfig
-...
diff --git a/ansible/roles/bench-worker/tasks/main.yml b/ansible/roles/bench-worker/tasks/main.yml
deleted file mode 100644
index d79de64769e4e38cc096745010318050e34609ff..0000000000000000000000000000000000000000
--- a/ansible/roles/bench-worker/tasks/main.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-
-- name: install bench-worker packages
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    update_cache: true
-    name: "{{ bench_worker_packages }}"
-    state: latest
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: ensure configuration directory exists
-  file:
-    path: /etc/mediaserver
-    state: directory
-
-- name: benchmark configuration settings
-  copy:
-    dest: /etc/mediaserver/bench-conf.json
-    content: |
-      {
-      "LOCUST_HOST":"{{ bench_server }}",
-      "MS_HOST":"{{ bench_host }}",
-      "MS_USERNAME":"{{ bench_user }}",
-      "MS_PASSWORD":"{{ bench_password }}",
-      "MEDIA_OID":"{{ bench_oid }}",
-      "DL_STREAMS":{{ bench_dl_streams }},
-      "TIME_STATS":{{ bench_time_stat }}
-      }
-
-- name: reload systemd daemon
-  systemd:
-    daemon_reload: true
-
-- name: restart bench-worker
-  systemd:
-    name: bench-worker
-    state: restarted
-
-...
diff --git a/ansible/roles/celerity/defaults/main.yml b/ansible/roles/celerity/defaults/main.yml
deleted file mode 100644
index 74d05b1c0669cfbd5b6a2871451a30edff72a970..0000000000000000000000000000000000000000
--- a/ansible/roles/celerity/defaults/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-
-celerity_signing_key: "{{ envsetup_celerity_signing_key }}"
-celerity_server: "{{ envsetup_celerity_server | d(envsetup_ms_server_name, true) }}"
-
-celerity_ms_id: "{{ envsetup_ms_id }}"
-celerity_ms_api_key: "{{ envsetup_ms_api_key }}"
-celerity_ms_hostname: "{{ envsetup_ms_server_name }}"
-celerity_ms_instances:
-  - ms_id: "{{ celerity_ms_id }}"
-    ms_api_key: "{{ celerity_ms_api_key }}"
-    ms_server_name: "{{ celerity_ms_hostname }}"
-
-celerity_firewall_enabled: true
-celerity_ferm_rules_filename: celerity
-celerity_ferm_input_rules:
-  - proto:
-      - tcp
-    dport:
-      - 6200
-celerity_ferm_output_rules: []
-celerity_ferm_global_settings:
-
-...
diff --git a/ansible/roles/celerity/handlers/main.yml b/ansible/roles/celerity/handlers/main.yml
deleted file mode 100644
index f76e4aa3cc43c6982db20df8bfce0587d771fd57..0000000000000000000000000000000000000000
--- a/ansible/roles/celerity/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: restart celerity-server
-  service:
-    name: celerity-server
-    state: restarted
-
-...
diff --git a/ansible/roles/celerity/meta/main.yml b/ansible/roles/celerity/meta/main.yml
deleted file mode 100644
index e45d692ae3567f856967cd6f66c91d13e2e94e4e..0000000000000000000000000000000000000000
--- a/ansible/roles/celerity/meta/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-dependencies:
-  - role: base
-
-...
diff --git a/ansible/roles/celerity/tasks/main.yml b/ansible/roles/celerity/tasks/main.yml
deleted file mode 100644
index 8ceaffdb34b7f3d6261056d2bc9da056c0567fd8..0000000000000000000000000000000000000000
--- a/ansible/roles/celerity/tasks/main.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-
-- name: celerity server install
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: celerity-server
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: config celerity server
-  notify: restart celerity-server
-  template:
-    src: celerity-config.py.j2
-    dest: /etc/celerity/config.py
-    mode: 0644
-    owner: root
-    group: root
-  changed_when: "'molecule-idempotence-notest' not in ansible_skip_tags"
-
-- name: ensure celerity server is running
-  service:
-    name: celerity-server
-    enabled: true
-    state: started
-
-# FIREWALL
-
-- name: firewall
-  when: celerity_firewall_enabled
-  vars:
-    ferm_rules_filename: "{{ celerity_ferm_rules_filename }}"
-    ferm_input_rules: "{{ celerity_ferm_input_rules }}"
-    ferm_output_rules: "{{ celerity_ferm_output_rules }}"
-    ferm_global_settings: "{{ celerity_ferm_global_settings }}"
-  include_role:
-    name: ferm-configure
-
-- meta: flush_handlers
-
-...
diff --git a/ansible/roles/celerity/templates/celerity-config.py.j2 b/ansible/roles/celerity/templates/celerity-config.py.j2
deleted file mode 100644
index 7ce10ff9cb33775766df11b4ea3705eb11a58802..0000000000000000000000000000000000000000
--- a/ansible/roles/celerity/templates/celerity-config.py.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-SIGNING_KEY = '{{ celerity_signing_key }}'
-SERVER_URL = 'https://{{ celerity_server }}:6200'
-
-# Queue count, default: min(round(thread_count/2)-1,2)
-#QUEUES_PER_WORKER = 2
-
-# MediaServer interactions
-MEDIASERVERS = {
-    '{{ celerity_ms_id }}': {'url': 'https://{{ celerity_ms_hostname }}', 'api_key': '{{ celerity_ms_api_key }}'},
-}
diff --git a/ansible/roles/conf/defaults/main.yml b/ansible/roles/conf/defaults/main.yml
deleted file mode 100644
index e6ccc95f2c156c6a633761698e252fd33ca66212..0000000000000000000000000000000000000000
--- a/ansible/roles/conf/defaults/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-
-conf_req_packages:
-  - ca-certificates
-  - openssh-client
-
-conf_req_packages_online:
-  - git
-
-conf_repo_url: https://mirismanager.ubicast.eu/git/mediaserver/envsetup.git
-conf_repo_version: "{{ lookup('env', 'ENVSETUP_BRANCH') | d('stable', true) }}"
-conf_repo_dest: /root/envsetup
-
-conf_host: "{{ skyreach_host | default('mirismanager.ubicast.eu', true) }}"
-conf_valid_cert: "{{ skyreach_valid_cert | default(true, true) }}"
-
-skyreach_activation_key: "{{ lookup('env', 'SKYREACH_ACTIVATION_KEY') }}"
-skyreach_system_key: "{{ lookup('env', 'SKYREACH_SYSTEM_KEY') }}"
-
-conf_update: false
-
-conf_debug: false
-
-...
diff --git a/ansible/roles/conf/tasks/main.yml b/ansible/roles/conf/tasks/main.yml
deleted file mode 100644
index 4bcb7e97ec83a1ee27ffb5d7ebd786f1cb741a9a..0000000000000000000000000000000000000000
--- a/ansible/roles/conf/tasks/main.yml
+++ /dev/null
@@ -1,157 +0,0 @@
----
-
-- name: proxy
-  when:
-    - proxy_http | d()
-    - proxy_https | d()
-  include_role:
-    name: proxy
-
-- name: install requirements
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ conf_req_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: install online requirements
-  when: not offline_mode | d(false)
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ conf_req_packages_online }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: clone envsetup repository
-  when: not offline_mode | d(false)
-  ignore_errors: true
-  register: conf_clone
-  git:
-    repo: "{{ conf_repo_url }}"
-    version: "{{ conf_repo_version }}"
-    dest: "{{ conf_repo_dest }}"
-
-- name: ask to continue
-  when:
-    - not offline_mode | d(false)
-    - conf_clone is failed
-  pause:
-    prompt: "Previous task failed, it may be normal if you have local changes in the commited files, do you want to continue anyway?"
-    seconds: 30
-
-- name: generate root ssh key pair
-  register: conf_root
-  user:
-    name: root
-    generate_ssh_key: true
-    ssh_key_type: ed25519
-    ssh_key_file: .ssh/id_ed25519
-
-- name: check if auto-generated-conf.sh exists
-  check_mode: false
-  register: check_conf
-  stat:
-    path: "{{ conf_repo_dest }}/auto-generated-conf.sh"
-
-- name: check if conf.sh exists
-  check_mode: false
-  register: check_local_conf
-  stat:
-    path: "{{ conf_repo_dest }}/conf.sh"
-
-- name: download conf and update ssh public key with activation key
-  when: skyreach_activation_key | d(false)
-  register: conf_dl_ak
-  changed_when: conf_dl_ak.status == 200
-  failed_when:
-    - conf_dl_ak.status != 200
-    - not check_conf.stat.exists
-    - not skyreach_system_key
-  uri:
-    url: https://{{ conf_host }}/erp/credentials/envsetup-conf.sh
-    method: POST
-    body_format: form-urlencoded
-    body:
-      key: "{{ skyreach_activation_key }}"
-      public_key: "{{ conf_root.ssh_public_key }}"
-    return_content: true
-    validate_certs: "{{ conf_valid_cert }}"
-
-- name: download conf and update ssh public key with system key
-  when:
-    - not check_conf.stat.exists or conf_update
-    - skyreach_system_key | d(false)
-  register: conf_dl_sk
-  changed_when: conf_dl_sk.status == 200
-  failed_when:
-    - conf_dl_sk.status != 200
-    - not check_conf.stat.exists
-  uri:
-    url: https://{{ conf_host }}/erp/credentials/envsetup-conf.sh
-    method: POST
-    body_format: form-urlencoded
-    body:
-      api_key: "{{ skyreach_system_key }}"
-      public_key: "{{ conf_root.ssh_public_key }}"
-    return_content: true
-    validate_certs: "{{ conf_valid_cert }}"
-
-- name: save generated conf
-  loop:
-    - "{{ conf_dl_ak }}"
-    - "{{ conf_dl_sk }}"
-  when: item is changed
-  copy:
-    content: "{{ item.content }}"
-    dest: "{{ conf_repo_dest }}/auto-generated-conf.sh"
-    force: true
-    backup: true
-
-- name: touch generated conf
-  file:
-    path: "{{ conf_repo_dest }}/auto-generated-conf.sh"
-    access_time: preserve
-    modification_time: preserve
-    state: touch
-
-- name: touch local conf
-  file:
-    path: "{{ conf_repo_dest }}/conf.sh"
-    access_time: preserve
-    modification_time: preserve
-    state: touch
-
-- name: load global conf
-  changed_when: false
-  check_mode: false
-  source_file:
-    path: "{{ conf_repo_dest }}/global-conf.sh"
-    prefix: envsetup_
-    lower: true
-
-- name: load generated conf
-  changed_when: false
-  check_mode: false
-  source_file:
-    path: "{{ conf_repo_dest }}/auto-generated-conf.sh"
-    prefix: envsetup_
-    lower: true
-
-- name: load local conf
-  changed_when: false
-  check_mode: false
-  source_file:
-    path: "{{ conf_repo_dest }}/conf.sh"
-    prefix: envsetup_
-    lower: true
-
-- name: debug variables
-  when: conf_debug
-  debug:
-    var: ansible_facts
-
-...
diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml
deleted file mode 100644
index f2883ab5f11aef2ee3419aa1d30cf83683905d5e..0000000000000000000000000000000000000000
--- a/ansible/roles/docker/tasks/main.yml
+++ /dev/null
@@ -1,62 +0,0 @@
----
-- name: requirements install
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name:
-      - apt-transport-https
-      - ca-certificates
-      - curl
-      - gnupg-agent
-      - lsb-release
-      - software-properties-common
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: add docker key
-  when:
-    - not offline_mode | d(false)
-  apt_key:
-    url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg
-    state: present
-
-- name: add docker debian repository
-  when:
-    - not offline_mode | d(false)
-  apt_repository:
-    repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release | lower }} stable
-    state: present
-    update_cache: true
-
-- name: install docker
-  when:
-    - not offline_mode | d(false)
-  apt:
-    name: docker-ce
-    state: latest
-    update_cache: true
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: docker service
-  when:
-    - not offline_mode | d(false)
-  systemd:
-    name: docker
-    enabled: true
-    state: started
-
-- name: install requirements for docker python binding
-  when:
-    - not offline_mode | d(false)
-  apt:
-    name: python3-docker
-    state: latest
-    update_cache: true
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-...
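Every install task in this role (and in most other roles removed by this diff) shares the same guard against a concurrently held apt/dpkg lock: register the result, retry up to 60 times, and stop early only when the failure message is unrelated to the lock. A minimal standalone sketch of the pattern; the package name and the explicit delay are illustrative (the deleted tasks rely on Ansible's default retry delay):

    - name: install a package, waiting out a held apt/dpkg lock
      apt:
        force_apt_get: true
        install_recommends: false
        name: htop  # hypothetical package, for illustration only
        state: present
      register: apt_status
      retries: 60
      delay: 5  # assumption; the roles above keep the default delay
      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)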
diff --git a/ansible/roles/elastic/defaults/main.yml b/ansible/roles/elastic/defaults/main.yml
deleted file mode 100644
index b8be8f11ecee8857764d8db5d79bfdef925ba950..0000000000000000000000000000000000000000
--- a/ansible/roles/elastic/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-kibana_default_port: 5601
-kibana_server_host: localhost
-
-...
diff --git a/ansible/roles/elastic/handlers/main.yml b/ansible/roles/elastic/handlers/main.yml
deleted file mode 100644
index 65d7d70557485bf967156b1feea618560fcca49f..0000000000000000000000000000000000000000
--- a/ansible/roles/elastic/handlers/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: restart kibana
-  service:
-    name: kibana
-    state: restarted
-
-- name: restart apm-server
-  service:
-    name: apm-server
-    state: restarted
-
-...
diff --git a/ansible/roles/elastic/tasks/main.yml b/ansible/roles/elastic/tasks/main.yml
deleted file mode 100644
index 7ecc381f4c65da31c15e4216018ad90f77e15c0a..0000000000000000000000000000000000000000
--- a/ansible/roles/elastic/tasks/main.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: install kibana package
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: kibana
-    state: latest
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: deploy kibana configuration
-  template:
-    src: kibana.yml.j2
-    dest: /etc/kibana/kibana.yml
-  notify: restart kibana
-
-- name: install apm-server package
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: apm-server
-    state: latest
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: deploy apm-server configuration
-  template:
-    src: apm-server.yml.j2
-    dest: /etc/apm-server/apm-server.yml
-  notify: restart apm-server
-
-...
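Both deploy tasks above use the template-plus-handler idiom: the service restarts only when the rendered file actually changes, and at most once per play even if several tasks notify it. A minimal sketch with hypothetical file and service names:

    # tasks/main.yml
    - name: deploy example configuration
      template:
        src: example.yml.j2
        dest: /etc/example/example.yml
      notify: restart example

    # handlers/main.yml
    - name: restart example
      service:
        name: example
        state: restarted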
diff --git a/ansible/roles/elastic/templates/apm-server.yml.j2 b/ansible/roles/elastic/templates/apm-server.yml.j2
deleted file mode 100644
index c5b844631aed83aa2484741997c7d6a651711695..0000000000000000000000000000000000000000
--- a/ansible/roles/elastic/templates/apm-server.yml.j2
+++ /dev/null
@@ -1,1204 +0,0 @@
-######################### APM Server Configuration #########################
-
-################################ APM Server ################################
-
-apm-server:
-  # Defines the host and port the server is listening on. Use "unix:/path/to.sock" to listen on a unix domain socket.
-  host: "{{ kibana_server_host }}:8200"
-
-  # Maximum permitted size in bytes of a request's header accepted by the server to be processed.
-  #max_header_size: 1048576
-
-  # Maximum amount of time to wait for the next incoming request before underlying connection is closed.
-  #idle_timeout: 45s
-
-  # Maximum permitted duration for reading an entire request.
-  #read_timeout: 30s
-
-  # Maximum permitted duration for writing a response.
-  #write_timeout: 30s
-
-  # Maximum duration before releasing resources when shutting down the server.
-  #shutdown_timeout: 5s
-
-  # Maximum permitted size in bytes of an event accepted by the server to be processed.
-  #max_event_size: 307200
-
-  # Maximum number of new connections to accept simultaneously (0 means unlimited).
-  #max_connections: 0
-
-  # If true (default), APM Server captures the IP of the instrumented service
-  # or the IP and User Agent of the real user (RUM requests).
-  #capture_personal_data: true
-
-  # Enable APM Server Golang expvar support (https://golang.org/pkg/expvar/).
-  #expvar:
-    #enabled: false
-
-    # Url to expose expvar.
-    #url: "/debug/vars"
-
-  # Instrumentation support for the server's HTTP endpoints and event publisher.
-  #instrumentation:
-    # Set to true to enable instrumentation of the APM Server itself.
-    #enabled: false
-
-    # Environment in which the APM Server is running (e.g. staging, production, etc.)
-    #environment: ""
-
-    # Remote hosts to report instrumentation results to.
-    #hosts:
-    #  - http://remote-apm-server:8200
-
-    # API Key for the remote APM Server(s).
-    # If api_key is set then secret_token will be ignored.
-    #api_key:
-
-    # Secret token for the remote APM Server(s).
-    #secret_token:
-
-    # Enable profiling of the server, recording profile samples as events.
-    #
-    # This feature is experimental.
-    #profiling:
-      #cpu:
-        # Set to true to enable CPU profiling.
-        #enabled: false
-        #interval: 60s
-        #duration: 10s
-      #heap:
-        # Set to true to enable heap profiling.
-        #enabled: false
-        #interval: 60s
-
-  # A pipeline is a definition of processors applied to documents when ingesting them to Elasticsearch.
-  # Using pipelines involves two steps:
-  # (1) registering a pipeline
-  # (2) applying a pipeline during data ingestion (see `output.elasticsearch.pipeline`)
-  #
-  # You can manually register a pipeline, or use this configuration option to ensure
-  # the pipeline is loaded and registered at the configured Elasticsearch instances.
-  # Find the default pipeline configuration at `ingest/pipeline/definition.json`.
-  # Automatic pipeline registration requires the `output.elasticsearch` to be enabled and configured.
-  #register.ingest.pipeline:
-    # Registers APM pipeline definition in Elasticsearch on APM Server startup. Defaults to true.
-    #enabled: true
-    # Overwrites existing APM pipeline definition in Elasticsearch. Defaults to false.
-    #overwrite: false
-
-
-  #---------------------------- APM Server - Secure Communication with Agents ----------------------------
-
-  # Enable secure communication between APM agents and the server. By default ssl is disabled.
-  #ssl:
-    #enabled: false
-
-    # Path to file containing the certificate for server authentication.
-    # Needs to be configured when ssl is enabled.
-    #certificate: ''
-
-    # Path to file containing server certificate key.
-    # Needs to be configured when ssl is enabled.
-    #key: ''
-
-    # Optional configuration options for ssl communication.
-
-    # Passphrase for decrypting the Certificate Key.
-    # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
-    #key_passphrase: ''
-
-    # List of supported/valid protocol versions. By default TLS versions 1.1 up to 1.3 are enabled.
-    #supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
-
-    # Configure cipher suites to be used for SSL connections.
-    # Note that cipher suites are not configurable for TLS 1.3.
-    #cipher_suites: []
-
-    # Configure curve types for ECDHE based cipher suites.
-    #curve_types: []
-
-    # Following options only concern requiring and verifying client certificates provided by the agents.
-    # Providing a client certificate is currently only supported by the RUM agent through
-    # browser configured certificates and Jaeger agents connecting via gRPC.
-    #
-    # Configure a list of root certificate authorities for verifying client certificates.
-    #certificate_authorities: []
-    #
-    # Configure which type of client authentication is supported.
-    # Options are `none`, `optional`, and `required`.
-    # Default is `none`. If `certificate_authorities` are configured,
-    # the value for `client_authentication` is automatically changed to `required`.
-    #client_authentication: "none"
-
-  # The APM Server endpoints can be secured by configuring a secret token or enabling the usage of API keys. Both
-  # options can be enabled in parallel, allowing Elastic APM agents to choose whichever mechanism they support.
-  # As soon as one of the options is enabled, requests without a valid token are denied by the server. An exception
-  # to this are requests to any enabled RUM endpoint. RUM endpoints are generally not secured by any token.
-  #
-  # Configure authorization via a common `secret_token`. By default it is disabled.
-  # Agents include the token in the following format: Authorization: Bearer <secret-token>.
-  # It is recommended to use an authorization token in combination with SSL enabled,
-  # and save the token in the apm-server keystore.
-  #secret_token:
-
-  # Enable API key authorization by setting enabled to true. By default API key support is disabled.
-  # Agents include a valid API key in the following format: Authorization: ApiKey <token>.
-  # The key must be the base64 encoded representation of the API key's "id:key".
-  # This is an experimental feature, use with care.
-  #api_key:
-    #enabled: false
-
-    # Restrict how many unique API keys are allowed per minute. Should be set to at least the amount of different
-    # API keys configured in your monitored services. Every unique API key triggers one request to Elasticsearch.
-    #limit: 100
-
-    # API keys need to be fetched from Elasticsearch. If nothing is configured, configuration settings from the
-    # output section will be reused.
-    # Note that configuration needs to point to a secured Elasticsearch cluster that is able to serve API key requests.
-    #elasticsearch:
-      #hosts: ["localhost:9200"]
-
-      #protocol: "http"
-
-      # Username and password are only needed for the apm-server apikey sub-command, and they are ignored otherwise
-      # See `apm-server apikey --help` for details.
-      #username: "elastic"
-      #password: "changeme"
-
-      # Optional HTTP Path.
-      #path: ""
-
-      # Proxy server url.
-      #proxy_url: ""
-      #proxy_disable: false
-
-      # Configure http request timeout before failing a request to Elasticsearch.
-      #timeout: 5s
-
-      # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
-      #ssl.enabled: true
-
-      # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
-      # Configure SSL verification mode. If `none` is configured, all server hosts
-      # and certificates will be accepted. In this mode, SSL based connections are
-      # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-      # `full`.
-      #ssl.verification_mode: full
-
-      # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-      # 1.2 are enabled.
-      #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
-
-      # List of root certificates for HTTPS server verifications.
-      #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-      # Certificate for SSL client authentication.
-      #ssl.certificate: "/etc/pki/client/cert.pem"
-
-      # Client Certificate Key
-      #ssl.key: "/etc/pki/client/cert.key"
-
-      # Optional passphrase for decrypting the Certificate Key.
-      # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
-      #ssl.key_passphrase: ''
-
-      # Configure cipher suites to be used for SSL connections.
-      #ssl.cipher_suites: []
-
-      # Configure curve types for ECDHE based cipher suites.
-      #ssl.curve_types: []
-
-      # Configure what types of renegotiation are supported. Valid options are
-      # never, once, and freely. Default is never.
-      #ssl.renegotiation: never
-
-
-  #---------------------------- APM Server - RUM Real User Monitoring ----------------------------
-
-  # Enable Real User Monitoring (RUM) Support. By default RUM is disabled.
-  # RUM does not support token based authorization. Enabled RUM endpoints will not require any authorization
-  # token configured for other endpoints.
-  #rum:
-    #enabled: false
-
-    #event_rate:
-
-      # Defines the maximum amount of events allowed to be sent to the APM Server RUM
-      # endpoint per IP per second. Defaults to 300.
-      #limit: 300
-
-      # An LRU cache is used to keep a rate limit per IP for the most recently seen IPs.
-      # This setting defines the number of unique IPs that can be tracked in the cache.
-      # Sites with many concurrent clients should consider increasing this limit. Defaults to 1000.
-      #lru_size: 1000
-
-    #-- General RUM settings
-
-    # A list of permitted origins for real user monitoring.
-    # User-agents will send an origin header that will be validated against this list.
-    # An origin is made of a protocol scheme, host and port, without the url path.
-    # Allowed origins in this setting can have * to match anything (e.g. http://*.example.com)
-    # If an item in the list is a single '*', everything will be allowed.
-    #allow_origins : ['*']
-
-    # A list of Access-Control-Allow-Headers to allow RUM requests, in addition to "Content-Type",
-    # "Content-Encoding", and "Accept"
-    #allow_headers : []
-
-    # Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes.
-    # If the regexp matches, the stacktrace frame is considered to be a library frame.
-    #library_pattern: "node_modules|bower_components|~"
-
-    # Regexp to be matched against a stacktrace frame's `file_name`.
-    # If the regexp matches, the stacktrace frame is not used for calculating error groups.
-    # The default pattern excludes stacktrace frames that have a filename starting with '/webpack'
-    #exclude_from_grouping: "^/webpack"
-
-    # If a source map has previously been uploaded, source mapping is automatically applied
-    # to all error and transaction documents sent to the RUM endpoint.
-    #source_mapping:
-
-      # Sourcemapping is enabled by default.
-      #enabled: true
-
-      # Source maps are always fetched from Elasticsearch, by default using the output.elasticsearch configuration.
-      # A different instance must be configured when using any other output.
-      # This setting only affects sourcemap reads - the output determines where sourcemaps are written.
-      #elasticsearch:
-        # Array of hosts to connect to.
-        # Scheme and port can be left out and will be set to the default (`http` and `9200`).
-        # In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
-        # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
-        # hosts: ["localhost:9200"]
-
-        # Protocol - either `http` (default) or `https`.
-        #protocol: "https"
-
-        # Authentication credentials - either API key or username/password.
-        #api_key: "id:api_key"
-        #username: "elastic"
-        #password: "changeme"
-
-      # The `cache.expiration` determines how long a source map should be cached before fetching it again from Elasticsearch.
-      # Note that values configured without a time unit will be interpreted as seconds.
-      #cache:
-        #expiration: 5m
-
-      # Source maps are stored in a separate index.
-      # If the default index pattern for source maps at 'outputs.elasticsearch.indices'
-      # is changed, a matching index pattern needs to be specified here.
-      #index_pattern: "apm-*-sourcemap*"
-
-  #---------------------------- APM Server - Agent Configuration ----------------------------
-
-  # When using APM agent configuration, information fetched from Kibana will be cached in memory for some time.
-  # Specify cache key expiration via this setting. Default is 30 seconds.
-  #agent.config.cache.expiration: 30s
-
-  #kibana:
-    # For APM Agent configuration in Kibana, enabled must be true.
-    #enabled: false
-
-    # Scheme and port can be left out and will be set to the default (`http` and `5601`).
-    # In case you specify an additional path, the scheme is required: `http://localhost:5601/path`.
-    # IPv6 addresses should always be defined as: `https://[2001:db8::1]:5601`.
-    #host: "localhost:5601"
-
-    # Optional protocol and basic auth credentials.
-    #protocol: "https"
-    #username: "elastic"
-    #password: "changeme"
-
-    # Optional HTTP path.
-    #path: ""
-
-    # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
-    #ssl.enabled: true
-
-    # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
-    # Configure SSL verification mode. If `none` is configured, all server hosts
-    # and certificates will be accepted. In this mode, SSL based connections are
-    # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-    # `full`.
-    #ssl.verification_mode: full
-
-    # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-    # 1.2 are enabled.
-    #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
-
-    # List of root certificates for HTTPS server verifications.
-    #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-    # Certificate for SSL client authentication.
-    #ssl.certificate: "/etc/pki/client/cert.pem"
-
-    # Client Certificate Key
-    #ssl.key: "/etc/pki/client/cert.key"
-
-    # Optional passphrase for decrypting the Certificate Key.
-    # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
-    #ssl.key_passphrase: ''
-
-    # Configure cipher suites to be used for SSL connections.
-    #ssl.cipher_suites: []
-
-    # Configure curve types for ECDHE based cipher suites.
-    #ssl.curve_types: []
-
-  #---------------------------- APM Server - ILM Index Lifecycle Management ----------------------------
-
-  #ilm:
-    # Supported values are `auto`, `true` and `false`.
-    # `true`: Make use of Elasticsearch's Index Lifecycle Management (ILM) for APM indices. If no Elasticsearch output is
-    # configured or the configured instance does not support ILM, APM Server cannot apply ILM and must create
-    # unmanaged indices instead.
-    # `false`: APM Server does not make use of ILM in Elasticsearch.
-    # `auto`: If an Elasticsearch output is configured with default index and indices settings, and the configured
-    # Elasticsearch instance supports ILM, `auto` will resolve to `true`. Otherwise `auto` will resolve to `false`.
-    # Default value is `auto`.
-    #enabled: "auto"
-
-    #setup:
-      # Only disable setup if you want to set up everything related to ILM on your own.
-      # When setup is enabled, the APM Server creates:
-      # - aliases and ILM policies if `apm-server.ilm.enabled` resolves to `true`.
-      # - An ILM specific template per event type. This is required to map ILM aliases and policies to indices. In case
-      # ILM is disabled, the templates will be created without any ILM settings.
-      # Be aware that if you turn off setup, you need to manually manage event type specific templates on your own.
-      # If you simply want to disable ILM, use the above setting, `apm-server.ilm.enabled`, instead.
-      # Defaults to true.
-      #enabled: true
-
-      # Configure whether or not existing policies and ILM related templates should be updated. This needs to be
-      # set to true when customizing your policies.
-      # Defaults to false.
-      #overwrite: false
-
-      # Set `require_policy` to `false` when policies are set up outside of APM Server but referenced here.
-      # Default value is `true`.
-      #require_policy: true
-
-      # The configured event types and policies will be merged with the default setup. You only need to configure
-      # the mappings that you want to customize.
-      #mapping:
-        #- event_type: "error"
-        #  policy_name: "apm-rollover-30-days"
-        #- event_type: "span"
-        #  policy_name: "apm-rollover-30-days"
-        #- event_type: "transaction"
-        #  policy_name: "apm-rollover-30-days"
-        #- event_type: "metric"
-        #  policy_name: "apm-rollover-30-days"
-
-      # Configured policies are added to pre-defined default policies.
-      # If a policy with the same name as a default policy is configured, the configured policy overwrites the default policy.
-      #policies:
-        #- name: "apm-rollover-30-days"
-          #policy:
-            #phases:
-              #hot:
-                #actions:
-                  #rollover:
-                    #max_size: "50gb"
-                    #max_age: "30d"
-                  #set_priority:
-                    #priority: 100
-              #warm:
-                #min_age: "30d"
-                #actions:
-                  #set_priority:
-                    #priority: 50
-                  #readonly: {}
-
-
-
-  #---------------------------- APM Server - Experimental Jaeger integration ----------------------------
-
-  # When enabling Jaeger integration, APM Server acts as Jaeger collector. It supports jaeger.thrift over HTTP
-  # and gRPC. This is an experimental feature, use with care.
-  #jaeger:
-    #grpc:
-      # Set to true to enable the Jaeger gRPC collector service.
-      #enabled: false
-
-      # Defines the gRPC host and port the server is listening on.
-      # Defaults to the standard Jaeger gRPC collector port 14250.
-      #host: "localhost:14250"
-
-      # Set to the name of a process tag to use for authorizing
-      # Jaeger agents.
-      #
-      # The tag value should have the same format as an HTTP
-      # Authorization header, i.e. "Bearer <secret_token>" or
-      # "ApiKey <base64(id:key)>".
-      #
-      # By default (if the auth_tag value is empty), authorization
-      # does not apply to Jaeger agents.
-      #auth_tag: ""
-
-    #http:
-      # Set to true to enable the Jaeger HTTP collector endpoint.
-      #enabled: false
-
-      # Defines the HTTP host and port the server is listening on.
-      # Defaults to the standard Jaeger HTTP collector port 14268.
-      #host: "localhost:14268"
-
-#================================= General =================================
-
-# Data is buffered in a memory queue before it is published to the configured output.
-# The memory queue will present all available events (up to the outputs
-# bulk_max_size) to the output, the moment the output is ready to serve
-# another batch of events.
-#queue:
-  # Queue type by name (default 'mem').
-  #mem:
-    # Max number of events the queue can buffer.
-    #events: 4096
-
-    # Hints the minimum number of events stored in the queue
-    # before providing a batch of events to the outputs.
-    # The default value is set to 2048.
-    # A value of 0 ensures events are immediately available
-    # to be sent to the outputs.
-    #flush.min_events: 2048
-
-    # Maximum duration after which events are available to the outputs,
-    # if the number of events stored in the queue is < `flush.min_events`.
-    #flush.timeout: 1s
-
-# Sets the maximum number of CPUs that can be executing simultaneously. The
-# default is the number of logical CPUs available in the system.
-#max_procs:
-
-#================================= Template =================================
-
-# A template is used to set the mapping in Elasticsearch.
-# By default template loading is enabled and the template is loaded.
-# These settings can be adjusted to load your own template or overwrite existing ones.
-
-# Set to false to disable template loading.
-#setup.template.enabled: true
-
-# Template name. By default the template name is "apm-%{[observer.version]}"
-# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
-#setup.template.name: "apm-%{[observer.version]}"
-
-# Template pattern. By default the template pattern is "apm-%{[observer.version]}-*" to apply to the default index settings.
-# The first part is the version of apm-server and then -* is used to match all daily indices.
-# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
-#setup.template.pattern: "apm-%{[observer.version]}-*"
-
-# Path to fields.yml file to generate the template.
-#setup.template.fields: "${path.config}/fields.yml"
-
-# Overwrite existing template.
-#setup.template.overwrite: false
-
-# Elasticsearch template settings.
-#setup.template.settings:
-
-  # A dictionary of settings to place into the settings.index dictionary
-  # of the Elasticsearch template. For more details, please check
-  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
-  #index:
-    #number_of_shards: 1
-    #codec: best_compression
-    #number_of_routing_shards: 30
-    #mapping.total_fields.limit: 2000
-
-#============================= Elastic Cloud =============================
-
-# These settings simplify using APM Server with the Elastic Cloud (https://cloud.elastic.co/).
-
-# The cloud.id setting overwrites the `output.elasticsearch.hosts` option.
-# You can find the `cloud.id` in the Elastic Cloud web UI.
-#cloud.id:
-
-# The cloud.auth setting overwrites the `output.elasticsearch.username` and
-# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
-#cloud.auth:
-
-#================================ Outputs =================================
-
-# Configure the output to use when sending the data collected by apm-server.
-
-#-------------------------- Elasticsearch output --------------------------
-output.elasticsearch:
-  # Array of hosts to connect to.
-  # Scheme and port can be left out and will be set to the default (`http` and `9200`).
-  # In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
-  # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
-  hosts: ["{{ elastic_host }}:9200"]
-
-  # Boolean flag to enable or disable the output module.
-  #enabled: true
-
-  # Set gzip compression level.
-  #compression_level: 0
-
-  # Protocol - either `http` (default) or `https`.
-  #protocol: "https"
-
-  # Authentication credentials - either API key or username/password.
-  #api_key: "id:api_key"
-  #username: "elastic"
-  #password: "changeme"
-
-  # Dictionary of HTTP parameters to pass within the url with index operations.
-  #parameters:
-    #param1: value1
-    #param2: value2
-
-  # Number of workers per Elasticsearch host.
-  #worker: 1
-
-  # By using the configuration below, APM documents are stored to separate indices,
-  # depending on their `processor.event`:
-  # - error
-  # - transaction
-  # - span
-  # - sourcemap
-  #
-  # The indices are all prefixed with `apm-%{[observer.version]}`.
-  # To allow managing indices based on their age, all indices (except for sourcemaps)
-  # end with the information of the day they got indexed.
-  # e.g. "apm-7.3.0-transaction-2019.07.20"
-  #
-  # Be aware that you can only specify one Elasticsearch template.
-  # If you modify the index patterns you must also update these configurations accordingly,
-  # as they need to be aligned:
-  # * `setup.template.name`
-  # * `setup.template.pattern`
-  #index: "apm-%{[observer.version]}-%{+yyyy.MM.dd}"
-  #indices:
-  #  - index: "apm-%{[observer.version]}-sourcemap"
-  #    when.contains:
-  #      processor.event: "sourcemap"
-  #
-  #  - index: "apm-%{[observer.version]}-error-%{+yyyy.MM.dd}"
-  #    when.contains:
-  #      processor.event: "error"
-  #
-  #  - index: "apm-%{[observer.version]}-transaction-%{+yyyy.MM.dd}"
-  #    when.contains:
-  #      processor.event: "transaction"
-  #
-  #  - index: "apm-%{[observer.version]}-span-%{+yyyy.MM.dd}"
-  #    when.contains:
-  #      processor.event: "span"
-  #
-  #  - index: "apm-%{[observer.version]}-metric-%{+yyyy.MM.dd}"
-  #    when.contains:
-  #      processor.event: "metric"
-  #
-  #  - index: "apm-%{[observer.version]}-onboarding-%{+yyyy.MM.dd}"
-  #    when.contains:
-  #      processor.event: "onboarding"
-
-  # A pipeline is a definition of processors applied to documents when ingesting them to Elasticsearch.
-  # APM Server comes with a default pipeline definition, located at `ingest/pipeline/definition.json`, which is
-  # loaded to Elasticsearch by default (see `apm-server.register.ingest.pipeline`).
-  # APM pipeline is enabled by default. To disable it, set `pipeline: _none`.
-  #pipeline: "apm"
-
-  # Optional HTTP Path.
-  #path: "/elasticsearch"
-
-  # Custom HTTP headers to add to each request.
-  #headers:
-  #  X-My-Header: Contents of the header
-
-  # Proxy server url.
-  #proxy_url: http://proxy:3128
-
-  # The number of times a particular Elasticsearch index operation is attempted. If
-  # the indexing operation doesn't succeed after this many retries, the events are
-  # dropped. The default is 3.
-  #max_retries: 3
-
-  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
-  # The default is 50.
-  #bulk_max_size: 50
-
-  # The number of seconds to wait before trying to reconnect to Elasticsearch
-  # after a network error. After waiting backoff.init seconds, apm-server
-  # tries to reconnect. If the attempt fails, the backoff timer is increased
-  # exponentially up to backoff.max. After a successful connection, the backoff
-  # timer is reset. The default is 1s.
-  #backoff.init: 1s
-
-  # The maximum number of seconds to wait before attempting to connect to
-  # Elasticsearch after a network error. The default is 60s.
-  #backoff.max: 60s
-
-  # Configure http request timeout before failing a request to Elasticsearch.
-  #timeout: 90
-
-  # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
-  #ssl.enabled: true
-
-  # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
-  #ssl.verification_mode: full
-
-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
-
-  # List of root certificates for HTTPS server verifications.
-  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-  # Certificate for SSL client authentication.
-  #ssl.certificate: "/etc/pki/client/cert.pem"
-
-  # Client Certificate Key
-  #ssl.key: "/etc/pki/client/cert.key"
-
-  # Optional passphrase for decrypting the Certificate Key.
-  # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
-  #ssl.key_passphrase: ''
-
-  # Configure cipher suites to be used for SSL connections.
-  #ssl.cipher_suites: []
-
-  # Configure curve types for ECDHE based cipher suites.
-  #ssl.curve_types: []
-
-  # Configure what types of renegotiation are supported. Valid options are
-  # never, once, and freely. Default is never.
-  #ssl.renegotiation: never
-
-  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
-  #kerberos.enabled: true
-
-  # Authentication type to use with Kerberos. Available options: keytab, password.
-  #kerberos.auth_type: password
-
-  # Path to the keytab file. It is used when auth_type is set to keytab.
-  #kerberos.keytab: /etc/elastic.keytab
-
-  # Path to the Kerberos configuration.
-  #kerberos.config_path: /etc/krb5.conf
-
-  # Name of the Kerberos user.
-  #kerberos.username: elastic
-
-  # Password of the Kerberos user. It is used when auth_type is set to password.
-  #kerberos.password: changeme
-
-  # Kerberos realm.
-  #kerberos.realm: ELASTIC
-
-
-#----------------------------- Console output -----------------------------
-#output.console:
-  # Boolean flag to enable or disable the output module.
-  #enabled: false
-
-  # Configure JSON encoding.
-  #codec.json:
-    # Pretty-print JSON event.
-    #pretty: false
-
-    # Configure escaping HTML symbols in strings.
-    #escape_html: false
-
-#---------------------------- Logstash output -----------------------------
-#output.logstash:
-  # Boolean flag to enable or disable the output module.
-  #enabled: false
-
-  # The Logstash hosts.
-  #hosts: ["localhost:5044"]
-
-  # Number of workers per Logstash host.
-  #worker: 1
-
-  # Set gzip compression level.
-  #compression_level: 3
-
-  # Configure escaping html symbols in strings.
-  #escape_html: true
-
-  # Optional maximum time to live for a connection to Logstash, after which the
-  # connection will be re-established.  A value of `0s` (the default) will
-  # disable this feature.
-  #
-  # Not yet supported for async connections (i.e. with the "pipelining" option set).
-  #ttl: 30s
-
-  # Optionally load balance the events between the Logstash hosts. Default is false.
-  #loadbalance: false
-
-  # Number of batches to be sent asynchronously to Logstash while processing
-  # new batches.
-  #pipelining: 2
-
-  # If enabled only a subset of events in a batch of events is transferred per
-  # group.  The number of events to be sent increases up to `bulk_max_size`
-  # if no error is encountered.
-  #slow_start: false
-
-  # The number of seconds to wait before trying to reconnect to Logstash
-  # after a network error. After waiting backoff.init seconds, apm-server
-  # tries to reconnect. If the attempt fails, the backoff timer is increased
-  # exponentially up to backoff.max. After a successful connection, the backoff
-  # timer is reset. The default is 1s.
-  #backoff.init: 1s
-
-  # The maximum number of seconds to wait before attempting to connect to
-  # Logstash after a network error. The default is 60s.
-  #backoff.max: 60s
-
-  # Optional index name. The default index name is set to apm
-  # in all lowercase.
-  #index: 'apm'
-
-  # SOCKS5 proxy server URL
-  #proxy_url: socks5://user:password@socks5-server:2233
-
-  # Resolve names locally when using a proxy server. Defaults to false.
-  #proxy_use_local_resolver: false
-
-  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
-  #ssl.enabled: false
-
-  # Optional SSL configuration options. SSL is off by default.
-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
-  #ssl.verification_mode: full
-
-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
-
-  # List of root certificates for HTTPS server verifications.
-  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-  # Certificate for SSL client authentication.
-  #ssl.certificate: "/etc/pki/client/cert.pem"
-
-  # Client Certificate Key
-  #ssl.key: "/etc/pki/client/cert.key"
-
-  # Optional passphrase for decrypting the Certificate Key.
-  # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
-  #ssl.key_passphrase: ''
-
-  # Configure cipher suites to be used for SSL connections.
-  #ssl.cipher_suites: []
-
-  # Configure curve types for ECDHE based cipher suites.
-  #ssl.curve_types: []
-
-  # Configure what types of renegotiation are supported. Valid options are
-  # never, once, and freely. Default is never.
-  #ssl.renegotiation: never
-
-#------------------------------ Kafka output ------------------------------
-#output.kafka:
-  # Boolean flag to enable or disable the output module.
-  #enabled: false
-
-  # The list of Kafka broker addresses from where to fetch the cluster metadata.
-  # The cluster metadata contain the actual Kafka brokers events are published
-  # to.
-  #hosts: ["localhost:9092"]
-
-  # The Kafka topic used for produced events. The setting can be a format string
-  # using any event field. To set the topic from document type use `%{[type]}`.
-  #topic: beats
-
-  # The Kafka event key setting. Use format string to create unique event key.
-  # By default no event key will be generated.
-  #key: ''
-
-  # The Kafka event partitioning strategy. Default hashing strategy is `hash`
-  # using the `output.kafka.key` setting or randomly distributes events if
-  # `output.kafka.key` is not configured.
-  #partition.hash:
-    # If enabled, events will only be published to partitions with reachable
-    # leaders. Default is false.
-    #reachable_only: false
-
-    # Configure alternative event field names used to compute the hash value.
-    # If empty `output.kafka.key` setting will be used.
-    # Default value is empty list.
-    #hash: []
-
-  # Authentication details. Password is required if username is set.
-  #username: ''
-  #password: ''
-
-  # Kafka version libbeat is assumed to run against. Defaults to "1.0.0".
-  #version: '1.0.0'
-
-  # Configure JSON encoding.
-  #codec.json:
-    # Pretty print json event
-    #pretty: false
-
-    # Configure escaping html symbols in strings.
-    #escape_html: true
-
-  # Metadata update configuration. The metadata contains the leader information
-  # used to decide which broker to use when publishing.
-  #metadata:
-    # Max metadata request retry attempts when cluster is in middle of leader
-    # election. Defaults to 3 retries.
-    #retry.max: 3
-
-    # Waiting time between retries during leader elections. Default is 250ms.
-    #retry.backoff: 250ms
-
-    # Refresh metadata interval. Defaults to every 10 minutes.
-    #refresh_frequency: 10m
-
-  # The number of concurrent load-balanced Kafka output workers.
-  #worker: 1
-
-  # The number of times to retry publishing an event after a publishing failure.
-  # After the specified number of retries, the events are typically dropped.
-  # Set max_retries to a value less than 0 to retry
-  # until all events are published. The default is 3.
-  #max_retries: 3
-
-  # The maximum number of events to bulk in a single Kafka request. The default
-  # is 2048.
-  #bulk_max_size: 2048
-
-  # The number of seconds to wait for responses from the Kafka brokers before
-  # timing out. The default is 30s.
-  #timeout: 30s
-
-  # The maximum duration a broker will wait for number of required ACKs. The
-  # default is 10s.
-  #broker_timeout: 10s
-
-  # The number of messages buffered for each Kafka broker. The default is 256.
-  #channel_buffer_size: 256
-
-  # The keep-alive period for an active network connection. If 0s, keep-alives
-  # are disabled. The default is 0 seconds.
-  #keep_alive: 0
-
-  # Sets the output compression codec. Must be one of none, snappy and gzip. The
-  # default is gzip.
-  #compression: gzip
-
-  # Set the compression level. Currently only gzip provides a compression level
-  # between 0 and 9. The default value is chosen by the compression algorithm.
-  #compression_level: 4
-
-  # The maximum permitted size of JSON-encoded messages. Bigger messages will be
-  # dropped. The default value is 1000000 (bytes). This value should be equal to
-  # or less than the broker's message.max.bytes.
-  #max_message_bytes: 1000000
-
-  # The ACK reliability level required from broker. 0=no response, 1=wait for
-  # local commit, -1=wait for all replicas to commit. The default is 1.  Note:
-  # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
-  # on error.
-  #required_acks: 1
-
-  # The configurable ClientID used for logging, debugging, and auditing
-  # purposes.  The default is "beats".
-  #client_id: beats
-
-  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
-  #ssl.enabled: false
-
-  # Optional SSL configuration options. SSL is off by default.
-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
-  #ssl.verification_mode: full
-
-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
-
-  # List of root certificates for HTTPS server verifications.
-  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-  # Certificate for SSL client authentication.
-  #ssl.certificate: "/etc/pki/client/cert.pem"
-
-  # Client Certificate Key
-  #ssl.key: "/etc/pki/client/cert.key"
-
-  # Optional passphrase for decrypting the Certificate Key.
-  # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
-  #ssl.key_passphrase: ''
-
-  # Configure cipher suites to be used for SSL connections.
-  #ssl.cipher_suites: []
-
-  # Configure curve types for ECDHE based cipher suites.
-  #ssl.curve_types: []
-
-  # Configure what types of renegotiation are supported. Valid options are
-  # never, once, and freely. Default is never.
-  #ssl.renegotiation: never
-
-  # Authentication type to use with Kerberos. Available options: keytab, password.
-  #kerberos.auth_type: password
-
-  # Path to the keytab file. It is used when auth_type is set to keytab.
-  #kerberos.keytab: /etc/krb5kdc/kafka.keytab
-
-  # Path to the Kerberos configuration.
-  #kerberos.config_path: /etc/path/config
-
-  # The service principal name.
-  #kerberos.service_name: HTTP/my-service@realm
-
-  # Name of the Kerberos user. It is used when auth_type is set to password.
-  #kerberos.username: elastic
-
-  # Password of the Kerberos user. It is used when auth_type is set to password.
-  #kerberos.password: changeme
-
-  # Kerberos realm.
-  #kerberos.realm: ELASTIC
-
-#================================= Paths ==================================
-
-# The home path for the apm-server installation. This is the default base path
-# for all other path settings and for miscellaneous files that come with the
-# distribution.
-# If not set by a CLI flag or in the configuration file, the default for the
-# home path is the location of the binary.
-#path.home:
-
-# The configuration path for the apm-server installation. This is the default
-# base path for configuration files, including the main YAML configuration file
-# and the Elasticsearch template file. If not set by a CLI flag or in the
-# configuration file, the default for the configuration path is the home path.
-#path.config: ${path.home}
-
-# The data path for the apm-server installation. This is the default base path
-# for all the files in which apm-server needs to store its data. If not set by a
-# CLI flag or in the configuration file, the default for the data path is a data
-# subdirectory inside the home path.
-#path.data: ${path.home}/data
-
-# The logs path for an apm-server installation. If not set by a CLI flag or in the
-# configuration file, the default is a logs subdirectory inside the home path.
-#path.logs: ${path.home}/logs
-
-#================================= Logging =================================
-
-# There are three options for the log output: syslog, file, and stderr.
-# Windows systems default to file output. All other systems default to syslog.
-
-# Sets the minimum log level. The default log level is info.
-# Available log levels are: error, warning, info, or debug.
-#logging.level: info
-
-# Enable debug output for selected components. To enable all selectors use ["*"].
-# Other available selectors are "beat", "publish", or "service".
-# Multiple selectors can be chained.
-#logging.selectors: [ ]
-
-# Send all logging output to syslog. The default is false.
-#logging.to_syslog: true
-
-# If enabled, apm-server periodically logs its internal metrics that have changed
-# in the last period. For each metric that changed, the delta from the value at
-# the beginning of the period is logged. Also, the total values for
-# all non-zero internal metrics are logged on shutdown. The default is false.
-#logging.metrics.enabled: false
-
-# The period after which to log the internal metrics. The default is 30s.
-#logging.metrics.period: 30s
-
-# Logging to rotating files. When true, writes all logging output to files.
-# The log files are automatically rotated when the log file size limit is reached.
-#logging.to_files: true
-#logging.files:
-  # Configure the path where the logs are written. The default is the logs directory
-  # under the home path (the binary location).
-  #path: /var/log/apm-server
-
-  # The name of the files where the logs are written to.
-  #name: apm-server
-
-  # Configure log file size limit. If limit is reached, log file will be
-  # automatically rotated.
-  #rotateeverybytes: 10485760 # = 10MB
-
-  # Number of rotated log files to keep. Oldest files will be deleted first.
-  #keepfiles: 7
-
-  # The permissions mask to apply when rotating log files. The default value is 0600.
-  # Must be a valid Unix-style file permissions mask expressed in octal notation.
-  #permissions: 0600
-
-  # Enable log file rotation on time intervals in addition to size-based rotation.
-  # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
-  # are boundary-aligned with minutes, hours, days, weeks, months, and years as
-  # reported by the local system clock. All other intervals are calculated from the
-  # Unix epoch. Defaults to disabled.
-  #interval: 0
-
-# Set to true to log messages in json format.
-#logging.json: false
-
-# Set to true to log with minimal Elastic Common Schema (ECS) fields set.
-# It is recommended to set `logging.json=true` when enabling ECS logging.
-# Defaults to false.
-#logging.ecs: false
-
-
-#=============================== HTTP Endpoint ===============================
-
-# apm-server can expose internal metrics through a HTTP endpoint. For security
-# reasons the endpoint is disabled by default. This feature is currently experimental.
-# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
-# append ?pretty to the URL.
-
-# Defines if the HTTP endpoint is enabled.
-#http.enabled: false
-
-# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
-#http.host: localhost
-
-# Port on which the HTTP endpoint will bind. Default is 5066.
-#http.port: 5066
-
-#============================= X-pack Monitoring =============================
-
-# APM server can export internal metrics to a central Elasticsearch monitoring
-# cluster.  This requires x-pack monitoring to be enabled in Elasticsearch.  The
-# reporting is disabled by default.
-
-# Set to true to enable the monitoring reporter.
-#monitoring.enabled: false
-
-# Most settings from the Elasticsearch output are accepted here as well.
-# Note that these settings should be configured to point to your Elasticsearch *monitoring* cluster.
-# Any setting that is not set is automatically inherited from the Elasticsearch
-# output configuration. This means that if you have the Elasticsearch output configured,
-# you can simply uncomment the following line.
-#monitoring.elasticsearch:
-
-  # Protocol - either `http` (default) or `https`.
-  #protocol: "https"
-
-  # Authentication credentials - either API key or username/password.
-  #api_key: "id:api_key"
-  #username: "elastic"
-  #password: "changeme"
-
-  # Array of hosts to connect to.
-  # Scheme and port can be left out and will be set to the default (`http` and `9200`).
-  # In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
-  # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
-  #hosts: ["localhost:9200"]
-
-  # Set gzip compression level.
-  #compression_level: 0
-
-  # Dictionary of HTTP parameters to pass within the URL with index operations.
-  #parameters:
-    #param1: value1
-    #param2: value2
-
-  # Custom HTTP headers to add to each request.
-  #headers:
-  #  X-My-Header: Contents of the header
-
-  # Proxy server url.
-  #proxy_url: http://proxy:3128
-
-  # The number of times a particular Elasticsearch index operation is attempted. If
-  # the indexing operation doesn't succeed after this many retries, the events are
-  # dropped. The default is 3.
-  #max_retries: 3
-
-  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
-  # The default is 50.
-  #bulk_max_size: 50
-
-  # The number of seconds to wait before trying to reconnect to Elasticsearch
-  # after a network error. After waiting backoff.init seconds, apm-server
-  # tries to reconnect. If the attempt fails, the backoff timer is increased
-  # exponentially up to backoff.max. After a successful connection, the backoff
-  # timer is reset. The default is 1s.
-  #backoff.init: 1s
-
-  # The maximum number of seconds to wait before attempting to connect to
-  # Elasticsearch after a network error. The default is 60s.
-  #backoff.max: 60s
-
-  # Configure HTTP request timeout before failing a request to Elasticsearch.
-  #timeout: 90
-
-  # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
-  #ssl.enabled: true
-
-  # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
-  #ssl.verification_mode: full
-
-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
-
-  # List of root certificates for HTTPS server verifications.
-  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-  # Certificate for SSL client authentication.
-  #ssl.certificate: "/etc/pki/client/cert.pem"
-
-  # Client Certificate Key
-  #ssl.key: "/etc/pki/client/cert.key"
-
-  # Optional passphrase for decrypting the Certificate Key.
-  # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
-  #ssl.key_passphrase: ''
-
-  # Configure cipher suites to be used for SSL connections.
-  #ssl.cipher_suites: []
-
-  # Configure curve types for ECDHE based cipher suites.
-  #ssl.curve_types: []
-
-  # Configure what types of renegotiation are supported. Valid options are
-  # never, once, and freely. Default is never.
-  #ssl.renegotiation: never
-
-  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
-  #kerberos.enabled: true
-
-  # Authentication type to use with Kerberos. Available options: keytab, password.
-  #kerberos.auth_type: password
-
-  # Path to the keytab file. It is used when auth_type is set to keytab.
-  #kerberos.keytab: /etc/elastic.keytab
-
-  # Path to the Kerberos configuration.
-  #kerberos.config_path: /etc/krb5.conf
-
-  # Name of the Kerberos user.
-  #kerberos.username: elastic
-
-  # Password of the Kerberos user. It is used when auth_type is set to password.
-  #kerberos.password: changeme
-
-  # Kerberos realm.
-  #kerberos.realm: ELASTIC
-
-  #metrics.period: 10s
-  #state.period: 1m
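Almost everything in the template above is Elastic's commented-out reference configuration; only two values were actually templated. With the role default kibana_server_host: localhost and a hypothetical elastic_host of elastic.example.com, the rendered file reduced to effectively:

    apm-server:
      host: "localhost:8200"

    output.elasticsearch:
      hosts: ["elastic.example.com:9200"]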
diff --git a/ansible/roles/elastic/templates/kibana.yml.j2 b/ansible/roles/elastic/templates/kibana.yml.j2
deleted file mode 100644
index d22c9f6e8ff0d0392043f89badc35711e26c00e4..0000000000000000000000000000000000000000
--- a/ansible/roles/elastic/templates/kibana.yml.j2
+++ /dev/null
@@ -1,116 +0,0 @@
-# Kibana is served by a back end server. This setting specifies the port to use.
-server.port: {{ kibana_default_port }}
-
-# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
-# The default is 'localhost', which usually means remote machines will not be able to connect.
-# To allow connections from remote users, set this parameter to a non-loopback address.
-#server.host: "localhost"
-server.host: {{ kibana_server_host }}
-
-# Enables you to specify a path to mount Kibana at if you are running behind a proxy.
-# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
-# from requests it receives, and to prevent a deprecation warning at startup.
-# This setting cannot end in a slash.
-# server.basePath: ""
-
-# Specifies whether Kibana should rewrite requests that are prefixed with
-# `server.basePath` or require that they are rewritten by your reverse proxy.
-# This setting was effectively always `false` before Kibana 6.3 and will
-# default to `true` starting in Kibana 7.0.
-#server.rewriteBasePath: false
-
-# The maximum payload size in bytes for incoming server requests.
-#server.maxPayloadBytes: 1048576
-
-# The Kibana server's name.  This is used for display purposes.
-#server.name: "your-hostname"
-
-# The URLs of the Elasticsearch instances to use for all your queries.
-elasticsearch.hosts: ["http://{{ elastic_host }}:9200"]
-
-# When this setting's value is true Kibana uses the hostname specified in the server.host
-# setting. When the value of this setting is false, Kibana uses the hostname of the host
-# that connects to this Kibana instance.
-#elasticsearch.preserveHost: true
-
-# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
-# dashboards. Kibana creates a new index if the index doesn't already exist.
-#kibana.index: ".kibana"
-
-# The default application to load.
-#kibana.defaultAppId: "home"
-
-# If your Elasticsearch is protected with basic authentication, these settings provide
-# the username and password that the Kibana server uses to perform maintenance on the Kibana
-# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
-# is proxied through the Kibana server.
-#elasticsearch.username: "kibana_system"
-#elasticsearch.password: "pass"
-
-# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
-# These settings enable SSL for outgoing requests from the Kibana server to the browser.
-#server.ssl.enabled: false
-#server.ssl.certificate: /path/to/your/server.crt
-#server.ssl.key: /path/to/your/server.key
-
-# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
-# These files are used to verify the identity of Kibana to Elasticsearch and are required when
-# xpack.security.http.ssl.client_authentication in Elasticsearch is set to required.
-#elasticsearch.ssl.certificate: /path/to/your/client.crt
-#elasticsearch.ssl.key: /path/to/your/client.key
-
-# Optional setting that enables you to specify a path to the PEM file for the certificate
-# authority for your Elasticsearch instance.
-#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
-
-# To disregard the validity of SSL certificates, change this setting's value to 'none'.
-#elasticsearch.ssl.verificationMode: full
-
-# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
-# the elasticsearch.requestTimeout setting.
-#elasticsearch.pingTimeout: 1500
-
-# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
-# must be a positive integer.
-#elasticsearch.requestTimeout: 30000
-
-# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
-# headers, set this value to [] (an empty list).
-#elasticsearch.requestHeadersWhitelist: [ authorization ]
-
-# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
-# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
-#elasticsearch.customHeaders: {}
-
-# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
-#elasticsearch.shardTimeout: 30000
-
-# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
-#elasticsearch.startupTimeout: 5000
-
-# Logs queries sent to Elasticsearch. Requires logging.verbose set to true.
-#elasticsearch.logQueries: false
-
-# Specifies the path where Kibana creates the process ID file.
-#pid.file: /var/run/kibana.pid
-
-# Enables you to specify a file where Kibana stores log output.
-#logging.dest: stdout
-
-# Set the value of this setting to true to suppress all logging output.
-#logging.silent: false
-
-# Set the value of this setting to true to suppress all logging output other than error messages.
-#logging.quiet: false
-
-# Set the value of this setting to true to log all events, including system usage information
-# and all requests.
-#logging.verbose: false
-
-# Set the interval in milliseconds to sample system and process performance
-# metrics. Minimum is 100ms. Defaults to 5000.
-#ops.interval: 5000
-
-# Specifies locale to be used for all localizable strings, dates and number formats.
-# Supported languages are the following: English - en , by default , Chinese - zh-CN .
-#i18n.locale: "en"
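Likewise, only three values in this template were live. With the role defaults (kibana_default_port: 5601, kibana_server_host: localhost) and the same hypothetical elastic_host, the rendered kibana.yml reduced to:

    server.port: 5601
    server.host: localhost
    elasticsearch.hosts: ["http://elastic.example.com:9200"]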
diff --git a/ansible/roles/fail2ban/defaults/main.yml b/ansible/roles/fail2ban/defaults/main.yml
deleted file mode 100644
index 01cf6ad6c51d68f0fe9b01186c1ec70ba3df9cd7..0000000000000000000000000000000000000000
--- a/ansible/roles/fail2ban/defaults/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-
-f2b_packages:
-  - fail2ban
-  - rsyslog
-
-f2b_enabled: "{% if envsetup_fail2ban_enabled | bool %}true{% else %}false{% endif %}"
-f2b_ignoreip: 127.0.0.1/8 ::1
-f2b_maxretry: "{{ envsetup_fail2ban_maxretry | default('5', true) }}"
-f2b_bantime: "{{ envsetup_fail2ban_bantime | default('10m', true) }}"
-f2b_sender: "{{ envsetup_email_sender | default('root@localhost', true) }}"
-f2b_destemail: "{% if envsetup_fail2ban_dest_email is string %}{{ envsetup_fail2ban_dest_email }}{% else %}{{ envsetup_fail2ban_dest_email | join(',') }}{% endif %}"
-f2b_destemail_admins: "{% if envsetup_email_admins is string %}{{ envsetup_email_admins }}{% else %}{{ envsetup_email_admins | join(',') }}{% endif %}"
-f2b_action: "{% if envsetup_fail2ban_send_email | bool %}action_mwl{% else %}action_{% endif %}"
-
-...
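Note: the defaults above only pass through `envsetup_*` variables; a minimal sketch of the inventory values that feed them (every value below is illustrative, not a shipped default):

envsetup_fail2ban_enabled: true
envsetup_fail2ban_maxretry: "3"
envsetup_fail2ban_bantime: "30m"
envsetup_fail2ban_send_email: true              # selects the action_mwl action
envsetup_fail2ban_dest_email: ops@example.org   # a string or a list both work
envsetup_email_sender: fail2ban@example.org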
diff --git a/ansible/roles/fail2ban/handlers/main.yml b/ansible/roles/fail2ban/handlers/main.yml
deleted file mode 100644
index 83588db6e0e8b89317cd29fb2ae2a9767955f47d..0000000000000000000000000000000000000000
--- a/ansible/roles/fail2ban/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: restart fail2ban
-  systemd:
-    name: fail2ban
-    state: restarted
-
-...
diff --git a/ansible/roles/fail2ban/tasks/main.yml b/ansible/roles/fail2ban/tasks/main.yml
deleted file mode 100644
index 4b8eed8834eaed1a360da81f7a30278487e3a969..0000000000000000000000000000000000000000
--- a/ansible/roles/fail2ban/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-
-- name: packages
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ f2b_packages }}"
-    state: latest
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: jail defaults
-  notify: restart fail2ban
-  template:
-    src: jail.local.j2
-    dest: /etc/fail2ban/jail.local
-
-- name: service
-  systemd:
-    name: fail2ban
-    enabled: true
-    state: started
-
-...
diff --git a/ansible/roles/fail2ban/templates/jail.local.j2 b/ansible/roles/fail2ban/templates/jail.local.j2
deleted file mode 100644
index 71d2e3038895dbe2578485d8126f214dca2f0cc1..0000000000000000000000000000000000000000
--- a/ansible/roles/fail2ban/templates/jail.local.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-[DEFAULT]
-
-ignoreip = {{ f2b_ignoreip }}
-bantime = {{ f2b_bantime }}
-maxretry = {{ f2b_maxretry }}
-destemail = {{ f2b_destemail | default(f2b_destemail_admins, true) }}
-sender = {{ f2b_sender }}
-action = %({{ f2b_action }})s
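With the role defaults and the illustrative values sketched above (`envsetup_fail2ban_send_email: true`, `envsetup_fail2ban_dest_email: ops@example.org`), this template would render roughly as:

[DEFAULT]

ignoreip = 127.0.0.1/8 ::1
bantime = 30m
maxretry = 3
destemail = ops@example.org
sender = fail2ban@example.org
action = %(action_mwl)s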
diff --git a/ansible/roles/ferm-configure/defaults/main.yml b/ansible/roles/ferm-configure/defaults/main.yml
deleted file mode 100644
index 947f9f5b836d280a446994e0ed3642774eeeeac2..0000000000000000000000000000000000000000
--- a/ansible/roles/ferm-configure/defaults/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-
-# filename into which rules will be written
-# /etc/ferm/{ferm|input|output|forward}.d/<filename>.conf
-ferm_rules_filename: default
-
-# input rule
-ferm_input_rules: []
-
-# output rule
-ferm_output_rules: []
-
-# forward rule
-ferm_forward_rules: []
-
-# global settings to be put in ferm.d directory
-ferm_global_settings:
-
-...
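Each rule in these lists is a mapping whose keys mirror the fields handled by the templates further below (mod, helper, saddr, daddr, proto, dport, sport, policy); list values are joined with spaces. A sketch with illustrative addresses and ports:

ferm_rules_filename: web
ferm_input_rules:
  - saddr:
      - 192.0.2.0/24
    proto:
      - tcp
    dport:
      - 80
      - 443
    policy: accept
# would render into /etc/ferm/input.d/web.conf as:
# saddr @ipfilter((192.0.2.0/24)) proto (tcp) dport (80 443) ACCEPT;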
diff --git a/ansible/roles/ferm-configure/handlers/main.yml b/ansible/roles/ferm-configure/handlers/main.yml
deleted file mode 100644
index dec631c0142f85c713dc89830dba3d0abee43e14..0000000000000000000000000000000000000000
--- a/ansible/roles/ferm-configure/handlers/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-
-- name: restart ferm
-  when: ansible_facts.services['ferm.service'] is defined
-  systemd:
-    name: ferm
-    state: restarted
-
-- name: restart fail2ban
-  when: ansible_facts.services['fail2ban.service'] is defined
-  systemd:
-    name: fail2ban
-    state: restarted
-...
diff --git a/ansible/roles/ferm-configure/tasks/main.yml b/ansible/roles/ferm-configure/tasks/main.yml
deleted file mode 100644
index 2f3d4a0fb216f6fc1b1a68c0c2a34775c20f910e..0000000000000000000000000000000000000000
--- a/ansible/roles/ferm-configure/tasks/main.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-
-- name: populate service facts
-  service_facts:
-
-- name: directories
-  loop:
-    - /etc/ferm/ferm.d
-    - /etc/ferm/input.d
-    - /etc/ferm/output.d
-    - /etc/ferm/forward.d
-  file:
-    path: "{{ item }}"
-    state: directory
-
-- name: global
-  when: ferm_global_settings | d(false)
-  notify:
-    - restart ferm
-    - restart fail2ban
-  copy:
-    dest: /etc/ferm/ferm.d/{{ ferm_rules_filename }}.conf
-    content: "{{ ferm_global_settings }}"
-
-- name: input
-  when: ferm_input_rules | length > 0
-  notify:
-    - restart ferm
-    - restart fail2ban
-  template:
-    src: ferm_rules_input.conf.j2
-    dest: /etc/ferm/input.d/{{ ferm_rules_filename }}.conf
-
-- name: output
-  when: ferm_output_rules | length > 0
-  notify:
-    - restart ferm
-    - restart fail2ban
-  template:
-    src: ferm_rules_output.conf.j2
-    dest: /etc/ferm/output.d/{{ ferm_rules_filename }}.conf
-
-- name: forward
-  when: ferm_forward_rules | length > 0
-  notify:
-    - restart ferm
-    - restart fail2ban
-  template:
-    src: ferm_rules_forward.conf.j2
-    dest: /etc/ferm/forward.d/{{ ferm_rules_filename }}.conf
-
-...
diff --git a/ansible/roles/ferm-configure/templates/ferm_rules_forward.conf.j2 b/ansible/roles/ferm-configure/templates/ferm_rules_forward.conf.j2
deleted file mode 100644
index b0903511b24757f17264e4e75954b3eb42638eed..0000000000000000000000000000000000000000
--- a/ansible/roles/ferm-configure/templates/ferm_rules_forward.conf.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-{% for rule in ferm_forward_rules %}
-{% if rule.mod is defined and rule.mod %}mod {{ rule.mod }} {% endif %}
-{% if rule.helper is defined and rule.helper %}helper {{ rule.helper }} {% endif %}
-{% if rule.saddr is defined and rule.saddr %}saddr @ipfilter(({{ rule.saddr | join(' ') }})) {% endif %}
-{% if rule.daddr is defined and rule.daddr %}daddr @ipfilter(({{ rule.daddr | join(' ') }})) {% endif %}
-{% if rule.proto is defined and rule.proto %}proto ({{ rule.proto | join(' ') }}) {% endif %}
-{% if rule.dport is defined and rule.dport %}dport ({{ rule.dport | join(' ') }}) {% endif %}
-{% if rule.sport is defined and rule.sport %}sport ({{ rule.sport | join(' ') }}) {% endif %}
-{% if rule.policy is defined and rule.policy %}{{ rule.policy | upper }}{% else %}ACCEPT{% endif %};
-{% endfor %}
\ No newline at end of file
diff --git a/ansible/roles/ferm-configure/templates/ferm_rules_input.conf.j2 b/ansible/roles/ferm-configure/templates/ferm_rules_input.conf.j2
deleted file mode 100644
index 102d91d294898dc6f4c698e8a7ebe3f4e9bbce64..0000000000000000000000000000000000000000
--- a/ansible/roles/ferm-configure/templates/ferm_rules_input.conf.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-{% for rule in ferm_input_rules %}
-{% if rule.mod is defined and rule.mod %}mod {{ rule.mod }} {% endif %}
-{% if rule.helper is defined and rule.helper %}helper {{ rule.helper }} {% endif %}
-{% if rule.saddr is defined and rule.saddr %}saddr @ipfilter(({{ rule.saddr | join(' ') }})) {% endif %}
-{% if rule.daddr is defined and rule.daddr %}daddr @ipfilter(({{ rule.daddr | join(' ') }})) {% endif %}
-{% if rule.proto is defined and rule.proto %}proto ({{ rule.proto | join(' ') }}) {% endif %}
-{% if rule.dport is defined and rule.dport %}dport ({{ rule.dport | join(' ') }}) {% endif %}
-{% if rule.sport is defined and rule.sport %}sport ({{ rule.sport | join(' ') }}) {% endif %}
-{% if rule.policy is defined and rule.policy %}{{ rule.policy | upper }}{% else %}ACCEPT{% endif %};
-{% endfor %}
\ No newline at end of file
diff --git a/ansible/roles/ferm-configure/templates/ferm_rules_output.conf.j2 b/ansible/roles/ferm-configure/templates/ferm_rules_output.conf.j2
deleted file mode 100644
index 4784ab4e55e350fba0ec0554de40f46f2e08b10a..0000000000000000000000000000000000000000
--- a/ansible/roles/ferm-configure/templates/ferm_rules_output.conf.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-{% for rule in ferm_output_rules %}
-{% if rule.mod is defined and rule.mod %}mod {{ rule.mod }} {% endif %}
-{% if rule.helper is defined and rule.helper %}helper {{ rule.helper }} {% endif %}
-{% if rule.saddr is defined and rule.saddr %}saddr @ipfilter(({{ rule.saddr | join(' ') }})) {% endif %}
-{% if rule.daddr is defined and rule.daddr %}daddr @ipfilter(({{ rule.daddr | join(' ') }})) {% endif %}
-{% if rule.proto is defined and rule.proto %}proto ({{ rule.proto | join(' ') }}) {% endif %}
-{% if rule.dport is defined and rule.dport %}dport ({{ rule.dport | join(' ') }}) {% endif %}
-{% if rule.sport is defined and rule.sport %}sport ({{ rule.sport | join(' ') }}) {% endif %}
-{% if rule.policy is defined and rule.policy %}{{ rule.policy | upper }}{% else %}ACCEPT{% endif %};
-{% endfor %}
\ No newline at end of file
diff --git a/ansible/roles/ferm-install/defaults/main.yml b/ansible/roles/ferm-install/defaults/main.yml
deleted file mode 100644
index fc3d06fb67fb0d3a1e787489a6f907e0d274b26e..0000000000000000000000000000000000000000
--- a/ansible/roles/ferm-install/defaults/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-
-# packages to install
-ferm_packages:
-  - ferm
-
-# default filtering and logging policy for input traffic
-ferm_input_policy: DROP
-ferm_input_log: true
-ferm_input_log_prefix: "{{ ferm_input_policy }} INPUT "
-
-# default filtering and logging for output traffic
-ferm_output_policy: ACCEPT
-ferm_output_log: false
-ferm_output_log_prefix: "{{ ferm_output_policy }} OUTPUT "
-
-# default filtering and logging for forward traffic
-ferm_forward_policy: DROP
-ferm_forward_log: true
-ferm_forward_log_prefix: "{{ ferm_forward_policy }} FORWARD "
-
-# enable anti-lockout rule
-ferm_antilockout_enabled: true
-
-...
diff --git a/ansible/roles/ferm-install/handlers/main.yml b/ansible/roles/ferm-install/handlers/main.yml
deleted file mode 100644
index c2f8c0cb26f95dae4bacb2b598273310abd7bc20..0000000000000000000000000000000000000000
--- a/ansible/roles/ferm-install/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: restart ferm
-  systemd:
-    name: ferm
-    state: restarted
-
-...
diff --git a/ansible/roles/ferm-install/tasks/main.yml b/ansible/roles/ferm-install/tasks/main.yml
deleted file mode 100644
index c4de3e7a546ba2ffb34140e27e42b1464ead5382..0000000000000000000000000000000000000000
--- a/ansible/roles/ferm-install/tasks/main.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-
-- name: packages
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ ferm_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: configuration
-  notify: restart ferm
-  template:
-    src: ferm.conf.j2
-    dest: /etc/ferm/ferm.conf
-    backup: true
-
-- name: service
-  systemd:
-    name: ferm
-    enabled: true
-    masked: false
-    state: started
-
-...
diff --git a/ansible/roles/ferm-install/templates/ferm.conf.j2 b/ansible/roles/ferm-install/templates/ferm.conf.j2
deleted file mode 100644
index 219236dbe3d98af2e5f922dfdf2f8d2ef1aa3f9d..0000000000000000000000000000000000000000
--- a/ansible/roles/ferm-install/templates/ferm.conf.j2
+++ /dev/null
@@ -1,73 +0,0 @@
-# -*- shell-script -*-
-
-# include global rules
-@include 'ferm.d/';
-
-domain (ip ip6) {
-  table filter {
-    chain INPUT {
-        policy {{ ferm_input_policy | upper }};
-
-        # connection tracking
-        mod state state INVALID DROP;
-        mod state state (ESTABLISHED RELATED) ACCEPT;
-
-        # allow local connections
-        interface lo ACCEPT;
-
-        # allow ping
-        proto icmp ACCEPT;
-    {% if ferm_antilockout_enabled %}
-
-        # allow ssh, anti-lockout rule
-        proto tcp dport 22 ACCEPT;
-    {% endif %}
-
-        # include input rules
-        @include 'input.d/';
-    {% if ferm_input_log %}
-
-        # logging
-        LOG log-level warning log-prefix "{{ ferm_input_log_prefix }}";
-    {% endif %}
-    }
-
-    chain OUTPUT {
-        policy {{ ferm_output_policy | upper }};
-
-        # connection tracking
-        mod state state INVALID DROP;
-        mod state state (ESTABLISHED RELATED) ACCEPT;
-
-        # allow local connections
-        outerface lo ACCEPT;
-
-        # allow ping
-        proto icmp ACCEPT;
-
-        # include output rules
-        @include 'output.d/';
-    {% if ferm_output_log %}
-
-        # logging
-        LOG log-level warning log-prefix "{{ ferm_output_log_prefix }}";
-    {% endif %}
-    }
-
-    chain FORWARD {
-        policy {{ ferm_forward_policy | upper }};
-
-        # connection tracking
-        mod state state INVALID DROP;
-        mod state state (ESTABLISHED RELATED) ACCEPT;
-
-        # include forward rules
-        @include 'forward.d/';
-    {% if ferm_forward_log %}
-
-        # logging
-        LOG log-level warning log-prefix "{{ ferm_forward_log_prefix }}";
-    {% endif %}
-    }
-  }
-}
diff --git a/ansible/roles/haproxy/defaults/main.yml b/ansible/roles/haproxy/defaults/main.yml
deleted file mode 100644
index 35a1da1dfdd74d9aad0b66c39dc70647cc2213b1..0000000000000000000000000000000000000000
--- a/ansible/roles/haproxy/defaults/main.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-
-hap_packages:
-  - haproxy
-
-hap_config: /etc/haproxy/haproxy.cfg
-
-hap_config_global: |2
-  log /dev/log    local0 warning
-  log /dev/log    local1 warning
-  chroot /var/lib/haproxy
-  stats socket /run/haproxy/admin.sock mode 664 level admin
-  stats timeout 30s
-  user haproxy
-  group haproxy
-  daemon
-
-hap_config_defaults: |2
-  log global
-  mode tcp
-  balance leastconn
-  retries 2
-  timeout tunnel 30m
-  timeout client 30m
-  timeout connect 5s
-  timeout server 30m
-  timeout check 15s
-  option tcplog
-  option tcpka
-  option clitcpka
-  option srvtcpka
-
-hap_stats_enabled: false
-hap_config_stats: |2
-  bind :9000
-  mode http
-  stats enable
-  stats uri /
-  stats auth admin:password
-
-hap_config_listen:
-  - name: pgsql-primary
-    content: |2
-        bind :54321
-        default-server inter 2s fall 3 rise 2 on-marked-down shutdown-sessions
-        option tcp-check
-        tcp-check expect string primary
-        maxconn 500
-        server {{ groups['postgres'][0] }} {{ hostvars[groups['postgres'][0]]['ansible_default_ipv4']['address'] }}:5432 maxconn 500 check port 8543
-        server {{ groups['postgres'][1] }} {{ hostvars[groups['postgres'][1]]['ansible_default_ipv4']['address'] }}:5432 maxconn 500 check port 8543 backup
-# hap_config_listen: []
-
-...
diff --git a/ansible/roles/haproxy/handlers/main.yml b/ansible/roles/haproxy/handlers/main.yml
deleted file mode 100644
index 7e29375f6a5d93d91b74bdd14e2b3fc5e4f42f66..0000000000000000000000000000000000000000
--- a/ansible/roles/haproxy/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: reload haproxy
-  systemd:
-    name: haproxy
-    state: reloaded
-
-...
diff --git a/ansible/roles/haproxy/tasks/main.yml b/ansible/roles/haproxy/tasks/main.yml
deleted file mode 100644
index 0c9adbcffb73bd380a91ca0ebe162c22942036b2..0000000000000000000000000000000000000000
--- a/ansible/roles/haproxy/tasks/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-
-- name: install packages
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ hap_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: configure
-  notify: reload haproxy
-  template:
-    src: haproxy.cfg.j2
-    dest: /etc/haproxy/haproxy.cfg
-    backup: true
-
-- meta: flush_handlers
-
-...
diff --git a/ansible/roles/haproxy/templates/haproxy.cfg.j2 b/ansible/roles/haproxy/templates/haproxy.cfg.j2
deleted file mode 100644
index 3de0cc3e39030fc671bb8f2386cd092d05068669..0000000000000000000000000000000000000000
--- a/ansible/roles/haproxy/templates/haproxy.cfg.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-global
-{{ hap_config_global }}
-
-defaults
-{{ hap_config_defaults }}
-{% if hap_stats_enabled %}
-
-listen stats
-{{ hap_config_stats }}
-{% endif %}
-{% for listen in hap_config_listen %}
-
-listen {{ listen.name }}
-{{ listen.content }}
-{% endfor %}
diff --git a/ansible/roles/init/defaults/main.yml b/ansible/roles/init/defaults/main.yml
deleted file mode 100644
index d140d408264d82ff19e5703844a0e95a76e715db..0000000000000000000000000000000000000000
--- a/ansible/roles/init/defaults/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-
-init_packages:
-  - apt-utils
-  - gnupg
-  - ssh-client
-  - sudo
-
-...
diff --git a/ansible/roles/init/tasks/main.yml b/ansible/roles/init/tasks/main.yml
deleted file mode 100644
index 8ed302525e5103c3dc6a8c5fa90adb5e697bed5d..0000000000000000000000000000000000000000
--- a/ansible/roles/init/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-- name: install initial packages
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ init_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: configure proxy
-  when: proxy_when is not defined or proxy_when != "end"
-  include_role:
-    name: proxy
-    allow_duplicates: true
-
-...
diff --git a/ansible/roles/letsencrypt/defaults/main.yml b/ansible/roles/letsencrypt/defaults/main.yml
deleted file mode 100644
index f38c2f2dd5c4d484ad695896b24d698442bc145d..0000000000000000000000000000000000000000
--- a/ansible/roles/letsencrypt/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-letsencrypt_domains: []
-letsencrypt_webroot: /var/www/letsencrypt
-letsencrypt_email: sysadmin@ubicast.eu
-letsencrypt_testing: false
-
-...
diff --git a/ansible/roles/letsencrypt/handlers/main.yml b/ansible/roles/letsencrypt/handlers/main.yml
deleted file mode 100644
index 38fab58a222d274df7c367ebbe7a1853926660cf..0000000000000000000000000000000000000000
--- a/ansible/roles/letsencrypt/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: restart nginx
-  service:
-    name: nginx
-    state: restarted
-
-...
diff --git a/ansible/roles/letsencrypt/tasks/main.yml b/ansible/roles/letsencrypt/tasks/main.yml
deleted file mode 100644
index f831147ba5762317f785228df1611cf042198bf4..0000000000000000000000000000000000000000
--- a/ansible/roles/letsencrypt/tasks/main.yml
+++ /dev/null
@@ -1,129 +0,0 @@
----
-
-- name: install certbot
-  package:
-    force_apt_get: true
-    install_recommends: false
-    name: certbot
-
-- name: get all server_name values
-  when: letsencrypt_domains == []
-  changed_when: false
-  register: letsencrypt_nginx_output
-  shell:
-    executable: /bin/bash
-    cmd: >
-      set -o pipefail;
-      nginx -T 2>&1 | grep -v localhost | grep -P '^\s+server_name\s+.*;$' | sed -r 's/\s+server_name\s+(.*);/\1/' | uniq
-
-- name: save result as list
-  when: letsencrypt_domains == []
-  set_fact:
-    letsencrypt_domains: "{{ letsencrypt_nginx_output.stdout.split() }}"
-
-- name: save domains list in a file
-  register: letsencrypt_save_list
-  copy:
-    dest: /etc/letsencrypt/domains.txt
-    content: |
-      {% for domain in letsencrypt_domains %}
-      {{ domain }}
-      {% endfor %}
-
-- name: create webroot directory
-  file:
-    path: "{{ letsencrypt_webroot }}"
-    state: directory
-
-- name: create pre hook directory
-  file:
-    path: /etc/letsencrypt/renewal-hooks/pre
-    state: directory
-
-- name: create pre hook script
-  copy:
-    dest: /etc/letsencrypt/renewal-hooks/pre/mkdir
-    mode: 0755
-    content: |
-      #!/usr/bin/env bash
-      CERTBOT_DOCROOT=/var/www/letsencrypt
-      mkdir -p "$CERTBOT_DOCROOT"
-      chmod 755 "$CERTBOT_DOCROOT"
-
-- name: create deploy hook directory
-  file:
-    path: /etc/letsencrypt/renewal-hooks/deploy
-    state: directory
-
-- name: create deploy hook script
-  copy:
-    dest: /etc/letsencrypt/renewal-hooks/deploy/nginx
-    mode: 0755
-    content: |
-      #!/usr/bin/env bash
-      nginx -t > /dev/null 2>&1
-      systemctl reload nginx
-
-- name: test generate certificates
-  when:
-    - letsencrypt_domains != []
-    - letsencrypt_save_list is changed
-  register: letsencrypt_dry_run
-  ignore_errors: true
-  command:
-    cmd: >
-      certbot certonly
-        --dry-run
-        -n --agree-tos -m {{ letsencrypt_email }}
-        --webroot -w {{ letsencrypt_webroot }}
-        --expand
-        -d {{ letsencrypt_domains | join(',') }}
-
-- name: remove domains list file in case of failure
-  when: letsencrypt_dry_run is failed
-  file:
-    path: "{{ letsencrypt_save_list.dest }}"
-    state: absent
-
-- name: exit in case of failure
-  when: letsencrypt_dry_run is failed
-  fail:
-
-- name: generate certificates
-  notify: restart nginx
-  when:
-    - letsencrypt_domains != []
-    - letsencrypt_save_list is changed
-    - letsencrypt_dry_run is succeeded
-  command:
-    cmd: >
-      certbot certonly
-        {% if letsencrypt_testing %}--staging{% endif %}
-        -n --agree-tos -m {{ letsencrypt_email }}
-        --webroot -w {{ letsencrypt_webroot }}
-        --expand
-        -d {{ letsencrypt_domains | join(',') }}
-
-- name: update nginx certificate configuration
-  when:
-    - letsencrypt_domains != []
-    - letsencrypt_save_list is changed
-    - letsencrypt_dry_run is succeeded
-  notify: restart nginx
-  lineinfile:
-    path: /etc/nginx/conf.d/ssl_certificate.conf
-    regexp: 'ssl_certificate\s+([\w/\-\_\.]+);'
-    line: 'ssl_certificate /etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/fullchain.pem;'
-
-- name: update nginx certificate key configuration
-  when:
-    - letsencrypt_domains != []
-    - letsencrypt_save_list is changed
-    - letsencrypt_dry_run is succeeded
-  notify: restart nginx
-  lineinfile:
-    path: /etc/nginx/conf.d/ssl_certificate.conf
-    regexp: 'ssl_certificate_key\s+([\w/\-\_\.]+);'
-    line: 'ssl_certificate_key /etc/letsencrypt/live/{{ letsencrypt_domains[0] }}/privkey.pem;'
-
-...
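A sketch of the variables a play might set for this role, with the certbot command the tasks above would then issue (domain and email are illustrative):

letsencrypt_domains:
  - media.example.org
letsencrypt_email: admin@example.org
# command run after a successful --dry-run pass:
#   certbot certonly -n --agree-tos -m admin@example.org \
#     --webroot -w /var/www/letsencrypt --expand -d media.example.org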
diff --git a/ansible/roles/live/defaults/main.yml b/ansible/roles/live/defaults/main.yml
deleted file mode 100644
index 1b95f19b3e46793dc0272980bd1238a1e8ce811a..0000000000000000000000000000000000000000
--- a/ansible/roles/live/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-debian_packages:
-  - ubicast-live
-
-role_tmpfs_size: "{{ tmpfs_size | d('2048m') }}"
-
-...
diff --git a/ansible/roles/live/handlers/main.yml b/ansible/roles/live/handlers/main.yml
deleted file mode 100644
index b7774856aa335af9eb5885e0efcd4e2093c9e167..0000000000000000000000000000000000000000
--- a/ansible/roles/live/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: restart nginx
-  systemd:
-    name: nginx
-    state: restarted
-
-...
diff --git a/ansible/roles/live/tasks/main.yml b/ansible/roles/live/tasks/main.yml
deleted file mode 100644
index 1f3c1a6f578e94ee717a1101eb91f28b4883ddfb..0000000000000000000000000000000000000000
--- a/ansible/roles/live/tasks/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-
-- name: Live packages installation
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ debian_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: TMPFS creation to store the live chunks
-  notify: restart nginx
-  mount:
-    path: /var/tmp/nginx-rtmp
-    src: 'tmpfs'
-    fstype: tmpfs
-    opts: 'defaults,size={{ role_tmpfs_size }}'
-    state: mounted
-
-- name: Changing the rights on the TMPFS directory
-  notify: restart nginx
-  file:
-    path: /var/tmp/nginx-rtmp
-    owner: nginx
-    group: root
-    mode: '0700'
-
-...
diff --git a/ansible/roles/mediacache/defaults/main.yml b/ansible/roles/mediacache/defaults/main.yml
deleted file mode 100644
index fce7af4ed375d7f5c5be8055f06f4f5149e731d9..0000000000000000000000000000000000000000
--- a/ansible/roles/mediacache/defaults/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-
-# MediaCache data folder - for VOD
-role_mc_vod_folder: "{{ mediacache_vod_folder | d('/var/cache/nginx/mediacache-vod') }}"
-# MediaCache size in GB - for VOD
-role_mc_vod_size: "{{ mediacache_vod_size }}"
-
-# MediaCache data folder - for live
-role_mc_live_folder: "{{ mediacache_live_folder | d('/var/cache/nginx/mediacache-live') }}"
-# MediaCache size in GB - for live
-role_mc_live_size: "{{ mediacache_live_size | d('1') }}"
-
-...
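The role takes its paths and sizes from `mediacache_*` host variables; a minimal sketch (the folders shown are the role defaults, the sizes are illustrative, and `mediacache_vod_size` has no default so it must be set):

mediacache_vod_folder: /var/cache/nginx/mediacache-vod
mediacache_vod_size: "500"    # GB
mediacache_live_folder: /var/cache/nginx/mediacache-live
mediacache_live_size: "1"     # GB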
diff --git a/ansible/roles/mediacache/handlers/main.yml b/ansible/roles/mediacache/handlers/main.yml
deleted file mode 100644
index b7774856aa335af9eb5885e0efcd4e2093c9e167..0000000000000000000000000000000000000000
--- a/ansible/roles/mediacache/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: restart nginx
-  systemd:
-    name: nginx
-    state: restarted
-
-...
diff --git a/ansible/roles/mediacache/meta/main.yml b/ansible/roles/mediacache/meta/main.yml
deleted file mode 100644
index 9c7711bb55dd2f0f0059aad32d2ac8d0f2050997..0000000000000000000000000000000000000000
--- a/ansible/roles/mediacache/meta/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-dependencies:
-  - role: base
-  - role: nginx
-
-...
diff --git a/ansible/roles/mediacache/tasks/main.yml b/ansible/roles/mediacache/tasks/main.yml
deleted file mode 100644
index fc09d6b6b9b6b1ec5630569d2180bb1c5f6803b5..0000000000000000000000000000000000000000
--- a/ansible/roles/mediacache/tasks/main.yml
+++ /dev/null
@@ -1,85 +0,0 @@
----
-
-- name: MediaCache packages installation
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ debian_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: resolve domain name to localhost
-  lineinfile:
-    path: /etc/hosts
-    line: '127.0.1.1 {{ mediacache_url }}'
-    backup: true
-
-- name: create mediacache VOD data directory
-  file:
-    dest: '{{ role_mc_vod_folder }}'
-    state: directory
-    owner: nginx
-    group: root
-    mode: '0700'
-
-- name: create mediacache live data directory
-  file:
-    dest: '{{ role_mc_live_folder }}'
-    state: directory
-    owner: nginx
-    group: root
-    mode: '0700'
-  when: live_url is defined
-
-- name: fill the vhost file
-  notify: restart nginx
-  replace:
-    path: /etc/nginx/sites-available/mediacache.conf
-    regexp: '^(\s+server_name)\s+.*(;)$'
-    replace: '\1 {{ mediacache_url }}\2'
-
-- name: fill the mediacache zones file - VOD folder
-  notify: restart nginx
-  replace:
-    path: /etc/mediacache/nginx-zones.conf
-    regexp: '/var/cache/nginx/mediacache-vod'
-    replace: '{{ role_mc_vod_folder }}'
-
-- name: fill the mediacache zones file - Live folder
-  notify: restart nginx
-  replace:
-    path: /etc/mediacache/nginx-zones.conf
-    regexp: '/var/cache/nginx/mediacache-live'
-    replace: '{{ role_mc_live_folder }}'
-
-- name: fill the mediacache zones file - VOD folder size
-  notify: restart nginx
-  replace:
-    path: /etc/mediacache/nginx-zones.conf
-    regexp: '(?P<key>keys_zone=mediacache-vod.*max_size=).*(?P<unit>g)'
-    replace: '\g<key>{{ role_mc_vod_size }}\g<unit>'
-
-- name: fill the mediacache zones file - Live folder size
-  notify: restart nginx
-  replace:
-    path: /etc/mediacache/nginx-zones.conf
-    regexp: '(?P<key>keys_zone=mediacache-live.*max_size=).*(?P<unit>g)'
-    replace: '\g<key>{{ role_mc_live_size }}\g<unit>'
-
-- name: fill the nginx VOD proxypass
-  notify: restart nginx
-  replace:
-    path: /etc/mediacache/nginx-proxy-mediaserver.conf
-    regexp: '^(proxy_pass)\s+.*(;)$'
-    replace: '\1 https://{{ ms_url }}\2'
-
-- name: fill the nginx Live proxypass
-  notify: restart nginx
-  replace:
-    path: /etc/mediacache/nginx-proxy-live.conf
-    regexp: '^(proxy_pass)\s+.*(;)$'
-    replace: '\1 https://{{ live_url }}\2'
-
-...
diff --git a/ansible/roles/mediacache/templates/index.j2 b/ansible/roles/mediacache/templates/index.j2
deleted file mode 100644
index b2ade6d4e339666402838b7e8f4181cbb2c4c23a..0000000000000000000000000000000000000000
--- a/ansible/roles/mediacache/templates/index.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-<!DOCTYPE html>
-<html xmlns="http://www.w3.org/1999/xhtml">
-    <head>
-        <title>UbiCast cache server</title>
-        <style>
-            html { background: #222; color: #ddd; }
-            body { margin: 0 auto; max-width: 500px; }
-            a { color: #5cf; text-decoration: none; }
-            a:hover { text-decoration: underline; }
-        </style>
-    </head>
-    <body>
-        <h1>UbiCast cache server</h1>
-        <hr/>
-        <p>Powered by UbiCast -- <a href="https://www.ubicast.eu">https://www.ubicast.eu</a></p>
-    </body>
-</html>
diff --git a/ansible/roles/mediacache/templates/mediacache.j2 b/ansible/roles/mediacache/templates/mediacache.j2
deleted file mode 100644
index 3b8795834921d87a276e08d48118732d86a05c6d..0000000000000000000000000000000000000000
--- a/ansible/roles/mediacache/templates/mediacache.j2
+++ /dev/null
@@ -1,96 +0,0 @@
-# Cache configuration
-#  Maximum retention of 30 days
-#  1G of RAM for keys zone (one megabyte can store about 8 thousand keys)
-#  worst case is up to 2800 hours of 720p video (50M files) on a 2 TB cache
-#  WARNING: keys_zone=name:size size does not accept g units, only m
-#  https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path
-proxy_cache_path {{ mediacache_folder }} levels=1:2 keys_zone=mediacache:1000m max_size={{ mediacache_size }}g inactive=30d;
-
-server {
-    listen 80 default_server;
-
-    server_name {{ mc_url }};
-
-    location /.well-known/acme-challenge {
-        default_type "text/plain";
-        root /var/www/letsencrypt;
-    }
-
-    location / {
-        return 301 https://$host$request_uri;
-    }
-
-}
-
-server {
-    listen 443 default_server ssl backlog=15000;
-
-    server_name {{ mc_url }};
-    root /var/www/mediacache/;
-
-    location /crossdomain {
-
-    }
-
-    location /streaming/ {
-        # Live; expiration headers are defined by upstream (nginx/wowza)
-        rewrite ^/(.*)$ /$1? break;
-        proxy_pass https://{{ ms_url }};
-        proxy_cache mediacache;
-
-        # do not consider secure urls as new files
-        proxy_cache_key $scheme$proxy_host$uri;
-
-        # only one request at a time will be allowed to populate a new cache element
-        proxy_cache_lock on;
-
-        # hide upstream X-Cache header
-        proxy_hide_header X-Cache;
-
-        # add own X-Cache header
-        add_header X-Cache $upstream_cache_status;
-
-        # rm cookie
-        proxy_hide_header       Set-Cookie;
-        proxy_ignore_headers    Set-Cookie;
-
-    }
-
-    location /resources/ {
-        # VOD
-        location ~ \.(m3u8|ts|mp4|mp3|oga|ogv|ogg|mov|flv)$ {
-            rewrite ^/(.*)$ /$1? break;
-            proxy_pass https://{{ ms_url }};
-            proxy_cache mediacache;
-
-            # do not consider secure urls as new files
-            proxy_cache_key $scheme$proxy_host$uri;
-
-            # only one request at a time will be allowed to populate a new cache element
-            proxy_cache_lock on;
-
-            # how long should the data be kept in the cache
-            proxy_cache_valid 200 30d;
-
-            # instruct browser to cache this
-            expires 7d;
-
-            # headers
-            proxy_ignore_headers "Cache-Control" "X-Accel-Expires" "Expires";
-            add_header X-Cache $upstream_cache_status;
-
-            # rm cookie
-            proxy_hide_header       Set-Cookie;
-            proxy_ignore_headers    Set-Cookie;
-
-        }
-
-    }
-
-    location / {
-        # only URLs to video and audio files are allowed; any requested path for other URLs is discarded
-        rewrite ^/(.*)$ /index.html? break;
-
-    }
-
-}
diff --git a/ansible/roles/mediacache/vars/main.yml b/ansible/roles/mediacache/vars/main.yml
deleted file mode 100644
index dc62c20bdd01e84c20e66b16efb00a0767671957..0000000000000000000000000000000000000000
--- a/ansible/roles/mediacache/vars/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-debian_packages:
-  - ubicast-mediacache
-
-...
diff --git a/ansible/roles/mediaimport/defaults/main.yml b/ansible/roles/mediaimport/defaults/main.yml
deleted file mode 100644
index ee7c14b755c8520fb96c6092f07304c11af9e15f..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaimport/defaults/main.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-
-mediaimport_users:
-  - name: "{{ envsetup_mediaimport_user | d() }}"
-    passwd: "{{ envsetup_mediaimport_password | d() }}"
-
-mediaimport_packages:
-  - clamav
-  - mysecureshell
-  - openssh-server
-  - openssl
-  - pure-ftpd
-  - python3-unidecode
-  - ubicast-mediaimport
-  # required by ansible tasks
-  - python3-openssl
-
-mediaimport_pureftpd_config:
-  - key: AllowDotFiles
-    value: "no"
-  - key: CallUploadScript
-    value: "yes"
-  - key: ChrootEveryone
-    value: "yes"
-  - key: DontResolve
-    value: "yes"
-  - key: PAMAuthentication
-    value: "yes"
-  - key: TLS
-    value: "1"
-
-mediaimport_virus_scan_on_upload: false
-
-mediaimport_ms_api_key: "{{ envsetup_ms_api_key | d() }}"
-mediaimport_ms_server_name: "{{ envsetup_ms_server_name | d() }}"
-
-mediaimport_fail2ban_enabled: "{{ envsetup_fail2ban_enabled | d(true) }}"
-
-mediaimport_firewall_enabled: true
-mediaimport_ferm_rules_filename: import
-mediaimport_ferm_input_rules:
-  - proto:
-      - tcp
-    dport:
-      - 21
-      - 22
-  - mod: helper
-    helper: ftp
-mediaimport_ferm_output_rules: []
-mediaimport_ferm_global_settings:
-
-...
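A sketch of the host variables that feed these defaults (user, password and server name are placeholders):

envsetup_mediaimport_user: import01
envsetup_mediaimport_password: "changeme"
envsetup_ms_server_name: media.example.org
envsetup_ms_api_key: "<api-key>"   # placeholder, keep real keys out of version control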
diff --git a/ansible/roles/mediaimport/files/mediaimport b/ansible/roles/mediaimport/files/mediaimport
deleted file mode 100644
index 3294539e8c7c441ecca84e81478fdbbc02e7e7da..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaimport/files/mediaimport
+++ /dev/null
@@ -1,6 +0,0 @@
-# purge mediaimport files that are older than 60 days
-0 23 * * * root /usr/bin/find /home/ftp/storage/incoming/ -type f -mtime +60 -delete
-
-# purge empty folders
-0 23 * * * root /usr/bin/find /home/ftp/storage -type d -empty -name thumbnails -delete
-0 23 * * * root /usr/bin/find /home/ftp/storage -type d -empty -name "*20*-*" -delete
diff --git a/ansible/roles/mediaimport/files/mediaimport.py b/ansible/roles/mediaimport/files/mediaimport.py
deleted file mode 100644
index 0cb2b47a7db80ddc10a4f9b40c7ee0eab35cc445..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaimport/files/mediaimport.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env python3
-
-import argparse
-import crypt
-import shutil
-import subprocess
-
-BASE_DIR = "/home/ftp/storage"
-INCOMING_DIR = BASE_DIR + "/incoming"
-WATCH_DIR = BASE_DIR + "/watchfolder"
-
-
-def main():
-    commands = MediaImport()
-
-    parser = argparse.ArgumentParser(prog="mediaimport", description=commands.__doc__)
-    subparsers = parser.add_subparsers(title="available commands", dest="command")
-    subparsers.required = True
-
-    # add command and arguments
-    parser_add = subparsers.add_parser("add", help=commands.add_user.__doc__)
-    parser_add.add_argument(
-        "-u",
-        "--user",
-        help="username",
-        action="store",
-        type=commands._new_user,
-        required=True,
-    )
-    parser_add.add_argument(
-        "-p", "--passwd", help="password", action="store", type=str, required=True
-    )
-    parser_add.add_argument(
-        "-y", "--yes", action="store_true", help="do not prompt for confirmation"
-    )
-    parser_add.set_defaults(func=commands.add_user)
-
-    # delete command and arguments
-    parser_del = subparsers.add_parser("delete", help=commands.del_user.__doc__)
-    parser_del.add_argument(
-        "-u",
-        "--user",
-        help="username",
-        action="store",
-        type=commands._user,
-        required=True,
-    )
-    parser_del.add_argument(
-        "-y", "--yes", action="store_true", help="do not prompt for confirmation"
-    )
-    parser_del.set_defaults(func=commands.del_user)
-
-    # list command and arguments
-    parser_list = subparsers.add_parser("list", help=commands.list_users.__doc__)
-    parser_list.set_defaults(func=commands.list_users)
-
-    # parse and run
-    args = parser.parse_args()
-    args.func(args)
-
-
-class MediaImport:
-    """Manage mediaimport users."""
-
-    def __init__(self):
-        self.users = self._get_users()
-
-    def _get_users(self) -> list:
-        """Get mysecureshell users list."""
-
-        with open("/etc/passwd") as fh:
-            passwd = fh.readlines()
-
-        return sorted(
-            [
-                u.split(":")[0]
-                for u in passwd
-                if u.split(":")[-1].strip() == "/usr/bin/mysecureshell"
-            ]
-        )
-
-    def _confirm(self, message: str = None):
-        """Ask for confirmation."""
-
-        if message:
-            print(message)
-        choice = input("Do you want to continue [y/N]? ").lower()
-
-        if choice not in ["y", "yes"]:
-            print("Exit.")
-            exit(0)
-
-    def _new_user(self, value: str) -> str:
-        """Check that username does not exist."""
-
-        if value in self.users:
-            raise argparse.ArgumentTypeError(f"{value} already exists")
-
-        return value
-
-    def _user(self, value: str) -> str:
-        """Check that username exists."""
-
-        if value not in self.users:
-            raise argparse.ArgumentTypeError(f"{value} does not exist")
-
-        return value
-
-    def add_user(self, args: argparse.Namespace):
-        """add an user"""
-
-        username = args.user
-        password = args.passwd
-
-        if not args.yes:
-            self._confirm(f"MediaImport user '{username}' will be created.")
-
-        # create user
-        subprocess.Popen(
-            [
-                "useradd",
-                "-b",
-                INCOMING_DIR,
-                "-m",
-                "-p",
-                crypt.crypt(password),
-                "-s",
-                "/usr/bin/mysecureshell",
-                "-U",
-                username,
-            ],
-            stdout=subprocess.DEVNULL,
-        )
-        print(f"User {username} created, adjust /etc/mediaserver/mediaimport.json and restart the service:\nsystemctl restart mediaimport")
-
-    def del_user(self, args: argparse.Namespace):
-        """delete an user"""
-
-        username = args.user
-        paths = [f"{INCOMING_DIR}/{username}", f"{WATCH_DIR}/{username}"]
-
-        if not args.yes:
-            self._confirm(f"MediaImport user '{username}' data will be deleted.")
-
-        # remove user
-        subprocess.Popen(
-            ["userdel", "-f", "-r", username],
-            stdout=subprocess.DEVNULL,
-            stderr=subprocess.DEVNULL,
-        )
-
-        # remove user's folders
-        for path in paths:
-            shutil.rmtree(path, ignore_errors=True)
-        print(f"User {username} deleted, adjust /etc/mediaserver/mediaimport.json and restart the service:\nsystemctl restart mediaimport")
-
-    def list_users(self, args: argparse.Namespace):
-        """list existing users"""
-
-        if len(self.users):
-            print("\n".join(self.users))
-
-
-if __name__ == "__main__":
-    main()
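Typical invocations of the installed script, matching the argparse commands above (the username and password are placeholders; `--yes` skips the confirmation prompt):

mediaimport add --user alice --passwd 's3cret' --yes
mediaimport list
mediaimport delete --user alice --yes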
diff --git a/ansible/roles/mediaimport/files/on-upload b/ansible/roles/mediaimport/files/on-upload
deleted file mode 100755
index 770286dd32b194ba8c3e4c538fec4faced985db2..0000000000000000000000000000000000000000
Binary files a/ansible/roles/mediaimport/files/on-upload and /dev/null differ
diff --git a/ansible/roles/mediaimport/files/on-upload.go b/ansible/roles/mediaimport/files/on-upload.go
deleted file mode 100644
index b4d27ecf5724da51c3af4c2fe1dc470661ebe518..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaimport/files/on-upload.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package main
-
-import (
-	"log"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strings"
-	"unicode"
-
-	"github.com/jessevdk/go-flags"
-	"golang.org/x/text/transform"
-	"golang.org/x/text/unicode/norm"
-)
-
-const (
-	baseDir       = "/home/ftp/storage"
-	incomingDir   = baseDir + "/incoming"
-	watchDir      = baseDir + "/watchfolder"
-	quarantineDir = baseDir + "/quarantine"
-)
-
-func setPermissions(path string) error {
-	stat, err := os.Stat(path)
-	if err != nil {
-		return err
-	}
-
-	switch mode := stat.Mode(); {
-	case mode.IsDir():
-		if err := os.Chmod(path, 0755); err != nil {
-			return err
-		}
-	case mode.IsRegular():
-		if err := os.Chmod(path, 0644); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func cleanName(filename string) string {
-	// normalize
-	isMn := func(r rune) bool {
-		return unicode.Is(unicode.Mn, r)
-	}
-	t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
-	cleanedName, _, _ := transform.String(t, filename)
-
-	// replace non allowed characters
-	allowedChars := strings.Split("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-.", "")
-	for _, filenameChar := range strings.Split(cleanedName, "") {
-		flagged := false
-		for _, allowedChar := range allowedChars {
-			if filenameChar == allowedChar {
-				flagged = true
-			}
-		}
-		// if not in allowed list replace by underscore
-		if !flagged {
-			cleanedName = strings.Replace(cleanedName, filenameChar, "_", 1)
-		}
-	}
-
-	return cleanedName
-}
-
-func virusScan(path string) error {
-	// will move file into quarantine directory if infected
-	cmd := exec.Command(
-		"/usr/bin/clamscan",
-		"--quiet",
-		"--infected",
-		"--recursive",
-		"--move="+quarantineDir,
-		"--max-scantime=600000", // 10 minutes
-		"--max-filesize=4000M",
-		"--max-scansize=4000M",
-		"--max-files=200",
-		"--max-recursion=6",
-		"--max-dir-recursion=6",
-		path,
-	)
-	err := cmd.Run()
-
-	return err
-}
-
-func main() {
-	var opts struct {
-		Scan bool `short:"s" long:"scan-virus" description:"Scan file for virus"`
-		Args struct {
-			SrcPaths []string `positional-arg-name:"path" required:"yes" description:"Paths of uploaded files"`
-		} `positional-args:"yes"`
-	}
-
-	if _, err := flags.Parse(&opts); err != nil {
-		os.Exit(1)
-	}
-
-	for _, srcPath := range opts.Args.SrcPaths {
-		// check that file is into incoming folder
-		if !strings.HasPrefix(srcPath, baseDir) {
-			log.Fatalln("file not in base dir (" + baseDir + "): " + srcPath)
-		}
-
-		// ensure permissions are correct
-		if err := setPermissions(srcPath); err != nil {
-			log.Fatalln(err)
-		}
-
-		// scan for virus if enabled
-		if opts.Scan {
-			if err := os.MkdirAll(quarantineDir, 0775); err != nil {
-				log.Fatalln(err)
-			}
-			if err := virusScan(srcPath); err != nil {
-				log.Fatalln(err)
-			}
-		}
-
-		// cleanup and set destination path
-		srcDir, srcFile := filepath.Split(srcPath)
-		dstFile := cleanName(srcFile)
-		dstDir := strings.ReplaceAll(srcDir, incomingDir, watchDir)
-		dstPath := dstDir + dstFile
-
-		// create destination directory
-		if err := os.MkdirAll(dstDir, 0775); err != nil {
-			log.Fatalln(err)
-		}
-
-		// move file into watchfolder
-		if err := os.Rename(srcPath, dstPath); err != nil {
-			log.Fatalln(err)
-		}
-
-		log.Println(srcPath + " moved to " + dstPath)
-	}
-}
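For example, under the constants above, an upload of incoming/alice/Conférence 2024.mp4 (alice being a placeholder account) would be chmod'd to 0644, have its name normalized to Conference_2024.mp4 (combining accents stripped, the space replaced by an underscore), and be moved to watchfolder/alice/Conference_2024.mp4.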
diff --git a/ansible/roles/mediaimport/handlers/main.yml b/ansible/roles/mediaimport/handlers/main.yml
deleted file mode 100644
index f432847852fa10ae85f5385c7087d51337ec5b7b..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaimport/handlers/main.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-
-- name: reload systemd
-  systemd:
-    daemon_reload: true
-
-- name: restart pure-ftpd
-  systemd:
-    name: pure-ftpd
-    state: restarted
-
-- name: restart mysecureshell
-  systemd:
-    name: mysecureshell
-    state: restarted
-
-- name: restart mediaimport
-  systemd:
-    name: mediaimport
-    state: restarted
-
-- name: restart fail2ban
-  systemd:
-    name: fail2ban
-    state: restarted
-
-- name: sftp-verif
-  command:
-    cmd: timeout 30 sftp-verif
-
-...
diff --git a/ansible/roles/mediaimport/meta/main.yml b/ansible/roles/mediaimport/meta/main.yml
deleted file mode 100644
index e45d692ae3567f856967cd6f66c91d13e2e94e4e..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaimport/meta/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-dependencies:
-  - role: base
-
-...
diff --git a/ansible/roles/mediaimport/tasks/main.yml b/ansible/roles/mediaimport/tasks/main.yml
deleted file mode 100644
index 9ba9adb2a77e259b30bce0641dfdafcbfce28afc..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaimport/tasks/main.yml
+++ /dev/null
@@ -1,166 +0,0 @@
----
-
-- name: install packages
-  package:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ mediaimport_packages }}"
-
-## USERS
-
-- name: create ftp folders
-  loop:
-    - /home/ftp/storage/incoming
-    - /home/ftp/storage/watchfolder
-  file:
-    path: "{{ item }}"
-    state: directory
-
-- name: deploy users management script
-  copy:
-    src: files/mediaimport.py
-    dest: /usr/local/bin/mediaimport
-    mode: 0755
-
-- name: create users
-  loop: "{{ mediaimport_users }}"
-  when:
-    - item.name | d(false)
-    - item.passwd | d(false)
-  no_log: true
-  command: mediaimport add --yes --user {{ item.name }} --passwd {{ item.passwd }}
-  args:
-    creates: /home/ftp/storage/incoming/{{ item.name }}
-
-- name: deploy on-upload script with setuid
-  copy:
-    src: files/on-upload
-    dest: /home/ftp/on-upload
-    mode: 04755
-
-## MYSECURESHELL
-
-- name: set the setuid on mysecureshell
-  file:
-    path: /usr/bin/mysecureshell
-    mode: 04755
-
-- name: configure mysecureshell
-  notify:
-    - restart mysecureshell
-    - sftp-verif
-  template:
-    src: sftp_config.j2
-    dest: /etc/ssh/sftp_config
-
-## PURE-FTPD
-
-- name: set pure-ftpd default config
-  notify: restart pure-ftpd
-  copy:
-    dest: /etc/default/pure-ftpd-common
-    content: |
-      STANDALONE_OR_INETD=standalone
-      VIRTUALCHROOT=false
-      UPLOADSCRIPT="/home/ftp/on-upload{% if mediaimport_virus_scan_on_upload %} --scan-virus{% endif %}"
-      UPLOADUID=0
-      UPLOADGID=0
-
-- name: configure pure-ftpd
-  notify: restart pure-ftpd
-  loop: "{{ mediaimport_pureftpd_config }}"
-  copy:
-    dest: /etc/pure-ftpd/conf/{{ item.key }}
-    content: "{{ item.value }}"
-
-## PURE-FTPD CERTIFICATES
-
-- name: create certificate directory
-  file:
-    path: /etc/ssl/{{ ansible_fqdn }}
-    state: directory
-
-- name: generate a private key
-  register: mediaimport_privkey
-  openssl_privatekey:
-    path: /etc/ssl/{{ ansible_fqdn }}/key.pem
-
-- name: generate a CSR
-  when: mediaimport_privkey is changed
-  register: mediaimport_csr
-  openssl_csr:
-    path: /etc/ssl/{{ ansible_fqdn }}/csr.pem
-    privatekey_path: /etc/ssl/{{ ansible_fqdn }}/key.pem
-    common_name: "{{ ansible_fqdn }}"
-
-- name: generate a self-signed certificate
-  when: mediaimport_csr is changed
-  register: mediaimport_cert
-  openssl_certificate:
-    path: /etc/ssl/{{ ansible_fqdn }}/cert.pem
-    privatekey_path: /etc/ssl/{{ ansible_fqdn }}/key.pem
-    csr_path: /etc/ssl/{{ ansible_fqdn }}/csr.pem
-    provider: selfsigned
-
-- name: concatenate key and certificate
-  when: mediaimport_cert is changed
-  notify: restart pure-ftpd
-  shell: >
-    cat /etc/ssl/{{ ansible_fqdn }}/key.pem /etc/ssl/{{ ansible_fqdn }}/cert.pem > /etc/ssl/private/pure-ftpd.pem;
-    chmod 600 /etc/ssl/private/pure-ftpd.pem;
-
-- name: generate dhparams
-  notify: restart pure-ftpd
-  openssl_dhparam:
-    path: /etc/ssl/private/pure-ftpd-dhparams.pem
-    size: 1024
-
-## MEDIAIMPORT
-
-- name: setup cron job
-  copy:
-    src: files/mediaimport
-    dest: /etc/cron.d/mediaimport
-
-- name: configure mediaimport
-  when:
-    - mediaimport_ms_api_key | d(false)
-    - mediaimport_ms_server_name | d(false)
-  notify: restart mediaimport
-  template:
-    src: mediaimport.json.j2
-    dest: /etc/mediaserver/mediaimport.json
-    backup: true
-    mode: 0640
-
-- name: enable mediaimport service
-  systemd:
-    name: mediaimport
-    enabled: true
-
-# FAIL2BAN
-
-- name: deploy fail2ban jail
-  notify: restart fail2ban
-  template:
-    src: fail2ban_ftpd.conf.j2
-    dest: /etc/fail2ban/jail.d/pure-ftpd.conf
-    mode: 0644
-
-- meta: flush_handlers
-
-# FIREWALL
-
-- name: firewall
-  when: mediaimport_firewall_enabled
-  vars:
-    ferm_rules_filename: "{{ mediaimport_ferm_rules_filename }}"
-    ferm_input_rules: "{{ mediaimport_ferm_input_rules }}"
-    ferm_output_rules: "{{ mediaimport_ferm_output_rules }}"
-    ferm_global_settings: "{{ mediaimport_ferm_global_settings }}"
-  include_role:
-    name: ferm-configure
-
-- meta: flush_handlers
-
-...
diff --git a/ansible/roles/mediaimport/templates/fail2ban_ftpd.conf.j2 b/ansible/roles/mediaimport/templates/fail2ban_ftpd.conf.j2
deleted file mode 100644
index e8f463cc2eaa8462e626536cf1a298a6239b0b83..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaimport/templates/fail2ban_ftpd.conf.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-[pure-ftpd]
-enabled = {% if mediaimport_fail2ban_enabled | bool %}true{% else %}false{% endif %}
diff --git a/ansible/roles/mediaimport/templates/mediaimport.json.j2 b/ansible/roles/mediaimport/templates/mediaimport.json.j2
deleted file mode 100644
index aa2ac42d07bc20a0b8cc0d2fb5bb7eb27b31632b..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaimport/templates/mediaimport.json.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "email_to": "support-team@ubicast.eu",
-  "users": [{% for user in mediaimport_users %}
-    {
-      "enabled": true,
-      "mediaserver_api_key": "{{ mediaimport_ms_api_key }}",
-      "mediaserver_url": "https://{{ mediaimport_ms_server_name }}",
-      "folders": [
-        {
-          "path": "/home/ftp/storage/watchfolder/{{ user.name }}"
-        }
-      ]
-    }{% if not loop.last %},{% endif %}
-  {% endfor %}]
-}
diff --git a/ansible/roles/mediaimport/templates/sftp_config.j2 b/ansible/roles/mediaimport/templates/sftp_config.j2
deleted file mode 100644
index b33a786d8fd2b3029536244801a3a1c076859b2d..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaimport/templates/sftp_config.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-## MySecureShell Configuration File
-# To get more information on all possible options, please look at the doc:
-# http://mysecureshell.readthedocs.org
-
-#Default rules for everybody
-<Default>
-        GlobalDownload          50k
-        GlobalUpload            0
-        Download                5k
-        Upload                  0
-        StayAtHome              true
-        VirtualChroot           true
-        LimitConnection         100
-        LimitConnectionByUser   2
-        LimitConnectionByIP     10
-        Home                    /home/ftp/storage/incoming/$USER
-        CallbackUpload          "/home/ftp/on-upload{% if mediaimport_virus_scan_on_upload %} --scan-virus{% endif %} /home/ftp/storage/incoming/$USER$LAST_FILE_PATH"
-        IdleTimeOut             5m
-        ResolveIP               false
-        HideNoAccess            true
-        DefaultRights           0640 0750
-        ShowLinksAsLinks        false
-        LogFile                 /var/log/sftp-server.log
-        LogLevel                6
-        LogSyslog               true
-</Default>
diff --git a/ansible/roles/mediaserver/defaults/main.yml b/ansible/roles/mediaserver/defaults/main.yml
deleted file mode 100644
index cb06469b4dcc0e23125f65e7c3df8b9842cf6dd9..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaserver/defaults/main.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-
-server_packages:
-  - postgresql-client
-  - cron
-  - memcached
-  - nginx
-  - postfix
-  - celerity-utils
-  - ubicast-mediaserver
-
-server_default_email_sender: "noreply@{{ server_hostname }}"
-server_email_sender: "{{ envsetup_email_sender | default(server_default_email_sender, true) }}"
-
-server_id: "{{ envsetup_ms_id }}"
-server_instance_name: "{{ server_id.split('_')[-1] }}"
-server_hostname: "{{ envsetup_ms_server_name }}"
-server_campusmanager: "{{ envsetup_cm_server_name | d('mirismanager.' + server_hostname) }}"
-server_api_key: "{{ envsetup_ms_api_key }}"
-server_superuser_passwd: "{{ envsetup_ms_superuser_pwd }}"
-server_admin_passwd: "{{ envsetup_ms_admin_pwd }}"
-server_instances:
-  - name: "{{ server_instance_name }}"
-    ms_server_name: "{{ server_hostname }}"
-    ms_id: "{{ server_id }}"
-    ms_api_key: "{{ server_api_key }}"
-    cm_server_name: "{{ server_campusmanager }}"
-    ms_superuser_pwd: "{{ server_superuser_passwd }}"
-    ms_admin_pwd: "{{ server_admin_passwd }}"
-
-server_celerity_signing_key: "{{ envsetup_celerity_signing_key }}"
-
-server_live_host: "{{ envsetup_live_host | d('') }}"
-
-server_firewall_enabled: true
-server_ferm_rules_filename: server
-server_ferm_input_rules:
-  - proto:
-      - tcp
-    dport:
-      - 80
-      - 443
-server_ferm_output_rules: []
-server_ferm_global_settings:
-
-real_ip_from: ""  # default for OVH is 10.108.0.0/14
-
-...
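A sketch of the `envsetup_*` inputs these defaults expect; every value is illustrative. Note that the instance name is derived from the last `_`-separated part of `envsetup_ms_id`:

envsetup_ms_id: customer_msuser   # instance name becomes "msuser"
envsetup_ms_server_name: media.example.org
envsetup_cm_server_name: mirismanager.media.example.org   # optional, shown here as the derived default
envsetup_ms_api_key: "<api-key>"                 # placeholder
envsetup_ms_superuser_pwd: "<superuser-password>"
envsetup_ms_admin_pwd: "<admin-password>"
envsetup_celerity_signing_key: "<signing-key>"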
diff --git a/ansible/roles/mediaserver/handlers/main.yml b/ansible/roles/mediaserver/handlers/main.yml
deleted file mode 100644
index 8a9540afe0f79214c2124c72df626098da1fdc8e..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaserver/handlers/main.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-
-- name: mscontroller restart
-  command:
-    cmd: mscontroller.py restart
-
-- name: restart nginx
-  systemd:
-    name: nginx
-    state: restarted
-
-- name: restart mediaserver
-  systemd:
-    name: mediaserver
-    state: restarted
-
-- name: restart systemd-sysusers
-  systemd:
-    name: systemd-sysusers
-    state: restarted
-
-...
diff --git a/ansible/roles/mediaserver/meta/main.yml b/ansible/roles/mediaserver/meta/main.yml
deleted file mode 100644
index af619600602f0b632ce9c29b002468a34de9c01a..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaserver/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-
-dependencies:
-  - role: base
-  - role: nginx
-  - when: "'celerity' in group_names"
-    role: celerity
-  # - when: "'postgres' in group_names and groups['postgres'] | length > 1"
-  #   role: postgres-ha
-  # - when: "'postgres' in group_names and groups['postgres'] | length == 1"
-  #   role: postgres
-  - when: "groups['postgres'] | length > 1"
-    role: haproxy
-
-...
diff --git a/ansible/roles/mediaserver/tasks/main.yml b/ansible/roles/mediaserver/tasks/main.yml
deleted file mode 100644
index 72a5b2827c93e43f26e01627dffa2b243f30d02c..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaserver/tasks/main.yml
+++ /dev/null
@@ -1,212 +0,0 @@
----
-
-- name: mediaserver install
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ server_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: fetch ssh public key
-  register: root_ssh_pubkey
-  slurp:
-    path: /root/.ssh/id_ed25519.pub
-  tags: always
-
-- name: register ssh public key as an ansible fact
-  set_fact:
-    pubkey: "{{ root_ssh_pubkey['content'] | b64decode }}"
-  tags: always
-
-- name: share ssh public key between cluster members
-  loop: "{{ groups['mediaserver'] }}"
-  authorized_key:
-    user: root
-    key: "{{ hostvars[item]['pubkey'] }}"
-  tags: always
-
-- name: resolve domain name to localhost
-  when: not in_docker
-  notify: restart nginx
-  loop: "{{ server_instances }}"
-  lineinfile:
-    path: /etc/hosts
-    line: '127.0.1.1 {{ item.ms_server_name }}'
-    backup: true
-
-# - name: synchronize configuration
-#   when: groups['mediaserver'] | length > 1
-#   loop:
-#     - /etc/passwd
-#     - /etc/shadow
-#     - /etc/group
-#   synchronize:
-#     src: "{{ item }}"
-#     dest: "{{ item }}"
-#     mode: push
-#     copy_links: yes
-#     set_remote_user: no
-#   delegate_to: "{{ groups['mediaserver'][0] }}"
-#   tags: always
-
-- name: create celerity-config
-  notify: restart celerity-server
-  template:
-    src: celerity-config.py.j2
-    dest: /etc/celerity/config.py
-    mode: 0644
-    owner: root
-    group: root
-  when:
-    - inventory_hostname not in groups['celerity']
-  changed_when: "'molecule-idempotence-notest' not in ansible_skip_tags"
-
-- name: create instances
-  when: inventory_hostname == groups['mediaserver'][0]
-  loop: "{{ server_instances }}"
-  environment:
-    MS_ID: "{{ item.ms_id }}"
-    MS_SERVER_NAME: "{{ item.ms_server_name }}"
-    MS_API_KEY: "{{ item.ms_api_key }}"
-    CM_SERVER_NAME: "{{ item.cm_server_name }}"
-    MS_SUPERUSER_PWD: "{{ item.ms_superuser_pwd }}"
-    MS_ADMIN_PWD: "{{ item.ms_admin_pwd }}"
-  command:
-    cmd: msinstaller.py {{ item.name }} --no-input
-    creates: /etc/nginx/sites-available/mediaserver-{{ item.name }}.conf
-
-- name: create instances for secondary servers
-  when:
-    - groups['mediaserver'] | length > 1
-    - inventory_hostname != groups['mediaserver'][0]
-  loop: "{{ server_instances }}"
-  environment:
-    MS_ID: "{{ item.ms_id }}"
-    MS_SERVER_NAME: "{{ item.ms_server_name }}"
-    MS_API_KEY: "{{ item.ms_api_key }}"
-    CM_SERVER_NAME: "{{ item.cm_server_name }}"
-    MS_SUPERUSER_PWD: "{{ item.ms_superuser_pwd }}"
-    MS_ADMIN_PWD: "{{ item.ms_admin_pwd }}"
-  command:
-    cmd: msinstaller.py {{ item.name }} --no-input
-    creates: /etc/nginx/sites-available/mediaserver-{{ item.name }}.conf
-  throttle: 1
-
-- name: synchronize configuration between servers
-  ignore_errors: true
-  when:
-    - groups['mediaserver'] | length > 1
-    - inventory_hostname != groups['mediaserver'][0]
-  loop:
-    - /etc/mediaserver
-    - /etc/nginx
-    - /etc/celerity
-    - /etc/sysusers.d
-    - /var/www
-  synchronize:
-    src: "{{ item }}"
-    dest: "{{ item }}"
-    mode: push
-    copy_links: true
-    delete: true
-    recursive: true
-    set_remote_user: false
-    existing_only: true
-  notify:
-    - restart mediaserver
-    - restart nginx
-    - restart systemd-sysusers
-  delegate_to: "{{ groups['mediaserver'][0] }}"
-  tags: mediaserver-synchronize
-
-- name: synchronize letsencrypt configuration between servers
-  ignore_errors: true
-  when:
-    - groups['mediaserver'] | length > 1
-    - inventory_hostname != groups['mediaserver'][0]
-    - letsencrypt_enabled | d(false)
-  loop:
-    - /etc/letsencrypt
-  synchronize:
-    src: "{{ item }}"
-    dest: "{{ item }}"
-    mode: push
-    copy_links: true
-    delete: true
-    recursive: true
-    set_remote_user: false
-    existing_only: true
-  notify:
-    - restart nginx
-  delegate_to: "{{ groups['mediaserver'][0] }}"
-  tags: mediaserver-synchronize
-
-- name: configure email sender address
-  notify: mscontroller restart
-  lineinfile:
-    path: /etc/mediaserver/msconf.py
-    backup: true
-    create: true
-    regexp: '^#? ?DEFAULT_FROM_EMAIL.*'
-    line: "DEFAULT_FROM_EMAIL = '{{ server_email_sender }}'"
-    validate: python3 -m py_compile %s
-
-- name: configure domain name in nginx conf
-  notify: restart nginx
-  loop: "{{ server_instances }}"
-  replace:
-    path: /etc/nginx/sites-available/mediaserver-{{ item.name }}.conf
-    regexp: '^(\s*server_name).*;$'
-    replace: '\1 {{ item.ms_server_name }};'
-    backup: true
-
-- name: configure domain name in database
-  loop: "{{ server_instances }}"
-  shell:
-    cmd: |
-      python3 /usr/lib/python3/dist-packages/mediaserver/scripts/mssiteconfig.py {{ item.name }} site_url=https://{{ item.ms_server_name }} ;
-      mscontroller.py restart -u {{ item.name }} ;
-      touch /etc/mediaserver/.{{ item.ms_server_name }}.mssiteconfig.log ;
-    creates: /etc/mediaserver/.{{ item.ms_server_name }}.mssiteconfig.log
-
-- name: reset service resources
-  loop: "{{ server_instances }}"
-  shell:
-    cmd: |
-      python3 /usr/lib/python3/dist-packages/mediaserver/scripts/reset_service_resources.py {{ item.name }} local ;
-      mscontroller.py restart -u {{ item.name }} ;
-      touch /etc/mediaserver/.{{ item.ms_server_name }}.reset_service_resources.log ;
-    creates: /etc/mediaserver/.{{ item.ms_server_name }}.reset_service_resources.log
-
-- name: add realip configuration for LoadBalancer in HA configuration
-  notify: restart nginx
-  when:
-    - groups['mediaserver'] | length > 1
-    - real_ip_from | length > 0
-  template:
-    src: realip.conf.j2
-    dest: /etc/nginx/conf.d/realip.conf
-
-- name: ensure mediaserver is running
-  service:
-    name: mediaserver
-    enabled: true
-    state: started
-
-# FIREWALL
-
-- name: firewall
-  when: server_firewall_enabled
-  vars:
-    ferm_rules_filename: "{{ server_ferm_rules_filename }}"
-    ferm_input_rules: "{{ server_ferm_input_rules }}"
-    ferm_output_rules: "{{ server_ferm_output_rules }}"
-    ferm_global_settings: "{{ server_ferm_global_settings }}"
-  include_role:
-    name: ferm-configure
-
-- meta: flush_handlers
-
-...
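Note: the "configure domain name in database" and "reset service resources" tasks above make one-shot scripts idempotent by touching a marker file and pointing creates: at it, so they are skipped on every later run. A minimal sketch of the same pattern (script and marker paths are illustrative only):

    - name: run a one-shot setup script once (sketch)
      shell:
        cmd: |
          /usr/local/bin/one-shot-setup.sh ;
          touch /etc/mediaserver/.one-shot-setup.done ;
        creates: /etc/mediaserver/.one-shot-setup.done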
diff --git a/ansible/roles/mediaserver/templates/celerity-config.py.j2 b/ansible/roles/mediaserver/templates/celerity-config.py.j2
deleted file mode 100644
index dbc839c95719781a90c24733d000ef1cbbb8384e..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaserver/templates/celerity-config.py.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-SIGNING_KEY = '{{ server_celerity_signing_key }}'
-SERVER_URL = 'https://{{ hostvars[groups['celerity'][0]]['ansible_default_ipv4']['address'] }}:6200'
-
-# Queues count, default: min(round(thread_count/2)-1,2)
-#QUEUES_PER_WORKER = 2
-
-# MediaServer interactions
-MEDIASERVERS = {
-}
diff --git a/ansible/roles/mediaserver/templates/realip.conf.j2 b/ansible/roles/mediaserver/templates/realip.conf.j2
deleted file mode 100644
index c13da19f3cbd6d3af8a52a02887f8c8113c4629d..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaserver/templates/realip.conf.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-# {{ ansible_managed }}
-
-set_real_ip_from {{ real_ip_from }}; # IP/network of the reverse proxy
-real_ip_header X-Forwarded-For;
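For reference, with real_ip_from set to 10.108.0.0/14 (the OVH default mentioned in the role defaults), the template above should render roughly as:

    # Ansible managed
    set_real_ip_from 10.108.0.0/14; # IP/network of the reverse proxy
    real_ip_header X-Forwarded-For;

so nginx trusts X-Forwarded-For only for connections coming from the load balancer network.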
diff --git a/ansible/roles/mediavault/defaults/main.yml b/ansible/roles/mediavault/defaults/main.yml
deleted file mode 100644
index 3333bb11890723ef2facb82f26e3c2a657a3237d..0000000000000000000000000000000000000000
--- a/ansible/roles/mediavault/defaults/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-
-mvt_packages:
-  - ubicast-mediavault
-
-mvt_mailer_enabled: true
-mvt_mailer_script_path: /usr/local/sbin/systemd-mailer
-mvt_mailer_from: "{{ ansible_fqdn }} <backup@{{ ansible_fqdn }}>"
-mvt_mailer_to: sysadmin+backup@ubicast.eu
-mvt_mailer_service_name: status-email-admin
-mvt_mailer_service_path: /etc/systemd/system/{{ mvt_mailer_service_name }}@.service
-
-# firewall rules
-mvt_firewall_enabled: true
-mvt_ferm_rules_filename: vault
-mvt_ferm_input_rules: []
-mvt_ferm_output_rules:
-  - proto:
-      - tcp
-    dport:
-      - 22
-mvt_ferm_global_settings:
-
-...
diff --git a/ansible/roles/mediavault/meta/main.yml b/ansible/roles/mediavault/meta/main.yml
deleted file mode 100644
index e45d692ae3567f856967cd6f66c91d13e2e94e4e..0000000000000000000000000000000000000000
--- a/ansible/roles/mediavault/meta/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-dependencies:
-  - role: base
-
-...
diff --git a/ansible/roles/mediavault/tasks/mailer.yml b/ansible/roles/mediavault/tasks/mailer.yml
deleted file mode 100644
index 2cf37894176cc642d3dfd0a5d121badd66d37d15..0000000000000000000000000000000000000000
--- a/ansible/roles/mediavault/tasks/mailer.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-
-- name: create mailer script
-  when: mvt_mailer_enabled
-  template:
-    src: systemd-mailer-script.j2
-    dest: "{{ mvt_mailer_script_path }}"
-    mode: 0755
-
-- name: create mailer service
-  when: mvt_mailer_enabled
-  notify: systemd daemon reload
-  template:
-    src: systemd-mailer-service.j2
-    dest: "{{ mvt_mailer_service_path }}"
-
-...
diff --git a/ansible/roles/mediavault/tasks/main.yml b/ansible/roles/mediavault/tasks/main.yml
deleted file mode 100644
index cbeea8e8b2bbbb2b85b339ab76182320bf490f2a..0000000000000000000000000000000000000000
--- a/ansible/roles/mediavault/tasks/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-
-- name: install packages
-  package:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ mvt_packages }}"
-    state: present
-
-- name: generate ssh keys pair
-  user:
-    name: root
-    generate_ssh_key: true
-    ssh_key_type: ed25519
-    ssh_key_file: .ssh/id_ed25519
-
-# MAILER
-- include: mailer.yml
-
-# FIREWALL
-- name: firewall
-  when: mvt_firewall_enabled
-  vars:
-    ferm_rules_filename: "{{ mvt_ferm_rules_filename }}"
-    ferm_input_rules: "{{ mvt_ferm_input_rules }}"
-    ferm_output_rules: "{{ mvt_ferm_output_rules }}"
-    ferm_global_settings: "{{ mvt_ferm_global_settings }}"
-  include_role:
-    name: ferm-configure
-
-- meta: flush_handlers
-
-...
diff --git a/ansible/roles/mediavault/templates/systemd-mailer-script.j2 b/ansible/roles/mediavault/templates/systemd-mailer-script.j2
deleted file mode 100644
index 0067074b59587abca0c222c8b1e947ccf09127d1..0000000000000000000000000000000000000000
--- a/ansible/roles/mediavault/templates/systemd-mailer-script.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-sendmail -t <<ERRMAIL
-To: $1
-From: {{ mvt_mailer_from }}
-Subject: ({{ ansible_fqdn }}) $2 status
-Content-Transfer-Encoding: 8bit
-Content-Type: text/plain; charset=UTF-8
-
-$(systemctl status --full "$2")
-ERRMAIL
diff --git a/ansible/roles/mediavault/templates/systemd-mailer-service.j2 b/ansible/roles/mediavault/templates/systemd-mailer-service.j2
deleted file mode 100644
index 209b1fe5628efbec3addb4ac85b8f3f0b01753e3..0000000000000000000000000000000000000000
--- a/ansible/roles/mediavault/templates/systemd-mailer-service.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-[Unit]
-Description=status email for %i to {{ mvt_mailer_to }}
-
-[Service]
-Type=oneshot
-ExecStart={{ mvt_mailer_script_path }} {{ mvt_mailer_to }} %i
-User=nobody
-Group=systemd-journal
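This is the usual systemd "email on failure" pattern: the templated unit receives the failing unit's name as its instance (%i) and mails the output of systemctl status. The wiring is done from the monitored unit via OnFailure=; a hedged sketch as an Ansible task, assuming a backup unit named mediavault-backup.service:

    - name: wire the failure mailer to the backup unit (sketch; unit name assumed)
      copy:
        dest: /etc/systemd/system/mediavault-backup.service.d/mailer.conf
        content: |
          [Unit]
          OnFailure={{ mvt_mailer_service_name }}@%n.service
      notify: systemd daemon reload

%n expands to the monitored unit's full name, so the mail subject identifies the failing service.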
diff --git a/ansible/roles/mediaworker/defaults/main.yml b/ansible/roles/mediaworker/defaults/main.yml
deleted file mode 100644
index 7abfc42f49c4cf526a6313245453384bc935092f..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaworker/defaults/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-
-worker_celerity_signing_key: "{{ envsetup_celerity_signing_key }}"
-worker_celerity_server: "{{ envsetup_celerity_server | d(envsetup_ms_server_name, true) }}"
-
-worker_ms_id: "{{ envsetup_ms_id }}"
-worker_ms_api_key: "{{ envsetup_ms_api_key }}"
-worker_ms_hostname: "{{ envsetup_ms_server_name }}"
-worker_ms_instances:
-  - ms_id: "{{ worker_ms_id }}"
-    ms_api_key: "{{ worker_ms_api_key }}"
-    ms_server_name: "{{ worker_ms_hostname }}"
-
-worker_firewall_enabled: true
-worker_ferm_rules_filename: worker
-worker_ferm_input_rules: []
-worker_ferm_output_rules:
-  - proto:
-      - tcp
-    dport:
-      - 80
-      - 443
-  - proto:
-      - tcp
-    dport:
-      - 6200
-worker_ferm_global_settings:
-
-...
diff --git a/ansible/roles/mediaworker/handlers/main.yml b/ansible/roles/mediaworker/handlers/main.yml
deleted file mode 100644
index d06d284e8fea73f13623971095ed8b2c5d0aa07b..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaworker/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: restart celerity-workers
-  service:
-    name: celerity-workers
-    state: restarted
-
-...
diff --git a/ansible/roles/mediaworker/meta/main.yml b/ansible/roles/mediaworker/meta/main.yml
deleted file mode 100644
index e45d692ae3567f856967cd6f66c91d13e2e94e4e..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaworker/meta/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-dependencies:
-  - role: base
-
-...
diff --git a/ansible/roles/mediaworker/tasks/main.yml b/ansible/roles/mediaworker/tasks/main.yml
deleted file mode 100644
index b03a0b61338edc60fcda1f5fca632886f72f46d2..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaworker/tasks/main.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-
-- name: install celerity worker
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: celerity-workers
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: config celerity worker
-  notify: restart celerity-workers
-  template:
-    src: celerity-config.py.j2
-    dest: /etc/celerity/config.py
-    mode: 0644
-    owner: root
-    group: root
-
-- name: ensure celerity worker is running
-  service:
-    name: celerity-workers
-    enabled: true
-    state: started
-
-# FIREWALL
-
-- name: firewall
-  when: worker_firewall_enabled
-  vars:
-    ferm_rules_filename: "{{ worker_ferm_rules_filename }}"
-    ferm_input_rules: "{{ worker_ferm_input_rules }}"
-    ferm_output_rules: "{{ worker_ferm_output_rules }}"
-    ferm_global_settings: "{{ worker_ferm_global_settings }}"
-  include_role:
-    name: ferm-configure
-
-- meta: flush_handlers
-
-...
diff --git a/ansible/roles/mediaworker/templates/celerity-config.py.j2 b/ansible/roles/mediaworker/templates/celerity-config.py.j2
deleted file mode 100644
index cf17f88ab380fb66932437a97ee467edd3073b41..0000000000000000000000000000000000000000
--- a/ansible/roles/mediaworker/templates/celerity-config.py.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-SIGNING_KEY = '{{ worker_celerity_signing_key }}'
-SERVER_URL = 'https://{{ worker_celerity_server }}:6200'
-
-# Queues count, default: min(round(thread_count/2)-1,2)
-#QUEUES_PER_WORKER = 2
-
-# MediaServer interactions
-MEDIASERVERS = {
-{% for instance in worker_ms_instances %}
-    '{{ instance.ms_id }}': {'url': 'https://{{ instance.ms_server_name }}', 'api_key': '{{ instance.ms_api_key }}'},
-{% endfor %}
-}
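worker_ms_instances defaults to a single entry built from the envsetup_* variables, but the loop above renders any number of MediaServers for one worker. An inventory override sketch (IDs, keys and names are placeholders):

    worker_ms_instances:
      - ms_id: msuser1              # placeholder
        ms_api_key: XXXXXXXXXXXX    # placeholder
        ms_server_name: ms1.example.com
      - ms_id: msuser2
        ms_api_key: YYYYYYYYYYYY
        ms_server_name: ms2.example.com

Each entry becomes one line of the MEDIASERVERS dict, keyed by ms_id.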
diff --git a/ansible/roles/metricbeat/defaults/main.yml b/ansible/roles/metricbeat/defaults/main.yml
deleted file mode 100644
index f3a01583e783f8e6d5b8617f9646486e79fb3713..0000000000000000000000000000000000000000
--- a/ansible/roles/metricbeat/defaults/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-elastic_host: localhost
-elastic_port: 9200
-kibana_server_host: localhost
-
-...
diff --git a/ansible/roles/metricbeat/handlers/main.yml b/ansible/roles/metricbeat/handlers/main.yml
deleted file mode 100644
index 5d576b93bdc9bac8372926d851fff7227e5059b9..0000000000000000000000000000000000000000
--- a/ansible/roles/metricbeat/handlers/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: restart metricbeat
-  service:
-    name: metricbeat
-    state: restarted
-
-...
diff --git a/ansible/roles/metricbeat/tasks/main.yml b/ansible/roles/metricbeat/tasks/main.yml
deleted file mode 100644
index c90b6aa416b37191051547a9d3ee7e81af25f247..0000000000000000000000000000000000000000
--- a/ansible/roles/metricbeat/tasks/main.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-- name: install apt-transport-https
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: apt-transport-https
-    state: latest
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: install elastic GPG key
-  apt_key:
-    url: https://artifacts.elastic.co/GPG-KEY-elasticsearch
-    state: present
-
-- name: install elastic repository
-  apt_repository:
-    repo: deb https://artifacts.elastic.co/packages/7.x/apt stable main
-
-- name: install metricbeat
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: metricbeat
-    state: latest
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: install metricbeat configuration
-  template:
-    src: metricbeat.yml.j2
-    dest: /etc/metricbeat/metricbeat.yml
-  notify: restart metricbeat
-
-- name: enable metricbeat dashboard
-  command: metricbeat setup
-  when: inventory_hostname == groups['mediaserver'][0]
-
-- name: enable sql metricbeat configuration
-  template:
-    src: postgresql.yml.j2
-    dest: /etc/metricbeat/modules.d/postgresql.yml
-  when: "'postgres' in group_names"
-  notify: restart metricbeat
-
-- name: enable metricbeat client
-  systemd:
-    name: metricbeat
-    enabled: true
-    state: started
-
-...
diff --git a/ansible/roles/metricbeat/templates/metricbeat.yml.j2 b/ansible/roles/metricbeat/templates/metricbeat.yml.j2
deleted file mode 100644
index 26a6e18c6fd4971461c72864ac0805f8295d8bac..0000000000000000000000000000000000000000
--- a/ansible/roles/metricbeat/templates/metricbeat.yml.j2
+++ /dev/null
@@ -1,166 +0,0 @@
-###################### Metricbeat Configuration Example #######################
-
-# This file is an example configuration file highlighting only the most common
-# options. The metricbeat.reference.yml file from the same directory contains all the
-# supported options with more comments. You can use it as a reference.
-#
-# You can find the full configuration reference here:
-# https://www.elastic.co/guide/en/beats/metricbeat/index.html
-
-# =========================== Modules configuration ============================
-
-metricbeat.config.modules:
-  # Glob pattern for configuration loading
-  path: ${path.config}/modules.d/*.yml
-
-  # Set to true to enable config reloading
-  reload.enabled: false
-
-  # Period on which files under path should be checked for changes
-  #reload.period: 10s
-
-# ======================= Elasticsearch template setting =======================
-
-setup.template.settings:
-  index.number_of_shards: 1
-  index.codec: best_compression
-  #_source.enabled: false
-
-
-# ================================== General ===================================
-
-# The name of the shipper that publishes the network data. It can be used to group
-# all the transactions sent by a single shipper in the web interface.
-#name:
-
-# The tags of the shipper are included in their own field with each
-# transaction published.
-#tags: ["service-X", "web-tier"]
-
-# Optional fields that you can specify to add additional information to the
-# output.
-#fields:
-#  env: staging
-
-# ================================= Dashboards =================================
-# These settings control loading the sample dashboards to the Kibana index. Loading
-# the dashboards is disabled by default and can be enabled either by setting the
-# options here or by using the `setup` command.
-#setup.dashboards.enabled: false
-
-# The URL from where to download the dashboards archive. By default this URL
-# has a value which is computed based on the Beat name and version. For released
-# versions, this URL points to the dashboard archive on the artifacts.elastic.co
-# website.
-#setup.dashboards.url:
-
-# =================================== Kibana ===================================
-
-# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
-# This requires a Kibana endpoint configuration.
-setup.kibana:
-
-  # Kibana Host
-  # Scheme and port can be left out and will be set to the default (http and 5601)
-  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
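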
-  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
-  host: "{{ kibana_server_host }}:5601"
-
-  # Kibana Space ID
-  # ID of the Kibana Space into which the dashboards should be loaded. By default,
-  # the Default Space will be used.
-  #space.id:
-
-# =============================== Elastic Cloud ================================
-
-# These settings simplify using Metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
-
-# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
-# `setup.kibana.host` options.
-# You can find the `cloud.id` in the Elastic Cloud web UI.
-#cloud.id:
-
-# The cloud.auth setting overwrites the `output.elasticsearch.username` and
-# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
-#cloud.auth:
-
-# ================================== Outputs ===================================
-
-# Configure what output to use when sending the data collected by the beat.
-
-# ---------------------------- Elasticsearch Output ----------------------------
-output.elasticsearch:
-  # Array of hosts to connect to.
-  hosts: ["{{ elastic_host }}:{{ elastic_port }}"]
-
-  # Protocol - either `http` (default) or `https`.
-  #protocol: "https"
-
-  # Authentication credentials - either API key or username/password.
-  #api_key: "id:api_key"
-  #username: "elastic"
-  #password: "changeme"
-
-# ------------------------------ Logstash Output -------------------------------
-#output.logstash:
-  # The Logstash hosts
-  #hosts: ["localhost:5044"]
-
-  # Optional SSL. By default is off.
-  # List of root certificates for HTTPS server verifications
-  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-  # Certificate for SSL client authentication
-  #ssl.certificate: "/etc/pki/client/cert.pem"
-
-  # Client Certificate Key
-  #ssl.key: "/etc/pki/client/cert.key"
-
-# ================================= Processors =================================
-
-# Configure processors to enhance or manipulate events generated by the beat.
-
-processors:
-  - add_host_metadata: ~
-  - add_cloud_metadata: ~
-  - add_docker_metadata: ~
-  - add_kubernetes_metadata: ~
-
-
-# ================================== Logging ===================================
-
-# Sets log level. The default log level is info.
-# Available log levels are: error, warning, info, debug
-#logging.level: debug
-
-# At debug level, you can selectively enable logging only for some components.
-# To enable all selectors use ["*"]. Examples of other selectors are "beat",
-# "publish", "service".
-#logging.selectors: ["*"]
-
-# ============================= X-Pack Monitoring ==============================
-# Metricbeat can export internal metrics to a central Elasticsearch monitoring
-# cluster.  This requires xpack monitoring to be enabled in Elasticsearch.  The
-# reporting is disabled by default.
-
-# Set to true to enable the monitoring reporter.
-#monitoring.enabled: false
-
-# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
-# Metricbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
-# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
-#monitoring.cluster_uuid:
-
-# Uncomment to send the metrics to Elasticsearch. Most settings from the
-# Elasticsearch output are accepted here as well.
-# Note that the settings should point to your Elasticsearch *monitoring* cluster.
-# Any setting that is not set is automatically inherited from the Elasticsearch
-# output configuration, so if you have the Elasticsearch output configured such
-# that it is pointing to your Elasticsearch monitoring cluster, you can simply
-# uncomment the following line.
-#monitoring.elasticsearch:
-
-# ================================= Migration ==================================
-
-# This allows enabling 6.7 migration aliases
-#migration.6_to_7.enabled: true
\ No newline at end of file
diff --git a/ansible/roles/metricbeat/templates/postgresql.yml.j2 b/ansible/roles/metricbeat/templates/postgresql.yml.j2
deleted file mode 100644
index ee3a33a53b20947f43ecfea5ea1c8007b3883361..0000000000000000000000000000000000000000
--- a/ansible/roles/metricbeat/templates/postgresql.yml.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-# Module: postgresql
-# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.8/metricbeat-module-postgresql.html
-
-- module: postgresql
-  #metricsets:
-  #  - database
-  #  - bgwriter
-  #  - activity
-  period: 10s
-  hosts: ["postgres://localhost:5432"]
-  username: {{ metricbeat_pgsql_username }}
-  password: {{ metricbeat_pgsql_pass }}
diff --git a/ansible/roles/mirismanager/defaults/main.yml b/ansible/roles/mirismanager/defaults/main.yml
deleted file mode 100644
index 8bb82b0ab66a34f5413ba7e045bccd1fe02ca01f..0000000000000000000000000000000000000000
--- a/ansible/roles/mirismanager/defaults/main.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-
-# ubicast-skyreach-runtime packages. TODO: manage the database in the inventory (/!\ it can be independent of the MS database)
-dependencies_packages:
-  - apt-cacher-ng
-  - cron
-  - nginx
-  - postfix
-  - postgresql
-
-manager_packages:
-  - ubicast-skyreach
-
-manager_testing: false
-manager_hostname: "{{ envsetup_cm_server_name }}"
-manager_default_email_sender: "noreply@{{ manager_hostname }}"
-manager_email_sender: "{{ envsetup_email_sender | default(manager_default_email_sender, true) }}"
-manager_proxy_http: "{{ envsetup_proxy_http }}"
-
-manager_firewall_enabled: true
-manager_ferm_rules_filename: manager
-manager_ferm_input_rules:
-  - proto:
-      - tcp
-    dport:
-      - 80
-      - 443
-  - proto:
-      - tcp
-    dport:
-      - 3142
-manager_ferm_output_rules: []
-manager_ferm_global_settings:
-
-...
diff --git a/ansible/roles/mirismanager/handlers/main.yml b/ansible/roles/mirismanager/handlers/main.yml
deleted file mode 100644
index 9c36ad008fd812203199bbbc2446d02225dc6410..0000000000000000000000000000000000000000
--- a/ansible/roles/mirismanager/handlers/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-- name: restart nginx
-  service:
-    name: nginx
-    state: restarted
-
-- name: restart skyreach
-  service:
-    name: skyreach
-    state: restarted
-
-- name: restart apt-cacher-ng
-  service:
-    name: apt-cacher-ng
-    state: restarted
-
-...
diff --git a/ansible/roles/mirismanager/meta/main.yml b/ansible/roles/mirismanager/meta/main.yml
deleted file mode 100644
index c4cc4780bdd8beed15375c59565a39585cff6b22..0000000000000000000000000000000000000000
--- a/ansible/roles/mirismanager/meta/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-
-dependencies:
-  - role: base
-  - role: nginx
-  - when: "'postgres' in group_names"
-    role: postgres
-
-...
diff --git a/ansible/roles/mirismanager/tasks/main.yml b/ansible/roles/mirismanager/tasks/main.yml
deleted file mode 100644
index 2b26a79f75a436c5ec03eeedb6ddba4dbba5f2e3..0000000000000000000000000000000000000000
--- a/ansible/roles/mirismanager/tasks/main.yml
+++ /dev/null
@@ -1,115 +0,0 @@
----
-
-- name: mirismanager dependencies install
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ dependencies_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: start postgresql
-  systemd:
-    name: postgresql
-    state: started
-
-- name: mirismanager install
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ manager_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: configure domain name in nginx conf
-  notify: restart nginx
-  replace:
-    path: /etc/nginx/sites-available/skyreach.conf
-    regexp: '^(\s*server_name).*;$'
-    replace: '\1 {{ manager_hostname }};'
-    backup: true
-
-- name: configure domain name in settings
-  notify: restart skyreach
-  lineinfile:
-    path: /home/skyreach/skyreach_data/private/settings_override.py
-    regexp: '^#? ?SITE_URL.*'
-    line: "SITE_URL = 'https://{{ manager_hostname }}'"
-    backup: true
-
-- name: configure site title in settings
-  notify: restart skyreach
-  lineinfile:
-    path: /home/skyreach/skyreach_data/private/settings_override.py
-    regexp: '^#? ?SITE_TITLE.*'
-    line: "SITE_TITLE = '{{ manager_hostname }}'"
-    backup: true
-
-- name: configure site name in settings
-  notify: restart skyreach
-  lineinfile:
-    path: /home/skyreach/skyreach_data/private/settings_override.py
-    regexp: '^#? ?SITE_NAME.*'
-    line: "SITE_NAME = '{{ manager_hostname }}'"
-    backup: true
-
-- name: configure email sender address in settings
-  notify: restart skyreach
-  lineinfile:
-    path: /home/skyreach/skyreach_data/private/settings_override.py
-    regexp: '^#? ?DEFAULT_FROM_EMAIL.*'
-    line: "DEFAULT_FROM_EMAIL = '{{ manager_email_sender }}'"
-    backup: true
-
-- name: resolve domain name to localhost ipv4
-  when: not in_docker
-  notify: restart nginx
-  lineinfile:
-    path: /etc/hosts
-    line: '127.0.0.1 {{ manager_hostname }}'
-    backup: true
-
-- name: ensure skyreach is running
-  service:
-    name: skyreach
-    enabled: true
-    state: started
-
-- name: check apt cacher ng config exists
-  register: manager_apt_cacher_conf
-  stat:
-    path: /etc/apt-cacher-ng/acng.conf
-
-- name: configure apt-cacher-ng
-  when:
-    - manager_apt_cacher_conf.stat.exists
-    - manager_proxy_http | d(false)
-  notify: restart apt-cacher-ng
-  lineinfile:
-    path: /etc/apt-cacher-ng/acng.conf
-    regexp: '^Proxy: .*'
-    line: 'Proxy: {{ manager_proxy_http }}'
-
-- name: ensure apt-cacher-ng is running
-  service:
-    name: apt-cacher-ng
-    enabled: true
-    state: started
-
-# FIREWALL
-
-- name: firewall
-  when: manager_firewall_enabled
-  vars:
-    ferm_rules_filename: "{{ manager_ferm_rules_filename }}"
-    ferm_input_rules: "{{ manager_ferm_input_rules }}"
-    ferm_output_rules: "{{ manager_ferm_output_rules }}"
-    ferm_global_settings: "{{ manager_ferm_global_settings }}"
-  include_role:
-    name: ferm-configure
-
-- meta: flush_handlers
-
-...
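The regexp '^#? ?SITE_URL.*' idiom used by the settings tasks above matches both the commented default and any previously written override, so lineinfile uncomments or updates the setting in place and converges to a single active line. The same shape works for any settings_override.py key (FOO_SETTING is illustrative):

    - name: set an override (sketch)
      lineinfile:
        path: /home/skyreach/skyreach_data/private/settings_override.py
        regexp: '^#? ?FOO_SETTING.*'
        line: "FOO_SETTING = 'value'"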
diff --git a/ansible/roles/munin/msmonitor/defaults/main.yml b/ansible/roles/munin/msmonitor/defaults/main.yml
deleted file mode 100644
index cd549e13d1d5baad11d1273db1e801ef6c257f73..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/msmonitor/defaults/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-
-monitor_shell_pwd: "{{ envsetup_monitor_shell_pwd }}"
-monitor_hostname: "{{ envsetup_monitor_server_name }}"
-
-monitor_firewall_enabled: true
-monitor_ferm_rules_filename: monitor
-monitor_ferm_input_rules:
-  - proto:
-      - tcp
-    dport:
-      - 80
-      - 443
-monitor_ferm_output_rules: []
-monitor_ferm_global_settings:
-
-...
diff --git a/ansible/roles/munin/msmonitor/handlers/main.yml b/ansible/roles/munin/msmonitor/handlers/main.yml
deleted file mode 100644
index b30d218a40fdc2985b9b3259731e0c7b0838c727..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/msmonitor/handlers/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: restart nginx
-  service:
-    name: nginx
-    state: restarted
-...
diff --git a/ansible/roles/munin/msmonitor/meta/main.yml b/ansible/roles/munin/msmonitor/meta/main.yml
deleted file mode 100644
index 531dba176fd1cedcdbca88a1fd3d4495af8c8412..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/msmonitor/meta/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-
-dependencies:
-  - role: conf           # get conf.sh
-  - role: init           # setup keys
-  - role: sysconfig      # setup repos
-  - role: ferm-install
-  - role: ferm-configure
-  - role: nginx
-
-...
diff --git a/ansible/roles/munin/msmonitor/tasks/main.yml b/ansible/roles/munin/msmonitor/tasks/main.yml
deleted file mode 100644
index a736753d99fb1677e104e2c4435abcf4bf82e8af..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/msmonitor/tasks/main.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-
-- name: install ubicast msmonitor
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    state: latest
-    name:
-      - ubicast-monitor
-      - ubicast-monitor-runtime
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: set msmonitor account password
-  user:
-    name: msmonitor
-    password: "{{ monitor_shell_pwd | password_hash('sha512', 'monitor') }}"
-
-- name: configure domain name in nginx
-  notify: restart nginx
-  replace:
-    path: /etc/nginx/sites-available/msmonitor.conf
-    regexp: '^(\s*server_name).*;$'
-    replace: '\1 {{ monitor_hostname }};'
-    backup: true
-
-- name: resolve domain name to localhost ipv4
-  when: in_docker is not defined or not in_docker
-  notify: restart nginx
-  lineinfile:
-    path: /etc/hosts
-    line: '127.0.1.1 {{ monitor_hostname }}'
-    backup: true
-
-- name: ensure msmonitor is running
-  service:
-    name: msmonitor
-    enabled: true
-    state: started
-
-- name: set directory permissions
-  file:
-    path: /home/msmonitor/msmonitor
-    mode: 0755
-    state: directory
-
-# FIREWALL
-
-- name: firewall
-  when: monitor_firewall_enabled
-  vars:
-    ferm_rules_filename: "{{ monitor_ferm_rules_filename }}"
-    ferm_input_rules: "{{ monitor_ferm_input_rules }}"
-    ferm_output_rules: "{{ monitor_ferm_output_rules }}"
-    ferm_global_settings: "{{ monitor_ferm_global_settings }}"
-  include_role:
-    name: ferm-configure
-
-...
diff --git a/ansible/roles/munin/munin-node/defaults/main.yml b/ansible/roles/munin/munin-node/defaults/main.yml
deleted file mode 100644
index 57776454f2c2b02ff3248390d30252e1c0d2d2be..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/munin-node/defaults/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-munin_node_logfile: /var/log/munin/munin-node.log
-munin_node_pidfile: /var/run/munin/munin-node.pid
-
-...
diff --git a/ansible/roles/munin/munin-node/handlers/main.yml b/ansible/roles/munin/munin-node/handlers/main.yml
deleted file mode 100644
index e68afb7a82d5006c410efabf2da3d6b7f6735f57..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/munin-node/handlers/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: restart munin-node
-  service:
-    name: munin-node
-    state: restarted
-...
diff --git a/ansible/roles/munin/munin-node/meta/main.yml b/ansible/roles/munin/munin-node/meta/main.yml
deleted file mode 100644
index f58686c2c01077136bd9a0cc83e0d5216a743efb..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/munin-node/meta/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-
-dependencies:
-  - role: conf           # get conf.sh
-  - role: init           # setup keys & pkgs
-  - role: sysconfig      # setup repos
-
-# TODO:
-# - remove all unneeded dependencies
-# - only need:
-#     * ubicast repo
-#     * firewall input 4949
-
-...
diff --git a/ansible/roles/munin/munin-node/tasks/main.yml b/ansible/roles/munin/munin-node/tasks/main.yml
deleted file mode 100644
index 163d158c494b319bb1185cef00361f113634eb79..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/munin-node/tasks/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-
-- name: install required packages for munin-node
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    state: latest
-    name:
-      - munin-node
-      - ubicast-munin
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: copy munin-node configuration
-  notify: restart munin-node
-  template:
-    src: munin-node.conf.j2
-    dest: /etc/munin/munin-node.conf
-
-- name: setup munin-node plugins link
-  notify: restart munin-node
-  shell:
-    cmd: munin-node-configure --shell --remove-also 2>&1 | sh -x
-  # sh -x prints executed commands to stderr
-  register: munin_plugin_linked
-  changed_when: munin_plugin_linked.stderr | length > 0
-
-...
diff --git a/ansible/roles/munin/munin-node/templates/munin-node.conf.j2 b/ansible/roles/munin/munin-node/templates/munin-node.conf.j2
deleted file mode 100644
index c58f4edf08544b8683d0f68b7bfa9c52324b4d4a..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/munin-node/templates/munin-node.conf.j2
+++ /dev/null
@@ -1,32 +0,0 @@
-# Munin node configuration 
-# Deployed with ansible
-
-log_level 4
-log_file {{ munin_node_logfile }}
-pid_file {{ munin_node_pidfile }}
-
-background 1
-setsid 1
-
-user root
-group root
-
-ignore_file [\#~]$
-ignore_file DEADJOE$
-ignore_file \.bak$
-ignore_file %$
-ignore_file \.dpkg-(tmp|new|old|dist)$
-ignore_file \.rpm(save|new)$
-ignore_file \.pod$
-
-host_name {{ inventory_hostname }}.{{ customer_short_name }}
-
-{% for host in groups['munin_server'] %}
-allow ^{{ hostvars[host]['ansible_facts']['default_ipv4']['address'] | replace('.','\.') }}$
-{% endfor %}
-allow ^127\.0\.0\.1$
-allow ^::1$
-
-host 0.0.0.0
-port 4949
-
diff --git a/ansible/roles/munin/munin-server/handlers/main.yml b/ansible/roles/munin/munin-server/handlers/main.yml
deleted file mode 100644
index f0bac579b41a63a6f72c0b9392d34ea5352f78b5..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/munin-server/handlers/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: restart munin-server
-  service:
-    name: munin
-    state: restarted
-...
diff --git a/ansible/roles/munin/munin-server/tasks/main.yml b/ansible/roles/munin/munin-server/tasks/main.yml
deleted file mode 100644
index 919a6599d763a410667bb60ae8c390325724b690..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/munin-server/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-
-- name: "install required packages for munin-server"
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    state: latest
-    name:
-      - munin
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: "copy munin-server configuration"
-  notify: restart munin-server
-  template:
-    src: munin.conf.j2
-    dest: /etc/munin/munin.conf
-
-- name: "remove default localdomain files"
-  file:
-    path: /var/cache/munin/www/localdomain
-    state: absent
-
-...
diff --git a/ansible/roles/munin/munin-server/templates/munin.conf.j2 b/ansible/roles/munin/munin-server/templates/munin.conf.j2
deleted file mode 100644
index 00d59b69e8857942a8302218555bea7ef29aeed9..0000000000000000000000000000000000000000
--- a/ansible/roles/munin/munin-server/templates/munin.conf.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-# Munin server configuration 
-# Deployed with ansible
-
-# (Exactly one) directory to include all files from.
-includedir /etc/munin/munin-conf.d
-
-{% for host in groups['munin_node'] %}
-[{{ host }}.{{ customer_short_name }}]
-    address {{ hostvars[host]['ansible_facts']['default_ipv4']['address'] }}
-    use_node_name yes
-
-{% endfor %}
-
diff --git a/ansible/roles/netcapture/defaults/main.yml b/ansible/roles/netcapture/defaults/main.yml
deleted file mode 100644
index 7a81c73533743bf82449f6a2fc576045a5848732..0000000000000000000000000000000000000000
--- a/ansible/roles/netcapture/defaults/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-
-netcapture_registry_host: registry.ubicast.eu
-netcapture_registry_login: "{{ envsetup_netcapture_docker_login }}"
-netcapture_registry_password: "{{ envsetup_netcapture_docker_pwd }}"
-netcapture_cm_url: "https://{{ envsetup_cm_server_name | default('mirismanager.ubicast.eu', true) }}"
-netcapture_check_ssl: true
-netcapture_conf_folder: /etc/miris/conf
-netcapture_media_folder: /data/netcapture/media
-netcapture_hw_acceleration: false
-netcapture_miris_user_pwd: "{{ lookup('password', '/tmp/passwordfile length=12 chars=ascii_letters,digits') }}"
-netcapture_miris_auth: true
-
-netcapture_firewall_enabled: true
-netcapture_ferm_rules_filename: netcapture
-netcapture_ferm_input_rules: []
-netcapture_ferm_output_rules: []
-netcapture_ferm_global_settings:
-
-...
diff --git a/ansible/roles/netcapture/meta/main.yml b/ansible/roles/netcapture/meta/main.yml
deleted file mode 100644
index d2ba8a17b5db5d62df1d4702b7ad70d468d01054..0000000000000000000000000000000000000000
--- a/ansible/roles/netcapture/meta/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-dependencies:
-  - role: base
-  - role: docker
-
-...
diff --git a/ansible/roles/netcapture/tasks/main.yml b/ansible/roles/netcapture/tasks/main.yml
deleted file mode 100644
index 97353d79289aa032445072bac6bb4bc32a6d4351..0000000000000000000000000000000000000000
--- a/ansible/roles/netcapture/tasks/main.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-
-- name: netcapture install
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: python3-miris-netcapture
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: netcapture config
-  template:
-    src: netcapture.json.j2
-    dest: /etc/miris/netcapture.json
-
-- name: netcapture miris
-  template:
-    src: miris-api.json.j2
-    dest: /etc/miris/conf/api.json
-
-- name: netcapture config dir
-  file:
-    path: "{{ netcapture_conf_folder }}"
-    group: video
-    mode: u=rwX,g=rwX,o=r
-    recurse: true
-    state: directory
-
-- name: netcapture media dir
-  file:
-    path: "{{ netcapture_media_folder }}"
-    group: video
-    mode: u=rwX,g=rwX,o=r
-    recurse: true
-    state: directory
-
-# FIREWALL
-
-- name: firewall
-  when: netcapture_firewall_enabled
-  vars:
-    ferm_rules_filename: "{{ netcapture_ferm_rules_filename }}"
-    ferm_input_rules: "{{ netcapture_ferm_input_rules }}"
-    ferm_output_rules: "{{ netcapture_ferm_output_rules }}"
-    ferm_global_settings: "{{ netcapture_ferm_global_settings }}"
-  include_role:
-    name: ferm-configure
-
-- meta: flush_handlers
-
-...
diff --git a/ansible/roles/netcapture/templates/miris-api.json.j2 b/ansible/roles/netcapture/templates/miris-api.json.j2
deleted file mode 100644
index 0f00edfaeb72a80887738f4a8f29cfadc9047c0f..0000000000000000000000000000000000000000
--- a/ansible/roles/netcapture/templates/miris-api.json.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "auth_user_password": "{{ netcapture_miris_user_pwd }}",
-  "auth_enable": {% if netcapture_miris_auth %}true{% else %}false{% endif %}
-}
diff --git a/ansible/roles/netcapture/templates/netcapture.json.j2 b/ansible/roles/netcapture/templates/netcapture.json.j2
deleted file mode 100644
index 4db5c27677955ed5192f98ef8e97656ca2eef971..0000000000000000000000000000000000000000
--- a/ansible/roles/netcapture/templates/netcapture.json.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "docker_registry_host": "{{ netcapture_registry_host }}",
-  "docker_registry_login": "{{ netcapture_registry_login }}",
-  "docker_registry_password": "{{ netcapture_registry_password }}",
-  "mirismanager_url": "{{ netcapture_cm_url }}",
-  "mirismanager_check_ssl": {% if netcapture_check_ssl %}true{% else %}false{% endif %},
-  "netcapture_conf_folder": "{{ netcapture_conf_folder }}",
-  "netcapture_media_folder": "{{ netcapture_media_folder }}",
-  "enable_hw_acceleration": {% if netcapture_hw_acceleration %}true{% else %}false{% endif %}
-}
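The {% if %}true{% else %}false{% endif %} blocks render Ansible booleans as valid JSON literals; the to_json filter is an equivalent, shorter spelling should these templates ever be reworked, e.g.:

    "mirismanager_check_ssl": {{ netcapture_check_ssl | to_json }},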
diff --git a/ansible/roles/network/defaults/main.yml b/ansible/roles/network/defaults/main.yml
deleted file mode 100644
index 8bb78b758d30ac8515ad091d073567ed7071bf98..0000000000000000000000000000000000000000
--- a/ansible/roles/network/defaults/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-
-network_apply: false
-
-network_packages:
-  - cockpit
-  - libnm-dev
-  - network-manager
-  - python3-dbus
-  - python3-gi
-
-network_ip: "{{ envsetup_network_ip | d() }}"
-network_mask: "{{ envsetup_network_mask | d() }}"
-network_ip_mask: "{{ network_ip }}/{{ network_mask }}"
-network_ip_mask_cidr: "{{ network_ip_mask | ipaddr }}"
-network_gateway: "{{ envsetup_network_gateway | d() }}"
-network_dns: "{{ envsetup_network_dns.split(',') | d() }}"
-
-...
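The ipaddr filter (backed by python3-netaddr) normalizes the "address/netmask" string into the CIDR form that the nmcli module expects. For example:

    # assuming envsetup_network_ip=192.168.1.10 and envsetup_network_mask=255.255.255.0
    network_ip_mask: "192.168.1.10/255.255.255.0"
    network_ip_mask_cidr: "{{ network_ip_mask | ipaddr }}"   # -> "192.168.1.10/24"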
diff --git a/ansible/roles/network/tasks/main.yml b/ansible/roles/network/tasks/main.yml
deleted file mode 100644
index 0b28420924a0f3f8945633e2fdb2801590e75ba1..0000000000000000000000000000000000000000
--- a/ansible/roles/network/tasks/main.yml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-
-- name: if network settings are set
-  when:
-    - network_apply | bool
-    - network_ip | d(false)
-    - network_mask | d(false)
-    - network_gateway | d(false)
-    - network_dns | d(false)
-  block:
-
-    - name: packages
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        name: "{{ network_packages }}"
-        state: present
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: cleanup
-      register: network_cleanup_interfaces
-      copy:
-        dest: /etc/network/interfaces
-        backup: true
-        content: |
-          # This file describes the network interfaces available on your system
-          # and how to activate them. For more information, see interfaces(5).
-
-          source /etc/network/interfaces.d/*
-
-          # The loopback network interface
-          auto lo
-          iface lo inet loopback
-
-    - name: service
-      when: network_cleanup_interfaces is changed
-      systemd:
-        name: network-manager
-        enabled: true
-        state: restarted
-
-    - name: interface
-      nmcli:
-        conn_name: "envsetup-{{ ansible_default_ipv4.interface }}"
-        type: ethernet
-        ifname: "{{ ansible_default_ipv4.interface }}"
-        ip4: "{{ network_ip_mask_cidr | ipv4 }}"
-        gw4: "{{ network_gateway | ipv4 }}"
-        dns4: "{{ network_dns | ipv4 }}"
-        autoconnect: true
-        activate: false
-        state: present
-
-...
diff --git a/ansible/roles/nginx/defaults/main.yml b/ansible/roles/nginx/defaults/main.yml
deleted file mode 100644
index ce0f4ececd1b81ff4d24311549a0de482a898c38..0000000000000000000000000000000000000000
--- a/ansible/roles/nginx/defaults/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-
-nginx_packages:
-  - nginx
-  - uwsgi
-  - uwsgi-plugin-python3
-
-nginx_ssl_certificate: /etc/ssl/certs/ssl-cert-snakeoil.pem
-nginx_ssl_certificate_key: /etc/ssl/private/ssl-cert-snakeoil.key
-
-...
diff --git a/ansible/roles/nginx/handlers/main.yml b/ansible/roles/nginx/handlers/main.yml
deleted file mode 100644
index b7774856aa335af9eb5885e0efcd4e2093c9e167..0000000000000000000000000000000000000000
--- a/ansible/roles/nginx/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: restart nginx
-  systemd:
-    name: nginx
-    state: restarted
-
-...
diff --git a/ansible/roles/nginx/tasks/main.yml b/ansible/roles/nginx/tasks/main.yml
deleted file mode 100644
index 117f3aba56c660cd250a9e2cd32bd98410b70525..0000000000000000000000000000000000000000
--- a/ansible/roles/nginx/tasks/main.yml
+++ /dev/null
@@ -1,61 +0,0 @@
----
-
-- name: nginx install
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ nginx_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: nginx remove default vhost
-  notify: restart nginx
-  loop:
-    - /etc/nginx/sites-enabled/default
-    - /etc/nginx/sites-enabled/default.conf
-  file:
-    path: "{{ item }}"
-    state: absent
-
-- name: nginx check old ssl conf exists
-  register: nginx_old_ssl_conf
-  stat:
-    path: /etc/nginx/conf.d/ssl.conf
-
-- name: nginx migrate old ssl certificate conf
-  when: nginx_old_ssl_conf.stat.exists
-  notify: restart nginx
-  loop:
-    - grep ssl_certificate /etc/nginx/conf.d/ssl.conf > /etc/nginx/conf.d/ssl_certificate.conf
-    - mv /etc/nginx/conf.d/ssl.conf /etc/nginx/conf.d/ssl.conf.old
-  command:
-    cmd: "{{ item }}"
-
-- name: nginx check ssl cert conf exists
-  register: nginx_ssl_cert_conf
-  stat:
-    path: /etc/nginx/conf.d/ssl_certificate.conf
-
-- name: nginx update ssl certificate conf
-  when:
-    - nginx_ssl_cert_conf.stat.exists
-    - nginx_ssl_certificate != "/etc/ssl/certs/ssl-cert-snakeoil.pem"
-  notify: restart nginx
-  lineinfile:
-    path: /etc/nginx/conf.d/ssl_certificate.conf
-    regexp: 'ssl_certificate\s+([\w/\-\_\.]+);'
-    line: 'ssl_certificate {{ nginx_ssl_certificate }};'
-
-- name: nginx update ssl certificate key conf
-  when:
-    - nginx_ssl_cert_conf.stat.exists
-    - nginx_ssl_certificate_key != "/etc/ssl/private/ssl-cert-snakeoil.key"
-  notify: restart nginx
-  lineinfile:
-    path: /etc/nginx/conf.d/ssl_certificate.conf
-    regexp: 'ssl_certificate_key\s+([\w/\-\_\.]+);'
-    line: 'ssl_certificate_key {{ nginx_ssl_certificate_key }};'
-
-...
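The two lineinfile tasks above only rewrite ssl_certificate.conf when the variables differ from the snakeoil defaults, so switching to a real certificate is a pure inventory override (paths below are assumptions):

    nginx_ssl_certificate: /etc/ssl/certs/example.com.pem
    nginx_ssl_certificate_key: /etc/ssl/private/example.com.key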
diff --git a/ansible/roles/postfix/defaults/main.yml b/ansible/roles/postfix/defaults/main.yml
deleted file mode 100644
index 8ca16d7092b77d61b96c1579d2b2a88b1b755b30..0000000000000000000000000000000000000000
--- a/ansible/roles/postfix/defaults/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-
-postfix_packages:
-  - postfix
-  - bsd-mailx
-
-postfix_mailname: "{{ envsetup_ms_server_name }}"
-postfix_default_email_sender: noreply@{{ postfix_mailname }}
-postfix_email_sender: "{{ envsetup_email_sender | default(postfix_default_email_sender, true) }}"
-postfix_relay_host: "{{ envsetup_email_smtp_server }}"
-postfix_relay_user: "{{ envsetup_email_smtp_user }}"
-postfix_relay_pass: "{{ envsetup_email_smtp_pwd }}"
-postfix_admin: sysadmin@ubicast.eu
-
-...
diff --git a/ansible/roles/postfix/handlers/main.yml b/ansible/roles/postfix/handlers/main.yml
deleted file mode 100644
index f55195130a602f6fc484528c5ed43295313c2e4b..0000000000000000000000000000000000000000
--- a/ansible/roles/postfix/handlers/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-
-- name: postmap sasl
-  command: postmap hash:/etc/postfix/sasl-passwords
-
-- name: postmap generic
-  command: postmap hash:/etc/postfix/generic
-
-- name: postmap virtual
-  command: postmap hash:/etc/postfix/virtual
-
-- name: newaliases
-  command: newaliases
-
-- name: restart postfix
-  service:
-    name: postfix
-    state: restarted
-
-...
diff --git a/ansible/roles/postfix/meta/main.yml b/ansible/roles/postfix/meta/main.yml
deleted file mode 100644
index e8c55ae416ea2a478accac6868dbe270825cf1b2..0000000000000000000000000000000000000000
--- a/ansible/roles/postfix/meta/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-dependencies:
-  - role: conf
-
-...
diff --git a/ansible/roles/postfix/tasks/main.yml b/ansible/roles/postfix/tasks/main.yml
deleted file mode 100644
index beada33716630245ebc0eadc91fa3c3a60f9eb48..0000000000000000000000000000000000000000
--- a/ansible/roles/postfix/tasks/main.yml
+++ /dev/null
@@ -1,89 +0,0 @@
----
-
-- name: create postfix dir
-  file:
-    path: /etc/postfix
-    state: directory
-
-- name: postfix main config
-  notify: restart postfix
-  template:
-    backup: true
-    src: main.cf.j2
-    dest: /etc/postfix/main.cf
-
-- name: postfix mailname
-  notify: restart postfix
-  copy:
-    backup: true
-    dest: /etc/mailname
-    content: "{{ postfix_mailname }}"
-
-- name: postfix local aliases
-  notify:
-    - newaliases
-    - restart postfix
-  copy:
-    backup: true
-    dest: /etc/aliases
-    content: |
-      devnull: /dev/null
-      clamav: root
-      root: {{ postfix_admin }}
-
-- name: postfix virtual aliases
-  notify:
-    - postmap virtual
-    - restart postfix
-  copy:
-    backup: true
-    dest: /etc/postfix/virtual
-    content: |
-      postmaster@{{ postfix_mailname }} root
-      bounces@{{ postfix_mailname }} root
-      noreply@{{ postfix_mailname }} devnull
-
-- name: postfix generic aliases, sender rewriting
-  notify:
-    - postmap generic
-    - restart postfix
-  copy:
-    backup: true
-    dest: /etc/postfix/generic
-    content: |
-      root@localhost {{ postfix_email_sender }}
-      root@{{ postfix_mailname }} {{ postfix_email_sender }}
-      root@{{ ansible_hostname }} {{ postfix_email_sender }}
-      @{{ postfix_mailname }} {{ postfix_email_sender }}
-      @{{ ansible_hostname }} {{ postfix_email_sender }}
-
-- name: postfix authentication
-  when:
-    - postfix_relay_host | d(false)
-    - postfix_relay_user | d(false)
-    - postfix_relay_pass | d(false)
-  notify:
-    - postmap sasl
-    - restart postfix
-  copy:
-    backup: true
-    dest: /etc/postfix/sasl-passwords
-    content: "{{ postfix_relay_host }} {{ postfix_relay_user }}:{{ postfix_relay_pass }}"
-
-- name: install postfix
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ postfix_packages }}"
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: ensure postfix is running
-  service:
-    name: postfix
-    enabled: true
-    state: started
-
-...
diff --git a/ansible/roles/postfix/templates/main.cf.j2 b/ansible/roles/postfix/templates/main.cf.j2
deleted file mode 100644
index 8f3e24631ca7f72ca579cb05d21da3cb2a8e3273..0000000000000000000000000000000000000000
--- a/ansible/roles/postfix/templates/main.cf.j2
+++ /dev/null
@@ -1,47 +0,0 @@
-# See /usr/share/postfix/main.cf.dist for a commented, more complete version
-
-smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU)
-biff = no
-
-# appending .domain is the MUA's job.
-append_dot_mydomain = no
-
-# Uncomment the next line to generate "delayed mail" warnings
-#delay_warning_time = 4h
-
-# TLS parameters
-smtpd_tls_cert_file = /etc/ssl/certs/ssl-cert-snakeoil.pem
-smtpd_tls_key_file = /etc/ssl/private/ssl-cert-snakeoil.key
-smtpd_use_tls = yes
-smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated reject_unauth_destination
-smtpd_tls_session_cache_database = btree:${queue_directory}/smtpd_scache
-smtp_tls_session_cache_database = btree:${queue_directory}/smtp_scache
-
-# See /usr/share/doc/postfix/TLS_README.gz in the postfix-doc package for
-# information on enabling SSL in the smtp client.
-
-myhostname = {{ postfix_mailname }}
-alias_maps = hash:/etc/aliases
-alias_database = hash:/etc/aliases
-virtual_maps = hash:/etc/postfix/virtual
-myorigin = /etc/mailname
-mydestination = {{ postfix_mailname }}, {{ ansible_hostname }}, localdomain, localhost.localdomain, localhost
-relayhost = {{ postfix_relay_host }}
-mynetworks = 127.0.0.0/8
-mailbox_size_limit = 0
-recipient_delimiter = +
-inet_interfaces = localhost
-inet_protocols = ipv4
-default_transport = smtp
-relay_transport = smtp
-disable_vrfy_command = yes
-smtp_generic_maps = hash:/etc/postfix/generic
-notify_classes = bounce
-bounce_notice_recipient = bounces@{{ postfix_mailname }}
-{% if postfix_relay_user %}
-
-# SMTP relay authentication
-smtp_sasl_auth_enable = yes
-smtp_sasl_password_maps = hash:/etc/postfix/sasl-passwords
-smtp_sasl_security_options = noanonymous
-{% endif %}
diff --git a/ansible/roles/postgres-ha/defaults/main.yml b/ansible/roles/postgres-ha/defaults/main.yml
deleted file mode 100644
index 038debef0316c504031f3fd4cffb37f6bf28aac9..0000000000000000000000000000000000000000
--- a/ansible/roles/postgres-ha/defaults/main.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-
-repmgr_packages:
-  - repmgr
-  # rephacheck:
-  - python3
-  - python3-psycopg2
-  - python3-toml
-
-repmgr_pg_version: "{{ pg_version | default('11') }}"
-repmgr_pg_cluster: "{{ pg_cluster | default('main') }}"
-repmgr_pg_data: /var/lib/postgresql/{{ repmgr_pg_version }}/{{ repmgr_pg_cluster }}
-
-repmgr_config: /etc/postgresql/{{ repmgr_pg_version }}/{{ repmgr_pg_cluster }}/repmgr.conf
-
-repmgr_user: repmgr
-repmgr_password:
-repmgr_db: repmgr
-repmgr_roles: LOGIN,REPLICATION,SUPERUSER
-
-repmgr_primary_node: "{{ hostvars[groups['postgres'][0]]['ansible_default_ipv4']['address'] }}"
-
-repmgr_timeout: 5
-
-repmgr_node_id: "{{ (groups['postgres'].index(inventory_hostname) + 1) | int }}"
-repmgr_node_name: "{{ ansible_hostname }}"
-repmgr_conninfo: host={{ ansible_default_ipv4.address }} dbname={{ repmgr_db }} user={{ repmgr_user }} connect_timeout={{ repmgr_timeout }}
-
-repmgr_repha_port: 8543
-
-pg_firewall_enabled: true
-pg_ferm_rules_filename: postgres_ha
-pg_ferm_input_rules:
-  - proto:
-      - tcp
-    dport:
-      - 5432
-      - 8543
-pg_ferm_output_rules:
-  - proto:
-      - tcp
-    dport:
-      - 54321
-      - 54322
-pg_ferm_global_settings:
-
-...
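repmgr_node_id is derived from the host's position in the postgres group, so the IDs stay stable only as long as the inventory order does. For an illustrative three-node group:

    # groups['postgres'] == ['pg1', 'pg2', 'pg3']
    # pg1 -> repmgr_node_id == 1   (its address is also repmgr_primary_node)
    # pg2 -> repmgr_node_id == 2
    # pg3 -> repmgr_node_id == 3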
diff --git a/ansible/roles/postgres-ha/handlers/main.yml b/ansible/roles/postgres-ha/handlers/main.yml
deleted file mode 100644
index 6b60369e5e0af7c27e2559489ca84e47274eca17..0000000000000000000000000000000000000000
--- a/ansible/roles/postgres-ha/handlers/main.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-
-- name: reload systemd
-  systemd:
-    daemon_reload: true
-
-- name: restart postgresql
-  systemd:
-    name: postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}
-    state: restarted
-
-- name: restart repmgrd
-  systemd:
-    name: repmgrd
-    state: restarted
-
-- name: restart rephacheck
-  systemd:
-    name: rephacheck.socket
-    state: restarted
-
-...
diff --git a/ansible/roles/postgres-ha/tasks/main.yml b/ansible/roles/postgres-ha/tasks/main.yml
deleted file mode 100644
index 97d69f90aaf8fb8ace8e1984530755f792db1539..0000000000000000000000000000000000000000
--- a/ansible/roles/postgres-ha/tasks/main.yml
+++ /dev/null
@@ -1,335 +0,0 @@
----
-
-# INSTALLATION
-
-- name: install packages
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ repmgr_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-# POSTGRESQL
-
-- name: postgresql
-  vars:
-    pg_hba:
-      - type: local
-        method: peer
-      - type: host
-        address: 127.0.0.1/32
-      - type: host
-        address: ::1/128
-      - type: host
-        address: 0.0.0.0/0
-      - type: host
-        address: ::/0
-      - type: local
-        database: replication
-        method: peer
-      - type: host
-        database: replication
-        address: 127.0.0.1/32
-      - type: host
-        database: replication
-        address: ::1/128
-      - type: host
-        database: replication
-        address: 0.0.0.0/0
-      - type: host
-        database: replication
-        address: ::/0
-    pg_conf:
-      - name: main
-        content: |
-          listen_addresses = '*'
-      - name: modules
-        content: |
-          shared_preload_libraries = 'repmgr'
-    pg_users:
-      - name: "{{ repmgr_user }}"
-        password: "{{ repmgr_password }}"
-        roles: "{{ repmgr_roles }}"
-    pg_databases:
-      - name: "{{ repmgr_db }}"
-        owner: "{{ repmgr_user }}"
-    pg_ferm_input_rules:
-      - proto:
-          - tcp
-        dport:
-          - 5432
-          - "{{ repmgr_repha_port }}"
-  include_role:
-    name: postgres
-
-# CONFIGURATION
-
-- name: configure repmgr
-  notify: restart repmgrd
-  template:
-    src: repmgr.conf.j2
-    dest: "{{ repmgr_config }}"
-    owner: postgres
-    group: postgres
-
-- name: configure debian default
-  notify: restart repmgrd
-  loop:
-    - key: REPMGRD_ENABLED
-      value: 'yes'
-    - key: REPMGRD_CONF
-      value: "{{ repmgr_config }}"
-  replace:
-    path: /etc/default/repmgrd
-    regexp: '^#?{{ item.key }}=.*$'
-    replace: '{{ item.key }}={{ item.value }}'
-
-- name: configure sudo
-  copy:
-    dest: /etc/sudoers.d/postgres
-    validate: visudo -cf %s
-    content: |
-      Defaults:postgres !requiretty
-      postgres ALL=NOPASSWD: \
-        /bin/systemctl start postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}, \
-        /bin/systemctl stop postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}, \
-        /bin/systemctl restart postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}, \
-        /bin/systemctl reload postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}
-
-# SSH
-
-- name: ensure postgres account has an ssh keypair
-  user:
-    name: postgres
-    generate_ssh_key: true
-    ssh_key_type: ed25519
-    ssh_key_file: ~postgres/.ssh/id_ed25519
-
-- name: fetch postgres ssh public key
-  register: repmgr_postgres_ssh_pubkey
-  slurp:
-    path: ~postgres/.ssh/id_ed25519.pub
-
-- name: register postgres ssh public key as an ansible fact
-  set_fact:
-    pubkey: "{{ repmgr_postgres_ssh_pubkey['content'] | b64decode }}"
-
-- name: share postgres ssh public key between cluster members
-  loop: "{{ groups['postgres'] }}"
-  authorized_key:
-    user: postgres
-    key: "{{ hostvars[item]['pubkey'] }}"
-
-- name: postgres ssh client configuration
-  copy:
-    dest: ~postgres/.ssh/config
-    owner: postgres
-    group: postgres
-    content: |
-      IdentityFile	~/.ssh/id_ed25519
-      StrictHostKeyChecking	no
-      UserKnownHostsFile	/dev/null
-
-# REGISTER PRIMARY
-
-- name: setup primary
-  when: (db_role is defined and db_role == "primary") or (db_role is undefined and inventory_hostname == groups['postgres'][0])
-  block:
-
-    - name: check if primary already joined
-      become: true
-      become_user: postgres
-      register: repmgr_check_primary
-      postgresql_query:
-        db: repmgr
-        query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
-
-    - name: register primary
-      become: true
-      become_user: postgres
-      when: repmgr_check_primary.query_result | length == 0
-      notify: restart repmgrd
-      command:
-        cmd: repmgr --config-file={{ repmgr_config }} primary register
-
-- meta: flush_handlers
-
-# REGISTER STANDBY
-
-- name: setup standby
-  when: (db_role is defined and db_role == "standby") or (db_role is undefined and inventory_hostname == groups['postgres'][1])
-  block:
-
-    - name: check if standby already joined
-      become: true
-      become_user: postgres
-      register: repmgr_check_standby
-      postgresql_query:
-        db: repmgr
-        query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
-
-    - name: stop postgresql service
-      when: repmgr_check_standby.query_result | length == 0
-      systemd:
-        name: postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}
-        state: stopped
-
-    - name: remove existing pgdata
-      when: repmgr_check_standby.query_result | length == 0
-      command:
-        cmd: mv -vf {{ repmgr_pg_data }} {{ repmgr_pg_data }}.save
-        removes: "{{ repmgr_pg_data }}"
-
-    - name: clone from primary to standby
-      become: true
-      become_user: postgres
-      when: repmgr_check_standby.query_result | length == 0
-      ignore_errors: true
-      register: repmgr_clone_standby
-      shell:
-        cmd: |
-          repmgr \
-            --config-file={{ repmgr_config }} \
-            --force \
-            --dbname={{ repmgr_db }} \
-            --host={{ repmgr_primary_node }} \
-            --port=5432 \
-            --username={{ repmgr_user }} \
-            --pgdata={{ repmgr_pg_data }} \
-            standby clone --fast-checkpoint
-
-    - name: remove pgdata backup
-      when: repmgr_clone_standby is succeeded
-      file:
-        path: "{{ repmgr_pg_data }}.save"
-        state: absent
-
-    - name: remove failed clone pgdata
-      when: repmgr_clone_standby is failed
-      file:
-        path: "{{ repmgr_pg_data }}"
-        state: absent
-
-    - name: restore pgdata backup
-      when: repmgr_clone_standby is failed
-      command:
-        cmd: mv -vf {{ repmgr_pg_data }}.save {{ repmgr_pg_data }}
-        removes: "{{ repmgr_pg_data }}.save"
-
-    - name: start postgresql service
-      systemd:
-        name: postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}
-        state: started
-
-    - name: standby clone failed
-      when: repmgr_clone_standby is failed
-      fail:
-        msg: "{{ repmgr_clone_standby.stderr }}"
-
-    - name: register standby
-      become: true
-      become_user: postgres
-      when: repmgr_check_standby.query_result | length == 0
-      notify: restart repmgrd
-      command:
-        cmd: repmgr --config-file={{ repmgr_config }} standby register
-
-- meta: flush_handlers
-
-# REGISTER WITNESS
-
-- name: setup witness
-  when: (db_role is defined and db_role == "witness") or (db_role is undefined and inventory_hostname == groups['postgres'][2])
-  block:
-
-    - name: check if witness already joined
-      become: true
-      become_user: postgres
-      register: repmgr_check_witness
-      postgresql_query:
-        db: repmgr
-        query: SELECT 1 FROM pg_tables WHERE tablename='nodes'
-
-    - name: register witness
-      become: true
-      become_user: postgres
-      when: repmgr_check_witness.query_result | length == 0
-      notify: restart repmgrd
-      command:
-        cmd: repmgr --config-file={{ repmgr_config }} --host={{ repmgr_primary_node }} witness register
-
-- meta: flush_handlers
-
-# REPHACHECK
-
-- name: install rephacheck
-  template:
-    src: rephacheck.py.j2
-    dest: /usr/bin/rephacheck
-    mode: 0755
-
-- name: register variables needed by rephacheck as facts
-  set_fact:
-    repmgr_node_name: "{{ repmgr_node_name }}"
-    repmgr_node_id: "{{ repmgr_node_id }}"
-
-- name: configure rephacheck
-  template:
-    src: rephacheck.conf.j2
-    dest: /etc/postgresql/{{ repmgr_pg_version }}/{{ repmgr_pg_cluster }}/rephacheck.conf
-    owner: postgres
-    group: postgres
-    mode: 0644
-
-- name: configure rephacheck socket
-  notify:
-    - reload systemd
-    - restart rephacheck
-  copy:
-    dest: /etc/systemd/system/rephacheck.socket
-    content: |
-      [Unit]
-      Description=RepHACheck socket
-
-      [Socket]
-      ListenStream={{ repmgr_repha_port }}
-      Accept=yes
-
-      [Install]
-      WantedBy=sockets.target
-
-- name: configure rephacheck service
-  notify:
-    - reload systemd
-    - restart rephacheck
-  copy:
-    dest: /etc/systemd/system/rephacheck@.service
-    content: |
-      [Unit]
-      Description=RepHACheck - Health check for PostgreSQL cluster managed by repmgr
-
-      [Service]
-      ExecStart=-/usr/bin/rephacheck
-      StandardInput=socket
-      User=postgres
-      Group=postgres
-
-- name: enable and start rephacheck
-  service:
-    name: rephacheck.socket
-    state: started
-    enabled: true
-
-- name: firewall
-  when: pg_firewall_enabled
-  vars:
-    ferm_rules_filename: "{{ pg_ferm_rules_filename }}"
-    ferm_input_rules: "{{ pg_ferm_input_rules }}"
-    ferm_output_rules: "{{ pg_ferm_output_rules }}"
-    ferm_global_settings: "{{ pg_ferm_global_settings }}"
-  include_role:
-    name: ferm-configure
-
-...
diff --git a/ansible/roles/postgres-ha/templates/rephacheck.conf.j2 b/ansible/roles/postgres-ha/templates/rephacheck.conf.j2
deleted file mode 100644
index afc1e6bd23492826d2698b6456b3913c7a87b010..0000000000000000000000000000000000000000
--- a/ansible/roles/postgres-ha/templates/rephacheck.conf.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-local_node_id = "{{ repmgr_node_id }}"
-connect_timeout = "{{ repmgr_timeout }}"
-
-{% for node in play_hosts %}
-[nodes."{{ hostvars[node]['repmgr_node_name'] }}"]
-addr = "{{ hostvars[node]['ansible_default_ipv4']['address'] }}"
-port = 5432
-node_id = {{ hostvars[node]['repmgr_node_id'] }}
-
-{% endfor %}
-
-[conninfo]
-dbname = "{{ repmgr_db }}"
-user = "{{ repmgr_user }}"
-password = "{{ repmgr_password }}"
diff --git a/ansible/roles/postgres-ha/templates/rephacheck.py.j2 b/ansible/roles/postgres-ha/templates/rephacheck.py.j2
deleted file mode 100644
index 7ec6cfea94d9197c1b28ecbb5432c21fbb5ec706..0000000000000000000000000000000000000000
--- a/ansible/roles/postgres-ha/templates/rephacheck.py.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Determine by voting which is the state of each node.
-For this to work properly, you need to have an odd number of nodes.
-"""
-
-from collections import Counter
-import psycopg2
-import toml
-
-
-def get_state(addr, port, node_id):
-    try:
-        # postgresql query
-        con_args = {'host': addr, 'port': port, 'connect_timeout': TIMEOUT}
-        con_args.update(CONNINFO)
-        con = psycopg2.connect(**con_args)
-        cur = con.cursor()
-        query = 'SELECT active, type FROM repmgr.nodes WHERE node_id = %s;'
-        cur.execute(query, (node_id,))
-        data = cur.fetchone()
-        cur.close()
-        con.close()
-        # return result
-        return data
-    except Exception:
-        # an error occurred, so return false by default
-        return (False, 'unknown')
-
-
-def get_quorum_state(node_id):
-    # init vars
-    votes = []
-    # ask each node for the state of `node_id`
-    for node in NODES.values():
-        active, role = get_state(node['addr'], node['port'], node_id)
-        # if node considered active take vote, otherwise fence it
-        if active:
-            votes.append(role)
-        else:
-            votes.append('fenced')
-    # determines voting result
-    results = Counter(votes)
-    state = results.most_common(1)[0][0]
-    # catch inconsistent state case
-    if state == 'standby' and 'primary' in votes:
-        state = 'fenced'
-    # return result
-    return state
-
-
-if __name__ == '__main__':
-    with open('/etc/postgresql/{{ repmgr_pg_version }}/{{ repmgr_pg_cluster }}/rephacheck.conf') as rephaconf:
-        conf = toml.load(rephaconf)
-
-    NODES = conf.get('nodes')
-    CONNINFO = conf.get('conninfo')
-    TIMEOUT = conf.get('connect_timeout', 3)
-    CURRENT = conf.get('local_node_id')
-
-    state = get_quorum_state(CURRENT)
-    print(state)
diff --git a/ansible/roles/postgres-ha/templates/repmgr.conf.j2 b/ansible/roles/postgres-ha/templates/repmgr.conf.j2
deleted file mode 100644
index 62012de04746f230e1b5bcecde995cec9e53381d..0000000000000000000000000000000000000000
--- a/ansible/roles/postgres-ha/templates/repmgr.conf.j2
+++ /dev/null
@@ -1,70 +0,0 @@
-# https://raw.githubusercontent.com/2ndQuadrant/repmgr/master/repmgr.conf.sample
-#------------------------------------------------------------------------------
-# Node configuration settings
-#------------------------------------------------------------------------------
-# node information
-node_id={{ repmgr_node_id }}
-node_name={{ repmgr_node_name }}
-# database connection information
-conninfo='{{ repmgr_conninfo }}'
-# repmgr data directory
-data_directory='/var/lib/postgresql/{{ repmgr_pg_version }}/{{ repmgr_pg_cluster }}'
-# log level
-log_level='INFO'
-# log to file
-log_file='/var/log/postgresql/repmgrd.log'
-
-#------------------------------------------------------------------------------
-# Replication settings
-#------------------------------------------------------------------------------
-# physical replication slots (at least the number of standbys which will connect to the primary)
-use_replication_slots=1
-
-#------------------------------------------------------------------------------
-# Event notification settings
-#------------------------------------------------------------------------------
-# RM#33392
-# external program or script (should be executable by repmgr user)
-#event_notification_command='/usr/bin/repmgr-event %n %e %s "%t" "%d"'
-
-#------------------------------------------------------------------------------
-# Environment/command settings
-#------------------------------------------------------------------------------
-# path to PostgreSQL binary directory (location of pg_ctl, pg_basebackup, etc.)
-pg_bindir='/usr/lib/postgresql/{{ repmgr_pg_version }}/bin/'
-
-#------------------------------------------------------------------------------
-# external command options
-#------------------------------------------------------------------------------
-# options to append to "pg_ctl"
-pg_ctl_options='-s'
-# options to append to "pg_basebackup"
-pg_basebackup_options='--label=repmgr_backup'
-
-#------------------------------------------------------------------------------
-# Failover and monitoring settings (repmgrd)
-#------------------------------------------------------------------------------
-# The max length of time (in seconds) to wait for the new primary to become available
-primary_follow_timeout=30
-# determines what action to take in the event of upstream failure
-#  'automatic': repmgrd will automatically attempt to promote the node or follow the new upstream node
-#  'manual': repmgrd will take no action and the node will require manual attention to reattach it to replication
-failover=automatic
-# Number of attempts which will be made to reconnect to an unreachable primary (or other upstream node)
-reconnect_attempts=6
-# Interval between attempts to reconnect to an unreachable primary (or other upstream node)
-reconnect_interval=5
-# command repmgrd executes when promoting a new primary
-promote_command='repmgr -f {{ repmgr_config }} standby promote'
-# command repmgrd executes when instructing a standby to follow a new primary
-follow_command='repmgr -f {{ repmgr_config }} standby follow -W'
-
-#------------------------------------------------------------------------------
-# service control commands
-#------------------------------------------------------------------------------
-# override the default pg_ctl commands used to stop, start, restart, reload and promote the PostgreSQL cluster
-service_start_command='sudo systemctl start postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}'
-service_stop_command='sudo systemctl stop postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}'
-service_restart_command='sudo systemctl restart postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}'
-service_reload_command='sudo systemctl reload postgresql@{{ repmgr_pg_version }}-{{ repmgr_pg_cluster }}'
-
diff --git a/ansible/roles/postgres/defaults/main.yml b/ansible/roles/postgres/defaults/main.yml
deleted file mode 100644
index 9bf750b55d2d9c85ae1930f3a0a04f08ba2a4f6b..0000000000000000000000000000000000000000
--- a/ansible/roles/postgres/defaults/main.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-
-pg_packages:
-  - postgresql
-
-pg_version: 11
-pg_cluster: main
-
-pg_password: "{{ envsetup_db_pg_root_pwd | d() }}"
-
-pg_conf_dir: /etc/postgresql/{{ pg_version }}/{{ pg_cluster }}
-
-pg_conf:
-  - name: main
-    content:
-
-pg_hba:
-  - type: local
-    method: peer
-  - type: host
-    address: 127.0.0.1/32
-  - type: host
-    address: ::1/128
-  - type: local
-    database: replication
-    method: peer
-  - type: host
-    database: replication
-    address: 127.0.0.1/32
-  - type: host
-    database: replication
-    address: ::1/128
-
-pg_users: []
-
-pg_databases: []
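-
-# Example of the shape consumed by the role's user/database tasks
-# (hypothetical values, shown for illustration only):
-# pg_users:
-#   - name: myapp
-#     password: changeme
-#     roles: LOGIN
-# pg_databases:
-#   - name: myapp
-#     owner: myapp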
-
-pg_firewall_enabled: true
-pg_ferm_rules_filename: postgres
-pg_ferm_input_rules:
-  - proto:
-      - tcp
-    dport:
-      - 5432
-pg_ferm_output_rules: []
-pg_ferm_global_settings:
-
-...
diff --git a/ansible/roles/postgres/handlers/main.yml b/ansible/roles/postgres/handlers/main.yml
deleted file mode 100644
index 2f1c67e4c548ddbe3b23e86c3b1673f62f700dc1..0000000000000000000000000000000000000000
--- a/ansible/roles/postgres/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: restart postgresql
-  systemd:
-    name: postgresql@{{ pg_version }}-{{ pg_cluster }}
-    state: restarted
-
-...
diff --git a/ansible/roles/postgres/meta/main.yml b/ansible/roles/postgres/meta/main.yml
deleted file mode 100644
index e45d692ae3567f856967cd6f66c91d13e2e94e4e..0000000000000000000000000000000000000000
--- a/ansible/roles/postgres/meta/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-dependencies:
-  - role: base
-
-...
diff --git a/ansible/roles/postgres/tasks/main.yml b/ansible/roles/postgres/tasks/main.yml
deleted file mode 100644
index 80cbf592b69698c8d0846cf3a44cbe4e18a111dc..0000000000000000000000000000000000000000
--- a/ansible/roles/postgres/tasks/main.yml
+++ /dev/null
@@ -1,129 +0,0 @@
----
-
-- name: ansible postgresql requirements install
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: python3-psycopg2
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: install packages
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ pg_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-# CONFIGURATION
-
-- name: ensure conf directory exists
-  file:
-    path: "{{ pg_conf_dir }}/conf.d"
-    owner: postgres
-    group: postgres
-    state: directory
-
-- name: ensure conf directory is included
-  replace:
-    path: "{{ pg_conf_dir }}/postgresql.conf"
-    backup: true
-    regexp: "^#?include_dir = '[A-Za-z\\.]+'(\\s+.*)$"
-    replace: "include_dir = 'conf.d'\\1"
-
-- name: change max connections value
-  replace:
-    path: "{{ pg_conf_dir }}/postgresql.conf"
-    backup: true
-    regexp: "^#?max_connections = [0-9]+"
-    replace: "max_connections = {{ pg_conf_max_connections }}"
-  when: pg_conf_max_connections is defined
-
-- name: configure custom settings
-  notify: restart postgresql
-  loop: "{{ pg_conf }}"
-  when: item.content | d(false)
-  copy:
-    dest: "{{ pg_conf_dir }}/conf.d/{{ item.name }}.conf"
-    owner: postgres
-    group: postgres
-    backup: true
-    content: "{{ item.content }}"
-
-- name: configure authentication
-  notify: restart postgresql
-  template:
-    src: pg_hba.conf.j2
-    dest: "{{ pg_conf_dir }}/pg_hba.conf"
-    owner: postgres
-    group: postgres
-    mode: 0640
-    backup: true
-
-- name: ensure service is enabled and running
-  systemd:
-    name: postgresql@{{ pg_version }}-{{ pg_cluster }}
-    enabled: true
-    state: started
-
-# USERS
-
-- name: set superuser password
-  become: true
-  become_user: postgres
-  no_log: true
-  postgresql_user:
-    name: postgres
-    password: "{{ pg_password | d(omit) }}"
-
-- name: manage users
-  become: true
-  become_user: postgres
-  no_log: true
-  loop: "{{ pg_users }}"
-  postgresql_user:
-    name: "{{ item.name }}"
-    password: "{{ item.password | d(omit) }}"
-    db: "{{ item.db | d(omit) }}"
-    priv: "{{ item.priv | d(omit) }}"
-    role_attr_flags: "{{ item.roles | d(omit) }}"
-
-- name: set .pgpass to allow passwordless connection
-  loop: "{{ query('nested', ['root', 'postgres'], pg_users) }}"
-  blockinfile:
-    path: "~{{ item.0 }}/.pgpass"
-    block: "*:*:*:{{ item.1.name }}:{{ item.1.password }}"
-    marker: "# {mark} {{ item.1.name }}"
-    create: true
-    owner: "{{ item.0 }}"
-    group: "{{ item.0 }}"
-    mode: 0600
-
-# DATABASES
-
-- name: create databases
-  become: true
-  become_user: postgres
-  loop: "{{ pg_databases }}"
-  postgresql_db:
-    name: "{{ item.name }}"
-    owner: "{{ item.owner | d(omit) }}"
-
-# FIREWALL
-
-- name: firewall
-  when: pg_firewall_enabled
-  vars:
-    ferm_rules_filename: "{{ pg_ferm_rules_filename }}"
-    ferm_input_rules: "{{ pg_ferm_input_rules }}"
-    ferm_output_rules: "{{ pg_ferm_output_rules }}"
-    ferm_global_settings: "{{ pg_ferm_global_settings }}"
-  include_role:
-    name: ferm-configure
-
-- meta: flush_handlers
-
-...
diff --git a/ansible/roles/postgres/templates/pg_hba.conf.j2 b/ansible/roles/postgres/templates/pg_hba.conf.j2
deleted file mode 100644
index 4d5a5bc478978a21e390378ed47efa6b2f06034f..0000000000000000000000000000000000000000
--- a/ansible/roles/postgres/templates/pg_hba.conf.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-# {{ ansible_managed }}
-
-# PostgreSQL Client Authentication Configuration File
-# ===================================================
-
-{% for connection in pg_hba %}
-{% if connection.comment is defined %}
-# {{ connection.comment }}
-{% endif %}
-{{ connection.type }}	{{ connection.database | d('all') }}	{{ connection.user | d('all') }}	{{ connection.address | d() }}	{{ connection.method | d('md5') }}
-{% endfor %}
diff --git a/ansible/roles/proxy/defaults/main.yml b/ansible/roles/proxy/defaults/main.yml
deleted file mode 100644
index 50957cb1baa86268f3a3d12219299fb744350d97..0000000000000000000000000000000000000000
--- a/ansible/roles/proxy/defaults/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-
-proxy_http: "{{ envsetup_proxy_http | d() }}"
-proxy_https: "{{ envsetup_proxy_https | d() }}"
-proxy_exclude:
-  - "localhost"
-  - "127.0.0.1"
-  - "::1"
-  - "{{ envsetup_proxy_exclude | d() }}"
-  - "{{ envsetup_ms_server_name | d() }}"
-  - "{{ envsetup_monitor_server_name | d() }}"
-  - "{{ envsetup_cm_server_name | d() }}"
-
-...
diff --git a/ansible/roles/proxy/tasks/main.yml b/ansible/roles/proxy/tasks/main.yml
deleted file mode 100644
index e3030be3bce2de57da8bec1766f56c570d2fee8d..0000000000000000000000000000000000000000
--- a/ansible/roles/proxy/tasks/main.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-
-- name: if proxy settings are set
-  when:
-    - proxy_http | d(false)
-    - proxy_https | d(false)
-  block:
-
-    - name: environment
-      blockinfile:
-        path: /etc/environment
-        create: true
-        marker_begin: BEGIN PROXY
-        marker_end: END PROXY
-        block: |
-          http_proxy={{ proxy_http }}
-          HTTP_PROXY={{ proxy_http }}
-          https_proxy={{ proxy_https }}
-          HTTPS_PROXY={{ proxy_https }}
-          no_proxy={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
-          NO_PROXY={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
-
-    - name: apt
-      copy:
-        dest: /etc/apt/apt.conf.d/proxy
-        content: |
-          Acquire::http::Proxy "{{ proxy_http }}";
-          Acquire::https::Proxy "{{ proxy_https }}";
-
-    - name: wget
-      copy:
-        dest: /etc/wgetrc
-        content: |
-          use_proxy=yes
-          http_proxy={{ proxy_http }}
-          https_proxy={{ proxy_https }}
-          no_proxy={{ proxy_exclude | flatten | unique | reject('equalto', '') | join(',') }}
-
-    - name: install git
-      apt:
-        force_apt_get: true
-        install_recommends: false
-        name: git
-      register: apt_status
-      retries: 60
-      until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-    - name: git
-      loop:
-        - name: http.proxy
-          value: "{{ proxy_http }}"
-        - name: https.proxy
-          value: "{{ proxy_https }}"
-      git_config:
-        name: "{{ item.name }}"
-        scope: global
-        value: "{{ item.value }}"
-        state: present
-
-...
diff --git a/ansible/roles/sysconfig/defaults/main.yml b/ansible/roles/sysconfig/defaults/main.yml
deleted file mode 100644
index e6c0886bc7c00968071723cf86ac98e69ea541a8..0000000000000000000000000000000000000000
--- a/ansible/roles/sysconfig/defaults/main.yml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-
-repos_prefix: "{{ envsetup_apt_cache_url | d('http://', true) }}"
-repos_deb: deb.debian.org
-repos_deb_sec: security.debian.org
-repos_release: "{{ ansible_distribution_release }}"
-
-repos_skyreach_token: "{{ envsetup_skyreach_apt_token }}"
-repos_skyreach_host: "{{ envsetup_skyreach_host }}"
-
-
-sysconfig_packages:
-  - bash-completion
-  - curl
-  - git
-  - host
-  - htop
-  - ifupdown
-  - iotop
-  - iftop
-  - ipython3
-  - lm-sensors
-  - make
-  - net-tools
-  - netcat
-  - nfs-client
-  - openssh-server
-  - pciutils
-  - python3-psutil
-  - python3-openssl
-  - python3-requests
-  - python3-spf
-  - python3-packaging
-  - python3-lxml
-  - pwgen
-  - rsync
-  - smartmontools
-  - sudo
-  - unattended-upgrades
-  - vim
-  - man
-  - git-man
-
-sysconfig_firewall_enabled: true
-sysconfig_ferm_rules_filename: sysutils
-sysconfig_ferm_input_rules:
-  # munin
-  - proto:
-      - tcp
-    dport:
-      - 4949
-  # cockpit
-  - proto:
-      - tcp
-    dport:
-      - 9090
-sysconfig_ferm_output_rules: []
-sysconfig_ferm_global_settings:
-
-locale_packages:
-  - locales
-  - tzdata
-
-init_locale: "{{ envsetup_locale | d('C.UTF-8', true) }}"
-
-init_timezone: "{{ envsetup_timezone | d('Etc/UTC', true) }}"
-
-sysconfig_logs_packages:
-  - rsyslog
-
-ntp_servers: "{{ envsetup_ntp_server }}"
-
-...
diff --git a/ansible/roles/sysconfig/handlers/main.yml b/ansible/roles/sysconfig/handlers/main.yml
deleted file mode 100644
index ecd373946c21cab71eb3f2fe6daf2283136dcda4..0000000000000000000000000000000000000000
--- a/ansible/roles/sysconfig/handlers/main.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: update cache
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    update_cache: true
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: systemd daemon reload
-  systemd:
-    daemon_reload: true
-
-- name: update locale
-  command: locale-gen
-
-- name: restart cron
-  service:
-    name: cron
-    state: restarted
-
-- name: restart sshd
-  service:
-    name: sshd
-    state: restarted
-
-- name: restart unattended-upgrades
-  service:
-    name: unattended-upgrades
-    state: restarted
-
-- name: restart ntp
-  service:
-    name: ntp
-    state: restarted
-
-...
diff --git a/ansible/roles/sysconfig/tasks/locale.yml b/ansible/roles/sysconfig/tasks/locale.yml
deleted file mode 100644
index 9be5ec6367fac2913165b91a485c09297b935ed7..0000000000000000000000000000000000000000
--- a/ansible/roles/sysconfig/tasks/locale.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: install locale packages
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ locale_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: generate locale
-  locale_gen:
-    name: "{{ init_locale }}"
-
-- name: set locale
-  notify: update locale
-  copy:
-    dest: /etc/default/locale
-    content: |
-      LANG={{ init_locale }}
-      LANGUAGE={{ init_locale }}
-      LC_ALL={{ init_locale }}
-
-- name: set locale.gen
-  notify: update locale
-  lineinfile:
-    path: /etc/locale.gen
-    regexp: '^(?:# )?({{ init_locale }}.*)$'
-    backrefs: true
-    line: '\1'
-
-- name: set timezone
-  notify: restart cron
-  timezone:
-    name: "{{ init_timezone }}"
-
-...
diff --git a/ansible/roles/sysconfig/tasks/logs.yml b/ansible/roles/sysconfig/tasks/logs.yml
deleted file mode 100644
index 3b70e3c59175c7f45d6a2d109002856202dfe48f..0000000000000000000000000000000000000000
--- a/ansible/roles/sysconfig/tasks/logs.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: install logs packages
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ sysconfig_logs_packages }}"
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: start rsyslog
-  systemd:
-    name: rsyslog
-    enabled: true
-    state: started
-
-- name: ensure journald logs persistence is enabled
-  file:
-    path: /var/log/journal
-    state: directory
-
-...
diff --git a/ansible/roles/sysconfig/tasks/main.yml b/ansible/roles/sysconfig/tasks/main.yml
deleted file mode 100644
index 4dc3a3060035eb6a9ca742e2cce02629fde0f3fd..0000000000000000000000000000000000000000
--- a/ansible/roles/sysconfig/tasks/main.yml
+++ /dev/null
@@ -1,85 +0,0 @@
----
-- include: repos.yml
-
-- name: install system utilities
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: "{{ sysconfig_packages }}"
-    state: latest
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: remove conflicting packages
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name:
-      - exim4
-      - exim4-base
-      - exim4-config
-      - exim4-daemon-light
-    state: absent
-    purge: true
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: install unattended-upgrades
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: unattended-upgrades
-    state: latest
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: enable unattended upgrades
-  copy:
-    dest: /etc/apt/apt.conf.d/20auto-upgrades
-    content: |
-      APT::Periodic::Update-Package-Lists "1";
-      APT::Periodic::Unattended-Upgrade "1";
-
-- name: remove old kernel with unattended-upgrades
-  replace:
-    dest: /etc/apt/apt.conf.d/50unattended-upgrades
-    regexp: '^//Unattended-Upgrade::Remove-Unused-Kernel-Packages.*$'
-    replace: 'Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";'
-  notify: restart unattended-upgrades
-
-- name: enable root login via ssh with key
-  replace:
-    dest: /etc/ssh/sshd_config
-    regexp: '^#PermitRootLogin (yes|without-password|prohibit-password)'
-    replace: "PermitRootLogin without-password"
-  notify: restart sshd
-
-- name: remove disabled root login
-  replace:
-    dest: /root/.ssh/authorized_keys
-    regexp: "^no-port-forwarding,(.+) ssh-"
-    replace: "ssh-"
-  ignore_errors: true
-
-# FIREWALL
-
-- name: firewall
-  when: sysconfig_firewall_enabled
-  vars:
-    ferm_rules_filename: "{{ sysconfig_ferm_rules_filename }}"
-    ferm_input_rules: "{{ sysconfig_ferm_input_rules }}"
-    ferm_output_rules: "{{ sysconfig_ferm_output_rules }}"
-    ferm_global_settings: "{{ sysconfig_ferm_global_settings }}"
-  include_role:
-    name: ferm-configure
-
-- include: logs.yml
-
-- include: locale.yml
-
-- include: ntp.yml
-
-...
diff --git a/ansible/roles/sysconfig/tasks/ntp.yml b/ansible/roles/sysconfig/tasks/ntp.yml
deleted file mode 100644
index fc2dededb24c2224e9e76bd881557ca290d45940..0000000000000000000000000000000000000000
--- a/ansible/roles/sysconfig/tasks/ntp.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- name: create systemd-timesync service config directory
-  file:
-    path: /lib/systemd/system/systemd-timesyncd.service.d
-    state: directory
-    mode: 0755
-
-- name: ntp add condition to systemd-timesyncd service
-  notify: systemd daemon reload
-  copy:
-    dest: /lib/systemd/system/systemd-timesyncd.service.d/disable-with-time-daemon.conf
-    content: |
-      [Unit]
-      # don't run timesyncd if we have another NTP daemon installed
-      ConditionFileIsExecutable=!/usr/sbin/ntpd
-      ConditionFileIsExecutable=!/usr/sbin/openntpd
-      ConditionFileIsExecutable=!/usr/sbin/chronyd
-      ConditionFileIsExecutable=!/usr/sbin/VBoxService
-
-- name: ntp disable systemd-timesyncd service
-  notify: restart ntp
-  systemd:
-    name: systemd-timesyncd
-    enabled: false
-    state: stopped
-
-- name: ntp install
-  apt:
-    force_apt_get: true
-    install_recommends: false
-    name: ntp
-    state: present
-  register: apt_status
-  retries: 60
-  until: apt_status is success or ('Failed to lock apt for exclusive operation' not in apt_status.msg and '/var/lib/dpkg/lock' not in apt_status.msg)
-
-- name: ntp config
-  notify: restart ntp
-  template:
-    backup: true
-    src: ntp.conf.j2
-    dest: /etc/ntp.conf
-
-- name: ensure ntp is running
-  service:
-    name: ntp
-    enabled: true
-    state: started
-
-...
diff --git a/ansible/roles/sysconfig/tasks/repos.yml b/ansible/roles/sysconfig/tasks/repos.yml
deleted file mode 100644
index cf5f85215319e0d328afa6be74c47316dd49bd5d..0000000000000000000000000000000000000000
--- a/ansible/roles/sysconfig/tasks/repos.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-
-- name: ubuntu apt repo sources list
-  when:
-    - not offline_mode | d(false)
-    - ansible_distribution == 'Ubuntu'
-  notify: update cache
-  copy:
-    dest: /etc/apt/sources.list
-    content: |
-      deb {{ repos_prefix }}archive.ubuntu.com/ubuntu/ {{ repos_release }} main restricted universe multiverse
-      deb {{ repos_prefix }}archive.ubuntu.com/ubuntu/ {{ repos_release }}-updates main restricted universe multiverse
-      deb {{ repos_prefix }}archive.ubuntu.com/ubuntu/ {{ repos_release }}-backports main restricted universe multiverse
-      deb {{ repos_prefix }}security.ubuntu.com/ubuntu {{ repos_release }}-security main restricted universe multiverse
-
-
-- name: debian apt repo sources list
-  when:
-    - not offline_mode | d(false)
-    - ansible_distribution == 'Debian'
-  notify: update cache
-  copy:
-    dest: /etc/apt/sources.list
-    content: |
-      deb {{ repos_prefix }}{{ repos_deb }}/debian {{ repos_release }} main contrib non-free
-      deb {{ repos_prefix }}{{ repos_deb }}/debian {{ repos_release }}-updates main contrib non-free
-      deb {{ repos_prefix }}{{ repos_deb_sec }}/debian-security {{ repos_release }}/updates main contrib non-free
-
-- name: add skyreach apt repo key
-  when:
-    - not offline_mode | d(false)
-    - repos_skyreach_token | d(false)
-  apt_key:
-    url: https://{{ repos_skyreach_host }}/media/public.gpg
-
-- name: add skyreach apt repo
-  when:
-    - not offline_mode | d(false)
-    - repos_skyreach_token | d(false)
-  apt_repository:
-    repo: deb https://{{ repos_skyreach_host }} packaging/apt/{{ repos_skyreach_token }}/
-    filename: skyreach
-    update_cache: true
-
-...
diff --git a/ansible/roles/sysconfig/templates/ntp.conf.j2 b/ansible/roles/sysconfig/templates/ntp.conf.j2
deleted file mode 100644
index 0aa791d9722b0251a75570065a605a315f3b072d..0000000000000000000000000000000000000000
--- a/ansible/roles/sysconfig/templates/ntp.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
-
-driftfile /var/lib/ntp/ntp.drift
-
-# Leap seconds definition provided by tzdata
-leapfile /usr/share/zoneinfo/leap-seconds.list
-
-# Specify one or more NTP servers.
-{% if ntp_servers is string %}
-pool {{ ntp_servers }} iburst
-{% else %}
-{% for server in ntp_servers %}
-pool {{ server }} iburst
-{% endfor %}
-{% endif %}
-
-# By default, exchange time with everybody, but don't allow configuration.
-restrict -4 default kod notrap nomodify nopeer noquery limited
-restrict -6 default kod notrap nomodify nopeer noquery limited
-
-# Local users may interrogate the ntp server more closely.
-restrict 127.0.0.1
-restrict ::1
-
-# Needed for adding pool entries
-restrict source notrap nomodify noquery
diff --git a/ansible/roles/users/defaults/main.yml b/ansible/roles/users/defaults/main.yml
deleted file mode 100644
index 73b625baa2745dbff4b85990eec7e0546dee9b47..0000000000000000000000000000000000000000
--- a/ansible/roles/users/defaults/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-
-users:
-  - name: ubicast
-    passwd: "{{ envsetup_shell_ubicast_pwd | password_hash('sha512', 'envsetup') }}"
-  - name: admin
-    passwd: "{{ envsetup_shell_admin_pwd | password_hash('sha512', 'envsetup') }}"
-
-users_ssh_authorized_keys:
-  - "{{ lookup('file', 'files/ubicast_support.pub') }}"
-  - "{{ envsetup_ssh_allowed_keys }}"
-
-users_root_change: true
-
-...
diff --git a/ansible/roles/users/files/.bashrc b/ansible/roles/users/files/.bashrc
deleted file mode 100644
index 62b0d5a5f936b04ae8b072bdf3a329c2eb2835f8..0000000000000000000000000000000000000000
--- a/ansible/roles/users/files/.bashrc
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/bin/bash
-# ~/.bashrc: executed by bash(1) for non-login shells.
-# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
-# for examples
-
-# If not running interactively, don't do anything
-[ -z "$PS1" ] && return
-
-# don't put duplicate lines in the history. See bash(1) for more options
-# ... or force ignoredups and ignorespace
-HISTCONTROL=ignoredups:ignorespace
-
-# append to the history file, don't overwrite it
-shopt -s histappend
-
-# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
-HISTSIZE=100000
-HISTFILESIZE=100000
-HISTTIMEFORMAT='%F %T '
-
-# check the window size after each command and, if necessary,
-# update the values of LINES and COLUMNS.
-shopt -s checkwinsize
-
-# make less more friendly for non-text input files, see lesspipe(1)
-[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
-
-# set variable identifying the chroot you work in (used in the prompt below)
-if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
-	debian_chroot=$(cat /etc/debian_chroot)
-fi
-
-# set a fancy prompt (non-color, unless we know we "want" color)
-case "$TERM" in
-	xterm-color) color_prompt=yes;;
-esac
-
-# uncomment for a colored prompt, if the terminal has the capability; turned
-# off by default to not distract the user: the focus in a terminal window
-# should be on the output of commands, not on the prompt
-force_color_prompt=yes
-
-if [ -n "$force_color_prompt" ]; then
-	if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
-	# We have color support; assume it's compliant with Ecma-48
-	# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
-	# a case would tend to support setf rather than setaf.)
-	color_prompt=yes
-	else
-	color_prompt=
-	fi
-fi
-
-if [ "$color_prompt" = yes ]; then
-	PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
-else
-	PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
-fi
-unset color_prompt force_color_prompt
-
-# If this is an xterm set the title to user@host:dir
-case "$TERM" in
-xterm*|rxvt*)
-	PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
-	;;
-*)
-	;;
-esac
-
-# enable color support of ls and also add handy aliases
-if [ -x /usr/bin/dircolors ]; then
-	test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
-	alias ls='ls --color=auto'
-	#alias dir='dir --color=auto'
-	#alias vdir='vdir --color=auto'
-
-	alias grep='grep --color=auto'
-	alias fgrep='fgrep --color=auto'
-	alias egrep='egrep --color=auto'
-	alias rgrep='rgrep --color=auto'
-fi
-
-# some more ls aliases
-alias ll='ls -alF'
-alias lh='ls -alFh'
-alias la='ls -A'
-alias l='ls -CF'
-
-# Alias definitions.
-# You may want to put all your additions into a separate file like
-# ~/.bash_aliases, instead of adding them here directly.
-# See /usr/share/doc/bash-doc/examples in the bash-doc package.
-
-if [ -f ~/.bash_aliases ]; then
-	. ~/.bash_aliases
-fi
-
-# enable programmable completion features (you don't need to enable
-# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
-# sources /etc/bash.bashrc).
-if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
-	. /etc/bash_completion
-fi
-
-# system
-alias lskernels='dpkg --get-selections | grep linux'
-alias lspackages='dpkg --get-selections'
-alias swapclear='sudo swapoff -a && sudo swapon -a'
-alias full-upgrade='sudo apt-get update && sudo apt-get dist-upgrade -y'
-alias aptud='sudo apt-get update'
-alias aptug='sudo apt-get upgrade'
-
-# files
-alias rmempty='find . -type d -empty -delete'
-alias rmpyc='find . -name "*.pyc" -type f -delete && find . -name __pycache__ -type d -delete'
diff --git a/ansible/roles/users/files/.vimrc b/ansible/roles/users/files/.vimrc
deleted file mode 100644
index 624f4d2ece4e643966fa9bf68f80476d602dd2c3..0000000000000000000000000000000000000000
--- a/ansible/roles/users/files/.vimrc
+++ /dev/null
@@ -1,3 +0,0 @@
-syntax on
-color ron
-set mouse-=a
diff --git a/ansible/roles/users/files/ubicast_support.pub b/ansible/roles/users/files/ubicast_support.pub
deleted file mode 100644
index 523cb03655396114090b55dc5446c32fdc99ab92..0000000000000000000000000000000000000000
--- a/ansible/roles/users/files/ubicast_support.pub
+++ /dev/null
@@ -1 +0,0 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCr2IJlzvLlLxa2PyGhydAlz/PAOj240g8anQmY58X+llirLHIOlkdJXBqf94jAeZkweWpoE41RdmKPUQEz4pCO09dGJaZD4lv1NtDhrhNwTmoOnyFckoPimR6DX6+UMM9wUmfti/ytljbVEVVo/pRacXmczeumDaci3uYTURyliuAR9h3zbIMQ6D2COESXjptWmEwawE9grsTfJi84Q+XIBPvXRHjjceB5hejUMWuf7xc6GH9WIo5REh3qTUvgtxHtIGLQ3ImOzrbCsEhENrBWds0qH0pIuH0lykWGR6pumpPxLzXcVho+e/UJgUrEg5u6/58aizqJTkxFJMa8ciYz support@ubicast
diff --git a/ansible/roles/users/handlers/main.yml b/ansible/roles/users/handlers/main.yml
deleted file mode 100644
index fa217d1484825522405fd5d3b4f2182bb227be19..0000000000000000000000000000000000000000
--- a/ansible/roles/users/handlers/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: restart sshd
-  service:
-    name: sshd
-    state: restarted
-
-...
diff --git a/ansible/roles/users/tasks/main.yml b/ansible/roles/users/tasks/main.yml
deleted file mode 100644
index be416d4075ec47e28b00cd59351ad97d3ec70ce7..0000000000000000000000000000000000000000
--- a/ansible/roles/users/tasks/main.yml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-
-- name: create users groups
-  loop: "{{ users }}"
-  group:
-    name: "{{ item.name }}"
-    state: present
-
-- name: create users
-  loop: "{{ users }}"
-  user:
-    name: "{{ item.name }}"
-    group: "{{ item.name }}"
-    shell: /bin/bash
-    generate_ssh_key: true
-    ssh_key_type: ed25519
-    ssh_key_file: .ssh/id_ed25519
-    append: true
-    groups:
-      - sudo
-    state: present
-
-- name: set users passwords
-  loop: "{{ users }}"
-  user:
-    name: "{{ item.name }}"
-    password: "{{ item.passwd }}"
-    update_password: always
-
-- name: copy .bashrc
-  loop: "{{ users }}"
-  copy:
-    src: .bashrc
-    dest: ~{{ item.name }}/.bashrc
-
-- name: copy .vimrc
-  loop: "{{ users }}"
-  copy:
-    src: .vimrc
-    dest: ~{{ item.name }}/.vimrc
-
-- name: copy .bashrc for root
-  when: users_root_change
-  copy:
-    src: .bashrc
-    dest: ~root/.bashrc
-
-- name: copy .vimrc for root
-  when: users_root_change
-  copy:
-    src: .vimrc
-    dest: ~root/.vimrc
-
-- name: set users allowed ssh keys
-  loop: "{{ users | product(users_ssh_authorized_keys) | list }}"
-  authorized_key:
-    user: "{{ item[0].name }}"
-    key: "{{ item[1] }}"
-
-- name: set root allowed ssh keys
-  loop: "{{ users_ssh_authorized_keys }}"
-  authorized_key:
-    user: root
-    key: "{{ item }}"
-
-- name: sudoers without password
-  copy:
-    dest: /etc/sudoers.d/nopasswd
-    validate: visudo -cf %s
-    content: |
-      %sudo ALL=(ALL) NOPASSWD: ALL
-
-...
diff --git a/doc/bench.md b/doc/bench.md
deleted file mode 100644
index 79f9dff0f2f5e2aa2439894a3f99adeffba4846c..0000000000000000000000000000000000000000
--- a/doc/bench.md
+++ /dev/null
@@ -1,145 +0,0 @@
-# Benchmarking
-
-This documentation explains how to benchmark a MediaServer to determine the maximum number of viewers that can be handled.
-The benchmarking tool is based on Locust.
-
-
-## Prerequisite
-
-- Envsetup and Ansible ([help to get envsetup](/doc/install.md)).
-- SSH access to the machines that will be used to make requests on MediaServer (you can use the workers for that).
-- A video on demand or a live stream to test. To create a live stream for the test, please read the `Prepare a live page` section of this documentation.
-
-Note that the machines used by the benchmarker must have access to UbiCast packages (to get the `ubicast-benchmark` package).
-
-
-## Hardware requirements
-
-To be able to test a server, the benchmarking tool must use systems with enough CPU, RAM and bandwidth.
-
-You can calculate the required CPU and RAM by extrapolating the following data (a rough sizing sketch follows the list):
-
-* A 20-thread CPU @ 3.0 GHz is required for the benchmark worker to test a hatch rate of up to 50.
-* A 20-thread CPU @ 3.0 GHz is required for the benchmark worker to test up to 3000 viewers.
-* 6 GB of RAM is required for the benchmark worker to test up to 3000 viewers.
-* A maximum of 5000 viewers can be tested on a single benchmark worker system (due to the ephemeral ports limit).
-* The benchmark server will use approximately 25% of the CPU allocated to the workers (for example, if the benchmark worker has 4 CPUs, the benchmark server must have 1 CPU at the same speed).
-
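-As a rough sizing sketch (hypothetical, assuming the figures above scale linearly), the worker count can be expressed directly as inventory variables; the variable names below are illustrative, not part of the playbooks:
-
-```
-# hypothetical sizing for a target audience, extrapolated linearly
-bench_target_viewers: 9000
-# one 20-thread worker handles up to 3000 viewers (hard cap: 5000 per system)
-bench_workers_needed: "{{ (bench_target_viewers / 3000) | round(0, 'ceil') | int }}"  # -> 3
-# each worker needs 6 GB of RAM per 3000 viewers
-bench_worker_ram_gb: 6
-```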
-
-## Prepare a live page
-
-If you want to benchmark the video on demand playback, ignore this section.
-
-This section explains how to create a live page in order to get an `oid` for the bench.
-This live page can be used for the bench even if no video stream is sent to the server, as long as `bench_dl_streams` is set to `false`.
-
-Steps:
-
-* Log in to MediaServer
-* Click on the `Add content` button
-* Click on `advanced`
-* Click on `Add a live stream`
-* Click on `For UbiCast or other recorders`
-* Set a title, for example `live bench`
-* Click on `Add live stream`
-* Click on `Edit`
-* Check `published`
-* Set `live status` to `ongoing`
-* Click on `Save changes`
-* Open the `Permissions` tab
-* Set the access to `yes` for `Non authenticated users`
-* Click on `Save changes`
-* Open the `Resources` tab
-* Click on `Get encoder settings`
-* Use the `oid` value from the page URL (for example `l125f58fbb8c655nth76`) as your `bench_oid` setting.
-
-
-## Inventory
-
-Create a new inventory with the following configuration.
-
-
-### Hosts file
-
-This file should be located in `<your inventory>/hosts`.
-
-```
-[bench_server]
-worker1.test.com
-
-[bench_worker]
-worker2.test.com
-```
-
-The system targeted by `bench_server` will host the Locust server. There must be exactly one such system.
-
-The systems targeted by `bench_worker` will host the Locust workers. Multiple workers can be used.
-
-
-### Group vars
-
-This file should be located in `<your inventory>/group_vars/all.yml`.
-
-```
-bench_server: <The benchmark server IP or host name>
-bench_host: <MediaServer URL. Example: "msauto.ubicast.net">
-bench_oid: <Media OID. Example: "l125f58fbb8c655nth76">
-bench_user: <MediaServer account username. Can be empty if the media access is not protected. Example: "test">
-bench_password: <MediaServer account password. Can be empty if the media access is not protected. Example: "pwd">
-bench_host_api_key: <MediaServer master API key>
-bench_dl_streams: <Download video streams or not ("true" or "false", "false" is default)>
-```
-
-
-## Install the benchmarker
-
-```
-# ansible-playbook -i inventories/<your inventory> playbooks/bench.yml
-```
-This playbook will install everything you need: the benchmark server and the benchmark workers.
-
-
-## Start the benchmarker
-
-Disable the anti-DDoS limits in the Nginx configuration, if any (usually in `/etc/nginx/conf.d/limits.conf`).
-
-If the server hosting the bench server is using a firewall, disable it to be able to access the Locust interface. For example, if `ferm` is installed: `systemctl stop ferm`.
-
-If you want to test with a video stream, you can start one using the Docker container:
-`cd /usr/share/ms-testing-suite && make run_live`
-
-Or directly by hand:
-`/usr/share/ms-testing-suite/ms_live_streamer.py /etc/mediaserver/bench-streaming.conf`
-
-Open `http://<bench_server>:8089` in your browser. You might have to use an SSH tunnel to access this port.
-
-Set the number of viewers and the hatch rate, then start the bench. The hatch rate is usually 0.25% of the viewer count per second, so that about 30% of the total count is reached within 2 minutes (for example, 3000 viewers gives a hatch rate of 7.5).
-
-Watch `/var/log/error.log` and `/var/log/access.log` (warning: the access log is buffered by default; buffering can be disabled in `nginx.conf`).
-
-
-## Restart Locust server & workers
-
-If you need to restart both the server and the workers, you can launch the playbook with the tag `prepare-bench`:
-```
-# ansible-playbook -i inventories/<your inventory> playbooks/bench.yml -t prepare-bench
-```
-
-
-## /!\ EXPERIMENTAL: Elasticsearch + Kibana + Metricbeat
-
-Kibana and Metricbeat allow you to monitor and display statistics about the infrastructure. This feature is experimental.
-
-First you need to add a new group to your hosts file:
-```
-[elastic]
-elastic1.test.com
-```
-
-Then launch the `bench-monitoring` playbook, which installs the Elastic suite on the elastic host and Metricbeat on the MediaServer and PostgreSQL servers.
-
-```
-# ansible-playbook -i inventories/<your inventory> playbooks/bench-monitoring.yml
-```
-
-Your Kibana instance will be accessible at `http://elastic1.test.com:5601`.
diff --git a/doc/config.md b/doc/config.md
deleted file mode 100644
index 16d8af3703c170413c172135b5f05973e0933ac0..0000000000000000000000000000000000000000
--- a/doc/config.md
+++ /dev/null
@@ -1,82 +0,0 @@
-# Configuration
-
-## SSH
-
-The Ansible deployment will be done through SSH, so you must be able to connect to all the involved hosts by using SSH public key authentication.
-
-To create a key pair:
-
-```sh
-ssh-keygen -t ed25519
-```
-
-Copy the public key to the root account of all involved hosts:
-
-```
-ssh-copy-id -i ~/.ssh/id_ed25519.pub root@<SERVER-X>
-```
-
-You can also add the content of `~/.ssh/id_ed25519.pub` to `~/.ssh/authorized_keys` of the `root` account of destination hosts.
-
-## Inventory
-
-Move to the ansible directory:
-
-```
-cd ./ansible
-```
-
-Make a copy of the `example` inventory and customize it with the customer's information as needed.
-
-```sh
-# for standard deployment
-cp -r inventories/example inventories/customer
-
-# for HA deployment
-cp -r inventories/example-ha inventories/customer
-```
-
-There are also inventories for local deployment; you can use one of these lines:
-```sh
-cp -r inventories/local-full        inventories/customer
-cp -r inventories/local-mediaserver inventories/customer
-cp -r inventories/local-mediaworker inventories/customer
-```
-
-### Hosts and Groups
-
-Edit `inventories/customer/hosts` to match your infrastructure.
-
-### Variables
-
-If you use a local-\* inventory, copy `inventories/customer/host_vars/localhost.dist.yml` to `inventories/customer/host_vars/localhost.yml`.
-
-You **must at least** configure:
-- `skyreach_system_key` values in `inventories/customer/host_vars/<host>.yml`
-
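-For example (hypothetical host name; the key itself is provided by UbiCast and stays a placeholder here):
-
-```yaml
-# inventories/customer/host_vars/mymediaserver.yml
-skyreach_system_key: <YOUR-SYSTEM-KEY>
-```
-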
-If you want to set/override a variable for:
-- all hosts: `inventories/customer/group_vars/all.yml`
-- a group: `inventories/customer/group_vars/<group>.yml`
-- a host: `inventories/customer/host_vars/<host>.yml`
-
-If the hosts are behind a proxy, you have to set the proxy settings in the inventory variables, in `inventories/customer/group_vars/all.yml`:
-
-```yaml
-proxy_http: http://proxy.my-customer.net:3128
-proxy_https: http://proxy.my-customer.net:3128
-```
-
-### Verify
-
-Make sure Ansible can connect to all the hosts:
-
-```sh
-ansible -i inventories/customer -m ping all
-
-mymediaserver | SUCCESS => {
-    "changed": false,
-    "ping": "pong"
-}
-[...]
-```
-
diff --git a/doc/contrib.md b/doc/contrib.md
deleted file mode 100644
index 2a788bfb50d709a7e0f7790740cfcc839d0921d9..0000000000000000000000000000000000000000
--- a/doc/contrib.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Contributing guide
-
-## External software
-
-Please read each tool's documentation and the associated best practices.
-
-- [Ansible documentation](https://docs.ansible.com/ansible/latest/)
-- [Molecule documentation](https://molecule.readthedocs.io/en/latest/)
-- [TestInfra documentation](https://testinfra.readthedocs.io/en/latest/)
-
-## Development environment
-
-Install all [required tools](requirements.md) and then execute:
-
-```sh
-cd /root/envsetup
-make requirements-dev
-```
-
-Then install [docker](https://docs.docker.com/engine/install/debian/) (it is used to deploy components in containers).
-
-## Test
-
-To check that your "code" is compliant:
-
-```sh
-make lint
-```
-
-To run Ansible tests:
-
-```sh
-# run default test
-make test
-
-# show debug logs
-DEBUG=1 make test
-
-# do not destroy tests containers
-KEEP=1 make test
-```
-
-If you add/modify a role, please write relevant tests in `molecule/default/tests`.
-
diff --git a/doc/deploy.md b/doc/deploy.md
deleted file mode 100644
index 2b896a8bfcdbf3f7941a61b4335fcde6145ef02f..0000000000000000000000000000000000000000
--- a/doc/deploy.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Deployment
-
-Move to the envsetup root directory:
-
-```sh
-cd /root/envsetup
-```
-
-To deploy all components, execute:
-
-```sh
-make deploy i=ansible/inventories/customer
-```
-
-If you want to limit the deployment to a specific part, you can add a `tag`:
-
-```sh
-make deploy i=ansible/inventories/customer l=<tag>
-```
-
-The available tags are:
-
-| Component     | Tag            |
-|---------------|----------------|
-| mediaserver   | `server`       |
-| mediaworker   | `worker`       |
-| mirismanager  | `manager`      |
-| mediaimport   | `import`       |
-| mediavault    | `vault`        |
-| celerity      | `celerity`     |
-| ...           | ...            |
-
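-For example, `make deploy i=ansible/inventories/customer l=server` deploys only the MediaServer component.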
-
-To view all available tags, run:
-```
-grep 'tags:' ./playbooks/site.yml | grep -v always | sed 's,.*tags: ,,'
-```
-
diff --git a/doc/requirements.md b/doc/requirements.md
deleted file mode 100644
index 6679deee8d40862d85d4c1153eca8773781fbbdf..0000000000000000000000000000000000000000
--- a/doc/requirements.md
+++ /dev/null
@@ -1,99 +0,0 @@
-# Prepare deployment environment
-
-This installation has only been tested on Linux, but it should work (with some adjustments) on macOS or Windows WSL.
-There are two installation options:
-- setup tools
-- docker image
-
-## Setup tools
-
-This installation is detailed for a Debian server. All the commands below are executed with **root rights**.
-
-### Install tools
-
-```
-apt update 
-apt upgrade -y
-apt install -y vim git make gcc python3-dev
-```
-
-### Repository
-
-Clone this repository on your computer:
-
-```sh
-cd /root
-git clone https://git.ubicast.net/mediaserver/envsetup.git
-cd envsetup/
-```
-
-### Python and ansible
-
-
-To automatically create a temporary virtualenv:
-```sh
-make venv
-make install
-make install-galaxy
-```
-
-If you want a permanent venv, manually create a virtual environment with [Python's venv](https://docs.python.org/3/library/venv.html) or with the [virtualenv](https://virtualenv.pypa.io/en/stable/) package.
-
-```sh
-# create the venv
-apt-get install -y python3-venv
-python3 -m venv .venv
-
-# activate the venv
-source .venv/bin/activate
-
-# install ansible requirements
-python3 -m pip install -U pip wheel
-python3 -m pip install -r ansible/requirements.txt
-
-# install galaxy requirements
-ansible-galaxy install -r ansible/requirements.yml
-
-```
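-
-To confirm the virtual environment is active, the `ansible` executable should resolve inside `.venv` (a sketch; the exact path may differ):
-
-```sh
-which ansible
-# /root/envsetup/.venv/bin/ansible
-```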
-
-## Docker
-
-If you do not want to bother with installing the tools, you can use [Docker](https://docs.docker.com/install/).
-
-```sh
-docker run \
-  `# run an interactive pseudo-TTY` \
-  -it \
-  `# remove the container once you leave it` \
-  --rm \
-  `# share the current directory` \
-  -v $(pwd):/workspace \
-  `# share your SSH configuration` \
-  -v $HOME/.ssh:/home/code/.ssh:ro \
-  `# share your SSH agent` \
-  -v $SSH_AUTH_SOCK:/ssh-agent:ro \
-  `# let container know where is mapped the SSH agent` \
-  -e SSH_AUTH_SOCK=/ssh-agent \
-  `# container image to use` \
-  registry.ubicast.net/mediaserver/envsetup \
-  `# executable to run` \
-  bash
-```
-
-Make sure to share your SSH configuration with the Docker container; this may require adapting the example command.
-
-## Testing
-
-To make sure Ansible is properly installed, run this command:
-
-```sh
-# verify ansible version
-ansible --version
-
-ansible 2.9.18
-  config file = None
-  configured module search path = ['/home/ubicast/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
-  ansible python module location = /home/ubicast/.local/lib/python3.7/site-packages/ansible
-  executable location = /home/ubicast/.local/bin/ansible
-  python version = 3.7.3 (default, Jan 22 2021, 20:04:44) [GCC 8.3.0]
-```
diff --git a/global-conf.sh b/global-conf.sh
deleted file mode 100644
index f2e56c3f4938a6c41a0b19a75c74faa822190544..0000000000000000000000000000000000000000
--- a/global-conf.sh
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/bin/bash
-# DO NOT EDIT THIS FILE!
-# Put your local configuration in conf.sh
-
-# Default configuration values
-# ----------------------------
-ENVSETUP_BRANCH='stable'
-
-# -- System --
-# Skyreach host on which the system looks for packages
-SKYREACH_HOST='mirismanager.ubicast.eu'
-SKYREACH_SSL_VERIFY='1'
-# ⚠ SKYREACH_API_KEY is used in ubicast-skyreach package at first installation
-SKYREACH_API_KEY=''
-SKYREACH_APT_TOKEN=''
-SKYREACH_ACTIVATION_KEY=''
-# NTP
-NTP_SERVER='0.pool.ntp.org,1.pool.ntp.org,2.pool.ntp.org,3.pool.ntp.org'
-# SSH
-SSH_ALLOWED_KEYS=''
-# ⚠ SSH_MAINTENANCE_PORT is used in ubicast-monitor package postinst
-SSH_MAINTENANCE_PORT=''
-# SSL certificate
-SSL_CERTIFICATE='/etc/ssl/certs/ssl-cert-snakeoil.pem'
-SSL_CERTIFICATE_KEY='/etc/ssl/private/ssl-cert-snakeoil.key'
-# APT sources
-APT_CACHE_URL=''
-# Locale & TimeZone
-LOCALE='en_US.UTF-8'
-TIMEZONE='Etc/UTC'
-
-# -- Shell --
-# ubicast shell account
-SHELL_UBICAST_PWD=''
-# customer shell account
-SHELL_ADMIN_PWD=''
-
-# -- Emails --
-EMAIL_SMTP_SERVER=''
-EMAIL_SMTP_USER=''
-EMAIL_SMTP_PWD=''
-EMAIL_SENDER=''
-# separate emails with commas in EMAIL_ADMINS
-EMAIL_ADMINS=''
-PREMIUM_SUPPORT='0'
-
-# -- Wowza --
-WOWZA_LIVE_PWD='test'
-WOWZA_MANAGER_PWD='test'
-WOWZA_LICENSE=''
-
-# -- MediaServer --
-# ⚠ MS_SERVER_NAME is used in ubicast-mediaserver package when adding an instance
-MS_SERVER_NAME='mediaserver'
-# ⚠ MS_ID is used in ubicast-mediaserver package when adding an instance
-MS_ID='fX_msuser'
-# ⚠ MS_API_KEY is used in ubicast-mediaserver package when adding an instance
-# API key looks like: s00pN-aRseu-dnfMq-678CV-9nS9E
-MS_API_KEY=
-# ⚠ MS_SECRET is used in ubicast-mediaserver package when adding an instance
-MS_SECRET='secret'
-# ⚠ MS_SUPERUSER_PWD is used in ubicast-mediaserver package when adding an instance
-MS_SUPERUSER_PWD='test'
-# ⚠ MS_ADMIN_PWD is used in ubicast-mediaserver package when adding an instance
-MS_ADMIN_PWD='test'
-
-# -- Monitor --
-# ⚠ MONITOR_SERVER_NAME is used in ubicast-monitor package postinst
-MONITOR_SERVER_NAME='monitor'
-# ⚠ MONITOR_SUPERUSER_PWD is used in ubicast-monitor package postinst
-MONITOR_SUPERUSER_PWD=''
-# ⚠ MONITOR_ADMIN_PWD is used in ubicast-monitor package postinst
-MONITOR_ADMIN_PWD=''
-# ⚠ MONITOR_SHELL_PWD is used in ubicast-monitor package postinst
-MONITOR_SHELL_PWD=''
-
-# -- Miris Manager --
-# ⚠ CM_SERVER_NAME is used in ubicast-skyreach package postinst and in ubicast-mediaserver package when adding an instance
-CM_SERVER_NAME='mirismanager'
-# ⚠ CM_SUPERUSER_PWD is used in ubicast-skyreach package at first installation
-CM_SUPERUSER_PWD='test'
-# ⚠ CM_ADMIN_PWD is used in ubicast-skyreach package at first installation
-CM_ADMIN_PWD='test'
-
-# -- Cache server --
-CACHE_SERVER_NAME=''
-CACHE_SOURCE=''
-
-# -- Database --
-# ⚠ DB_HOST is used in ubicast-mediaserver and ubicast-skyreach package postinst
-DB_HOST='127.0.0.1'
-# ⚠ DB_PORT is used in ubicast-mediaserver and ubicast-skyreach package postinst
-DB_PORT='5432'
-# ⚠ DB_PG_ROOT_PWD is used in ubicast-mediaserver and ubicast-skyreach package postinst
-# if no password is set, it will not be changed or set
-DB_PG_ROOT_PWD=''
-
-# -- Celerity --
-# ⚠ CELERITY_SIGNING_KEY is used in ubicast-mediaserver package when adding an instance
-CELERITY_SIGNING_KEY='test'
-# ⚠ CELERITY_SERVER is used in ubicast-mediaserver package when adding an instance
-CELERITY_SERVER='127.0.0.1'
-
-# -- Network configuration --
-# applied with client configuration step
-NETWORK_IP=''
-NETWORK_DNS=''
-NETWORK_MASK=''
-NETWORK_GATEWAY=''
-# define this if IP is NATed
-NETWORK_IP_NAT=''
-
-# -- Proxy configuration --
-PROXY_HTTP=''
-PROXY_HTTPS=''
-# PROXY_EXCLUDE is used in no_proxy env (a comma-separated list of domains)
-PROXY_EXCLUDE=''
-
-# -- Fail2ban specific settings --
-FAIL2BAN_ENABLED='1'
-FAIL2BAN_SEND_EMAIL='0'
-FAIL2BAN_DEST_EMAIL=''
-FAIL2BAN_MAXRETRY='6'
-FAIL2BAN_BANTIME='30'
-
-# -- tmbackup.sh --
-BACKUP_SERVER=''
-# comma-separated list
-LOCAL_BACKUP_FOLDERS=''
-
-# -- MediaImport --
-MEDIAIMPORT_USER=''
-MEDIAIMPORT_PASSWD=''
-
-# -- Tester config --
-# enable systemd timer to execute tests by default
-TESTER_ENABLE_SYSTEMD_TIMER='1'
-# separate values with commas
-TESTER_IGNORED_TESTS=''
-# separate values with commas
-TESTER_MS_INSTANCES=''
-# maximum number of instances to test
-# TESTER_MAX_INSTANCES is ignored if TESTER_MS_INSTANCES is set
-TESTER_MAX_INSTANCES=''
-# ignore routing rules in VOD/live/cache tests (enable this if the server cannot reach cache servers)
-TESTER_IGNORE_ROUTING_RULES='0'
-# ignore these dns names
-TESTER_DNS_RESOLUTION_IGNORED=''
-# ignore these nginx vhosts
-TESTER_VHOST_RESOLUTION_IGNORED=''
-# encode report attachment to base64
-TESTER_BASE64_ATTACH='0'
-
-# -- Constants --
-CYAN='\033[0;36m'
-PURPLE='\033[0;35m'
-BLUE='\033[0;34m'
-YELLOW='\033[0;33m'
-GREEN='\033[0;32m'
-RED='\033[0;31m'
-NC='\033[0;0m'
-
-# Upstream configuration override
-# -------------------------------
-# (file generated by Panel / Skyreach)
-if [ -f "/root/envsetup/auto-generated-conf.sh" ]; then
-	source "/root/envsetup/auto-generated-conf.sh"
-fi
-
-# Local configuration override
-# ----------------------------
-if [ -f "/root/envsetup/conf.sh" ]; then
-	source "/root/envsetup/conf.sh"
-fi
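-
-# Example: a minimal local override in conf.sh could look like this
-# (hypothetical placeholder values, adapt to your setup):
-#
-#   MS_SERVER_NAME='mediaserver.example.com'
-#   EMAIL_SMTP_SERVER='smtp.example.com'
-#   PROXY_HTTP='http://proxy.example.com:3128'
-#   PROXY_HTTPS='http://proxy.example.com:3128'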
-
diff --git a/tests/__init__.py b/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/tests/pkgs_envsetup.py b/tests/pkgs_envsetup.py
deleted file mode 100755
index 35d5576c6577df653c3e74530051a5312e3776a6..0000000000000000000000000000000000000000
--- a/tests/pkgs_envsetup.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python3
-
-from subprocess import run, DEVNULL, PIPE, STDOUT
-
-PACKAGES = [
-    "bsd-mailx",  # for "mail" command used in tester
-    "python3-apt",  # for: test_apt
-    "python3-defusedxml",  # for: test_wowza
-    "python3-dnspython",  # for: test_caches
-    "python3-openssl",  # for: test_ssl
-    "python3-psutil",  # for: test_wowza
-    "python3-packaging",  # for: test_wowza
-    "python3-lxml",  # for: test_wowza
-    "python3-psycopg2",  # for: test_postgresql
-    "python3-pydbus",  # for: test_dns_records
-    "python3-requests",  # for: test_nginx_status, test_nginx_vhosts, test_ssl, test_apt_proxy, test_ubicast_packages_access
-    "python3-spf",  # for: test_email
-]
-
-
-def main():
-    for pkg in PACKAGES:
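-        # "dpkg -s" returns a non-zero code when the package is not installed;
-        # in that case, install it non-interactively with apt-get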
-        if (
-            run(
-                ["/usr/bin/dpkg", "-s", pkg],
-                shell=False,
-                stdout=DEVNULL,
-                stderr=DEVNULL,
-                stdin=PIPE,
-            ).returncode
-            != 0
-        ):
-            result = run(
-                ["/usr/bin/apt-get", "install", "-q", "-y", pkg],
-                shell=False,
-                stdout=PIPE,
-                stderr=STDOUT,
-                stdin=PIPE,
-                env={
-                    "DEBIAN_FRONTEND": "noninteractive",
-                    "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-                },
-            )
-            if result.returncode == 0:
-                print("{} install succeeded".format(pkg))
-            else:
-                print("{} install failed".format(pkg))
-                print(result.stdout.decode("utf-8"))
-        else:
-            print("{} already installed".format(pkg))
-
-
-if __name__ == "__main__":
-    main()
diff --git a/tests/scripts/__init__.py b/tests/scripts/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/tests/scripts/test_apt.py b/tests/scripts/test_apt.py
deleted file mode 100755
index 3c81d6ce3548ec89dcb9a69ebcca34a65df259c4..0000000000000000000000000000000000000000
--- a/tests/scripts/test_apt.py
+++ /dev/null
@@ -1,174 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Criticality: Normal
-Check updates, apt state and unattended upgrade config.
-"""
-
-import apt as apt_mod
-import apt_pkg
-import os
-from pathlib import Path
-import requests
-import sys
-
-try:
-    from requests.packages.urllib3.exceptions import InsecureRequestWarning
-
-    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
-except ImportError:
-    requests.packages.urllib3.disable_warnings()
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-from utilities import logging as lg  # noqa: E402
-from utilities.apt import Apt  # noqa: E402
-from utilities.os import line_in_file  # noqa: E402
-
-
-def main():
-    warnings = 0
-    errors = 0
-
-    os.environ["DEBIAN_FRONTEND"] = "noninteractive"
-    os.environ["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-
-    lg.log("Checking APT state:")
-
-    try:
-        apt = Apt(update=True)
-    except apt_mod.cache.FetchFailedException as apt_cache_err:
-        if str(apt_cache_err).endswith("no longer has a Release file."):
-            lg.error("system out of support")
-            errors += 1
-        else:
-            lg.error("Apt error: {}".format(apt_cache_err))
-            errors += 1
-        apt = Apt()
-
-    # detect pending upgrade
-    upgradable = len(apt.upgradable_packages)
-    if upgradable:
-        lg.info("there is {} upgrade pending".format(upgradable))
-    else:
-        lg.success("system up-to-date")
-
-    # detect pending auto-remove
-    removable = len(apt.removable_packages)
-    if removable:
-        lg.info("there is {} auto-removable packages".format(removable))
-        for pkg in apt.removable_packages:
-            if "ubicast" in pkg:
-                lg.error("the ubicast package '%s' can be auto-removed!" % pkg)
-                errors += 1
-    else:
-        lg.success("system clean")
-
-    # detect rc state
-    purgeable = len(apt.purgeable_packages)
-    if purgeable:
-        lg.info("there is {} packages in rc state".format(purgeable))
-
-    # installation
-    try:
-        installed = apt.install("sl")
-    except apt_pkg.Error as apt_install_err:
-        lg.warning(apt_install_err)
-        warnings += 1
-    else:
-        if installed:
-            lg.success("installation successful")
-            apt.remove("sl")
-        else:
-            lg.error("installation failed")
-            errors += 1
-
-    # unattended-upgrades
-    if (
-        Path("/etc/apt/apt.conf.d/20auto-upgrades").exists()
-        and Path("/etc/apt/apt.conf.d/50unattended-upgrades").exists()
-        and line_in_file(
-            r'^APT::Periodic::Update-Package-Lists "1";$',
-            "/etc/apt/apt.conf.d/20auto-upgrades",
-        )
-        and line_in_file(
-            r'^APT::Periodic::Unattended-Upgrade "1";$',
-            "/etc/apt/apt.conf.d/20auto-upgrades",
-        )
-        and line_in_file(
-            r"^Unattended-Upgrade::(?:(?:Allowed-Origins)|(?:Origins-Pattern)) {$",
-            "/etc/apt/apt.conf.d/50unattended-upgrades",
-        )
-    ):
-        lg.success("automatic security updates enabled")
-    else:
-        lg.warning("automatic security updates not enabled")
-        warnings += 1
-
-    # check ubicast repository presence
-    ubicast_repo = Path("/etc/apt/sources.list.d/skyreach.list").exists()
-    ubicast_package = any(
-        apt.is_installed(pkg)
-        for pkg in (
-            "ubicast-mediaserver",
-            "ubicast-monitor",
-            "ubicast-skyreach",
-            "ubicast-skyreach-erp",
-            "celerity-server",
-            "celerity-utils",
-            "celerity-workers",
-        )
-    )
-    if ubicast_repo and ubicast_package:
-        lg.success("ubicast repository present")
-    elif not ubicast_repo and ubicast_package:
-        lg.warning("ubicast repository missing")
-        warnings += 1
-    elif not ubicast_repo and not ubicast_package:
-        lg.info("no ubicast repository and service installed")
-    else:
-        lg.info("no ubicast service installed")
-
-    if ubicast_repo:
-        # check ubicast repository url
-        regexp_repo = (
-            r"^deb (http[s]?://[A-Za-z0-9\.\-\_]+) packaging/apt/([A-Za-z0-9\.\-\_]+)/$"
-        )
-        repo_url_match = line_in_file(regexp_repo, "/etc/apt/sources.list.d/skyreach.list")
-        if repo_url_match:
-            url, apt_token = repo_url_match.groups()
-            lg.success("url: {}, token: {}[...]".format(url, apt_token[:8]))
-        else:
-            url, apt_token = None, None
-            lg.error("incorrect ubicast repository url or token")
-            errors += 1
-
-        # check server availability
-        if url:
-            server_response = requests.get(url, verify=False, timeout=60)
-            if server_response.ok:
-                lg.success("request to {} succeeded".format(url))
-            else:
-                lg.error("request to {} failed: {}".format(url, server_response.text))
-                errors += 1
-
-        # check repository availability
-        if url and apt_token:
-            apt_url = "{}/packaging/apt/{}/Packages".format(url, apt_token)
-            repo_response = requests.get(apt_url, verify=False, timeout=60)
-            apt_url = "{}/packaging/apt/{}[...]/Packages".format(url, apt_token[:8])
-            if repo_response.ok:
-                lg.success("request to {} succeeded".format(apt_url))
-            else:
-                lg.error("request to {} failed: {}".format(apt_url, repo_response.text))
-                errors += 1
-
-    if errors:
-        return 1
-    elif warnings:
-        return 3
-    else:
-        return 0
-
-
-if __name__ == "__main__":
-    exit(main())
diff --git a/tests/scripts/test_apt_proxy.py b/tests/scripts/test_apt_proxy.py
deleted file mode 100755
index 81186b7892482702bb33147207e29cb85058e7fe..0000000000000000000000000000000000000000
--- a/tests/scripts/test_apt_proxy.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2017, Florent Thiery
-'''
-Criticality: Normal
-Checks that the packages mirror works for capture systems
-'''
-from pathlib import Path
-import os
-import requests
-import sys
-
-try:
-    from requests.packages.urllib3.exceptions import InsecureRequestWarning
-    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
-except ImportError:
-    requests.packages.urllib3.disable_warnings()
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-# pylint: disable=wrong-import-position
-from utilities import logging as lg  # noqa: E402
-
-
-def main():
-    # get Miris Manager domain
-    path = '/etc/nginx/sites-enabled/skyreach.conf'
-    if not os.path.exists(path):
-        lg.log('Server not running Miris Manager, skipping test')
-        return 2
-    domain = None
-    with open(path, 'r') as fo:
-        for line in fo:
-            if line.strip().startswith('server_name'):
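-                # keep the first name of the "server_name" directive, dropping the trailing ";"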
-                domain = line.strip()[len('server_name'):].strip(' \t;').split(' ')[0]
-    if not domain:
-        lg.error('Miris Manager domain not found in Nginx configuration.')
-        return 1
-
-    try:
-        url = 'https://%s/mirismanager.ubicast.eu/old-releases.ubuntu.com/ubuntu/dists/lucid/Release.gpg' % domain
-        lg.log('Checking url certificate "%s"...' % url)
-        response = requests.get(url, verify=False).text
-        if 'BEGIN PGP SIGNATURE' not in response:
-            lg.error('Unexpected content:\n%s' % response)
-            return 1
-        else:
-            lg.success('Test OK.')
-    except Exception as e:
-        lg.error('Package mirror not working: %s' % e)
-        return 1
-
-    return 0
-
-
-if __name__ == '__main__':
-    code = main()
-    sys.exit(code)
diff --git a/tests/scripts/test_backup.py b/tests/scripts/test_backup.py
deleted file mode 100755
index 0edf733a959357c97900210613b9cf906d2402cb..0000000000000000000000000000000000000000
--- a/tests/scripts/test_backup.py
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2017, Florent Thiery
-
-"""
-Criticality: Normal
-Checks that the server backups are not older than a day.
-"""
-
-from datetime import datetime
-from pathlib import Path
-import os
-import socket
-import subprocess
-import sys
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-# pylint: disable=wrong-import-position
-from utilities import logging as lg  # noqa: E402
-from utilities.config import load_conf  # noqa: E402
-
-MAX_AGE = 2
-
-
-def test_ssh(host: str) -> bool:
-    """Check that MediaVault server can reached.
-
-    :param ip: MediaVault hostname or IP address
-    :type ip: str
-    :return: Wether it can connect to server or not
-    :rtype: bool
-    """
-
-    lg.log("Checking connection to MediaVault ({}):".format(host))
-
-    cmd = "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no {} :".format(
-        host
-    )
-    try:
-        subprocess.check_output(cmd, shell=True, timeout=5)
-        lg.success("logged in successfully")
-    except subprocess.CalledProcessError:
-        lg.error("failed to login using SSH public key authentication")
-        return False
-    except subprocess.TimeoutExpired:
-        lg.error("timeout")
-        try:
-            cmd_port = "nc -z -w2 {} 22".format(host)
-            subprocess.check_output(cmd_port, shell=True, timeout=5)
-        except subprocess.CalledProcessError:
-            lg.error("failed to bind SSH port")
-            try:
-                cmd_ping = "ping -c2 -w4 {}".format(host)
-                subprocess.check_output(cmd_ping, shell=True, timeout=15)
-            except subprocess.CalledProcessError:
-                lg.error("failed to ping host")
-        return False
-
-    return True
-
-
-def test_last_backup_is_recent(server: str) -> bool:
-    """Check that the latest backup is recent enough.
-
-    :param server: MediaVault hostname or IP address
-    :type server: str
-    :return: Whether the latest backup is recent enough
-    :rtype: bool
-    """
-
-    lg.log("Checking latest backup age:")
-
-    client = socket.gethostname()
-
-    # set backup potential directories path
-    # TODO: add "/backup/{}/data/" and "/backup/{}/etc/"
-    paths = [
-        "/backup/{}/home/".format(client),
-        "/backup/data/",
-        "/backup/nas*/",
-        "/backup/ms*/",
-    ]
-
-    # test each possible path
-    for path in paths:
-        # build and run commands
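-        # the find command resolves the "latest" symlink to the timestamped
-        # snapshot directory it points to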
-        find = "find -L {} {} {} {} {}".format(
-            path,
-            "-maxdepth 1",
-            "-xtype l",
-            "-name latest",
-            "-exec realpath {} +",  # must always be last arg
-        )
-        cmd = "ssh -o StrictHostKeyChecking=no {} '{}'".format(server, find)
-        status, out = subprocess.getstatusoutput(cmd)
-        # TODO: check all backups found instead of only the last of the list
-        #       maybe do a `split()` and `if len(lst) > 1`…
-        if status == 0 and out != "":
-            # get directory name and convert to datetime
-            last = out.strip().split("/")[-1]
-            date = datetime.strptime(last, "%Y-%m-%d-%H%M%S")
-            # check age
-            if (datetime.now() - date).days > MAX_AGE:
-                lg.error("older than {} days: {}".format(MAX_AGE, date))
-                return False
-            lg.success("less than {} days old".format(MAX_AGE))
-            return True
-
-    # if we reach here, nothing has been found
-    lg.error("latest backup directory not found")
-
-    return False
-
-
-def check_backup_is_incremental(path: str) -> bool:
-    """Check that backup is incremental.
-
-    :param path: Backup folder path
-    :type path: str
-    :return: Whether the backup is incremental or not
-    :rtype: bool
-    """
-
-    all_ok = True
-    for directory in os.listdir(path):
-        files_count = 0
-        folder_path = os.path.join(path, directory)
-        if os.path.isdir(folder_path):
-            files_count = len(os.listdir(folder_path))
-            if files_count == 0:
-                lg.error("folder {} is empty".format(folder_path))
-                os.rmdir(folder_path)
-                all_ok = False
-    if all_ok:
-        lg.success("no incrementation issue found")
-
-    return all_ok
-
-
-def check_local_backup(backup_folder: Path) -> bool:
-    """Check that local backup is in a correct state.
-
-    :param backup_folder: Local backup folder path
-    :type backup_folder: Path
-    :return: Whether the local backup is correct or not
-    :rtype: bool
-    """
-
-    lg.log("Checking {}:".format(backup_folder))
-
-    all_ok = True
-    latest = backup_folder / "latest"
-    if backup_folder.name.endswith(".disabled"):
-        # skip if disabled
-        lg.info("disabled")
-    elif latest.exists():
-        # resolve symbolic link
-        latest = latest.resolve()
-        latest_date = latest.name
-        date = datetime.strptime(latest_date, "%Y-%m-%d-%H%M%S")
-        now = datetime.now()
-        diff_seconds = (now - date).total_seconds()
-        if diff_seconds > MAX_AGE * 24 * 3600:
-            lg.error("older than {} days: {}".format(MAX_AGE, date))
-            all_ok = False
-        else:
-            lg.success("less than {} days old".format(MAX_AGE))
-        if not check_backup_is_incremental(backup_folder):
-            all_ok = False
-    elif (backup_folder / "backup.inprogress").exists():
-        lg.warning("still running")
-        all_ok = False
-    else:
-        lg.error("not working")
-        all_ok = False
-
-    return all_ok
-
-
-def check_local_backups(paths: str) -> bool:
-    """Run check for all local backup paths.
-
-    :param paths: Comma separated list of backup paths
-    :type paths: str
-    :return: Whether all backups are good or not
-    :rtype: bool
-    """
-
-    all_ok = True
-    folders = paths.split(",")
-    for folder in folders:
-        backup_root = Path(folder)
-        subfolders = {x for x in backup_root.iterdir() if x.is_dir()}
-        processed = set()
-        for backup_marker in backup_root.glob("*/backup.marker"):
-            backup_folder = backup_marker.parent
-            lg.log("Checking local backups in %s" % backup_folder)
-            all_ok = min(check_local_backup(backup_folder), all_ok)
-            processed.add(backup_folder)
-        unprocessed = [str(x) for x in subfolders - processed]
-        if len(unprocessed):
-            lg.warning("Warning, found non-mediavault folders under : %s" % unprocessed)
-
-    return all_ok
-
-
-def main():
-    """Run all checks and exits with corresponding exit code."""
-
-    conf = load_conf()
-    backup_server = conf.get("BACKUP_SERVER")
-    local_backup_folders = conf.get("LOCAL_BACKUP_FOLDERS")
-    if backup_server:
-        if not test_ssh(backup_server):
-            exit(1)
-        else:
-            if not test_last_backup_is_recent(backup_server):
-                exit(1)
-            else:
-                exit(0)
-    elif local_backup_folders:
-        exit(not check_local_backups(local_backup_folders))
-    else:
-        lg.log("No backup_server defined in config, untestable")
-        exit(2)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/tests/scripts/test_dns_records.py b/tests/scripts/test_dns_records.py
deleted file mode 100755
index d8ab2af2f592950d5d1ee379a21b96a9585f7ec5..0000000000000000000000000000000000000000
--- a/tests/scripts/test_dns_records.py
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Criticality: Normal
-Checks that the DNS records provided by the customer's servers are correctly set
-"""
-
-from pathlib import Path
-import re
-import subprocess
-import sys
-import dns.resolver
-
-try:
-    import pydbus
-except ImportError:
-    exit(2)
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-from utilities import logging as lg  # noqa: E402
-from utilities.config import load_conf  # noqa: E402
-from utilities.os import supported_platform  # noqa: E402
-
-
-def get_dns_servers() -> set:
-    servers = list()
-    ip_pattern = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
-
-    # dbus method
-    try:
-        bus = pydbus.SystemBus()
-        bus_client = bus.get("org.freedesktop.resolve1", "/org/freedesktop/resolve1")
-        servers.extend(
-            [".".join(map(str, dns[2])) for dns in bus_client.DNS if dns[1] == 2]
-        )  # IPv4
-        servers.extend(
-            [":".join(map(str, dns[2])) for dns in bus_client.DNS if dns[1] == 10]
-        )  # IPv6
-    except Exception:
-        pass
-
-    # network-manager method
-    if not len(servers) and subprocess.getstatusoutput("command -v nmcli")[0] == 0:
-        _, output = subprocess.getstatusoutput(
-            "nmcli -f all device show | grep IP4.DNS | awk '{ print $2 }'"
-        )
-        servers = [line for line in output.split("\n") if ip_pattern.match(line)]
-
-    # resolvconf method
-    if not len(servers) and Path("/etc/resolv.conf").exists():
-        with open("/etc/resolv.conf", "r") as fo:
-            content = fo.read().strip()
-            servers = [
-                line.split()[1] for line in content.split("\n") if line.startswith("nameserver")
-            ]
-
-    # systemd-resolved method
-    if "127.0.0.53" in servers:
-        servers.remove("127.0.0.53")
-        _, output = subprocess.getstatusoutput("systemd-resolve --status")
-        lines = [line.strip() for line in output.split("\n")]
-        dns_line = False
-        for line in lines:
-            if line.startswith("DNS Servers:"):
-                dns_line = True
-                servers.append(line.split()[-1])
-            elif dns_line and ip_pattern.match(line):
-                servers.append(line)
-            else:
-                dns_line = False
-
-    return set(servers)
-
-
-def get_result(output: str) -> str:
-    for line in output.split("\n"):
-        if "has address " in line:
-            return line.split("has address ")[1]
-
-
-def check_dns(hostname: str, expected_ip: str, resolvers: set) -> tuple:
-    warnings = 0
-    errors = 0
-
-    resolver = dns.resolver.Resolver(configure=False)
-    resolver.nameservers = list(resolvers)
-    try:
-        answers = [rdata.address for rdata in resolver.query(hostname)]
-    except Exception as dns_err:
-        lg.error("cannot resolve {}: {}".format(hostname, dns_err))
-        errors += 1
-    else:
-        for address in answers:
-            if address == expected_ip:
-                lg.success("{}".format(address))
-            else:
-                lg.error("{} instead of {}".format(address, expected_ip))
-                errors += 1
-
-    return warnings, errors
-
-
-def check_resolver(conf: dict, resolvers: set) -> tuple:
-    warnings = 0
-    errors = 0
-    resolver_set = False
-
-    conf_resolvers = conf.get("NETWORK_DNS").split(",")
-    if not conf_resolvers:
-        # backward compatibility
-        conf_resolvers = [
-            conf.get(r)
-            for r in ("NETWORK_DNS1", "NETWORK_DNS2", "NETWORK_DNS3")
-            if conf.get(r)
-        ]
-    for conf_resolver in conf_resolvers:
-        if conf_resolver:
-            resolver_set = True
-            if conf_resolver not in resolvers:
-                lg.warning("resolver {} not configured".format(conf_resolver))
-                warnings += 1
-            else:
-                lg.success("resolver {} configured".format(conf_resolver))
-
-    if not resolver_set:
-        lg.info("no resolver defined in envsetup")
-        exit(2)
-
-    return warnings, errors
-
-
-def main():
-    lg.log("Check DNS settings:")
-
-    if not supported_platform():
-        lg.info("platform not supported")
-        exit(2)
-
-    warnings = 0
-    errors = 0
-    conf = load_conf()
-    resolvers = get_dns_servers()
-    ip = conf.get("NETWORK_IP_NAT") or conf.get("NETWORK_IP")
-
-    check_resolver_warn, check_resolver_err = check_resolver(conf, resolvers)
-    if check_resolver_err:
-        errors += check_resolver_err
-    if check_resolver_warn:
-        warnings += check_resolver_warn
-
-    services_info = (
-        ("MS_SERVER_NAME", "mediaserver", "ubicast-mediaserver"),
-        ("MONITOR_SERVER_NAME", "monitor", "ubicast-monitor"),
-        ("CM_SERVER_NAME", "mirismanager", "ubicast-skyreach"),
-    )
-
-    if not ip:
-        lg.info("no ip address defined in envsetup")
-        exit(2)
-
-    for conf_name, default_domain, package in services_info:
-        domain = conf.get(conf_name)
-        resolution_ignored = conf.get("TESTER_DNS_RESOLUTION_IGNORED", "").split(",")
-        if (
-            domain
-            and domain not in ("localhost", default_domain)
-            and domain not in resolution_ignored
-        ):
-            # check that the service is installed on this system
-            status, _ = subprocess.getstatusoutput("dpkg -s {}".format(package))
-            if status == 0 and ip:
-                lg.info("resolving {}".format(domain))
-                check_dns_warn, check_dns_err = check_dns(domain, ip, resolvers)
-                if check_dns_err:
-                    errors += check_dns_err
-                if check_dns_warn:
-                    warnings += check_dns_warn
-            else:
-                lg.info("{} not installed, skip {}".format(package, domain))
-
-    if errors:
-        exit(1)
-    elif warnings:
-        exit(3)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/tests/scripts/test_email.py b/tests/scripts/test_email.py
deleted file mode 100755
index 6adcc615a64ee2796260b12b47e1ed94833ef774..0000000000000000000000000000000000000000
--- a/tests/scripts/test_email.py
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Criticality: High
-Checks that emails can be sent.
-"""
-
-from ipaddress import ip_address
-from pathlib import Path
-import random
-import socket
-import subprocess
-import sys
-import time
-
-import spf
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-# pylint: disable=wrong-import-position
-from utilities import logging as lg  # noqa: E402
-from utilities.config import load_conf  # noqa: E402
-from utilities.network import get_ip  # noqa: E402
-
-
-def check_listen() -> tuple:
-    """Check that Postfix is listening on 127.0.0.1:25.
-
-    :return: Exit return codes
-    :rtype: tuple
-    """
-
-    warnings = 0
-    errors = 0
-
-    # get listening state from ss
-    status, out = subprocess.getstatusoutput("ss -pant | grep master | grep ':25'")
-
-    if status != 0 or ("127.0.0.1:25" not in out and "[::1]:25" not in out):
-        lg.warning("Postfix is not listening on localhost:25")
-        warnings += 1
-    else:
-        lg.success("Postfix is listening on localhost:25")
-
-    return warnings, errors
-
-
-def check_relay(relay_host: str, relay_port: str, domain: str) -> tuple:
-    """Check that Postfix is not an open relay.
-
-    :param relay_host: Hostname or IP address of relay host
-    :type relay_host: str
-    :param relay_port: Port of relay host
-    :type relay_port: str
-    :param domain: Domain name under which mails will be sent
-    :type domain: str
-    :return: Exit return codes
-    :rtype: tuple
-    """
-
-    warnings = 0
-    errors = 0
-
-    # get relayhost value from Postfix config
-    status, out = subprocess.getstatusoutput("grep -E '^relayhost' /etc/postfix/main.cf")
-
-    if status == 0:
-        configured_relay = (
-            out.replace("relayhost", "").strip(" \t=").replace("[", "").replace("]", "")
-        )
-    else:
-        configured_relay = ""
-
-    if not configured_relay:
-        # check domain origin
-        if Path("/etc/mailname").exists():
-            with open("/etc/mailname", "r") as mailname:
-                myorigin = mailname.read().strip()
-        else:
-            out = subprocess.getoutput("grep -E '^myorigin' /etc/postfix/main.cf")
-            myorigin = out.replace("myorigin", "").strip()
-        # possible origin names
-        origins = (
-            domain or None,
-            socket.gethostname(),
-            socket.getfqdn(),
-        )
-        if myorigin not in origins:
-            lg.warning('"myorigin" setting does not contain a valid domain')
-            warnings += 1
-        else:
-            lg.success('"myorigin" setting is valid')
-
-    relay = "{}:{}".format(relay_host, relay_port) if relay_port else relay_host
-    if relay != configured_relay:
-        lg.error("STMP relay must be {}".format(relay))
-        errors += 1
-
-    if not errors and not warnings:
-        lg.success("STMP relay is properly set")
-
-    return warnings, errors
-
-
-def check_send(sender: str) -> tuple:
-    """Check that Postfix can send email.
-
-    :param sender: Sender mail address
-    :type sender: str
-    :return: Exit return codes
-    :rtype: tuple
-    """
-
-    warnings = 0
-    errors = 0
-
-    # send email
-    email = "noreply+{}-{}@ubicast.eu".format(time.time(), random.randint(0, 1000))
-    if sender:
-        sender = "-a 'From: {}' ".format(sender)
-    else:
-        lg.info("Sender address is not set")
-    cmd = "echo 'test email' | mail -s 'Email used to test configuration.' {}{}".format(
-        sender, email
-    )
-    subprocess.getoutput(cmd)
-
-    # find logs
-    if Path("/var/log/maillog").is_file():
-        cmd = "grep '{}' /var/log/maillog".format(email)
-    elif Path("/var/log/mail.log").is_file():
-        cmd = "grep '{}' /var/log/mail.log".format(email)
-    else:
-        lg.info("/var/log/mail.log not found, trying journalctl")
-        cmd = "journalctl -t postfix/smtp | grep {}".format(email)
-    lg.log("Using following command to search for sending log:\n{}".format(cmd))
-
-    # init vars
-    timeout = 120
-    waited = 0
-    delay = 1
-    timed_out = False
-    out = ""
-    lg.log("Email sending timeout is {} seconds.".format(timeout))
-
-    # logs polling
-    sys.stdout.write("Waiting for sending log")
-    sys.stdout.flush()
-    while not timed_out:
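-        # poll the logs with an exponentially increasing delay (1s, 2s, 4s, ...)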
-        # wait
-        time.sleep(delay)
-        waited += delay
-        delay *= 2
-        # run command
-        status, out = subprocess.getstatusoutput(cmd)
-        # log loop
-        sys.stdout.write(".")
-        sys.stdout.flush()
-        # found
-        if status == 0:
-            out = out.strip().split("\n")[-1]
-            if "status=deferred" not in out:
-                break
-        # timeout
-        timed_out = waited >= timeout
-    sys.stdout.write("\n")
-    sys.stdout.flush()
-
-    # check if the sending has timed out
-    if timed_out:
-        lg.error("Failed to send email (timed out).")
-        if out:
-            lg.info("> sending log line:\n{}".format(out))
-        else:
-            lg.info("> no log entry found.")
-        errors += 1
-
-    # check output for errors
-    elif "bounced" in out or "you tried to reach does not exist" in out:
-        lg.error("Failed to send email")
-        lg.info("> sending log line:\n{}".format(out))
-        errors += 1
-
-    if not errors:
-        lg.success("Can send email")
-
-    return warnings, errors
-
-
-def check_spf(ip_addr: str, sender: str, domain: str) -> tuple:
-    """Check that SPF records passes.
-
-    :param ip_addr: Host ip address of server or relay
-    :type ip_addr: str
-    :param sender: Sender mail address
-    :type sender: str
-    :param domain: Domain name under which mails will be sent
-    :type domain: str
-    :return: Exit return codes
-    :rtype: tuple
-    """
-
-    warnings = 0
-    errors = 0
-
-    if ip_address(ip_addr).is_private:
-        lg.info("{} is a private address, cannot check SPF".format(ip_addr))
-    elif ip_addr and sender:
-        # check spf
-        result, _ = spf.check2(i=ip_addr, s=domain, h="")
-        if result in ("pass", "neutral"):
-            lg.success("SPF for {} in {}: {}".format(ip_addr, domain, result))
-        elif result == "none":
-            lg.info("SPF for {} in {}: {}".format(ip_addr, domain, result))
-        else:
-            lg.warning("SPF for {} in {}: {}".format(ip_addr, domain, result))
-            warnings += 1
-    else:
-        lg.info("IP or sender not set, cannot check SPF")
-
-    return warnings, errors
-
-
-def main():
-    """Run all checks and exits with corresponding exit code."""
-
-    warnings = 0
-    errors = 0
-
-    lg.log("Checking email settings:")
-
-    if not Path("/etc/postfix").exists():
-        lg.info("postfix is not installed")
-        exit(2)
-
-    # get settings
-    conf = load_conf()
-    relay = conf.get("EMAIL_SMTP_SERVER", "").replace("[", "").replace("]", "")
-    relay_host = relay.split(":")[0] if ":" in relay else relay
-    relay_port = relay.split(":")[-1] if ":" in relay else ""
-    ip_addr = (
-        (socket.gethostbyname(relay_host) if relay_host else None)
-        or conf.get("NETWORK_IP_NAT")
-        or conf.get("NETWORK_IP")
-        or get_ip()
-        or None
-    )
-    sender = conf.get("EMAIL_SENDER") or ""
-    if not sender and Path("/etc/postfix/generic").exists():
-        with open("/etc/postfix/generic") as sender_fo:
-            sender = sender_fo.readline().split()[-1]
-    domain = sender.split("@")[-1] or None
-
-    # check that we are not an open relay
-    check_warn, check_err = check_listen()
-    warnings += check_warn
-    errors += check_err
-
-    # check that relayhost is correct
-    check_warn, check_err = check_relay(relay_host, relay_port, domain)
-    warnings += check_warn
-    errors += check_err
-
-    # check that we can send emails
-    check_warn, check_err = check_send(sender)
-    warnings += check_warn
-    errors += check_err
-
-    # check that spf record is correct
-    check_warn, check_err = check_spf(ip_addr, sender, domain)
-    warnings += check_warn
-    errors += check_err
-
-    if errors:
-        exit(1)
-    elif warnings:
-        exit(3)
-
-    exit(0)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/tests/scripts/test_fail2ban.py b/tests/scripts/test_fail2ban.py
deleted file mode 100755
index a09fed8ae7c132fab51d667e2159b05bb63e95b0..0000000000000000000000000000000000000000
--- a/tests/scripts/test_fail2ban.py
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Criticality: Low
-Checks the current state of the fail2ban service.
-"""
-
-from pathlib import Path
-import subprocess
-import sys
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-# pylint: disable=wrong-import-position
-from utilities import logging as lg  # noqa: E402
-from utilities.commands import exec_cmd  # noqa: E402
-
-
-def get_service_state(name: str) -> tuple:
-    """Get the systemd service state.
-
-    :param name: Service name
-    :type name: str
-    :return: Active state, and running state.
-    :rtype: tuple
-    """
-
-    try:
-        # pylint: disable=E0401
-        import dbus
-    except ImportError:
-        returncode, output = exec_cmd(
-            "systemctl status fail2ban | grep 'Active:'", log_output=False
-        )
-        if returncode != 0:
-            active = "no"
-            state = "not installed"
-        else:
-            info = output.replace("Active:", "").strip().split(" ")
-            if len(info) > 1:
-                active = info[0]
-                state = info[1].strip("()")
-            else:
-                active = "no"
-                state = "unknown"
-    else:
-        bus = dbus.SystemBus()
-        systemd = bus.get_object(
-            "org.freedesktop.systemd1", "/org/freedesktop/systemd1"
-        )
-        manager = dbus.Interface(systemd, "org.freedesktop.systemd1.Manager")
-        unit = manager.LoadUnit("{}.service".format(name))
-        proxy = bus.get_object("org.freedesktop.systemd1", str(unit))
-
-        active = proxy.Get(
-            "org.freedesktop.systemd1.Unit",
-            "ActiveState",
-            dbus_interface="org.freedesktop.DBus.Properties",
-        )
-        state = proxy.Get(
-            "org.freedesktop.systemd1.Unit",
-            "SubState",
-            dbus_interface="org.freedesktop.DBus.Properties",
-        )
-
-    return str(active), str(state)
-
-
-def check_service_running(name: str) -> bool:
-    """Check that the given service is active and running.
-
-    :param name: Service name
-    :type name: str
-    :return: Whether the service is active and running or not
-    :rtype: bool
-    """
-
-    active, state = get_service_state(name)
-
-    if active != "active" or state != "running":
-        return False
-
-    return True
-
-
-def get_active_jails() -> list:
-    """Get the list of active jails.
-
-    :return: List of jails
-    :rtype: list
-    """
-
-    _, output = exec_cmd(
-        "fail2ban-client status | grep 'Jail list'", log_output=False
-    )
-    if ":" not in output:
-        return list()
-
-    jails_str = output.split(":")[1].strip().replace(" ", "").strip(",")
-    if not jails_str:
-        return list()
-
-    jails = jails_str.split(",")
-
-    return jails
-
-
-def check_jail_banned(name: str) -> int:
-    """Check if there is currently banned hosts.
-
-    :param name: Jail name
-    :type name: str
-    :return: Number of banned hosts
-    :rtype: int
-    """
-
-    _, output = exec_cmd(
-        "fail2ban-client status {} | grep 'Currently banned'".format(name),
-        log_output=False,
-    )
-    if ":" not in output:
-        return 0
-
-    banned = output.split(":")[1].strip()
-
-    if banned:
-        return int(banned)
-
-    return 0
-
-
-def main():
-    """Run all checks and exits with corresponding exit code."""
-
-    if subprocess.call(["which", "fail2ban-server"], stdout=subprocess.DEVNULL) != 0:
-        lg.info("fail2ban not installed, skipping test")
-        exit(2)
-
-    # init
-    errors = 0
-    warnings = 0
-
-    lg.log("Checking fail2ban state:")
-    if not check_service_running("fail2ban"):
-        lg.warning("fail2ban is not running")
-        warnings += 1
-        # warning exit if not running
-        exit(3)
-    else:
-        lg.success("fail2ban is running")
-
-    lg.log("Checking fail2ban jails:")
-    jails = get_active_jails()
-    for jail in jails:
-        banned = check_jail_banned(jail)
-        lg.info("{} jail is running ({} banned host)".format(jail, banned))
-
-    if "sshd" not in jails:
-        lg.error("sshd jail is not running")
-        exit(3)
-
-    if errors:
-        exit(1)
-    elif warnings:
-        exit(3)
-
-    exit(0)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/tests/scripts/test_live_service.sh b/tests/scripts/test_live_service.sh
deleted file mode 100755
index 8e266bc35312d35fd5e12a8396ae0fc5bd03c689..0000000000000000000000000000000000000000
--- a/tests/scripts/test_live_service.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env bash
-# Criticality: High
-# Checks that the Live service is OK
-set -e
-
-PKG='ubicast-live'
-error=false
-
-# We check if the live package is installed
-if dpkg -s ${PKG} &>/dev/null; then
-    live_installed=true
-else
-    live_installed=false
-fi
-
-# If the live package is installed we check that it is OK
-if ${live_installed}; then
-        # For each live usage port (80 => HTTP, 443 => HTTPS, 1935 => RTMP)
-        for port in 80 443 1935; do
-                echo -n "Local port ${port}: "
-                # We check that the port can be reached with a bash embedded tcp call
-                if ! >/dev/tcp/127.0.0.1/${port} &>/dev/null; then
-                        echo "KO"
-                        error=true
-                else
-                        echo "OK"
-                fi
-        done
-
-        # If one port was not reachable we exit with an error
-        ${error} && exit 1
-else
-        # Code 2 is => not testable
-        exit 2
-fi
-
-# Everything went fine
-exit 0
diff --git a/tests/scripts/test_mediaworker.py b/tests/scripts/test_mediaworker.py
deleted file mode 100755
index 6202a9bfd0038a16c3dd6626ffdd3fdf3e3fe49d..0000000000000000000000000000000000000000
--- a/tests/scripts/test_mediaworker.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-'''
-Criticality: High
-Checks that the MediaWorkers are online and up to date, using the Celerity API
-'''
-from pathlib import Path
-import requests
-import sys
-
-try:
-    from requests.packages.urllib3.exceptions import InsecureRequestWarning
-
-    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
-except ImportError:
-    requests.packages.urllib3.disable_warnings()
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-# pylint: disable=wrong-import-position
-from utilities import logging as lg  # noqa: E402
-
-
-def check_workers():
-    '''
-    Check remote workers using the Celerity API.
-    '''
-    try:
-        import mediaserver
-    except ImportError:
-        lg.log('MediaServer is not installed, skipping test.')
-        return 2
-    else:
-        lg.log('MediaServer version: %s.' % mediaserver.__version__)
-
-    healthy_list = list()
-    offline_list = list()
-    need_update_list = list()
-    try:
-        from celerity_utils import api
-        success, response = api.list_workers('celerity_config_updater')
-        if not success:
-            raise Exception(str(response))
-        for worker in response['workers']:
-            if not worker.get('online'):
-                offline_list.append(worker['name'])
-            elif not worker.get('up_to_date'):
-                need_update_list.append(worker['name'])
-            else:
-                healthy_list.append(worker['name'])
-    except Exception as e:
-        lg.error('Failed to get workers list using celerity API: %s' % e)
-        return 1
-    if need_update_list:
-        lg.error('The celerity version in following MediaWorkers is not the same as in MediaServer:\n  %s' % ', '.join(need_update_list))
-        return 1
-    if offline_list:
-        lg.warning('Some MediaWorkers are offline:\n  %s' % ', '.join(offline_list))
-    if not healthy_list:
-        lg.error('No MediaWorker is online and up to date.')
-        return 1
-    lg.success('The following MediaWorkers are online and up to date:\n  %s' % ', '.join(healthy_list))
-    return 0
-
-
-if __name__ == '__main__':
-    code = check_workers()
-    sys.exit(code)
diff --git a/tests/scripts/test_monitoring.py b/tests/scripts/test_monitoring.py
deleted file mode 100755
index 53892c2c50b642166d85bb99f7289f513cd26884..0000000000000000000000000000000000000000
--- a/tests/scripts/test_monitoring.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-'''
-Criticality: Low
-Check that the monitoring graphs work.
-'''
-from datetime import datetime
-from pathlib import Path
-import os
-import subprocess
-import sys
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-# pylint: disable=wrong-import-position
-from utilities import logging as lg  # noqa: E402
-
-
-MUNIN_WWW_PATH = '/var/cache/munin/www/'
-
-
-def check_munin_node():
-    lg.log('Checking if Munin node works...')
-
-    # check if package is installed
-    p = subprocess.run(['dpkg', '-s', 'munin-node'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
-    if p.returncode != 0:
-        lg.info('The command "dpkg -s munin-node" returned code %s, assuming that the package is not installed. Test skipped.' % p.returncode)
-        return -1
-
-    # check that no plugin should be activated or disabled
-    lg.log('Checking that no plugin should be enabled/disabled.')
-    if not os.path.exists('/usr/sbin/munin-node-configure'):
-        lg.info('The Munin bin "/usr/sbin/munin-node-configure" does not exist. Test skipped.')
-        return -1
-    lg.log('/usr/sbin/munin-node-configure --shell --remove-also  # stderr is hidden')
-    p = subprocess.run(['/usr/sbin/munin-node-configure', '--shell', '--remove-also'], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
-    out = p.stdout.decode('utf-8').strip()
-    lg.log(out)
-    if out:
-        to_be_enabled_count = out.count('ln -s')
-        if to_be_enabled_count:
-            lg.warning('%s plugins should be enabled.' % to_be_enabled_count)
-        to_be_disabled_count = out.count('rm -f')
-        if to_be_disabled_count:
-            lg.warning('%s plugins should be disabled.' % to_be_disabled_count)
-        lg.log('''To enable/disable correct plugins, please run this command:
-munin-node-configure --shell --remove-also 2>/dev/null | sh; systemctl restart munin 2>/dev/null; systemctl restart munin-node;
-''')
-    else:
-        lg.success('The plugins list is correct.')
-    return 0
-
-
-def check_munin():
-    lg.log('Checking if Munin graph works...')
-
-    # check if package is installed
-    p = subprocess.run(['dpkg', '-s', 'munin'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
-    if p.returncode != 0:
-        lg.info('The command "dpkg -s munin" returned code %s, assuming that the package is not installed. Test skipped.' % p.returncode)
-        return -1
-
-    # get Munin www dir names
-    if os.path.exists(MUNIN_WWW_PATH):
-        names = os.listdir(MUNIN_WWW_PATH)
-        names.sort()
-    else:
-        names = []
-    if not names:
-        lg.error('No Munin directory found in "%s".' % MUNIN_WWW_PATH)
-        return 1
-
-    # get cpu day graph of each host
-    paths = list()
-    for name in names:
-        if not name.endswith('.html') and name != 'static' and os.path.isfile(os.path.join(MUNIN_WWW_PATH, name, 'index.html')):
-            for sub_name in os.listdir(os.path.join(MUNIN_WWW_PATH, name)):
-                path = os.path.join(MUNIN_WWW_PATH, name, sub_name, 'cpu-day.png')
-                if sub_name != 'index.html' and os.path.exists(path):
-                    paths.append(path)
-    if not paths:
-        lg.error('No Munin host directory was found in "%s".' % MUNIN_WWW_PATH)
-        return 1
-
-    # check graph mtime
-    error = False
-    for path in paths:
-        lg.log('Checking graph "%s" modification date...' % path)
-        mtime = os.path.getmtime(path)
-        d = datetime.fromtimestamp(mtime)
-        now = datetime.now()
-        diff_seconds = (now - d).total_seconds()
-        if diff_seconds > 3600:
-            lg.error('The graph is older than 1 hour. The monitoring is probably not working.')
-            error = True
-        else:
-            lg.success('The graph is not older than 1 hour.')
-    return 1 if error else 0
-
-
-if __name__ == '__main__':
-    code1 = check_munin_node()
-    code2 = check_munin()
-    code = max(code1, code2)
-    sys.exit(2 if code == -1 else code)
diff --git a/tests/scripts/test_nginx_conf_valid.sh b/tests/scripts/test_nginx_conf_valid.sh
deleted file mode 100755
index 79998b0bd23654d0cc7ff1ba062e8ace3b85c6e9..0000000000000000000000000000000000000000
--- a/tests/scripts/test_nginx_conf_valid.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-# Criticality: High
-# Checks that the webserver configuration has no errors.
-set -e
-
-if which nginx >/dev/null; then
-	nginx -t
-else
-	exit 2
-fi
diff --git a/tests/scripts/test_nginx_status.py b/tests/scripts/test_nginx_status.py
deleted file mode 100755
index 3249bab695408e67c14af8884e582d5d5af7a822..0000000000000000000000000000000000000000
--- a/tests/scripts/test_nginx_status.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python3
-
-'''
-Criticality: Normal
-Checks that the webserver is running.
-'''
-
-from pathlib import Path
-import re
-import requests
-import sys
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-# pylint: disable=wrong-import-position
-from utilities import logging as lg  # noqa: E402
-
-
-def main():
-    lg.log('Checking nginx status:')
-
-    stats_file = Path('/etc/nginx/sites-enabled/stats.conf')
-    if not stats_file.exists():
-        lg.info('The Nginx status vhost does not exist, skipping test.')
-        return 2
-
-    with open(stats_file, 'r') as fo:
-        content = fo.read()
-    listen_s = re.search(r'listen (\d+);', content)
-    if not listen_s:
-        lg.info('The Nginx status vhost has no listen directive, skipping test.')
-        return 2
-    port = int(listen_s.group(1))
-
-    try:
-        req = requests.get(
-            'http://127.0.0.1:%s/nginx_status' % port,
-            proxies={'http': '', 'https': ''},
-            timeout=5,
-        )
-        if req.status_code != 200:
-            raise Exception('Status code: %s.' % req.status_code)
-        if 'Active connections' not in req.text:
-            raise Exception('invalid response from nginx status url.')
-    except Exception as e:
-        lg.error(str(e))
-        return 1
-
-    lg.success('Status code: %s.' % req.status_code)
-    return 0
-
-
-if __name__ == '__main__':
-    rc = main()
-    sys.exit(rc)
diff --git a/tests/scripts/test_nginx_vhosts.py b/tests/scripts/test_nginx_vhosts.py
deleted file mode 100755
index c01be6eb4baf7b171e28e4e4cb3af825352e2d1d..0000000000000000000000000000000000000000
--- a/tests/scripts/test_nginx_vhosts.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/env python3
-
-'''
-Criticality: High
-Tests that all webserver services (vhosts) are available and reachable.
-'''
-
-from pathlib import Path
-import re
-import requests
-import socket
-import sys
-
-if sys.version_info < (3, 6, 0):
-    sys.exit(2)
-
-try:
-    from requests.packages.urllib3.exceptions import InsecureRequestWarning
-
-    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
-except ImportError:
-    requests.packages.urllib3.disable_warnings()
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-# pylint: disable=wrong-import-position
-from utilities import logging as lg  # noqa: E402
-from utilities.config import load_conf  # noqa: E402
-
-'''
-This script checks for all enabled vhosts in Nginx conf that:
-* The response status code is 200, 401 or 403.
-* The host is resolved as 127.0.0.1.
-* The Wowza response is correct on /streaming/ (only for mediaserver vhosts).
-'''
-
-
-def get_configs(path: str) -> list:
-    configs_dir = Path(path)
-    configs = [c.resolve() for c in configs_dir.glob('*.conf')]
-
-    return configs
-
-
-def get_vhosts(config: Path) -> list:
-    # remove comments and blank lines
-    sanitize = re.compile(r'(?:\s*#\s*.*)|(?:^\s*)', re.M)
-    # capture server blocks
-    servers = re.compile(r'^server\s+{(?:\s*(?!server\s{).)+', re.M)
-
-    with open(str(config)) as config_fo:
-        config_content = sanitize.sub(r'', config_fo.read())
-        vhosts = servers.findall(config_content)
-
-    return vhosts
-
-
-def get_hostnames(vhost: str) -> list:
-    # extract hostname(s) from server_name values
-    server_names = re.compile(r'^\s*server_name\s+(.*);')
-
-    hostnames = []
-    for line in vhost.splitlines():
-        if server_names.match(line):
-            hostnames.extend(server_names.match(line).group(1).split())
-
-    return hostnames
-
-
-def get_ports(vhost: str) -> list:
-    # extract port(s) from listen values
-    listen_re = re.compile(r'^\s*listen\s+(.*);')
-    port_re = re.compile(r'^(?:.*:)?(\d+)$')
-
-    ports = []
-    for line in vhost.splitlines():
-        if listen_re.match(line):
-            parts = listen_re.match(line).group(1).replace('\t', ' ').split(' ')
-            protocol = 'https' if 'ssl' in parts else 'http'
-            for part in parts:
-                p_m = port_re.match(part)
-                if p_m:
-                    ports.append((p_m.group(1), protocol))
-
-    return ports
-
-
-def test_vhost(
-    ports_info=None,
-    domains=None,
-    resolution_ignored=None,
-    nginx_file=None,
-    wowza_dir=None,
-    tested=0,
-):
-    errors = 0
-    warnings = 0
-    name = nginx_file.name
-    for port, proto in ports_info or [(80, 'http')]:
-        for domain in domains or ['localhost']:
-            url = '%s://%s:%s' % (proto, domain, port)
-            lg.info('testing url "%s" from %s' % (url, name))
-            # test domain IP
-            ip_error = None
-            ip_warning = None
-            try:
-                ip = socket.gethostbyname(domain)
-            except Exception as e:
-                ip_error = '%s not resolved: %s' % (domain, e)
-            else:
-                if ip != '127.0.0.1' and ip != '127.0.1.1':
-                    ip_warning = '%s resolves to %s instead of 127.0.0.1' % (domain, ip)
-            if ip_error:
-                if resolution_ignored and domain in resolution_ignored:
-                    lg.info('%s (ignored)' % ip_error)
-                    ip_error = None
-                else:
-                    lg.error(ip_error)
-            elif ip_warning:
-                if resolution_ignored and domain in resolution_ignored:
-                    lg.info('%s (ignored)' % ip_warning)
-                    ip_warning = None
-                else:
-                    lg.warning(ip_warning)
-            # test url
-            req_error = False
-            try:
-                req = requests.get(
-                    url, verify=False, proxies={'http': '', 'https': ''}, timeout=30
-                )
-                req_time = int(1000 * req.elapsed.total_seconds())
-            except Exception as e:
-                code = str(e)
-                req_time = 0
-            else:
-                code = req.status_code
-            if (
-                domain != 'localhost'
-                and code not in (200, 401, 403)
-                or domain == 'localhost'
-                and code not in (200, 401, 403, 404)
-            ):
-                lg.error('%s status: %s, %s ms' % (domain, code, req_time))
-                req_error = True
-            else:
-                if req_time > 10000:
-                    lg.warning('%s status: %s, %s ms' % (domain, code, req_time))
-                    warnings += 1
-                else:
-                    lg.success('%s status: %s, %s ms' % (domain, code, req_time))
-                if 'mediaserver' in name and wowza_dir:
-                    # test /streaming url
-                    try:
-                        req = requests.get(
-                            url + '/streaming/',
-                            verify=False,
-                            proxies={'http': '', 'https': ''},
-                            timeout=30,
-                        )
-                        req_time = int(1000 * req.elapsed.total_seconds())
-                    except Exception as e:
-                        code = str(e)
-                        req_time = 0
-                    else:
-                        code = req.status_code
-                    if code != 404 and code != 200:
-                        # 404 responses are ignored (they appear when Wowza is still installed with MS >= 9.8.0)
-                        lg.error('%s streaming: %s, %s ms' % (domain, code, req_time))
-                        req_error = True
-                    elif req_time > 10000:
-                        lg.warning('%s streaming: %s, %s ms' % (domain, code, req_time))
-                        warnings += 1
-            tested += 1
-
-            if ip_warning:
-                warnings += 1
-
-            if ip_error or req_error:
-                errors += 1
-
-    return tested, warnings, errors
-
-
-def main():
-    lg.log('Check that nginx vhosts are well configured:')
-    # check that Nginx dir exists
-    nginx_dir = '/etc/nginx/sites-enabled'
-    if not Path(nginx_dir).exists():
-        lg.info('nginx dir does not exist ("%s"), test skipped.' % nginx_dir)
-        exit(2)
-
-    # check that Wowza is installed
-    wowza_dir = '/usr/local/WowzaStreamingEngine'
-    if not Path(wowza_dir).exists():
-        wowza_dir = None
-
-    # get envsetup conf
-    conf = load_conf()
-
-    # get enabled vhosts
-    resolution_ignored = conf.get('TESTER_VHOST_RESOLUTION_IGNORED', '').split(',')
-    errors = 0
-    warnings = 0
-    tested = 0
-    nginx_confs = get_configs(nginx_dir)
-    for nginx_conf in nginx_confs:
-        vhosts = get_vhosts(nginx_conf)
-        for vhost in vhosts:
-            lg.log('Vhost "%s":' % nginx_conf)
-            hostnames = get_hostnames(vhost)
-            ports = get_ports(vhost)
-            t, w, e = test_vhost(
-                ports,
-                hostnames,
-                resolution_ignored,
-                nginx_conf,
-                wowza_dir,
-            )
-            tested += t
-            warnings += w
-            errors += e
-
-    if errors:
-        exit(1)
-    elif warnings:
-        exit(3)
-    if not tested:
-        lg.error('no url found in nginx sites-enabled dir')
-        exit(1)
-
-
-if __name__ == '__main__':
-    main()
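The vhost parsing above is regex-driven: server blocks are isolated first, then server_name and listen directives are read line by line. A small sketch of the directive extraction, using an illustrative server block:

# Sketch of the server_name/listen parsing from test_nginx_vhosts.py.
# The vhost text is a made-up example.
import re

vhost = '''server {
    listen 443 ssl;
    server_name example.local media.example.local;
}'''

server_names = re.compile(r'^\s*server_name\s+(.*);')
listen_re = re.compile(r'^\s*listen\s+(.*);')
for line in vhost.splitlines():
    m = server_names.match(line)
    if m:
        print(m.group(1).split())  # ['example.local', 'media.example.local']
    m = listen_re.match(line)
    if m:
        parts = m.group(1).split()
        protocol = 'https' if 'ssl' in parts else 'http'
        print(parts[0], protocol)  # 443 https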
diff --git a/tests/scripts/test_ntp.py b/tests/scripts/test_ntp.py
deleted file mode 100755
index 8c977cd1c857523b367e79d30670659e4c87a7cd..0000000000000000000000000000000000000000
--- a/tests/scripts/test_ntp.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2017, Florent Thiery
-'''
-Criticality: Low
-Checks that the server is synchronized with the configured NTP server.
-'''
-from pathlib import Path
-import os
-import re
-import subprocess
-import sys
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-# pylint: disable=wrong-import-position
-from utilities import logging as lg  # noqa: E402
-from utilities.config import load_conf  # noqa: E402
-
-
-def main():
-    # Check that ntpd is synced
-    if os.path.isfile('/usr/bin/ntpq'):
-        cmd = 'LANG=C ntpq -p -d -4'
-        expected = 'remote'
-        ntpconf = '/etc/ntp.conf'
-        ntpconf_expected = r'^(?:server|pool)\s([a-zA-Z0-9\._-]+)(?:\s*iburst)?$'
-    else:
-        cmd = 'LANG=C timedatectl'
-        expected = 'NTP synchronized'
-        ntpconf = '/etc/systemd/timesyncd.conf'
-        ntpconf_expected = r'^NTP=(.*)$'
-
-    lg.log('Running %s' % cmd)
-    status = subprocess.getoutput(cmd)
-    if expected not in status:
-        lg.error('NTP not working: %s' % status)
-        return 1
-    lg.success('System is NTP synchronized.')
-
-    lg.log('Checking NTP server conforms to conf...')
-    conf = load_conf()
-    expected_servers = None
-    if conf.get('NTP_SERVER'):
-        expected_servers = [s.strip() for s in conf['NTP_SERVER'].split(',')]
-    if not expected_servers:
-        if 'Ubuntu' in subprocess.getoutput('lsb_release -a'):
-            expected_servers = ['ntp.ubuntu.com']
-        else:
-            expected_servers = ['0.debian.pool.ntp.org']
-    with open(ntpconf, 'r') as fo:
-        content = fo.read()
-    servers = list()
-    for line in content.split('\n'):
-        m = re.match(ntpconf_expected, line)
-        if m:
-            servers.append(m.groups()[0].strip())
-    for expected_server in expected_servers:
-        if expected_server not in servers:
-            lg.warning('Warning: Expected NTP server %s not found in %s, found %s instead.' % (expected_server, ntpconf, ', '.join(servers)))
-            return 3
-        else:
-            lg.log('Expected NTP server %s found in configuration (total servers: %s).' % (expected_server, len(servers)))
-    lg.success('NTP OK.')
-
-    return 0
-
-
-if __name__ == '__main__':
-    code = main()
-    sys.exit(code)
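The expected-server comparison above reads either ntp.conf or timesyncd.conf with a single pattern per format. A sketch of the timesyncd branch against an in-memory sample (note the script stores each NTP= line as one entry, even when it lists several servers):

# Sketch of the timesyncd.conf parsing from test_ntp.py.
# The sample content is illustrative.
import re

sample = '[Time]\nNTP=0.pool.ntp.org 1.pool.ntp.org\n'
servers = []
for line in sample.split('\n'):
    m = re.match(r'^NTP=(.*)$', line)
    if m:
        servers.append(m.group(1).strip())
print(servers)  # ['0.pool.ntp.org 1.pool.ntp.org']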
diff --git a/tests/scripts/test_partitions.py b/tests/scripts/test_partitions.py
deleted file mode 100755
index 6897bd95a13cfb3a78c4600978dc5ace9ce3b3ce..0000000000000000000000000000000000000000
--- a/tests/scripts/test_partitions.py
+++ /dev/null
@@ -1,221 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2017, Florent Thiery
-'''
-Criticality: Normal
-Checks that partitions conform to the expected norms and that sufficient free space is available.
-'''
-import subprocess
-import sys
-import os
-
-YELLOW = '\033[93m'
-GREEN = '\033[92m'
-RED = '\033[91m'
-DEF = '\033[0m'
-
-
-PATHS = [
-    {
-        'mount_point': '/',
-        'condition': '/var',  # ignore this mount point check if /var is on the same device
-        'recommended_types': ('ext4', 'zfs', 'xfs'),
-        'min_size_gbytes': 6,
-        'reco_size_gbytes': 9,
-        'min_available_gbytes': 2,
-    },
-    {
-        'mount_point': '/',
-        'condition': '!/var',  # ignore this mount point check if /var is not on the same device
-        'recommended_types': ('ext4', 'zfs', 'xfs'),
-        'min_size_gbytes': 9,
-        'reco_size_gbytes': 14,
-        'min_available_gbytes': 4,
-    },
-    {
-        'mount_point': '/var',
-        'recommended_types': ('ext4', 'zfs', 'xfs'),
-        'min_size_gbytes': 2,
-        'reco_size_gbytes': 4,
-        'min_available_gbytes': 0.5,
-    },
-    {
-        'mount_point': '/home/msuser/msinstance',
-        'recommended_types': ('ext4', 'zfs', 'nfs', 'nfs4', 'xfs'),
-        'min_size_gbytes': 5,
-        'reco_size_gbytes': 300,
-        'min_available_gbytes': 5,
-    },
-    {
-        'mount_point': '/home/skyreach',
-        'recommended_types': ('ext4', 'zfs', 'nfs', 'nfs4', 'xfs'),
-        'min_size_gbytes': 5,
-        'reco_size_gbytes': 9,
-        'min_available_gbytes': 2,
-    },
-    {
-        'type': 'memory',
-        'min_size_gbytes': 0.1,
-        'reco_size_gbytes': 0.2,
-    },
-]
-
-# Expected memory size depending on installed software
-MEMORY_EXPECTED_SIZE = {
-    # Ordering in this dict is important
-    '/etc/mediaserver': {
-        'label': 'for MediaServer',
-        'min_size_gbytes': 1,
-        'reco_size_gbytes': 3,
-    },
-    '/home/celerity': {
-        'label': 'for Celerity workers',
-        'min_size_gbytes': 1,
-        'reco_size_gbytes': 3,
-    },
-    '/home/skyreach': {
-        'label': 'for Miris Manager',
-        'min_size_gbytes': 1,
-        'reco_size_gbytes': 2,
-    },
-}
-
-
-def to_gbytes(size_bytes):
-    return int(round(size_bytes / (1024 * 1024 * 1024)))
-
-
-def get_memory_gbytes():
-    memory_gbytes = 0
-    with open('/proc/meminfo', 'r') as fo:
-        for line in fo:
-            if 'MemTotal:' in line:
-                memory = line.split('MemTotal:')[1].strip()
-                memory_kbytes, unit = memory.split(' ')
-                if unit != 'kB':
-                    print('Warning, unexpected unit %s.' % unit)
-                memory_gbytes = int(round(int(memory_kbytes) / (1024 * 1024)))
-    if not memory_gbytes:
-        print('Failed to get memory size.')
-    return memory_gbytes
-
-
-def get_path_fs(path):
-    # Example of "df" output:
-    # Filesystem     Type   1B-blocks        Used   Available
-    # /dev/loop2     ext4 52710469632 38253940736 11755397120
-    status, output = subprocess.getstatusoutput('df --output="source,fstype,size,avail" -B 1 %s | tail -n 1' % path)
-    if status == 0:
-        dev, fstype, size, available = output.split()
-    else:
-        dev = fstype = size = available = None
-    # Example of "mount" output:
-    # /dev/sdb2 on / type ext4 (rw,relatime,errors=remount-ro)
-    status, output = subprocess.getstatusoutput("mount | grep '%s '" % dev)
-    if status == 0:
-        params = output.split()[-1].strip('()').split(',')
-    else:
-        params = list()
-    return dev, fstype, params, to_gbytes(int(size)), to_gbytes(int(available))
-
-
-def check_allocation(dev):
-    root_dev = os.path.basename(dev)[:3]
-    if not root_dev:
-        return True
-    dev_partitions = list()
-    with open('/proc/partitions', 'r') as fo:
-        for line in fo:
-            if root_dev in line:
-                dev_partitions.append(line)
-
-    max_size = 0
-    total_size = 0
-    for p in dev_partitions:
-        major, minor, blocks, name = p.split()
-        size = int(blocks) * 512
-        if name == root_dev:
-            max_size = size
-            if root_dev.startswith('md'):
-                total_size += size
-        else:
-            total_size += size
-    unallocated = max_size - total_size
-    unallocated_gbytes = to_gbytes(unallocated)
-    if unallocated_gbytes > 1:
-        print('Warning: %s%s GB are unallocated on %s.%s' % (YELLOW, unallocated_gbytes, root_dev, DEF))
-        return False
-    return True
-
-
-def test_partitions():
-    error = False
-    warning = False
-    for part_info in PATHS:
-        psize = None
-        if part_info.get('mount_point'):
-            mount_point = part_info['mount_point']
-            if os.path.exists(mount_point):
-                mount_point = os.path.realpath(mount_point)
-                name = 'Partition of %s' % mount_point
-                dev, fstype, params, psize, available = get_path_fs(mount_point)
-                condition = part_info.get('condition')
-                if condition:
-                    subdev = get_path_fs(condition.strip('!'))[0]
-                    if subdev == dev and not condition.startswith('!'):
-                        print('Skipping check of %s with condition %s.' % (mount_point, condition))
-                        continue
-                    elif subdev != dev and condition.startswith('!'):
-                        print('Skipping check of %s with condition %s.' % (mount_point, condition))
-                        continue
-                if fstype not in part_info.get('recommended_types'):
-                    print('Warning: %s fs type not recommended %s(current: %s, recommended: %s)%s.' % (name, YELLOW, fstype, part_info['recommended_types'], DEF))
-                    warning = True
-                if 'nfs' not in fstype:
-                    warning = warning or not check_allocation(dev)  # do not clear earlier warnings
-                if 'acl' in params:
-                    print('%sThe device %s is mounted using ACL.%s Please set "noacl" mount parameter in fstab and reboot.' % (RED, dev, DEF))
-                    error = True
-                min_available_gbytes = part_info.get('min_available_gbytes')
-                if min_available_gbytes and available < min_available_gbytes:
-                    print('%s has less than %s GB available %s(%s GB available)%s.' % (name, min_available_gbytes, RED, available, DEF))
-                    error = True
-                else:
-                    print('%s has more than %s GB available %s(%s GB available)%s.' % (name, min_available_gbytes, GREEN, available, DEF))
-            else:
-                print('%s not found, cannot check.' % mount_point)
-        elif part_info.get('type') == 'memory':
-            for path, values in MEMORY_EXPECTED_SIZE.items():
-                if os.path.exists(path):
-                    part_info.update(values)
-                    break
-            name = 'System memory'
-            if part_info.get('label'):
-                name += ' (%s)' % part_info['label']
-            psize = get_memory_gbytes()
-
-        if psize:
-            if psize < part_info['min_size_gbytes']:
-                print('%s is smaller than the minimum required size %s(%s GB < %s GB)%s.' % (name, RED, psize, part_info['min_size_gbytes'], DEF))
-                error = True
-            elif psize < part_info['reco_size_gbytes']:
-                print('%s is smaller than the recommended size %s(%s GB < %s GB)%s.' % (name, YELLOW, psize, part_info['reco_size_gbytes'], DEF))
-                warning = True
-            else:
-                print('%s is bigger than recommended size %s(%s GB >= %s GB)%s.' % (name, GREEN, psize, part_info['reco_size_gbytes'], DEF))
-
-    if error:
-        print('Errors found.')
-        code = 1
-    elif warning:
-        print('Some warnings were found.')
-        code = 3
-    else:
-        print(GREEN + 'All OK.' + DEF)
-        code = 0
-    return code
-
-
-if __name__ == '__main__':
-    code = test_partitions()
-    sys.exit(code)
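The size checks above parse one line of `df` output into device, filesystem type, total size and available bytes. A sketch against a captured sample line, matching the four columns requested via --output:

# Sketch of the df output parsing from test_partitions.py.
# The sample line is illustrative.
def to_gbytes(size_bytes):
    return int(round(size_bytes / (1024 * 1024 * 1024)))

sample = '/dev/loop2 ext4 52710469632 11755397120'
dev, fstype, size, available = sample.split()
print(dev, fstype, to_gbytes(int(size)), to_gbytes(int(available)))  # /dev/loop2 ext4 49 11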
diff --git a/tests/scripts/test_postgresql.py b/tests/scripts/test_postgresql.py
deleted file mode 100755
index 2253213cba94bec73eb5d4949aa0270888e55abd..0000000000000000000000000000000000000000
--- a/tests/scripts/test_postgresql.py
+++ /dev/null
@@ -1,417 +0,0 @@
-#!/usr/bin/env python3
-'''
-Criticality: High
-Checks the current state of the PostgreSQL database cluster.
-'''
-
-from pathlib import Path
-import re
-import socket
-import subprocess
-import sys
-import time
-import urllib
-import uuid
-
-try:
-    import psycopg2
-except ImportError:
-    sys.exit(2)
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-from utilities import logging as lg  # noqa: E402
-from utilities.apt import Apt  # noqa: E402
-from utilities.config import load_conf  # noqa: E402
-
-
-def check_listen(host: str, port: int) -> bool:
-    '''Check if server is listening (TCP only).
-
-    :param host: The hostname or IP address to connect to
-    :param port: The TCP port number to connect to
-    :type host: str
-    :type port: int
-    :return: Whether the `host` is listening on TCP/`port`
-    :rtype: bool
-    '''
-
-    # try to connect to the port used by psql-primary frontend
-    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    result = client.connect_ex((host, port))
-    client.close()
-
-    return result == 0
-
-
-def get_haproxy_conf(path: str = '/etc/haproxy/haproxy.cfg') -> dict:
-    '''Get HAProxy configuration in a dictionary.
-
-    :param path: HAProxy configuration file, defaults to '/etc/haproxy/haproxy.cfg'
-    :type path: str
-    :return: HAProxy configuration file content
-    :rtype: dict
-    '''
-
-    # init configuration dictionary
-    conf = {}
-
-    # load configuration file
-    try:
-        with open(path) as data:
-            lines = data.readlines()
-    except EnvironmentError:
-        return conf
-
-    # define patterns
-    pattern_block = re.compile(r'^([a-zA-Z0-9_.-]+ *[a-zA-Z0-9_.-]+)')
-    pattern_param = re.compile(r'^\s+([ /:()|a-zA-Z0-9_.-]+)')
-
-    # parse configuration file
-    for line in lines:
-        match_block = pattern_block.match(line)
-        if match_block:
-            block = match_block.group(1)
-            conf[block] = []
-        else:
-            match_param = pattern_param.match(line)
-            if match_param:
-                param = match_param.group(1)
-                conf[block].append(param)
-
-    return conf
-
-
-def get_nodes() -> dict:
-    '''Get the list of nodes from HAProxy configuration.
-
-    :return: The list of nodes found in HAProxy configuration
-    :rtype: dict
-    '''
-
-    # get haproxy conf
-    conf = get_haproxy_conf()
-
-    servers = {}
-
-    for item in conf.keys():
-        if 'pgsql-primary' in item:
-            # filter `server` lines
-            server_lines = [x for x in conf[item] if x.startswith('server ')]
-            for line in server_lines:
-                # split line
-                elements = line.split()
-
-                # get needed elements
-                name = elements[1]
-                address = elements[2].split(':')
-                host = address[0]
-                port = int(address[1])
-                rephacheck = elements[7]
-
-                # update dictionary
-                servers.update(
-                    {name: {'host': host, 'port': port, 'rephacheck': rephacheck}}
-                )
-
-    return servers
-
-
-def get_node_state(host: str, port: int) -> str:
-    '''Get the current state of a node from its RepHACheck daemon.
-
-    :param host: The node's hostname or IP address
-    :param port: The node's port on which RepHACheck is listening
-    :type host: str
-    :type port: int
-    :return: The current state of the node according to its RepHACheck daemon
-    :rtype: str
-    '''
-
-    # connect and get tcp stream data
-    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    client.connect((host, port))
-    data = client.recv(1024)
-    client.close()
-    # extract string from data output
-    state = data.decode('utf-8').rstrip()
-
-    return state
-
-
-def check_fenced(nodes: dict) -> tuple:
-    '''Check if the cluster has a fenced node.
-
-    :param nodes: The dictionary containing nodes and their information
-    :type nodes: dict
-    :return: Whether the nodes list contains a fenced server
-    :rtype: tuple
-    '''
-
-    for node in nodes.keys():
-        host = nodes[node]['host']
-        port = int(nodes[node]['rephacheck'])
-        if get_node_state(host, port) == 'fenced':
-            return True, node
-
-    return False, None
-
-
-def check_psql(db_conn: dict, query: str) -> tuple:
-    '''Check if we can write data on this node.
-
-    :param db_conn: Database connection parameters
-    :type db_conn: dict
-    :param query: Query to execute
-    :type query: str
-    :return: Whether the query can be executed or not
-    :rtype: tuple
-    '''
-
-    # build database connection uri
-    if 'password' in db_conn:
-        uri = 'postgresql://{}:{}@{}:{}/{}'.format(
-            db_conn['user'],
-            urllib.parse.quote_plus(db_conn['password']),
-            db_conn['host'],
-            db_conn['port'],
-            db_conn['dbname'],
-        )
-    else:
-        uri = 'postgresql:///{}'.format(db_conn['dbname'])
-
-    # format command
-    command = ['su -l postgres -c "psql {} -c \'{}\'"'.format(uri, query)]
-
-    # execute
-    try:
-        subprocess.check_output(command, shell=True)
-    except subprocess.CalledProcessError as psql_error:
-        return False, str(psql_error).rstrip()
-
-    return True, None
-
-
-def check_replication(primary: dict, standby: dict) -> tuple:
-    '''Check if replication is working between the primary and standby servers.
-
-    :param primary: Database connection parameters for primary server
-    :type primary: dict
-    :param standby: Database connection parameters for standby server
-    :type standby: dict
-    :return: Whether replication between primary/standby is working or not
-    :rtype: tuple
-    '''
-
-    # connections
-    try:
-        primary_client = psycopg2.connect(**primary)
-        standby_client = psycopg2.connect(**standby)
-    except psycopg2.Error as repl_conn_error:
-        return False, str(repl_conn_error).rstrip()
-
-    # random id
-    rand = uuid.uuid4().hex
-    write_query = 'CREATE TABLE es_test_{} (id serial PRIMARY KEY);'.format(rand)
-    read_query = 'SELECT * FROM es_test_{};'.format(rand)
-
-    # write
-    try:
-        primary_psql = primary_client.cursor()
-        primary_psql.execute(write_query)
-    except psycopg2.Error as repl_write_error:
-        return False, str(repl_write_error).rstrip()
-
-    # read
-    max_time = 6.0
-    timer = 0.0
-    while timer < max_time:
-        time.sleep(timer)
-        timer += 0.2
-        try:
-            standby_psql = standby_client.cursor()  # read back through the standby connection
-            standby_psql.execute(read_query)
-            msg = 'took ~{}s'.format(str(timer))
-            break
-        except psycopg2.Error as repl_read_error:
-            msg = str(repl_read_error).rstrip()
-    else:
-        return False, msg
-
-    # delete
-    try:
-        primary_psql.execute('DROP TABLE es_test_{};'.format(rand))
-    except psycopg2.Error:
-        pass
-
-    # close
-    primary_psql.close()
-    standby_psql.close()
-    primary_client.close()
-    standby_client.close()
-
-    return True, msg
-
-
-def check_ha(db_conn: dict, errors: int = 0) -> int:
-    '''Run all tests for a highly-available setup.
-
-    :param db_conn: Database connection parameters
-    :type db_conn: dict
-    :param errors: Error counter, defaults to 0
-    :param errors: int, optional
-    :return: Number of errors
-    :rtype: int
-    '''
-
-    # get nodes data
-    nodes = get_nodes()
-
-    # check haproxy
-    lg.log('Checking local HAProxy frontends:')
-    primary_listening = check_listen(db_conn['host'], 54321)
-    if primary_listening:
-        lg.success('HAProxy pgsql-primary frontend is listening.')
-    else:
-        lg.error('HAProxy pgsql-primary frontend is not listening.')
-        errors += 1
-    standby_listening = check_listen(db_conn['host'], 54322)
-    if standby_listening:
-        lg.info('HAProxy pgsql-standby frontend is listening.')
-    else:
-        lg.info('HAProxy pgsql-standby frontend is not listening.')
-
-    # check remotes
-    lg.log('Checking remote PostgreSQL nodes:')
-    for node in nodes:
-        node_host = nodes[node]['host']
-        node_port = nodes[node]['port']
-        if not check_listen(node_host, node_port):
-            lg.error('Cannot connect to {}:{}.'.format(node_host, node_port))
-            errors += 1
-        else:
-            lg.success('Can connect to {}:{}.'.format(node_host, node_port))
-
-    # check fenced
-    lg.log('Checking cluster state:')
-    fenced, node = check_fenced(nodes)
-    if fenced:
-        lg.error('Node `{}` is fenced'.format(node))
-        errors += 1
-    else:
-        lg.success('No fenced node found.')
-
-    # check replication
-    lg.log('Checking replication state:')
-    if primary_listening and standby_listening:
-        primary = db_conn.copy()
-        primary['port'] = 54321
-        standby = db_conn.copy()
-        standby['port'] = 54322
-        status, info = check_replication(primary, standby)
-        if not status:
-            lg.error('Cannot replicate between primary/standby ({}).'.format(info))
-            errors += 1
-        else:
-            lg.success('Can replicate between primary/standby ({}).'.format(info))
-    else:
-        lg.info('Cannot check replication because primary and standby are not both accessible.')
-
-    return errors
-
-
-def check_local(db_conn: dict, errors: int = 0) -> int:
-    '''Run all tests for a single-node setup.
-
-    :param db_conn: Database connection parameters
-    :type db_conn: dict
-    :param errors: Error counter, defaults to 0
-    :param errors: int, optional
-    :return: Number of errors
-    :rtype: int
-    '''
-
-    host = db_conn['host']
-    port = db_conn['port']
-    user = db_conn['user']
-
-    # check listen
-    lg.log('Checking local PostgreSQL node:')
-    if not check_listen(host, port):
-        lg.error('Cannot connect to {}:{}.'.format(host, port))
-        errors += 1
-    else:
-        lg.success('Can connect to {}:{}.'.format(host, port))
-
-    # check read
-    lg.log('Checking read operation:')
-    read_query = 'SELECT 1;'
-    status, info = check_psql(db_conn, read_query)
-    if not status:
-        lg.error('Cannot read from {}@{}:{} ({}).'.format(user, host, port, info))
-        errors += 1
-    else:
-        lg.success('Can read from {}@{}:{}.'.format(user, host, port))
-
-    # get replication state if available
-    if check_listen('127.0.0.1', 8543):
-        state = get_node_state('127.0.0.1', 8543)
-    else:
-        state = 'primary'
-
-    # check write
-    lg.log('Checking write operation:')
-    if state != 'primary':
-        lg.info('This database is in {} state.'.format(state))
-    else:
-        rand = uuid.uuid4().hex
-        write_query = 'CREATE TABLE es_test_{} (id serial PRIMARY KEY);'.format(rand)
-        status, info = check_psql(db_conn, write_query)
-        if not status:
-            lg.error('Cannot write on {}@{}:{} ({}).'.format(user, host, port, info))
-            errors += 1
-        else:
-            lg.success('Can write on {}@{}:{}.'.format(user, host, port))
-            # remove test table
-            check_psql(db_conn, 'DROP TABLE es_test_{};'.format(rand))
-
-    return errors
-
-
-def main():
-    '''Run all checks and exits with corresponding exit code.'''
-
-    apt = Apt()
-    pattern_postgresql_client = re.compile(r'postgresql-client.*')
-    if not list(filter(pattern_postgresql_client.match, apt.installed_packages)):
-        exit(2)
-
-    # load configuration
-    conf = load_conf()
-
-    # get database configuration
-    db_host = conf.get('DB_HOST') if conf.get('DB_HOST') else '127.0.0.1'
-    db_port = 54321 if check_listen('127.0.0.1', 54321) else 5432
-    db_user = conf.get('DB_USER') if conf.get('DB_USER') else 'postgres'
-    db_pass = conf.get('DB_PG_ROOT_PWD')
-    db_conn = {'dbname': db_user, 'host': db_host, 'port': db_port, 'user': db_user}
-    if db_pass:
-        db_conn.update({'password': db_pass})
-
-    # determine if HA setup and run according tests
-    lg.log('Checking availability mode:')
-    if check_listen('127.0.0.1', 54321):
-        lg.info('This setup is using a primary and standby database.')
-        errors = check_ha(db_conn)
-    else:
-        lg.info('This setup is using a single database.')
-        errors = check_local(db_conn)
-
-    if errors:
-        sys.exit(1)
-    else:
-        sys.exit(0)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/tests/scripts/test_raid.py b/tests/scripts/test_raid.py
deleted file mode 100755
index 6a946221eda272234bd0b9cd8227ba74de2d77b1..0000000000000000000000000000000000000000
--- a/tests/scripts/test_raid.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2017, Florent Thiery
-'''
-Criticality: High
-Checks that the server RAID array is fine.
-'''
-import glob
-import subprocess
-import sys
-import os
-
-GREEN = '\033[92m'
-RED = '\033[91m'
-DEF = '\033[0m'
-
-
-def print_red(string):
-    print(RED + string + DEF)
-
-
-def print_green(string):
-    print(GREEN + string + DEF)
-
-
-def check_raid(dev):
-    cmd = '/sbin/mdadm -D %s' % dev
-    status, output = subprocess.getstatusoutput(cmd)
-    if status != 0:
-        print_red('The mdadm command on %s failed:' % dev)
-    elif 'degraded' in output:
-        print_red('The %s RAID partition is degraded:' % dev)
-    else:
-        print_green('The %s RAID partition is ok.' % dev)
-        return True
-    print(cmd)
-    print(output)
-    return False
-
-
-if os.path.isfile('/proc/mdstat'):
-    all_ok = True
-    for r in glob.glob('/dev/md*'):
-        # ignore /dev/md folder
-        if os.path.exists(r) and not os.path.isdir(r):
-            all_ok = min(check_raid(r), all_ok)
-    if all_ok:
-        print_green('Everything fine')
-    else:
-        print_red('Some array is in bad shape')
-        with open('/proc/mdstat', 'r') as fo:
-            print(fo.read())
-    sys.exit(int(not all_ok))
-else:
-    print('No software RAID array found, untestable')
-    sys.exit(2)
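The script above asks mdadm about each /dev/md* device. A rougher sketch that reads /proc/mdstat directly, assuming the conventional [UU]/[U_] member-status notation where an underscore marks a failed member:

# Sketch of a RAID health check from /proc/mdstat alone.
# Assumes the conventional [UU]/[U_] status markers.
import re
from pathlib import Path

mdstat = Path('/proc/mdstat')
if not mdstat.is_file():
    print('no software RAID')
else:
    states = re.findall(r'\[([U_]+)\]', mdstat.read_text())
    print('degraded' if any('_' in s for s in states) else 'ok')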
diff --git a/tests/scripts/test_ssl.py b/tests/scripts/test_ssl.py
deleted file mode 100755
index ea5633ab557826b1b8e6d5f105bad49964bdd397..0000000000000000000000000000000000000000
--- a/tests/scripts/test_ssl.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Criticality: Normal
-Checks that TLS certificates are valid; if invalid, users will have to add an exception in their browser.
-"""
-
-import datetime
-from pathlib import Path
-import requests
-import ssl
-import subprocess
-import sys
-
-import OpenSSL
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-# pylint: disable=wrong-import-position
-from utilities import logging as lg  # noqa: E402
-from utilities.config import load_conf  # noqa: E402
-
-
-def main():
-    lg.log("Check TLS settings:")
-
-    if subprocess.call(["which", "nginx"], stdout=subprocess.DEVNULL) != 0:
-        lg.info("nginx not found, skipping test")
-        exit(2)
-
-    conf = load_conf()
-
-    conf_servers = (
-        ("MS_SERVER_NAME", "mediaserver"),
-        ("CM_SERVER_NAME", "mirismanager"),
-        ("MONITOR_SERVER_NAME", "monitor"),
-    )
-
-    all_ok = True
-    failure = False
-
-    with open("/etc/hosts", "r") as fo:
-        hosts = fo.read()
-
-    for setting, default in conf_servers:
-        name = conf.get(setting)
-        if name == default:
-            # vhost is using default value, the service is surely not installed
-            continue
-        if name not in hosts:
-            # the domain is not in the hosts file, the service is surely not installed
-            continue
-
-        # check if custom port is used
-        v_split = name.split(":")
-        if len(v_split) > 1:
-            server_name = v_split[0]
-            port = int(v_split[1])
-        else:
-            server_name = name
-            port = 443
-
-        conn = ssl.create_connection((server_name, port))
-        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
-        sock = context.wrap_socket(conn, server_hostname=server_name)
-        cert = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))
-        x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
-        not_after = x509.get_notAfter().decode("ascii")
-
-        expires = datetime.datetime.strptime(not_after, "%Y%m%d%H%M%SZ")
-        remaining = expires - datetime.datetime.utcnow()
-
-        if remaining < datetime.timedelta(days=0):
-            lg.error("{}: expired since {}".format(server_name, str(remaining)))
-            # if mediaserver (the only cert that is mandatory)
-            if setting == conf_servers[0]:
-                failure = True
-        elif remaining < datetime.timedelta(days=7):
-            lg.error("{}: expire in {}".format(server_name, str(remaining)))
-            # if mediaserver (the only cert that is mandatory)
-            if setting == conf_servers[0]:
-                failure = True
-        elif remaining < datetime.timedelta(days=30):
-            lg.warning("{}: expire in {}".format(server_name, str(remaining)))
-            # if mediaserver (the only cert that is mandatory)
-            if setting == conf_servers[0]:
-                all_ok = False
-        else:
-            lg.success("{}: expire in {}".format(server_name, str(remaining)))
-
-        try:
-            url = "https://{}".format(name)
-            requests.get(url)
-            lg.success("{}: trusted certificate".format(name))
-        except requests.exceptions.SSLError:
-            lg.warning("{}: untrusted certificate".format(name))
-            # if mediaserver (the only cert that is mandatory)
-            if setting == conf_servers[0]:
-                all_ok = False
-
-    if failure:
-        exit(1)
-    if not all_ok:
-        exit(3)
-
-
-if __name__ == "__main__":
-    main()
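The expiry computation above uses pyOpenSSL on the raw DER certificate. A stdlib-only sketch of the same arithmetic; unlike the script it uses a verifying context, so it only works against certificates the system already trusts (host and port are placeholders):

# Sketch of the certificate expiry check with the stdlib ssl module.
import datetime
import socket
import ssl

host, port = 'example.com', 443  # placeholders
ctx = ssl.create_default_context()
with socket.create_connection((host, port), timeout=5) as conn:
    with ctx.wrap_socket(conn, server_hostname=host) as sock:
        not_after = sock.getpeercert()['notAfter']
expires = datetime.datetime.utcfromtimestamp(ssl.cert_time_to_seconds(not_after))
remaining = expires - datetime.datetime.utcnow()
print(remaining)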
diff --git a/tests/scripts/test_wowza.py b/tests/scripts/test_wowza.py
deleted file mode 100755
index a08178c8474eaa4579c03b87a8d9398e3da05731..0000000000000000000000000000000000000000
--- a/tests/scripts/test_wowza.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/usr/bin/env python3
-
-'''
-Criticality: Normal
-Checks that the streaming server (Wowza) is running correctly.
-'''
-
-from pathlib import Path
-import re
-import subprocess  # nosec: B404
-import sys
-
-from defusedxml.ElementTree import parse
-from psutil import net_connections
-from packaging.version import parse as parse_version
-from lxml import etree
-
-sys.path.append(str(Path(__file__).parents[1].resolve()))
-
-# pylint: disable=wrong-import-position
-from utilities import logging as lg  # noqa: E402
-
-LATEST_VERSION = '4.8.5'
-WOWZA_TUNE_FILE = '/usr/local/WowzaStreamingEngine/conf/Tune.xml'
-WOWZA_RECOMMENDED_HEAP_SIZE = 2000
-
-
-def main():
-    '''Run all checks and exits with corresponding exit code.'''
-
-    lg.log('Checking Wowza settings:')
-    warnings = 0
-    errors = 0
-
-    # check if wowza is installed
-    if not check_installed_and_enabled():
-        exit(2)
-
-    # check wowza version
-    check_warn, check_err = check_version()
-    if check_err:
-        errors += 1
-    elif check_warn:
-        warnings += 1
-
-    # check wowza heap size
-    check_warn, check_err = check_heap_size()
-    if check_err:
-        errors += 1
-    elif check_warn:
-        warnings += 1
-
-    # check that wowza is running
-    check_warn, check_err = check_running()
-    if check_err:
-        errors += 1
-    elif check_warn:
-        warnings += 1
-
-    # check that wowza is listening
-    check_warn, check_err = check_listening()
-    if check_err:
-        errors += 1
-    elif check_warn:
-        warnings += 1
-
-    if errors:
-        exit(1)
-    elif warnings:
-        exit(3)
-
-    exit(0)
-
-
-def check_installed_and_enabled() -> bool:
-    '''Check that Wowza is installed and enabled.
-
-    :return: Whether Wowza is installed and enabled
-    :rtype: bool
-    '''
-
-    cmd = 'dpkg --get-selections "wowza*"'
-    out = subprocess.getoutput(cmd).strip()
-    state = out.split('\t')[-1]
-    if state != 'install':
-        lg.info('not installed, skip test')
-        return False
-
-    cmd = 'systemctl is-enabled WowzaStreamingEngine'
-    out = subprocess.getoutput(cmd).strip()
-    if not out.endswith('enabled'):
-        lg.info('not enabled, skip test')
-        return False
-
-    return True
-
-
-def check_version() -> tuple:
-    '''Check the Wowza version installed.
-
-    :return: Warning and error counts
-    :rtype: tuple
-    '''
-
-    warnings = 0
-    errors = 0
-
-    cmd = 'dpkg --get-selections "wowza*"'
-    out = subprocess.getoutput(cmd)
-    version = None
-
-    for line in out.split('\n'):
-        if line.split()[-1] == 'install':
-            if version:
-                lg.error('multiple Wowza versions installed, keep only the latest')
-                errors += 1
-            version = '.'.join(re.findall(r'\d', line))
-
-    if not version:
-        lg.error('cannot find Wowza version')
-        errors += 1
-        return warnings, errors  # avoid parsing a missing version below
-
-    if parse_version(version) < parse_version(LATEST_VERSION):
-        lg.info(
-            'using outdated version: {0} < {1} (recommended)'
-            .format(version, LATEST_VERSION)
-        )
-    elif parse_version(version) > parse_version(LATEST_VERSION):
-        lg.success(
-            'using newer version than the recommended: {0} > {1} (recommended)'
-            .format(version, LATEST_VERSION)
-        )
-    else:
-        lg.success('using the recommended version: {0}'.format(version))
-
-    return warnings, errors
-
-
-def check_heap_size() -> tuple:
-    '''Check the heap size configured.
-
-    :return: Warning and error counts
-    :rtype: tuple
-    '''
-
-    warnings = 0
-    errors = 0
-
-    # Configured wowza heap size extraction
-    try:
-        tune_xml = etree.parse(WOWZA_TUNE_FILE)
-        value = tune_xml.find('Tune/HeapSize').text[0:-1]
-        heap_size = int(value)
-    except Exception as e:
-        lg.info(
-            'failed to get heap size value: {0}'
-            .format(e)
-        )
-    else:
-        if heap_size < WOWZA_RECOMMENDED_HEAP_SIZE:
-            lg.error(
-                'not using recommended heap size: {0}M < {1}M (recommended)'
-                .format(heap_size, WOWZA_RECOMMENDED_HEAP_SIZE)
-            )
-            errors += 1
-        else:
-            lg.success(
-                'using recommended heap size or above: {0}M'
-                .format(heap_size)
-            )
-
-    return warnings, errors
-
-
-def check_running() -> tuple:
-    '''Check that Wowza is running.
-
-    :return: Warning and error counts
-    :rtype: tuple
-    '''
-
-    warnings = 0
-    errors = 0
-
-    cmd = 'systemctl status WowzaStreamingEngine'
-    out = subprocess.getoutput(cmd)
-    if 'Active: active (running)' not in out:
-        lg.error('service not running')
-        errors += 1
-    else:
-        lg.success('service running')
-
-    return warnings, errors
-
-
-def check_listening() -> tuple:
-    '''Check that Wowza is listening on configured port.
-
-    :return: Warning and error counts
-    :rtype: tuple
-    '''
-
-    warnings = 0
-    errors = 0
-
-    # get port number in Wowza config
-    conf = parse('/usr/local/WowzaStreamingEngine/conf/VHost.xml').getroot()
-    port = conf.findall('VHost/HostPortList/HostPort/Port')[0].text
-
-    # get listening ports
-    listening = set(
-        c.laddr[1] for c in net_connections(kind='inet') if c.status == 'LISTEN'
-    )
-
-    # check that system is listening on this port
-    if int(port) not in listening:
-        lg.error('not listening on port {}'.format(port))
-        errors += 1
-    else:
-        lg.success('listening on port {}'.format(port))
-
-    return warnings, errors
-
-
-if __name__ == '__main__':
-    main()
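check_listening() above cross-references the port from VHost.xml with the system's listening sockets via psutil. The socket side, standalone (1935 is just the conventional RTMP port, used as an example):

# Sketch of the listening-port lookup from test_wowza.py.
from psutil import net_connections

listening = {c.laddr[1] for c in net_connections(kind='inet') if c.status == 'LISTEN'}
print(1935 in listening)  # example port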
diff --git a/tests/systemd/envsetup-tester.service b/tests/systemd/envsetup-tester.service
deleted file mode 100644
index add1bc4eb300c566e489c6d8b4c7f7b83186affe..0000000000000000000000000000000000000000
--- a/tests/systemd/envsetup-tester.service
+++ /dev/null
@@ -1,7 +0,0 @@
-[Unit]
-Description=Envsetup tests service
-
-[Service]
-Type=simple
-EnvironmentFile=/etc/environment
-ExecStart=/bin/bash -c -- 'if [ -f /root/envsetup/tests/tester.py ]; then mkdir -p /root/envsetup/tests/logs; /root/envsetup/tests/tester.py -e >/root/envsetup/tests/logs/tester_service.log 2>&1; fi'
diff --git a/tests/systemd/envsetup-tester.timer b/tests/systemd/envsetup-tester.timer
deleted file mode 100644
index 0ab5111bb6761849d2d583206ff4618b60c5c8b2..0000000000000000000000000000000000000000
--- a/tests/systemd/envsetup-tester.timer
+++ /dev/null
@@ -1,8 +0,0 @@
-[Unit]
-Description=Envsetup tests timer
-
-[Timer]
-OnCalendar=*-*-* 6:15:00
-
-[Install]
-WantedBy=timers.target
diff --git a/tests/tester.py b/tests/tester.py
index d8fcbc0c14e3ea9fb114305d6fbd3c87ded72a84..3fe8b145b78f50acdd4a58b5e040da6b4a73b9a0 100755
--- a/tests/tester.py
+++ b/tests/tester.py
@@ -1,559 +1,97 @@
 #!/usr/bin/env python3
-
 '''
-Script to start tests and to manage their results
+Transition script to UbiCast security packages.
 '''
 
-from io import StringIO
-import argparse
-import base64
-import datetime
-import glob
-import os
-import socket
 import subprocess
 import sys
-import time
-import uuid
-
-from utilities.config import load_conf, get_conf
-from utilities.logging import strip_colors, escape_html
-from utilities.os import get_dir
-from utilities.systemd import check_systemd_setup
-
-OUT_OF_SUPPORT_TEXT = '''\033[93mWarning:
-The system is out of support, UbiCast will not be notified if errors are detected.
-Please contact UbiCast sales team (sales@ubicast.eu) to renew the support contract.\033[0m'''
-
-
-class Logger(object):
-    '''
-    Class to duplicate output in a buffer
-    '''
-    def __init__(self, stream, log_buffer):
-        self.stream = stream
-        self.log_buffer = log_buffer
-
-    def write(self, text):
-        self.stream.write(text)
-        self.stream.flush()
-        self.log_buffer.write(text)
-        self.log_buffer.flush()
-
-    def flush(self):
-        pass
-
 
-log_buffer = StringIO()
-sys.stdout = Logger(sys.stdout, log_buffer)
-sys.stderr = sys.stdout
 
-
-def raid_idle():
-    '''
-    Function to test that the RAID is not running any task
-    '''
-    idle = True
-    devs = glob.glob('/sys/block/md*/md/sync_action')
-    for d in devs:
-        with open(d, 'r') as f:
-            sync_state = f.read().strip()
-            if sync_state != 'idle':
-                idle = False
-                print('State in %s is %s' % (d, sync_state))
-    return idle
-
-
-class Tester():
-    '''
-    Main tester class
-    '''
-    MAX_LOG_FILES = 50
+class SecurityMigration():
 
     def __init__(self):
-        print('\033[96m-------------------------------\033[0m')
-        print('\033[96m- UbiCast applications tester -\033[0m')
-        print('\033[96m-------------------------------\033[0m')
-        # parse args
-        parser = argparse.ArgumentParser(description=__doc__.strip())
-        parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='Debug mode (can be started with non root users).')
-        parser.add_argument('-e', '--email', dest='send_email', action='store_true', help='Send tests report by email.')
-        parser.add_argument('-b', '--basic', dest='basic_tests', action='store_true', help='Run only basic tests (exclude mediaserver tests).')
-        parser.add_argument('-n', '--no-update', dest='no_update', action='store_true', help='Do not update envsetup repository.')
-        parser.add_argument('-p', '--no-packages', dest='no_packages', action='store_true', help='Do not install packages.')
-        parser.add_argument('msuser', nargs='?', help='The unix user of the MediaServer instance to test. Default is user specified in configuration or all users if not set.')
-        args = parser.parse_args()
-        # Check current dir
-        root_dir = get_dir(__file__)
-        if root_dir != '':
-            os.chdir(root_dir)
-        self.root_dir = root_dir
-        # Add to python path
-        if root_dir not in sys.path:
-            sys.path.append(root_dir)
-        # Check that this script is run by root
-        if os.getuid() != 0 and not args.debug:
-            print('This script should be run as root user.')
-            sys.exit(1)
-        # Update envsetup files
-        if not args.no_update:
-            tester_path = os.path.join(root_dir, os.path.basename(__file__))
-            mtime = os.path.getmtime(tester_path)
-            subprocess.run(['python3', 'update_envsetup.py'], timeout=1800)
-            if mtime != os.path.getmtime(tester_path):
-                print('The script has changed, restarting it...')
-                os.execl('/usr/bin/python3', 'python3', tester_path, '-n', *sys.argv[1:])
-                sys.exit(1)  # not reachable
-        # Install utilities packages
-        if not args.no_packages:
-            subprocess.run(['python3', 'pkgs_envsetup.py'], timeout=1800)
-        # Load conf
-        conf = load_conf()
-        if not conf:
-            print('No configuration loaded.')
-            sys.exit(1)
-        # Check RAID status
-        if not raid_idle():
-            print('A RAID check or operation is in progress, aborting tests')
-            sys.exit(1)
-        # Get tests to run
-        tests = self.discover_tests(args.basic_tests, msuser=args.msuser, no_update=args.no_update)
-        if not tests:
-            print('No test to run.')
-            sys.exit(1)
-        # Print system info
-        self.print_system_info()
-        # Create logs dir
-        self.logs_dir = os.path.join(self.root_dir, 'logs')
-        os.makedirs(self.logs_dir, exist_ok=True)
-        print('Logs dir is "%s".' % self.logs_dir)
-        # Check systemd service and timer
-        check_systemd_setup()
-        # Run tests
-        now, failures, out_of_support, log_content, html_report = self.run_tests(tests)
-        if args.send_email:
-            failures += self.send_report_email(now, failures, out_of_support, log_content, html_report)
-        sys.exit(1 if failures > 0 else 0)
-
-    def print_system_info(self):
-        print('System information:')
-        print('- Date: %s UTC.' % datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
-        print('- FQDN: %s.' % socket.getfqdn())
-        p = subprocess.run(['ip', '-br', 'addr'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, timeout=1800)
-        print('- IP config:\n%s' % p.stdout.decode('utf-8'))
-
-    def get_log_name(self, now):
-        hostname = socket.gethostname()
-        log_name = 'results_%s_%s.log' % (
-            hostname or 'noname',
-            now.strftime('%Y-%m-%d_%H-%M-%S'),
-        )
-        return log_name
-
-    def parse_file_header(self, path):
-        with open(path, 'r') as fo:
-            content = fo.read()
-        description = ''
-        if path.endswith('.py'):
-            start = (
-                content.find("'''")
-                if content.find("'''") != -1
-                else content.find('"""')
-            )
-            if start > 0:
-                start += 3
-                end = (
-                    content.find("'''", start)
-                    if content.find("'''", start) != -1
-                    else content.find('"""', start)
-                )
-                if end > 0:
-                    description = content[start:end]
+        print(__doc__)
+        # Rename UbiCast source list and get miris manager url
+        subprocess.run('mv /etc/apt/sources.list.d/skyreach.list /etc/apt/sources.list.d/ubicast.list', shell=True)
+        if subprocess.run('grep -E "^deb https://skyreach.ubicast.net" /etc/apt/sources.list.d/ubicast.list', shell=True).returncode == 0:
+            mirismanager_url = 'https://skyreach.ubicast.net'
         else:
-            for line in content.split('\n'):
-                if line.startswith('#!'):
-                    continue
-                elif line.startswith('#'):
-                    description += line[1:].strip() + '\n'
-                else:
-                    break
-        description = description.strip()
-        if description.startswith('Criticality:'):
-            criticality, *description = description.split('\n')
-            criticality = criticality[len('Criticality:') :].strip()  # noqa: E203
-            description = '\n'.join(description)
-        else:
-            criticality = 'not specified'
-        return criticality, description
-
-    def discover_tests(self, basic_only=False, msuser=None, no_update=False):
-        ignored_tests = get_conf('TESTER_IGNORED_TESTS', '').split(',')
-        ignored_tests.append('__init__.py')
-        if basic_only:
-            tests = self.discover_basic_tests(ignored_tests)
-        elif msuser:
-            tests = self.discover_mediaserver_tests(msuser, no_update, ignored_tests)
-        else:
-            tests = self.discover_basic_tests(ignored_tests)
-            tests.extend(self.discover_mediaserver_tests(msuser, no_update, ignored_tests))
-        criticalities_map = {'Low': 1, 'Normal': 2, 'High': 3}
-        tests.sort(key=lambda i: (-criticalities_map.get(i[1], 0), i[0]))
-        return tests
-
-    def discover_basic_tests(self, ignored_tests=None):
-        # Get standard tests
-        test_dir = os.path.join(self.root_dir, 'scripts')
-        if not os.path.isdir(test_dir):
-            print('The tests dir is missing ("%s").' % test_dir)
-            return list()
-        names = os.listdir(test_dir)
-        names.sort()
-        if not names:
-            print('The tests dir is empty ("%s").' % test_dir)
-            return list()
-        tests = list()
-        for name in names:
-            if ignored_tests and name in ignored_tests:
-                continue
-            test_path = os.path.join(test_dir, name)
-            if os.path.isfile(test_path):
-                criticality, description = self.parse_file_header(test_path)
-                tests.append((name, criticality, description, [test_path], None))
-        return tests
-
-    def discover_mediaserver_tests(self, msuser=None, no_update=False, ignored_tests=None):
-        # Get MS instances
-        ms_users = list()
-        for user in os.listdir('/home'):
-            if os.path.exists('/home/%s/msinstance' % user) and (
-                not msuser or user == msuser
-            ):
-                ms_users.append(user)
-        if not ms_users:
-            return list()
-        ms_users.sort()
-        cleaned_list = list()
-        instances_to_test = get_conf('TESTER_MS_INSTANCES', '').split(',')
-        if instances_to_test:
-            for val in instances_to_test:
-                val = val.strip()
-                if not val:
-                    continue
-                if val in ms_users:
-                    cleaned_list.append(val)
-                else:
-                    print(
-                        'A nonexistent instance has been requested for tests: "%s".'
-                        % val
-                    )
-        if cleaned_list:
-            ms_users = cleaned_list
-        else:
-            try:
-                max_instances = int(get_conf('TESTER_MAX_INSTANCES') or 2)
-            except Exception as e:
-                print('TESTER_MAX_INSTANCES has an invalid value: %s' % e)
-                max_instances = 2
-            if len(ms_users) > max_instances:
-                ms_users = ms_users[:max_instances]
-        print('Instances that will be tested: %s.' % ', '.join(ms_users))
-        # Clone testing suite
-        ms_path = os.path.join(self.root_dir, 'scripts', 'ms-testing-suite')
-        if not os.path.exists(ms_path):
-            print('Cloning ms-testing-suite in "%s".' % ms_path)
-            subprocess.run([
-                'git',
-                'clone',
-                '--recursive',
-                'https://mirismanager.ubicast.eu/git/mediaserver/ms-testing-suite.git',
-                ms_path,
-            ], timeout=1800)
-        if not os.path.exists(ms_path):
-            print('The ms-testing-suite dir "%s" does not exist, no MediaServer test will be run.' % ms_path)
-            return list()
-        # Update testing suite if allowed
-        if not no_update:
-            print('Updating ms-testing-suite in "%s".' % ms_path)
-            os.chdir(ms_path)
-            branch = get_conf('ENVSETUP_BRANCH') or 'stable'
-            if branch:
-                subprocess.run(['git', 'checkout', branch], timeout=1800)
-            subprocess.run(['git', 'fetch', '--recurse-submodules', '--all'], timeout=1800)
-            subprocess.run(['git', 'reset', '--hard', 'origin/{}'.format(branch)], timeout=1800)
-            subprocess.run(['git', 'pull', '--recurse-submodules'], timeout=1800)
-            subprocess.run(['git', 'submodule', 'update', '--init', '--recursive'], timeout=1800)
-            os.chdir(self.root_dir)
-        # Build tests list
-        print('Add MediaServer tests if available.')
-        wowza_dir = '/usr/local/WowzaStreamingEngine'
-        etc_lives_conf = '/etc/mediaserver/lives.json'
-        local_lives_conf = '/home/%s/msinstance/conf/lives.json'
-        old_etc_lives_conf = '/etc/mediaserver/lives_conf.py'
-        old_local_lives_conf = '/home/%s/msinstance/conf/lives_conf.py'
-        tests = list()
-        for user in ms_users:
-            ms_tests = ['ms_vod_tester.py', 'test_caches.py']
-            # Check if live tests should be started
-            if (
-                os.path.exists(wowza_dir)
-                or os.path.exists(etc_lives_conf)
-                or os.path.exists(local_lives_conf % user)
-                or os.path.exists(old_etc_lives_conf)
-                or os.path.exists(old_local_lives_conf % user)
-            ):
-                ms_tests.append('test_wowza_secure.py')
-                ms_tests.append('ms_live_tester.py')
-            ignore_rules = get_conf('TESTER_IGNORE_ROUTING_RULES', '0')
-            for name in ms_tests:
-                if ignored_tests and name in ignored_tests:
-                    continue
-                test_path = os.path.join(ms_path, name)
-                if os.path.exists(test_path):
-                    criticality, description = self.parse_file_header(test_path)
-                    tests.append((
-                        '%s (%s)' % (name, user),
-                        criticality,
-                        description,
-                        [test_path, user],
-                        {'IGNORE_ROUTING_RULES': ignore_rules},
-                    ))
-        return tests
-
-    def run_tests(self, tests):
-        # Run all tests
-        successes = 0
-        failures = 0
-        total_duration = None
-        report_rows = [('Test', 'Criticality', 'Result', 'Duration', 'Description')]
-        report_rows_length = [len(t) for t in report_rows[0]]
-        out_of_support = False
-        for name, criticality, description, command, env in tests:
-            print('\033[1;95m-- Test "%s" --\033[0;0m' % name)
-            start_date = datetime.datetime.utcnow()
-            print('Test start: %s UTC.' % start_date.strftime('%Y-%m-%d %H:%M:%S'))
-            # Run test
-            count = 0
-            returncode = None
-            while count < 3:
-                if count > 0:
-                    wait_time = 5 * count * count
-                    print('Waiting %s s...' % wait_time)
-                    time.sleep(wait_time)
-                count += 1
-                print('Attempt: %s' % str(count))
-                test_env = dict(os.environ)
-                if env:
-                    test_env.update(env)
-                try:
-                    p = subprocess.run(
-                        command,
-                        env=test_env,
-                        stdin=subprocess.DEVNULL,
-                        stdout=subprocess.PIPE,
-                        stderr=subprocess.STDOUT,
-                        timeout=1800,
-                    )
-                    out = p.stdout.decode('utf-8', 'replace').strip()
-                    print(out)
-                    out_of_support = out_of_support or 'out of support' in out
-                    returncode = p.returncode
-                    if returncode in (0, 2, 3):
-                        break
-                except Exception as e:
-                    print('Command failed: %s' % e)
-                    returncode = None
-            if returncode == 0:
-                status = '\033[92msuccess\033[0m'
-                successes += 1
-            elif returncode == 2:
-                status = '\033[94mnot testable\033[0m'
-            elif returncode == 3:
-                status = '\033[93mwarning\033[0m'
-            else:
-                status = '\033[91mfailure\033[0m'
-                failures += 1
-                print('Command exited with code %s.' % returncode)
-            # Get duration
-            end_date = datetime.datetime.utcnow()
-            duration = end_date - start_date
-            if total_duration:
-                total_duration += duration
-            else:
-                total_duration = duration
-            print(
-                'Test end: %s UTC (duration: %s).'
-                % (end_date.strftime('%Y-%m-%d %H:%M:%S'), duration)
-            )
-            # Prepare report
-            report_rows.append((name, criticality, status, str(duration), description))
-            report_rows_length = [
-                max(len(strip_colors(t)), report_rows_length[i])
-                for i, t in enumerate(report_rows[-1])
-            ]
-        # Display results
-        #     results as text
-        print('\nTests results:')
-        log_report = ''
-        for row in report_rows:
-            if not log_report:
-                log_report += '-' * 50
-            for i, val in enumerate(row):
-                if i == len(row) - 1:
-                    break
-                if i == 0:
-                    # merge name and description
-                    log_report += '\n\033[96m%s\033[0m  \033[37m%s\033[0m\n' % (
-                        val,
-                        row[-1],
-                    )
-                else:
-                    nb_sp = report_rows_length[i] - len(strip_colors(val))
-                    log_report += '  %s%s' % (val, ' ' * nb_sp)
-            log_report += '\n' + '-' * 50
-        if out_of_support:
-            log_report = OUT_OF_SUPPORT_TEXT + '\n' + log_report
-        print(log_report.strip())
-        print('Total tests duration: %s.\n' % total_duration)
-        #     results as html
-        html_report = ''
-        for row in report_rows:
-            html_cell = 'th' if not html_report else 'td'
-            html_report += '\n <tr>'
-            for i, val in enumerate(row):
-                html_report += ' <%s>%s</%s>' % (html_cell, escape_html(val), html_cell)
-            html_report += ' </tr>'
-        html_report = '<table border="1">%s\n</table>' % html_report
-        if out_of_support:
-            html_report = '<p>' + escape_html(OUT_OF_SUPPORT_TEXT) + '</p>\n' + html_report
-        # Store locally results
-        now = datetime.datetime.utcnow()
-        history_file = os.path.join(self.logs_dir, 'tests_history.txt')
-        add_header = not os.path.exists(history_file)
-        with open(history_file, 'a') as fo:
-            if add_header:
-                fo.write('Date | Result | Succeeded | Failed | Not testable\n')
-            fo.write('%s | %s | %s | %s | %s\n' % (
-                now.strftime('%Y-%m-%d %H:%M:%S'),
-                'KO' if failures > 0 else 'OK',
-                successes,
-                failures,
-                len(tests) - successes - failures,
-            ))
-        # Search for old logs to remove
-        names = os.listdir(self.logs_dir)
-        names.sort()
-        for name in list(names):
-            if not name.startswith('results_'):
-                names.remove(name)
-        while len(names) > self.MAX_LOG_FILES - 1:
-            name = names.pop(0)
-            try:
-                print('Removing old log "%s".' % os.path.join(self.logs_dir, name))
-                os.remove(os.path.join(self.logs_dir, name))
-            except Exception as e:
-                print('Failed to remove old log: %s' % e)
-        # Write log to file
-        log_content = strip_colors(log_buffer.getvalue())
-        with open(os.path.join(self.logs_dir, self.get_log_name(now)), 'w') as fo:
-            fo.write(log_content)
-        return now, failures, out_of_support, log_content, html_report
-
-    def send_report_email(self, now, failures, out_of_support, log_content, html_report):
-        hostname = socket.gethostname()
-        if not hostname:
-            print('Failed to get hostname (required to send email).')
-            return 1
-        fqdn = socket.getfqdn()
-        log_content_encoding = '8bit'
-        # Get sender and recipients
-        recipients = get_conf('EMAIL_ADMINS') or ''
-        system_domain = get_conf('MS_SERVER_NAME')
-        system_type = 'MediaServer'
-        if not system_domain or system_domain == 'mediaserver':
-            system_domain = get_conf('CM_SERVER_NAME')
-            system_type = 'MirisManager'
-            if not system_domain or system_domain == 'mirismanager':
-                system_domain = get_conf('MONITOR_SERVER_NAME')
-                system_type = 'Server'
-                if not system_domain or system_domain == 'monitor':
-                    system_domain = fqdn
-        if '.' in system_domain:
-            top_domain = '.'.join(system_domain.split('.')[-2:])
-        elif '.' in fqdn:
-            top_domain = '.'.join(fqdn.split('.')[-2:])
-        else:
-            top_domain = system_domain + '.local'
-        sender = hostname + '@' + top_domain
-        print('Sender address: %s' % sender)
-        # Prepare email content
-        if out_of_support:
-            system_domain = '[OUT OF SUPPORT] %s' % system_domain
-            recipients = recipients.replace('sysadmin@ubicast.eu', '').replace(
-                ',,', ','
-            )
-        elif get_conf('PREMIUM_SUPPORT') != '0':
-            system_domain = '[PREMIUM] %s' % system_domain
-            recipients = recipients.replace('sysadmin@ubicast.eu', '').replace(
-                ',,', ','
-            )
-            recipients += ',sysadmin+premium@ubicast.eu'
-        recipients = recipients.strip(',')
-        if not recipients:
-            print('No recipients defined for email sending. Set a value for EMAIL_ADMINS.')
-            return 0
-        boundary = str(uuid.uuid4())
-        if get_conf('TESTER_BASE64_ATTACH') != '0':
-            log_content_encoding = 'base64'
-            log_content = base64.b64encode(log_content.encode('utf-8')).decode()
-        mail = '''From: %(hostname)s <%(sender)s>
-To: %(recipients)s
-Subject: %(system_domain)s (%(hostname)s) %(system_type)s health report: %(status)s
-Mime-Version: 1.0
-Content-type: multipart/related; boundary="%(boundary)s"
-
---%(boundary)s
-Content-Type: text/html; charset=UTF-8
-Content-transfer-encoding: %(log_content_encoding)s
-
-<p><b>Date: %(date)s UTC</b></p>
-<p>FQDN: %(fqdn)s</p>
-%(report)s
-
---%(boundary)s
-Content-type: text/plain; name="%(log_name)s"; charset=UTF-8
-Content-disposition: attachment; filename="%(log_name)s"
-Content-transfer-encoding: %(log_content_encoding)s
-
-%(log_content)s''' % dict(
-            boundary=boundary,
-            sender=sender,
-            hostname=hostname,
-            recipients=recipients,
-            status=('KO (%s tests failed)' % failures) if failures > 0 else 'OK',
-            date=now.strftime('%Y-%m-%d %H:%M:%S'),
-            fqdn=fqdn,
-            report=html_report,
-            log_name=self.get_log_name(now).replace('.log', '.txt'),
-            log_content_encoding=log_content_encoding,
-            log_content=log_content,
-            system_domain=system_domain,
-            system_type=system_type,
-        )
-        # Send email
-        p = subprocess.Popen(
-            ['/usr/sbin/sendmail', '-t'],
-            stdin=subprocess.PIPE,
-            stdout=sys.stdout.stream,
-            stderr=sys.stderr.stream,
-        )
-        p.communicate(input=mail.encode('utf-8'), timeout=1800)
+            mirismanager_url = 'https://mirismanager.ubicast.eu'
+
+        # Add UbiCast security repository and packages
+        subprocess.run('apt-get update', shell=True)
+        subprocess.run('apt-get install -y apt-transport-https curl', shell=True)
+        subprocess.run('curl -s -o- %s/media/public.gpg | apt-key add -' % mirismanager_url, shell=True)
+        with open('/etc/apt/sources.list.d/ubicast-secu.list', 'w') as fo:
+            fo.write('deb %s packaging/apt/ubicast-security-updates/' % mirismanager_url)
+        subprocess.run('apt-get update', shell=True)
+        subprocess.run('apt-get install -y ubicast-env ubicast-tester ubicast-ssh-access', shell=True)
+
+        # Mark old test packages as automatically installed
+        subprocess.run('apt-mark auto bsd-mailx python3-apt python3-defusedxml python3-dnspython python3-openssl python3-psutil python3-packaging python3-lxml python3-psycopg2 python3-pydbus python3-requests python3-spf openjdk-8-jre-headless openjdk-11-jre-headless', shell=True)
+
+        # Check that security automatic updates are enabled
+        subprocess.run('apt-get install -y unattended-upgrades grep', shell=True)
+        p = subprocess.run('grep -r \'APT::Periodic::Unattended-Upgrade "1"\' /etc/apt/apt.conf.d/', shell=True)
         if p.returncode != 0:
-            print('Failed to send email.')
-            return 1
+            with open('/etc/apt/apt.conf.d/20auto-upgrades', 'w') as fo:
+                fo.write('APT::Periodic::Update-Package-Lists "1";\nAPT::Periodic::Unattended-Upgrade "1";')
+
+        # Enable UbiCast security repository in automatic updates
+        unattended_conf = '/etc/apt/apt.conf.d/50unattended-upgrades'
+        with open(unattended_conf, 'r') as fo:
+            content = fo.read()
+        if 'Unattended-Upgrade::Origins-Pattern {\n' in content:
+            # Unattended upgrades 1.11+ (Debian 10)
+            if '"origin=UbiCast,label=UbiCast-Security";' not in content:
+                new_content = content.replace(
+                    'Unattended-Upgrade::Origins-Pattern {\n',
+                    'Unattended-Upgrade::Origins-Pattern {\n        "origin=UbiCast,label=UbiCast-Security";\n')
+                with open(unattended_conf, 'w') as fo:
+                    fo.write(new_content)
+                print('Updated "%s"' % unattended_conf)
+        elif 'Unattended-Upgrade::Allowed-Origins {\n' in content:
+            # Unattended upgrades 1.1 (Ubuntu 18)
+            if '"UbiCast:UbiCast-Security";' not in content:
+                new_content = content.replace(
+                    'Unattended-Upgrade::Allowed-Origins {\n',
+                    'Unattended-Upgrade::Allowed-Origins {\n        "UbiCast:UbiCast-Security";\n')
+                with open(unattended_conf, 'w') as fo:
+                    fo.write(new_content)
+                print('Updated "%s"' % unattended_conf)
         else:
-            print('Email sent to: %s' % recipients)
-            return 0
+            print('Unrecognized unattended-upgrades version.')
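+        # Sketch of the expected result of the Origins-Pattern edit above
+        # (illustrative only; the Debian-Security line shown is the
+        # distribution default, not written by this script):
+        #     Unattended-Upgrade::Origins-Pattern {
+        #         "origin=UbiCast,label=UbiCast-Security";
+        #         "origin=Debian,codename=${distro_codename},label=Debian-Security";
+        #     };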
+
+        # Remove old tester files
+        to_remove = [
+            '/root/envsetup/.docker',
+            '/root/envsetup/.flake8',
+            '/root/envsetup/.git',
+            '/root/envsetup/.githooks',
+            '/root/envsetup/.gitignore',
+            '/root/envsetup/.gitlab-ci.yml',
+            '/root/envsetup/.lint',
+            '/root/envsetup/ansible',
+            '/root/envsetup/doc',
+            '/root/envsetup/Makefile',
+            '/root/envsetup/README.md',
+            '/root/envsetup/tests',  # the directory containing this script
+        ]
+        for path in to_remove:
+            subprocess.run(['rm', '-rf', path])
+
+        # Disable and remove systemd scripts
+        subprocess.run(['systemctl', 'disable', 'envsetup-tester.timer'])
+        subprocess.run(['systemctl', 'stop', 'envsetup-tester.timer'])
+        subprocess.run(['systemctl', 'disable', 'envsetup-tester.service'])
+        subprocess.run(['rm', '-rf', '/lib/systemd/system/envsetup-tester.timer'])
+        subprocess.run(['rm', '-rf', '/lib/systemd/system/envsetup-tester.service'])
+
+        # Run tester
+        subprocess.run(['python3', '/root/ubicast-tester/tester.py', '-e'])
+
+        sys.exit(0)
 
 
 if __name__ == '__main__':
-    Tester()
+    SecurityMigration()
diff --git a/tests/update_envsetup.py b/tests/update_envsetup.py
deleted file mode 100755
index 1d6ee51123ab4e7580d1f01ef5e22d596bda0a5a..0000000000000000000000000000000000000000
--- a/tests/update_envsetup.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import subprocess
-import sys
-
-from utilities.config import get_conf
-
-
-if __name__ == '__main__':
-    branch = get_conf('ENVSETUP_BRANCH') or 'stable'
-    os.chdir(os.path.dirname(os.path.abspath(os.path.expanduser(__file__))))
-    sys.stdout.write('Updating envsetup: ')
-    sys.stdout.flush()
-    subprocess.call('find . -name "*.pyc" -type f -delete', shell=True)
-    subprocess.call('find . -name __pycache__ -type d -delete', shell=True)
-    subprocess.check_call(['git', 'fetch', '--recurse-submodules', '--all'])
-    subprocess.check_call(['git', 'reset', '--hard', 'origin/{}'.format(branch)])
-    if branch:
-        subprocess.check_call(['git', 'checkout', branch])
-    subprocess.check_call(['git', 'pull', '--recurse-submodules'])
-    subprocess.check_call(['git', 'submodule', 'update', '--init', '--recursive'])
-    subprocess.call('find . -type d -empty -delete', shell=True)
diff --git a/tests/utilities/__init__.py b/tests/utilities/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/tests/utilities/apt.py b/tests/utilities/apt.py
deleted file mode 100644
index 6ba29be4a7bb6f18deb188c2831a77726c2cce3e..0000000000000000000000000000000000000000
--- a/tests/utilities/apt.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/env python3
-
-'''
-A wrapper around the apt module that is actually usable.
-'''
-
-import os
-
-try:
-    import apt
-except ModuleNotFoundError:
-    print('Python apt module not found.')
-    exit(2)
-
-
-class AptInstallProgress(apt.progress.base.InstallProgress):
-    def fork(self):
-        pid = os.fork()
-        if pid == 0:
-            logfd = os.open('/tmp/envsetup-dpkg.log', os.O_RDWR | os.O_CREAT, 0o644)
-            os.dup2(logfd, 1)
-            os.dup2(logfd, 2)
-        return pid
-
-
-class Apt:
-    # cache: apt.cache.Cache
-    # packages: list
-    # _installed_packages: list
-    # installed_packages: list
-    # _removable_packages: list
-    # removable_packages: list
-    # _purgeable_packages: list
-    # purgeable_packages: list
-    # _upgradable_packages: list
-    # upgradable_packages: list
-
-    def __init__(self, update: bool = False):
-        os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
-        os.environ[
-            'PATH'
-        ] = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
-        self.cache = self.get_cache(update)
-        self.packages = self.get_packages()
-        self._installed_packages = self.get_installed_packages()
-        self.installed_packages = list(map(str, self._installed_packages))
-        self._removable_packages = self.get_removable_packages()
-        self.removable_packages = list(map(str, self._removable_packages))
-        self._purgeable_packages = self.get_purgeable_packages()
-        self.purgeable_packages = list(map(str, self._purgeable_packages))
-        self._upgradable_packages = self.get_upgradable_packages()
-        self.upgradable_packages = list(map(str, self._upgradable_packages))
-
-    def install(self, name: str) -> bool:
-        '''Install a package with APT.
-
-        :param name: Package name
-        :type name: str
-        :return: Whether installation is successful or not
-        :rtype: bool
-        '''
-
-        pkg = self.cache[name]
-        if not pkg.installed:
-            pkg.mark_install(auto_fix=False)
-        success = self.cache.commit(install_progress=AptInstallProgress())
-        self.reload()
-
-        return success
-
-    def is_installed(self, name: str) -> bool:
-        '''Check whether a package is installed.
-
-        :param name: Package name
-        :type name: str
-        :return: Whether the package is installed or not
-        :rtype: bool
-        '''
-
-        if name in self.cache:
-            return bool(self.cache[name].installed)
-
-        return False
-
-    def remove(self, name: str, purge: bool = False) -> bool:
-        '''Remove a package with APT.
-
-        :param name: Package name
-        :type name: str
-        :param purge: Whether to purge package configuration files or not, defaults to False
-        :type purge: bool
-        :return: Whether uninstallation is successful or not
-        :rtype: bool
-        '''
-
-        pkg = self.cache[name]
-        if pkg.installed:
-            pkg.mark_delete(auto_fix=False, purge=purge)
-        success = self.cache.commit(install_progress=AptInstallProgress())
-        self.reload()
-
-        return success
-
-    def purge(self, name: str) -> bool:
-        '''Purge a package with APT.
-
-        :param name: Package name
-        :type name: str
-        :return: Whether uninstallation is successful or not
-        :rtype: bool
-        '''
-
-        return self.remove(name, purge=True)
-
-    def reload(self):
-        '''Reload object.'''
-
-        self.cache.clear()
-        self.__init__()
-
-    def get_cache(self, update: bool = False) -> apt.cache.Cache:
-        '''Get an eventually updated Cache object.
-
-        :param update: Whether to update the cache or not, defaults to False
-        :type update: bool
-        :return: An APT Cache object
-        :rtype: apt.cache.Cache
-        '''
-
-        apt_cache = apt.cache.Cache()
-        if update:
-            apt_cache.update()
-            apt_cache.open()
-
-        return apt_cache
-
-    def get_packages(self) -> list:
-        '''Get packages list.
-
-        :return: Packages list
-        :rtype: list
-        '''
-
-        packages = list(self.cache)
-
-        return packages
-
-    def get_installed_packages(self) -> list:
-        '''Get installed packages list.
-
-        :return: Installed packages list
-        :rtype: list
-        '''
-
-        _installed_packages = [p for p in self.packages if p.is_installed]
-
-        return _installed_packages
-
-    def get_removable_packages(self) -> list:
-        '''Get auto-removable packages list.
-
-        :return: Auto-removable packages list
-        :rtype: list
-        '''
-
-        _removable_packages = [
-            p for p in self._installed_packages if p.is_auto_removable
-        ]
-
-        return _removable_packages
-
-    def get_purgeable_packages(self) -> list:
-        '''Get purgeable packages list.
-
-        :return: Purgeable packages list
-        :rtype: list
-        '''
-
-        _purgeable_packages = [
-            p for p in self.packages if not p.is_installed and p.has_config_files
-        ]
-
-        return _purgeable_packages
-
-    def get_upgradable_packages(self) -> list:
-        '''Get upgradable packages list.
-
-        :return: Upgradable packages list
-        :rtype: list
-        '''
-
-        _upgradable_packages = [p for p in self._installed_packages if p.is_upgradable]
-
-        return _upgradable_packages
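-
-
-# Usage sketch (illustration only; assumes root privileges and the
-# python3-apt package, 'curl' is an arbitrary example package):
-#
-#     apt = Apt(update=True)
-#     if not apt.is_installed('curl'):
-#         apt.install('curl')
-#     print(apt.upgradable_packages)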
diff --git a/tests/utilities/commands.py b/tests/utilities/commands.py
deleted file mode 100644
index 79db81bdd581e8014985244e35665aaecff0b4b0..0000000000000000000000000000000000000000
--- a/tests/utilities/commands.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env python3
-
-'''EnvSetup commands utilities.'''
-
-from pathlib import Path
-import subprocess
-import sys
-from typing import Any
-
-from .logging import log
-
-
-def exec_cmd(cmd: Any, log_output: bool = True, get_output: bool = True) -> tuple:
-    '''Execute the given command.
-
-    :param cmd: Command to run
-    :type cmd: Any
-    :param log_output: Whether to log output or not, defaults to True
-    :type log_output: bool, optional
-    :param get_output: Whether to return output or not, defaults to True
-    :type get_output: bool, optional
-    :return: Return code and output
-    :rtype: tuple
-    '''
-
-    shell = not isinstance(cmd, (tuple, list))
-    stdout = subprocess.PIPE if get_output or not log_output else sys.stdout
-    stderr = subprocess.PIPE if get_output or not log_output else sys.stderr
-
-    # execute
-    p = subprocess.Popen(
-        cmd, stdin=sys.stdin, stdout=stdout, stderr=stderr, shell=shell
-    )
-    out, err = p.communicate()
-
-    # send to the correct output
-    if get_output:
-        out = out.decode('utf-8').strip() if out else ''
-        if err:
-            if out:
-                out += '\n'
-            out += err.decode('utf-8').strip()
-        out = out.strip()
-        if log_output:
-            log(out)
-    elif log_output:
-        sys.stdout.flush()
-        sys.stderr.flush()
-
-    return p.returncode, out
-
-
-def check_cmd(cmd: Any, log_output: bool = False) -> int:
-    '''Get the return code of the given command.
-
-    :param cmd: Command to execute
-    :type cmd: Any
-    :param log_output: Whether to log output or not, defaults to False
-    :type log_output: bool, optional
-    :return: Return code
-    :rtype: int
-    '''
-
-    code, _ = exec_cmd(cmd, log_output, False)
-
-    return code
-
-
-def run_commands(cmds: list):
-    '''Run a series of commands in order.
-
-    :param cmds: List of commands
-    :type cmds: list
-    :raises Exception: If a command fails or a condition is not fulfilled
-    '''
-
-    try:
-        # Execute commands
-        for cmd in cmds:
-            if not isinstance(cmd, dict):
-                cmd = dict(line=cmd)
-            if cmd.get('cond'):
-                cond = cmd['cond']
-                negate = cmd.get('cond_neg')
-                skip = cmd.get('cond_skip')
-                code = check_cmd(cond)
-                valid = code != 0 if negate else code == 0
-                if not valid:
-                    msg = 'Condition for command "%s" not fulfilled.' % cmd['line']
-                    if skip:
-                        log('%s Command skipped.' % msg)
-                        continue
-                    raise Exception(msg)
-            if cmd['line'] == 'write':
-                if not cmd.get('target'):
-                    raise Exception('No target file to write in.')
-                if (
-                    cmd.get('backup')
-                    and Path(cmd['target']).exists()
-                    and not Path(cmd['target'] + '.back').exists()
-                ):
-                    Path(cmd['target']).rename(Path(cmd['target'] + '.back'))
-                    log('A backup file has been created for:\n%s' % cmd['target'])
-                # Load content from template if any
-                content = cmd.get('content', '')
-                if cmd.get('template'):
-                    if not Path(cmd['template']).exists():
-                        raise Exception(
-                            'Template file does not exist: %s.' % cmd['template']
-                        )
-                    with open(cmd['template'], 'r') as fd:
-                        content = fd.read()
-                    if cmd.get('params'):
-                        for k, v in cmd['params']:
-                            content = content.replace(k, v)
-                # Write target file
-                with open(cmd['target'], 'w+') as fd:
-                    fd.write(content)
-                log('File %s written' % cmd['target'])
-            elif cmd['line'] == 'backup':
-                if not cmd.get('target'):
-                    raise Exception('No target file to backup.')
-                if not Path(cmd['target'] + '.back').exists():
-                    Path(cmd['target']).rename(Path(cmd['target'] + '.back'))
-                    log('A backup file has been created for:\n%s' % cmd['target'])
-                else:
-                    log('A backup file already exists for:\n%s' % cmd['target'])
-            else:
-                log('>>> ' + cmd['line'])
-                code = check_cmd(cmd['line'], log_output=True)
-                if code != 0:
-                    raise Exception('Command exited with code %s.' % code)
-    except Exception as e:
-        log('Command failed:\n%s' % e)
-        raise
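-
-
-# Usage sketch (hypothetical command list; 'write' and 'backup' are the
-# special directives handled above, any other line is run as a shell command):
-#
-#     run_commands([
-#         'apt-get update',
-#         {'line': 'write', 'target': '/tmp/example.conf',
-#          'content': 'KEY=1\n', 'backup': True},
-#         {'line': 'systemctl reload nginx',
-#          'cond': 'systemctl is-active nginx', 'cond_skip': True},
-#     ])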
diff --git a/tests/utilities/config.py b/tests/utilities/config.py
deleted file mode 100644
index 52809487659d847b46baa59fa9aa03e5a35949ae..0000000000000000000000000000000000000000
--- a/tests/utilities/config.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env python3
-
-'''
-EnvSetup config utilities.
-'''
-
-from collections import OrderedDict
-from pathlib import Path
-import re
-
-from .logging import log
-from .os import get_dir
-
-DEFAULT_CONF_PATH = 'global-conf.sh'
-AUTO_CONF_PATH = 'auto-generated-conf.sh'
-CONF_PATH = 'conf.sh'
-
-_conf_cache = None
-
-
-def load_conf() -> dict:
-    '''Load EnvSetup configuration settings.
-
-    :return: Configuration settings
-    :rtype: dict
-    '''
-
-    conf = {}
-    base_dir = str(Path(get_dir(__file__)).parent.parent)
-    files = (
-        (str(Path(base_dir, DEFAULT_CONF_PATH)), True),
-        (str(Path(base_dir, AUTO_CONF_PATH)), False),
-        (str(Path(base_dir, CONF_PATH)), False),
-    )
-    only_default = True
-    override = OrderedDict()
-    for path, is_default in files:
-        if not Path(path).exists():
-            if is_default:
-                log(
-                    'The configuration file "{}" does not exist.'.format(path),
-                    error=True,
-                )
-                return dict()
-            continue
-        # Load conf
-        with open(path, 'r') as fo:
-            content = fo.read()
-        # Parse conf
-        for line in content.split('\n'):
-            line = line.strip()
-            if line and not line.startswith('#') and '=' in line:
-                name, *val = line.split('=')
-                name = name.strip(' \t\'"')
-                val = ('='.join(val)).strip(' \t\'"')
-                conf[name] = val
-                if is_default:
-                    override[name] = False
-                else:
-                    only_default = False
-                    override[name] = True
-    conf['_override'] = override
-    # Check a value to know if the config file has been changed
-    if only_default:
-        log('\033[93mWarning:\033[0m')
-        log('The configuration is using only default values.')
-        log('Perhaps you forgot to change the configuration.')
-        log('Path of the configuration file: %s' % str(Path(base_dir, CONF_PATH)))
-        log('You may want to quit this script and change the configuration first.\n')
-    global _conf_cache
-    _conf_cache = conf
-    return conf
-
-
-def get_conf(name: str, default: str = None) -> str:
-    '''Get the given configuration parameter.
-
-    :param name: Parameter name
-    :type name: str
-    :param default: Default parameter value, defaults to None
-    :type default: str, optional
-    :return: Parameter value
-    :rtype: str
-    '''
-
-    global _conf_cache
-    if _conf_cache is None:
-        load_conf()
-
-    return _conf_cache.get(name, default)
-
-
-def set_conf(key: str, value: str, override: bool = False) -> bool:
-    '''Write the given configuration option in `conf.sh`.
-
-    :param key: Option name
-    :type key: str
-    :param value: Option value
-    :type value: str
-    :param override: Whether to override the option if it already exists, defaults to False
-    :type override: bool, optional
-    :return: True if the option has changed, False otherwise
-    :rtype: bool
-    '''
-
-    base_dir = Path(__file__).resolve().parent.parent.parent  # envsetup root, as in load_conf
-    conf_path = Path(base_dir, CONF_PATH).resolve()
-    # read conf.sh
-    with open(conf_path) as read_conf_fh:
-        conf = read_conf_fh.read()
-
-    # check if option already exists
-    regex = re.compile(r'^' + key.upper() + '=(.*)$', flags=re.M)
-    match = regex.search(conf)
-    if match and override:
-        # override option
-        conf = regex.sub('{}=\'{}\''.format(key.upper(), str(value)), conf)
-        with open(conf_path, 'w') as conf_fh:
-            conf_fh.write(conf)
-        success = True
-    elif not match:
-        # add option
-        with open(conf_path, 'a') as conf_fh:
-            conf_fh.write('\n{}=\'{}\'\n'.format(key.upper(), str(value)))
-        success = True
-    else:
-        # no match or no override
-        success = False
-    # reload conf
-    load_conf()
-    return success
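-
-
-# Usage sketch (TESTER_ENABLE_SYSTEMD_TIMER is a key used by the systemd
-# utilities; conf.sh must exist for set_conf to work):
-#
-#     set_conf('TESTER_ENABLE_SYSTEMD_TIMER', '1', override=True)
-#     if get_conf('TESTER_ENABLE_SYSTEMD_TIMER', '0') == '1':
-#         print('systemd timer enabled')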
diff --git a/tests/utilities/logging.py b/tests/utilities/logging.py
deleted file mode 100644
index 4e9d6f5380a7f18caf0a7f92da5a33e3cdcca337..0000000000000000000000000000000000000000
--- a/tests/utilities/logging.py
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/usr/bin/env python3
-
-'''EnvSetup logging utilities.'''
-
-import sys
-import re
-
-RED = '\033[91m'
-GREEN = '\033[92m'
-YELLOW = '\033[93m'
-BLUE = '\033[94m'
-DEF = '\033[0m'
-
-
-def log(text: str, error: bool = False):
-    '''Output log message to stdout or stderr.
-
-    :param text: Message to log
-    :type text: str
-    :param error: Whether it should output to stderr or not, defaults to False
-    :type error: bool, optional
-    '''
-
-    fo = sys.stderr if error else sys.stdout
-    print(text, file=fo)
-    fo.flush()
-
-
-def info(message: str):
-    '''Print formatted info message.
-
-    :param message: Message to print
-    :type message: str
-    '''
-
-    log(' {}ℹ{} {}'.format(BLUE, DEF, message))
-
-
-def success(message: str):
-    '''Print formatted success message.
-
-    :param message: Message to print
-    :type message: str
-    '''
-
-    log(' {}✔{} {}'.format(GREEN, DEF, message))
-
-
-def warning(message: str):
-    '''Print formatted warning message.
-
-    :param message: Message to print
-    :type message: str
-    '''
-
-    log(' {}⚠{} {}'.format(YELLOW, DEF, message), True)
-
-
-def error(message: str):
-    '''Print formatted error message.
-
-    :param message: Message to print
-    :type message: str
-    '''
-
-    log(' {}✖{} {}'.format(RED, DEF, message), True)
-
-
-def strip_colors(text: str):
-    '''Drop console colors.
-
-    :param text: Text in which colors should be dropped
-    :type text: str
-
-    :return: Cleaned text
-    :rtype: str
-    '''
-    return re.sub(r'\033\[[\d;]+m', '', text)
-
-
-def escape_html(text: str):
-    '''Escape HTML tags in a text.
-
-    :param text: Text to clean
-    :type text: str
-
-    :return: Cleaned text
-    :rtype: str
-    '''
-    html = text.strip()
-    html = html.replace('<', '&lt;')
-    html = html.replace('>', '&gt;')
-    html = html.replace('\033[90m', '<span style="color: gray;">')
-    html = html.replace('\033[91m', '<span style="color: red;">')
-    html = html.replace('\033[92m', '<span style="color: green;">')
-    html = html.replace('\033[93m', '<span style="color: orange;">')
-    html = html.replace('\033[94m', '<span style="color: blue;">')
-    html = html.replace('\033[95m', '<span style="color: purple;">')
-    html = strip_colors(html)
-    return html
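-
-
-# Usage sketch:
-#
-#     info('Starting checks')
-#     success('All good')
-#     warning('Disk almost full')
-#     error('Service down')
-#     print(strip_colors('\033[92mok\033[0m'))  # -> 'ok'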
diff --git a/tests/utilities/network.py b/tests/utilities/network.py
deleted file mode 100644
index b4199e9d16fa081b7db05cedfe2c1cce09cd1e18..0000000000000000000000000000000000000000
--- a/tests/utilities/network.py
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/env python3
-
-'''EnvSetup network utilities.'''
-
-from pathlib import Path
-import socket
-import subprocess
-
-from .commands import exec_cmd
-from .logging import log
-
-
-def add_hosts_to_localhost(hosts: list):
-    '''Add a list of hosts to 127.0.0.1 in /etc/hosts.
-
-    :param hosts: List of hostnames
-    :type hosts: list
-    :raises OSError: If /etc/hosts cannot be read or written
-    '''
-
-    rc, hostname = exec_cmd('hostname')
-    if rc == 0 and hostname not in hosts:
-        hosts.insert(0, hostname)
-    with open('/etc/hosts', 'r') as fo:
-        content = fo.read()
-    new_content = list()
-    found_127 = False
-    for line in content.split('\n'):
-        if not found_127 and line.startswith('127.0.0.1'):
-            found_127 = True
-            for host in hosts:
-                if ' ' + host not in line:
-                    line += ' ' + host
-                    log('Adding host %s to /etc/hosts 127.0.0.1 aliases.' % host)
-        new_content.append(line)
-    if not found_127:
-        new_content.append('127.0.0.1 %s' % ' '.join(hosts))
-    new_content = '\n'.join(new_content)
-    if new_content != content:
-        with open('/etc/hosts', 'w') as fo:
-            fo.write(new_content)
-        log('/etc/hosts updated.')
-    else:
-        log('/etc/hosts is already up to date.')
-
-
-OPENSSL_CONFIG_TEMPLATE = '''
-[ req ]
-
-prompt             = no
-default_bits       = 4096
-default_keyfile    = envsetup.csr.pem
-distinguished_name = subject
-req_extensions     = req_ext
-x509_extensions    = x509_ext
-string_mask        = utf8only
-
-[ subject ]
-
-C            = FR
-ST           = IDF
-L            = Paris
-O            = UbiCast
-CN           = MediaServer
-emailAddress = root@localhost
-
-[ x509_ext ]
-
-subjectKeyIdentifier   = hash
-authorityKeyIdentifier = keyid,issuer
-basicConstraints       = CA:FALSE
-keyUsage               = digitalSignature, keyEncipherment
-subjectAltName         = @alternate_names
-nsComment              = "OpenSSL Generated Certificate"
-
-[ req_ext ]
-
-subjectKeyIdentifier = hash
-basicConstraints     = CA:FALSE
-keyUsage             = digitalSignature, keyEncipherment
-subjectAltName       = @alternate_names
-
-[ alternate_names ]
-
-'''
-
-
-def mkcert(
-    domains: list,
-    ecc: bool = True,
-    days: int = 3650,
-    config_tpl: str = OPENSSL_CONFIG_TEMPLATE,
-):
-    '''Generate a self-signed certificate for the domains list.
-
-    :param domains: Domains for which the certificates will be self-signed
-    :type domains: list
-    :param ecc: Whether to use elliptic curve cryptography or not, defaults to True; if False, RSA is used
-    :type ecc: bool, optional
-    :param days: Validity lifetime of the certificate, defaults to 3650
-    :type days: int, optional
-    :param config_tpl: OpenSSL config file template, defaults to OPENSSL_CONFIG_TEMPLATE
-    :type config_tpl: str, optional
-    '''
-
-    # create certs dir
-    cert_dir = '/etc/ssl/envsetup'
-    Path(cert_dir).mkdir(mode=0o755, parents=True, exist_ok=True)
-    # populate template with domains
-    for i, domain in enumerate(domains, start=1):
-        config_tpl = config_tpl + 'DNS.{} = {}\n'.format(i, domain)
-    # write openssl config file
-    with open(cert_dir + '/conf', 'w') as config_fh:
-        config_fh.write(config_tpl)
-    # key type: elliptic curve (default) or rsa
-    if ecc:
-        subprocess.check_call(
-            ['openssl', 'ecparam', '-name', 'secp384r1', '-out', cert_dir + '/ecparam']
-        )
-        keytype = 'ec:' + cert_dir + '/ecparam'
-    else:
-        keytype = 'rsa'
-    # execute openssl to generate keypair
-    subprocess.check_call(
-        [
-            'openssl',
-            'req',
-            '-config',
-            cert_dir + '/conf',
-            '-new',
-            '-x509',
-            '-sha256',
-            '-nodes',
-            '-newkey',
-            keytype,
-            '-keyout',
-            cert_dir + '/key.pem',
-            '-days',
-            str(days),
-            '-out',
-            cert_dir + '/cert.pem',
-        ]
-    )
-
-
-def get_ip() -> str:
-    '''Get the 'primary' IP address, the one used by the default route.
-
-    :return: IP address
-    :rtype: str
-    '''
-
-    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-    try:
-        # doesn't have to be reachable
-        s.connect(('10.255.255.255', 1))
-        IP = s.getsockname()[0]
-    except Exception:
-        IP = '127.0.0.1'
-    finally:
-        s.close()
-
-    return IP
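-
-
-# Usage sketch (the domains are hypothetical examples; mkcert writes under
-# /etc/ssl/envsetup, so it needs root):
-#
-#     print(get_ip())  # e.g. '192.168.1.10'
-#     mkcert(['example.local', 'media.example.local'])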
diff --git a/tests/utilities/os.py b/tests/utilities/os.py
deleted file mode 100644
index b6ccd41ce34b436a4324b74383d2864817e16a02..0000000000000000000000000000000000000000
--- a/tests/utilities/os.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python3
-
-'''EnvSetup system utilities.'''
-
-from configparser import ConfigParser
-from pathlib import Path
-import re
-
-SUPPORTED_PLATFORMS = (('debian', '10'), ('ubuntu', '18.04'))
-
-
-def get_dir(file_path: str) -> str:
-    '''Get the absolute directory path for the given file.
-
-    :param file_path: File path
-    :type file_path: str
-    :return: Absolute directory path
-    :rtype: str
-    '''
-
-    return str(Path(file_path).expanduser().resolve().parent)
-
-
-def dist() -> tuple:
-    '''Return distribution name and version.
-
-    :return: Distribution name and version
-    :rtype: tuple
-    '''
-
-    parser = ConfigParser()
-    with open('/etc/os-release') as os_release:
-        parser.read_string('[os]\n{}'.format(os_release.read()))
-
-    return (parser['os']['ID'], parser['os']['VERSION_ID'].strip('"'))
-
-
-def supported_platform() -> bool:
-    '''Tell whether the current platform is supported.
-
-    :return: Whether the platform is supported or not
-    :rtype: bool
-    '''
-
-    return dist() in SUPPORTED_PLATFORMS
-
-
-def line_in_file(line: str, file: str) -> bool:
-    '''Search for a line in the given file.
-
-    :param line: String or pattern to search
-    :type line: str
-    :param file: File to check
-    :type file: str
-    :return: Whether the line is present or not
-    :rtype: bool
-    '''
-
-    with open(file) as fh:
-        file_lines = fh.read()
-
-    return bool(re.search(line, file_lines, re.MULTILINE))
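-
-
-# Usage sketch ('/etc/ssh/sshd_config' and the pattern are hypothetical
-# examples; line_in_file takes a regular expression):
-#
-#     name, version = dist()  # e.g. ('debian', '10')
-#     if supported_platform():
-#         print(line_in_file(r'^PermitRootLogin', '/etc/ssh/sshd_config'))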
diff --git a/tests/utilities/systemd.py b/tests/utilities/systemd.py
deleted file mode 100644
index 38823706c22446ba78aecaaf1295cf3e1784f011..0000000000000000000000000000000000000000
--- a/tests/utilities/systemd.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python3
-
-'''
-EnvSetup systemd handler.
-'''
-
-import os
-import subprocess
-
-from .config import get_conf
-
-
-def check_systemd_setup():
-    '''
-    Write the envsetup timer and service files and enable them if enabled in the configuration.
-    Configuration key is TESTER_ENABLE_SYSTEMD_TIMER.
-    '''
-    # Write systemd files if needed
-    template_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'systemd')
-    files_to_write = (
-        ('/lib/systemd/system/envsetup-tester.service', os.path.join(template_dir, 'envsetup-tester.service')),
-        ('/lib/systemd/system/envsetup-tester.timer', os.path.join(template_dir, 'envsetup-tester.timer')),
-    )
-    for path, template in files_to_write:
-        content = ''
-        if os.path.exists(path):
-            with open(path, 'r') as fo:
-                content = fo.read()
-        with open(template, 'r') as fo:
-            expected = fo.read()
-        if not content or content != expected:
-            # Replace file if different
-            with open(path, 'w') as fo:
-                fo.write(expected)
-            print('File "%s" updated.' % path)
-        else:
-            print('File "%s" is already up to date.' % path)
-
-    # Enable systemd timer if needed
-    if get_conf('TESTER_ENABLE_SYSTEMD_TIMER') == '1':
-        print('Checking status of envsetup systemd timer...')
-        p = subprocess.run(['systemctl', 'is-enabled', 'envsetup-tester.timer'])
-        if p.returncode != 0:
-            subprocess.run(['systemctl', 'enable', 'envsetup-tester.timer'])
-            subprocess.run(['systemctl', 'restart', 'envsetup-tester.timer'])
-            print('Enabled "envsetup-tester.timer" in systemd.')
-
-    # Remove old files
-    deprecated_files = (
-        '/lib/systemd/system/envsetup-tests.timer',
-        '/lib/systemd/system/envsetup-tests.service',
-        '/etc/systemd/system/envsetup-tests.timer',
-        '/etc/systemd/system/envsetup-tests.service',
-        '/etc/systemd/system/ubicast-config.service'
-    )
-    for path in deprecated_files:
-        if os.path.exists(path):
-            # Disable service/timer
-            name = path.rsplit('/', 1)[-1]
-            subprocess.run(['systemctl', 'disable', name])
-            subprocess.run(['systemctl', 'stop', name])
-            # Remove service/timer
-            os.remove(path)
-            print('Removed deprecated file: "%s".' % path)
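-
-
-# Usage sketch (requires root; the templates are read from the "systemd"
-# directory shipped next to the tests):
-#
-#     from utilities.systemd import check_systemd_setup
-#     check_systemd_setup()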
diff --git a/tools/benchmark/live/README.md b/tools/benchmark/live/README.md
deleted file mode 100644
index a18a0cadbb18699f34e31c1af0c1f3a52378cee7..0000000000000000000000000000000000000000
--- a/tools/benchmark/live/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# Description
-
-The `benchmark/live` tool aims to find out the maximum number of streams that a targeted UbiCast Live solution can handle before it shows anomalies.
-An anomaly is defined as an m3u8 playlist that takes more than 4s to update on the Live server.
-
-# Prerequisites
-
-To be able to use the script, you must:
-* Be able to directly join the live server defined in the configuration through SSH (the script uses the equivalent of `ssh -l root <LIVE_IP value in the config file>`)
-* Be able to directly join the MediaServer defined in the configuration through SSH (the script uses the equivalent of `ssh -l root <MS_IP value in the config file>`)
-* The live server and the MediaServer should have the latest version of the UbiCast envsetup repository in the `/root` folder
-* The MediaServer should be able to reach the live server through the live server "server_name" and through tcp/80, tcp/443 and tcp/1935 (used to bench the theoretical network bandwidth between the servers)
-* Have the following Debian packages installed on the computer running the bench script: `ksh` (for the nmon graph tool `lib/nmonchart`)
-* Have the following Debian packages installed on the MediaServer and the Live server: `nmon`
-* Make sure that there are no lives currently started and that the MediaServer is not used
-
-# Usage
-
-## Global configuration
-
-Before running a bench, copy the example configuration file:
-
-```
-cd <envsetup_dir>/tools/benchmark/live
-cp config.example config
-```
-
-Then open the `config` file with a text editor and adapt the variables to your environment.
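-
-A minimal sketch of the expected `config` content (placeholder values, see `config.example`):
-
-```
-MS_IP="192.0.2.10"
-MS_INSTANCE="msuser"
-LIVE_IP="192.0.2.20"
-LIVE_HLS_FOLDER="/var/tmp/nginx-rtmp/msuser"
-```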
-
-## Single number of streams benchmark
-
-Usage
-```
-./bench-live.sh [OPTS] STREAM_NUMBER
-
-OPTIONS:
-   -t --test-duration    INT  test duration in seconds (default: 30)
-   -l --log-file         PATH path for the log file (by default a log is created in the script "output" dir)
-   -n --no-header             omit the server hardware information
-   -m --nmon-monitoring       launch nmon on the MediaServer and Live server
-   -h --help                  show this help
-```
-
-## Multiple number of streams benchmark
-
-Usage
-```
-./full-bench.sh [OPTS] -- STREAMS_TARGET1 STREAMS_TARGET2 ...
-
-OPTIONS:
-   -t --test-duration  INT  test duration in seconds (default: 300)
-   -h --help                show this help
-
-STREAMS_TARGETS is a space-separated list of the numbers of streams you want to benchmark.
-For example, if you want to benchmark the solution for 100, 200, 300 and 400 streams:
-./full-bench.sh -- 100 200 300 400
-
-You can also use bash brace expansion, like this:
-./full-bench.sh -- {1,2,3,4}00
-```
-
-Note: the automatic bench is set up to stop if more than 10 anomalies are detected with the current number of streams
-
-# Result analysis
-
-The output of the bench can be consulted in the `output` directory of the tool.
-Depending on the arguments provided during the bench you can find several files in the corresponding `output` subfolder:
-* `sysinfos.txt`: information about the benched systems (stream quality used, server CPU/RAM information, theoretical bandwidth measurement)
-* `bench.log`: log file of the bench; it contains information about the execution of the bench, plus some metrics not provided by the graphs (load average before/after, size of the HLS folder on the live server, number of anomalies detected)
-* `bench-ms.nmon` and `bench-live.nmon`: nmon metrics databases for the benchmark
-* `graph-ms.html` and `graph-live.html`: HTML pages presenting graphs from the nmon metrics (independent from the nmon DB files)
-
-You can consult the HTML pages `graph-ms.html` and `graph-live.html` by publishing them with a simple static web server, for example by running `python3 -m http.server` in the benchmark output directory and then connecting to port 8000 with a web browser.
diff --git a/tools/benchmark/live/bench-live.sh b/tools/benchmark/live/bench-live.sh
deleted file mode 100755
index 633e844637aaafee5aafdcd2d5974d6125cf5582..0000000000000000000000000000000000000000
--- a/tools/benchmark/live/bench-live.sh
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/env bash
-
-# -------------------------------- Global variables --------------------------------
-
-DIR="$(dirname "$(readlink -e "${0}")")"
-CONFIG_FILE="${DIR}/config"
-TESTER_SCRIPT="/root/envsetup/tests/scripts/ms-testing-suite/ms_live_streamer.py"
-TIME_DIFF_SCRIPT="/root/envsetup/tools/benchmark/live/resources/time_diff_test.sh"
-SYS_INFOS_SCRIPT="/root/envsetup/tools/benchmark/live/resources/sysinfos.sh"
-NMON_CMD="nmon -f -s 1 && pgrep nmon"
-LOADAVG_CMD="cat /proc/loadavg | awk '{print \"load average:\",\$1\" (1m)\",\$2\" (5m)\",\$3\" (15m)\"}'"
-HLS_TMPFS_SIZE_CMD="du -h -d0 /var/tmp/nginx-rtmp/msuser | awk '{print \$1}'"
-
-# -- Script prerequisites tests --
-
-# Configuration loading
-if [[ -e "${CONFIG_FILE}" ]]; then
-    source "${CONFIG_FILE}"
-else
-    echo "The configuration file \"${CONFIG_FILE}\" is missing"
-    exit 1
-fi
-
-# -------------------------------- Functions --------------------------------
-
-# Loading global functions
-source "${DIR}/lib/functions.sh"
-
-# Function to cleanup the tests files on script termination
-cleanup() {
-    msg "white" "Stopping every live streams found on the MediaServer"
-    ssh -l root ${MS_IP} -- pkill -f gst-launch
-    msg "white" "Purging any remaining playlists on the live server"
-    ssh -l root ${LIVE_IP} -- rm -f "${LIVE_HLS_FOLDER}/"*
-    msg "white" "Restarting nginx on the live server"
-    ssh -l root ${LIVE_IP} -- systemctl restart nginx
-}
-
-cleanup_trap() {
-    cleanup
-
-    exit 1
-}
-
-# Launch X streams on the MediaServer (X being the number of streams to spawn, passed as argument)
-launch_stream() {
-    nb_streams="${1}"
-    timeout_tester="${2}"
-
-    read -r pid < <(ssh -l root ${MS_IP} -- "${TESTER_SCRIPT} --num-streamers ${nb_streams} ${MS_INSTANCE} &>/dev/null & echo \$!")
-
-    echo ${pid}
-}
-
-usage() {
-    cat << EOF
-Usage: ./$(basename "${0}") [OPTS] STREAM_NUMBER
-
-OPTIONS:
-   -t --test-duration    INT  test duration in seconds (default: 30)
-   -l --log-file         PATH path for the log file (by default a log is created in the script "output" dir)
-   -n --no-header             omit the server hardware information
-   -m --nmon-monitoring       launch nmon on the MediaServer and Live server
-   -h --help                  show this help
-EOF
-}
-
-# -------------------------------- Parameters --------------------------------
-
-if [[ $# -eq 0 ]]; then
-    usage
-
-    exit 0
-fi
-
-TEST_DURATION=30
-NO_HEADER=false
-NMON=false
-
-while [[ $# -ne 0 ]]; do
-    case "${1}" in
-        -t|--test-duration)    shift; TEST_DURATION=${1};;
-        -l|--log-file)         shift; A_LOG_FILE=${1};;
-        -n|--no-header)        NO_HEADER=true;;
-        -m|--nmon-monitoring)  NMON=true;;
-        -h|--help)             usage; exit 0;;
-        --)                    shift; break;;
-        -*)                    echo "unknown ${1} option"; exit 1;;
-        *)                     STREAM_NUMBER=${1};;
-    esac
-    shift
-done
-
-if [[ ! "${STREAM_NUMBER}" =~ ^[0-9]+$ ]]; then
-    usage
-    exit 1
-fi
-
-# -------------------------------- Main --------------------------------
-
-# The trap is played on associated signals calls to cleanup bench files
-trap cleanup_trap SIGTERM SIGINT
-
-# Before launching the bench we check that everything is setup correctly
-# We check that the live chunks are generated in a tmpfs
-hls_fs_type="$(ssh -l root ${LIVE_IP} -- findmnt ${LIVE_HLS_FOLDER} -o FSTYPE | tail -n1)"
-
-if [[ "${hls_fs_type}" != "tmpfs" ]]; then
-    echo -e "${RED}On the live server, the chunks folder \"${LIVE_HLS_FOLDER}\" is not mounted on a tmpfs as required${NC}"
-
-    exit 1
-fi
-
-# We check that the RTMP module default limit has been overwritten
-if ! ssh -l root ${LIVE_IP} -- "grep -qR 'max_streams 1000;' /etc/nginx/{global-conf.d,rtmp.d}"; then
-    echo -e "${RED}The rtmp \"max_streams\" limit should be set to 1000 for the bench${NC}"
-    echo -e "${RED}On the live server, add the \"max_streams 1000;\" parameter in the \"rtmp\" block of the \"/etc/nginx/global-conf.d/rtmp.conf\" configuration file under the \"chunk_size\" parameter.Then restart nginx${NC}"
-
-    exit 1
-fi
-
-# Preparing the output reports variables
-OUTPUT_DIR="${DIR}/output/$(date +'%Y%m%d%H%M')-$(basename "${0%%.sh}")"
-if [[ -z ${A_LOG_FILE} ]]; then
-    LOG_FILE="${OUTPUT_DIR}/bench.log"
-else
-    if [[ -e $(dirname ${A_LOG_FILE}) ]]; then
-        LOG_FILE="${A_LOG_FILE}"
-    else
-        msg "red" "The directory for the log file does not exist"
-        exit 1
-    fi
-fi
-
-NMON_FILE="${OUTPUT_DIR}/bench.nmon"
-
-if [[ -z ${A_LOG_FILE} ]] || ${NMON} || ! ${NO_HEADER}; then
-    # Creation of the logs directory
-    if [[ ! -e "${OUTPUT_DIR}" ]]; then
-        mkdir -p "${OUTPUT_DIR}"
-    fi
-fi
-
-# We print the configuration (CPU, RAM) of the benched servers
-if ! ${NO_HEADER}; then
-    ${DIR}/resources/sysinfos.sh ${OUTPUT_DIR}/sysinfos.txt ${LOG_FILE}
-fi
-
-# Test variables
-failure=false
-
-# Print load average and tmpfs size
-LOAD_MS="$(ssh -l root ${MS_IP} -- "${LOADAVG_CMD}")"
-LOAD_LIVE="$(ssh -l root ${LIVE_IP} -- "${LOADAVG_CMD}")"
-SIZE_LIVE="$(ssh -l root ${LIVE_IP} -- "${HLS_TMPFS_SIZE_CMD}")"
-
-msg "green" "MediaServer ${LOAD_MS}"
-msg "green" "Live server ${LOAD_LIVE}"
-msg "green" "Live server ${LIVE_HLS_FOLDER} size: ${SIZE_LIVE}"
-
-if ${NMON}; then
-    # We launch nmon on the Live server to collect system performance data
-    msg "white" "Launching the system performance data collection on the Live server"
-    #ssh -l root ${LIVE_IP} -- "[[ -e /tmp/bench.csv ]] && rm /tmp/bench.csv"
-    ssh -l root ${LIVE_IP} -- 'rm -f /tmp/*.nmon'
-    nmon_pid=$(ssh -l root ${LIVE_IP} -- "cd /tmp && ${NMON_CMD}")
-fi
-
-# Launch X streams (X being the current step value)
-msg "blue" "MediaServer - Launching the tests with ${STREAM_NUMBER} streams..."
-tester_pid=$(launch_stream ${STREAM_NUMBER})
-
-# Waiting for the streams to spawn correctly
-msg "white" "Waiting for streams to spawn on the mediaserver"
-wanted_playlist_nb=$(( 1 * STREAM_NUMBER ))
-real_playlist_nb=$(ssh -l root ${LIVE_IP} -- ls -1 "${LIVE_HLS_FOLDER}/"*.m3u8 2>/dev/null | wc -l)
-
-while [[ ${real_playlist_nb} -ne ${wanted_playlist_nb} ]]; do
-    sleep 1
-    real_playlist_nb=$(ssh -l root ${LIVE_IP} -- ls -1 "${LIVE_HLS_FOLDER}/"*.m3u8 2>/dev/null | wc -l)
-done
-
-# We wait a bit for the streams to start
-sleep 10
-
-# Launching the checks on the live server
-msg "blue" "Live server - Checking the playlists refreshing times every seconds for ${TEST_DURATION} checks (~1s per check)"
-ret=$(ssh -l root ${LIVE_IP} -- ${TIME_DIFF_SCRIPT} "${LIVE_HLS_FOLDER}" ${TEST_DURATION} ${wanted_playlist_nb})
-
-if [[ "$?" == "1" ]]; then
-    failure=true
-fi
-
-if [[ "${ret}" != "" ]]; then
-    msg "orange" "${ret}"
-fi
-
-# Print load average and tmpfs size
-LOAD_MS="$(ssh -l root ${MS_IP} -- "${LOADAVG_CMD}")"
-LOAD_LIVE="$(ssh -l root ${LIVE_IP} -- "${LOADAVG_CMD}")"
-SIZE_LIVE="$(ssh -l root ${LIVE_IP} -- "${HLS_TMPFS_SIZE_CMD}")"
-
-msg "green" "MediaServer ${LOAD_MS}"
-msg "green" "Live server ${LOAD_LIVE}"
-msg "green" "Live server ${LIVE_HLS_FOLDER} size: ${SIZE_LIVE}"
-
-# We stop the tester
-msg "white" "Stopping the MediaServer test streams and waiting for them to despawn"
-ssh -l root ${MS_IP} -- "kill -s SIGINT ${tester_pid}"
-
-# We stop nmon on the Live server and we retrieve the resulting CSV
-if ${NMON}; then
-    msg "white" "Stopping the collect of the system performance datas on the Live server"
-    ssh -l root ${LIVE_IP} -- "kill ${nmon_pid}"
-    msg "white" "Retrieving the nmon report from the Live server"
-    last_nmon="$(ssh -l root ${LIVE_IP} -- 'ls -1t /tmp/*.nmon | head -n1')"
-    scp "root@${LIVE_IP}:${last_nmon}" "${NMON_FILE}"
-
-    # Generation of the HTML Graphs page
-    ${DIR}/lib/nmonchart ${NMON_FILE} ${OUTPUT_DIR}/graph.html
-fi
-
-# Print the output files path
-if ${NMON}; then
-    msg "white" "Bench NMON file:  ./$(realpath --relative-to ${DIR} ${NMON_FILE})"
-    msg "white" "Bench graph page: ./$(realpath --relative-to ${DIR} ${OUTPUT_DIR}/graph.html)"
-fi
-msg "white" "Bench logs:       ./$(realpath --relative-to ${DIR} ${LOG_FILE})"
-
-sleep 2
-
-# We purge the playlist files on the live server and restart nginx to return to the initial state
-cleanup
-
-if ${failure}; then
-    msg "red" "Anomalies has been detected"
-    exit 1
-else
-    msg "green" "No anomalies detected"
-    exit 0
-fi
diff --git a/tools/benchmark/live/config.example b/tools/benchmark/live/config.example
deleted file mode 100644
index 2ba9774488fc421520b410033e75ca615863596d..0000000000000000000000000000000000000000
--- a/tools/benchmark/live/config.example
+++ /dev/null
@@ -1,7 +0,0 @@
-# MediaServer infos (choose one of the cluster for HA cases)
-MS_IP="X.X.X.X"
-MS_INSTANCE="msuser"
-
-# Live server infos (choose the active one of the cluster for HA cases)
-LIVE_IP="X.X.X.X"
-LIVE_HLS_FOLDER="/var/tmp/nginx-rtmp/msuser"
diff --git a/tools/benchmark/live/full-bench.sh b/tools/benchmark/live/full-bench.sh
deleted file mode 100755
index 6e5bc22964f1595ed2005cf9debe92100e542aa6..0000000000000000000000000000000000000000
--- a/tools/benchmark/live/full-bench.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env bash
-
-DIR="$(dirname "$(readlink -e "${0}")")"
-CONFIG_FILE="${DIR}/config"
-OUTPUT_DIR="${DIR}/output/$(date +"%Y%m%d%H%M")-$(basename "${0%%.sh}")"
-LOG_FILE="${OUTPUT_DIR}/bench.log"
-
-# Loading global functions
-source "${DIR}/lib/functions.sh"
-
-# Print usage function
-usage() {
-    cat << EOF
-Usage: ./$(basename "${0}") [OPTS] -- STREAMS_TARGET1 STREAMS_TARGET2 ...
-
-OPTIONS:
-   -t --test-duration  INT  test duration in seconds (default: 300)
-   -h --help                show this help
-
-STREAMS_TARGETS is a space-separated list of the numbers of streams you want to benchmark.
-For example, if you want to benchmark the solution for 100, 200, 300 and 400 streams:
-./$(basename "${0}") -- 100 200 300 400
-
-You can also use bash brace expansion, like this:
-./$(basename "${0}") -- {1,2,3,4}00
-EOF
-}
-
-cleanup() {
-    # Kill the running nmon monitors
-    ssh -l root ${MS_IP}   -- "pkill -f nmon"
-    ssh -l root ${LIVE_IP} -- "pkill -f nmon"
-
-    # We retrieve the nmon metrics DB
-    scp -q root@${MS_IP}:/tmp/*.nmon ${OUTPUT_DIR}/bench-ms.nmon
-    scp -q root@${LIVE_IP}:/tmp/*.nmon ${OUTPUT_DIR}/bench-live.nmon
-
-    # Clean the metrics DB on the remotes
-    ssh -l root ${MS_IP}   -- "rm -f /tmp/*.nmon"
-    ssh -l root ${LIVE_IP} -- "rm -f /tmp/*.nmon"
-
-    # Create graphs from the metrics
-    ${DIR}/lib/nmonchart ${OUTPUT_DIR}/bench-ms.nmon   ${OUTPUT_DIR}/graph-ms.html
-    ${DIR}/lib/nmonchart ${OUTPUT_DIR}/bench-live.nmon ${OUTPUT_DIR}/graph-live.html
-}
-
-cleanup_trap() {
-    cleanup
-
-    exit 1
-}
-
-trap cleanup_trap SIGINT
-
-# Configuration loading
-if [[ -e "${CONFIG_FILE}" ]]; then
-    source "${CONFIG_FILE}"
-else
-    echo "The configuration file \"${CONFIG_FILE}\" is missing"
-    exit 1
-fi
-
-TEST_DURATION=300
-
-# Argument parsing
-while [[ $# -ne 0 ]]; do
-    case "${1}" in
-        -t|--test-duration)    shift; TEST_DURATION=${1};;
-        -h|--help)             usage; exit 0;;
-        --)                    shift; break;;
-        -*)                    echo "unknown ${1} option"; exit 1;;
-        *)                     break;;
-    esac
-    shift
-done
-
-STREAMS_TARGETS="$@"
-
-if [[ ! ${STREAMS_TARGETS} =~ ^[0-9]+([[:blank:]][0-9]+)*$ ]]; then
-    usage
-    exit 1
-fi
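-# Note: the check above accepts only blank-separated positive integers, e.g. "100 200 300"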
-
-# Creating the output dir
-[[ ! -e ${OUTPUT_DIR} ]] && mkdir -p ${OUTPUT_DIR}
-
-# Gather the system information
-${DIR}/resources/sysinfos.sh ${OUTPUT_DIR}/sysinfos.txt ${LOG_FILE}
-
-# Clean previous benchmarks metrics
-ssh -l root ${MS_IP}   -- "rm -f /tmp/*.nmon"
-ssh -l root ${LIVE_IP} -- "rm -f /tmp/*.nmon"
-
-# Start new metrics generation with nmon
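-# nmon flags: -f = save snapshots to a file in CSV format, -s 1 = one-second
-# snapshot interval, -c 9999999 = a snapshot count high enough to run until killed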
-ssh -l root ${MS_IP}   -- "cd /tmp && nmon -f -s 1 -c 9999999"
-ssh -l root ${LIVE_IP} -- "cd /tmp && nmon -f -s 1 -c 9999999"
-
-# Launch a bench for each provided stream number targets
-for stream_target in ${STREAMS_TARGETS}; do
-    msg "orange" "Beginning of the bench for ${stream_target} streams"
-    "${DIR}/bench-live.sh" --log-file ${LOG_FILE} --no-header --test-duration ${TEST_DURATION} ${stream_target}
-
-    # If there are more than 10 anomalies in the current test => stop the bench
-    anomalies_nb=$(grep -F 'anomalie(s) have been found during the test' ${LOG_FILE} | \
-                       grep -oP '[0-9]+(?= anomalie\(s\))' | \
-                       sort -n | \
-                       tail -n1)
-    if [[ ${anomalies_nb} -gt 10 ]]; then
-        msg "red" "More than 10 anomalies have been found with the current stream count"
-        msg "red" "Stopping the bench"
-        break
-    fi
-
-    # If it is not the last bench
-    if [[ "${stream_target}" != "${STREAMS_TARGETS##* }" ]]; then
-        msg "orange" "Waiting 5 min before the next bench (to let the load average stabilize)"
-        sleep 300
-    fi
-done
-
-cleanup
-
-exit 0
diff --git a/tools/benchmark/live/lib/functions.sh b/tools/benchmark/live/lib/functions.sh
deleted file mode 100644
index 2acb3e7289b8d375f5db082c09219133e5448899..0000000000000000000000000000000000000000
--- a/tools/benchmark/live/lib/functions.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env bash
-
-RED='\033[0;31m'    # Red color
-GREEN='\033[0;32m'  # Green color
-ORANGE='\033[0;33m' # Orange color
-BLUE='\033[0;34m'   # Blue color
-NC='\033[0m'        # Normal color
-
-# Formatting functions
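-# Usage: msg COLOR "message"
-# Prints each line of the message to stdout in COLOR and appends it,
-# uncolored and with a timestamp prefix, to ${LOG_FILE}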
-msg() {
-    color="${1}"
-    msg="${2}"
-    date="$(date +'%d/%m/%Y %H:%M:%S')"
-
-    COLOR="${NC}"
-
-    case "${color}" in
-        "white")   COLOR="${NC}";;
-        "red")     COLOR="${RED}";;
-        "green")   COLOR="${GREEN}";;
-        "blue")    COLOR="${BLUE}";;
-        "orange")  COLOR="${ORANGE}";;
-    esac
-
-    while IFS= read -r line;do
-        echo -e "${date} - ${COLOR}${line}${NC}"
-        echo "${date} - ${line}" >> ${LOG_FILE}
-    done <<< "${msg}"
-
-}
-
-# Print the configuration of a distant system
-get_system_config() {
-    system="${1}"
-    name="${2}"
-
-    msg "white" "Getting the system information for the ${name}"
-    cpuinfos=$(ssh -l root ${system} -- "lscpu | grep '^CPU(s)\|Thread(s)\|Core(s)\|Model name\|MHz'")
-    raminfos=$(ssh -l root ${system} -- "free -h")
-
-    echo -e "${GREEN}${name} (${system}):${NC}"
-    echo "# ${name} (${system}):" >> $SYSINFO_FILE
-    echo -e "${BLUE}CPU infos:${NC}"
-    echo "## CPU infos:" >> $SYSINFO_FILE
-    echo "${cpuinfos}" | tee -a $SYSINFO_FILE
-    echo -e "${BLUE}RAM infos:${NC}"
-    echo "RAM infos:" >> $SYSINFO_FILE
-    echo "${raminfos}" | tee -a $SYSINFO_FILE
-    echo | tee -a $SYSINFO_FILE
-}
-
-# Network bench tool
-get_net_bandwidth() {
-    ms_ip="${1}"
-    live_ip="${2}"
-
-    msg "white" "Stopping nginx on the live server"
-    ssh -l root ${live_ip} -- "systemctl stop nginx"
-
-    msg "white" "Getting the servername of the live server in the nginx config"
-    live_dns="$(ssh -l root ${live_ip} -- 'grep -oP "server_name \K[^;]+" /etc/nginx/sites-enabled/live.conf | head -n1')"
-    msg "white" "Starting netcat in listening mode on tcp/80 on the live server"
-    nc_pid=$(ssh -l root ${live_ip} -- "nc -lp 80 >/dev/null 2>&1 & echo \$!")
-    msg "white" "Sending 1GB of data to the netcat listener on the live server to measure bandwidth"
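-    # dd bs=1M count=1K pushes 1024 MiB through nc; dd reports the MB/s rate on stderr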
-    dd_infos="$(ssh -l root ${ms_ip} -- "dd if=/dev/zero bs=1M count=1K | nc -q 0 ${live_dns} 80" 2>&1)"
-
-    msg "white" "Restarting nginx on the live server"
-    ssh -l root ${live_ip} -- "systemctl start nginx"
-
-    echo -ne "${GREEN}Measured maximal bandwidth from ${ms_ip} to ${live_ip}: ${NC}"
-    echo -n "# Measured maximal bandwidth from ${ms_ip} to ${live_ip}: " >> ${SYSINFO_FILE}
-    echo "${dd_infos}" | grep -oP "[0-9\.]+ MB\/s" | tee -a ${SYSINFO_FILE}
-    echo | tee -a ${SYSINFO_FILE}
-}
-
-get_stream_infos() {
-    ms_ip="${1}"
-    ms_instance="${2}"
-    script="/root/envsetup/tools/benchmark/live/resources/get-stream-infos.sh"
-
-    echo -e "${GREEN}Tested stream quality infos: ${NC}"
-    echo "# Tested stream quality infos: " >> ${SYSINFO_FILE}
-    echo -e "$(ssh -l root ${ms_ip} -- "${script} ${ms_instance}")\n" | tee -a ${SYSINFO_FILE}
-}
diff --git a/tools/benchmark/live/lib/nmon.txt b/tools/benchmark/live/lib/nmon.txt
deleted file mode 100644
index b1656456fb7d03b0e3986f4e4d7d00ff633d7be6..0000000000000000000000000000000000000000
--- a/tools/benchmark/live/lib/nmon.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-# Sources
-
-[nmonchart](http://nmon.sourceforge.net/pmwiki.php?n=Site.Nmonchart)
-[nmonmerge2](http://nmon.sourceforge.net/pmwiki.php?n=Site.Nmonmerge)
-[nmonmerge2 debian patch:](https://sourceforge.net/p/nmon/patches/13/)
diff --git a/tools/benchmark/live/lib/nmonchart b/tools/benchmark/live/lib/nmonchart
deleted file mode 100755
index 92c04846a7da3061532d6da854652259db763d6a..0000000000000000000000000000000000000000
--- a/tools/benchmark/live/lib/nmonchart
+++ /dev/null
@@ -1,1555 +0,0 @@
-#!/usr/bin/ksh  
-
-# License GNU General Public License version 3.0 (GPLv3)
-# (c) Copyright 2015. Nigel Griffiths
-
-export nmonchart_version=40
-
-# Set wantCONFIG to 0 (zero) to switch off config button or 1 to switch it on
-wantCONFIG=1
-
-# Set wantZOOM to 0 (zero) to switch off  the zoom function or 1 to switch it on
-wantZOOM=1
-
-# Set if you want the new fast mode using awk instead of slower sed loops
-fastmode=1
-
-# Work around syntax differences between AIX (which has the aixpert command) and Linux (which does not)
-if [[ -e /usr/sbin/aixpert ]]
-then
-# echo running on  AIX
-export ECHO="echo "
-export SORTARG=" -t, +2 "
-export PRINTN="print -n "
-else
-#echo running on  Linux 
-export ECHO="echo -e "
-export SORTARG=" -k2 "
-export PRINTN="echo -n "
-fi
-
-if (( wantCONFIG ))
-then
-config_start()
-{
-$ECHO '<script>'
-$ECHO 'function config() {'
-$ECHO '    var myWindow = window.open("", "MsgWindow", "width=1024, height=800");'
-$ECHO '    myWindow.document.write("<h2>Configuration data for' $ORIGINAL '<br>Use PageDown or Scroll bar (if available)</h2><br>\\' 
-}
-
-config_end()
-{
-$ECHO '");'
-$ECHO '}'
-$ECHO '</script>'
-}
-
-config_button()
-{
-$ECHO '\t<button onclick="config()"><b>Configuration</b></button> '
-}
-fi
-
-# first part of the .html file
-html_start() 
-{
-$ECHO '<html>'
-$ECHO '\t<head>'
-$ECHO '\t\t<title>nmonChart</title>'
-
-if (( wantCONFIG ))
-then
-config_start $1
-grep ^AAA $INPUT | grep -v ^AAA,note | awk -F, '{ printf "<b>%s</b> = %s<br>\\\n",$2,$3}'
-grep ^BBB $INPUT | grep -v ^BBBP| sed 's/"//g' | sed 's/,/:/' | sed 's/,/:/' | awk -F: '{ printf "<b>%s</b> %s %s<br>\\\n",$1,$2,$3}'
-grep ^BBBP $INPUT |  sed 's/"//g' | awk -F, '{ printf "%s <b>%s</b> %s<br>\\\n",$2,$3,$4}'
-config_end
-fi
-
-$ECHO '\t\t<script type="text/javascript" src="https://www.google.com/jsapi"></script>'
-$ECHO '\t\t<script type="text/javascript">'
-$ECHO '\t\tgoogle.load("visualization", "1.1", {packages:["corechart"]});'
-$ECHO 
-$ECHO '\t\tgoogle.setOnLoadCallback(setupCharts);'
-$ECHO '\t\t'
-$ECHO '\t\tfunction setupCharts() {'
-$ECHO '\t\t'
-$ECHO '\t\tvar chart = null;'
-$ECHO 
-}
-
-# declare array for the data
-chart_start()
-{
-$ECHO '\t\t\tvar data_'$1 '= google.visualization.arrayToDataTable(['
-}
-
-# finish the data area and create Area Chart 
-chart_end()
-{
-$ECHO '\t\t]);'
-$ECHO
-$ECHO '\t\tvar options_'$1' = {'
-$ECHO '\t\t\tchartArea: {left: "5%", width: "85%", top: "10%", height: "80%"},'
-$ECHO '\t\t\ttitle: "'$2'",'
-$ECHO '\t\t\tfocusTarget: "category",'
-$ECHO '\t\t\thAxis: {'
-$ECHO '\t\t\t\tgridlines: {'
-$ECHO '\t\t\t\t\tcolor: "lightgrey",'
-$ECHO '\t\t\t\t\tcount: 30'
-$ECHO '\t\t\t\t}'
-$ECHO '\t\t\t},'
-$ECHO '\t\t\tvAxis: {'
-$ECHO '\t\t\t\tgridlines: {'
-$ECHO '\t\t\t\t\tcolor: "lightgrey",'
-$ECHO '\t\t\t\t\tcount: 11'
-$ECHO '\t\t\t\t}'
-$ECHO '\t\t\t},'
-if (( wantZOOM ))
-then
-$ECHO '\t\t\texplorer: { actions: ["dragToZoom", "rightClickToReset"],'
-$ECHO '\t\t\t\taxis: "horizontal",'
-$ECHO '\t\t\t\tkeepInBounds: true,'
-$ECHO '\t\t\t\tmaxZoomIn: 20.0'
-$ECHO '\t\t\t},'
-fi
-$ECHO '\t\t\tisStacked: ' $3
-$ECHO '\t\t};'
-$ECHO
-$ECHO '\t\tdocument.getElementById("draw_'$1'").addEventListener("'click'", function() {'
-$ECHO '\t\tif (chart && chart.clearChart) chart.clearChart();'
-$ECHO 
-$ECHO '\t\tchart = new google.visualization.AreaChart(document.getElementById("chart_master"));'
-$ECHO '\t\tchart.draw( data_'$1', options_'$1');'
-$ECHO '\t\t});'
-$ECHO
-}
-
-# finish the data area and create Column Chart
-chart_end_column()
-{
-$ECHO '\t\t]);'
-$ECHO
-$ECHO '\t\tvar options_'$1' = {'
-$ECHO '\t\t\tchartArea: {left: "5%", width: "85%", top: "10%", height: "80%"},'
-$ECHO '\t\t\ttitle: "'$2'",'
-$ECHO '\t\t\tfocusTarget: "category",'
-$ECHO '\t\t\tvAxis: {'
-$ECHO '\t\t\t\tgridlines: {'
-$ECHO '\t\t\t\t\tcolor: "lightgrey",'
-$ECHO '\t\t\t\t\tcount: 11'
-$ECHO '\t\t\t\t}'
-$ECHO '\t\t\t},'
-$ECHO '\t\t\tisStacked: ' $3
-$ECHO '\t\t};'
-$ECHO
-$ECHO '\t\tdocument.getElementById("draw_'$1'").addEventListener("'click'", function() {'
-$ECHO '\t\tif (chart && chart.clearChart) chart.clearChart();'
-$ECHO 
-$ECHO '\t\tchart = new google.visualization.ColumnChart(document.getElementById("chart_master"));'
-$ECHO '\t\tchart.draw( data_'$1', options_'$1');'
-$ECHO '\t\t});'
-$ECHO
-}
-
-# Variation of the above for TOPSUM graph
-chart_end_top()
-{
-$ECHO '\t\t]);'
-$ECHO
-$ECHO '\t\tvar options_TOPSUM = {'
-$ECHO '\t\t\tchartArea: {left: "5%", width: "85%", top: "10%", height: "80%"},'
-$ECHO '\t\t\ttitle: "Top 20 processes by CPU correlation between CPU-seconds(Total), Character-I/O(Total), Memory-Size(Max) for each Command Name",'
-$ECHO '\t\t\thAxis: {title: "CPU seconds in Total"},'
-$ECHO '\t\t\tvAxis: {title: "Character I/O in Total"},'
-$ECHO '\t\t\tsizeAxis: {maxSize: 200},'
-$ECHO '\t\t\tbubble: {textStyle: {fontSize: 15}}'
-$ECHO '\t\t};'
-$ECHO
-$ECHO '\t\tdocument.getElementById("draw_TOPSUM").addEventListener("'click'", function() {'
-$ECHO '\t\tif (chart && chart.clearChart) chart.clearChart();'
-$ECHO 
-$ECHO '\t\tchart = new google.visualization.BubbleChart(document.getElementById("chart_master"));'
-$ECHO '\t\tchart.draw( data_TOPSUM, options_TOPSUM);'
-$ECHO '\t\t});'
-$ECHO
-}
-
-# the data array is reused; we just add the same chart unstacked - used for the unstacked Disk charts
-chart_add_unstacked()
-{
-$ECHO '\t\tvar options_'$1'u = {'
-$ECHO '\t\t\tchartArea: {left: "5%", width: "85%", top: "10%", height: "80%"},'
-$ECHO '\t\t\ttitle: "'$2'",'
-$ECHO '\t\t\tfocusTarget: "category",'
-$ECHO '\t\t\thAxis: {'
-$ECHO '\t\t\t\tgridlines: {'
-$ECHO '\t\t\t\t\tcolor: "lightgrey",'
-$ECHO '\t\t\t\t\tcount: 30'
-$ECHO '\t\t\t\t}'
-$ECHO '\t\t\t},'
-$ECHO '\t\t\tvAxis: {'
-$ECHO '\t\t\t\tgridlines: {'
-$ECHO '\t\t\t\t\tcolor: "lightgrey",'
-$ECHO '\t\t\t\t\tcount: 11'
-$ECHO '\t\t\t\t}'
-$ECHO '\t\t\t},'
-$ECHO '\t\t\tisStacked: 0'
-$ECHO '\t\t};'
-$ECHO
-$ECHO '\t\tdocument.getElementById("draw_'$1'u").addEventListener("'click'", function() {'
-$ECHO '\t\tif (chart && chart.clearChart) chart.clearChart();'
-$ECHO 
-$ECHO '\t\tchart = new google.visualization.AreaChart(document.getElementById("chart_master"));'
-$ECHO '\t\tchart.draw( data_'$1', options_'$1'u);'
-$ECHO '\t\t});'
-$ECHO
-}
-
-# Finish the .html head section and start the body, then output the nmon file name at the top
-html_mid() 
-{
-$ECHO '\t\t}'
-$ECHO '\t\t</script>'
-$ECHO '\t</head>'
-$ECHO '\t <body bgcolor="#EEEEFF">'
-$ECHO '\tnmon data file: <b>'$ORIGINAL'</b>  '
-
-if (( wantCONFIG ))
-then
-config_button
-fi
-
-if (( hasTOP ))
-then
-chart_button TOPSUM    "Top Summary" black
-chart_button TOPCMD    "Top Commands" black
-fi
-
-if (( hasDISKBUSY1 ))
-then
-chart_button TOPDISK    "Top Disk" black
-fi
-
-$ECHO '\t<br>'
-#$ECHO '\t <hr>'
-}
-
-# Add the graph-drawing buttons at the top of the page
-chart_button() 
-{
-$ECHO '\t<button id="draw_'$1'" style="color:'$3';"><b>'$2'</b></button>'
-}
-
-
-# This is where the graphs get placed
-chart_draw()
-{
-$ECHO ""
-$ECHO '\t<div id="chart_master" style="width:100%; height:75%;">'
-$ECHO '\t<h2 style="color:blue">Click on a Graph button above, to display that graph</h2>'
-$ECHO '\t</div>'
-$ECHO ""
-}
-
-# Finish the .html file
-html_end() 
-{
-$ECHO '\t</body>'
-$ECHO '</html>'
-}
-
-# Called to generate the data for the fixed-format nmon file lines - just extract the data columns needed and reformat as necessary
-# The parameter is the graph name
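-# Example input lines (nmon CSV format, roughly):
-#   CPU_ALL,CPU Total,User%,Sys%,Wait%,Idle%,Busy,CPUs    <- header, matched as CPU_ALL,C
-#   CPU_ALL,T0001,12.3,4.5,0.1,83.1,,8                    <- sample, matched as CPU_ALL,T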
-chart()
-{
-# note: '\'' is VERY special and is how to get around the impossible ' in an awk string
-#	'   = end the awk program string
-#	\'  = escaped, so it is just a character
-#	'   = start the awk program string again
-#	the strings are concatenated (the end and start quotes disappear), which allows a ' char to be added
-# as the strings are concatenated by the shell before awk gets them, this just adds a single quote = '
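-# e.g. the shell collapses   print "'\''" $2 "'\''"   into the awk source   print "'" $2 "'"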
-
-	chart_start $1
-	case $1 in
-
-	CPUUTIL_ALL) 
-	awk -F , '
-	/^CPUUTIL_ALL,C/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''" $4 "'\'','\''" $5 "'\'','\''" $6 "'\'','\''" $7 "'\'','\''" $8 "'\'','\''" $9 "'\'','\''" $10 "'\'','\''" $11 "'\'','\''" $12 "'\'']" }
-	/^CPUUTIL_ALL,T/ { print ",['\''" $2 "'\''," $3 ","  $4 "," $5 "," $6 "," $7 "," $8 "," $9 "," $10 "," $11 "," $12 "]" }
-	' <$INPUT
-	;;
-
-	PHYSICAL_CPU) 
-	awk -F , '
-	/^LPAR,L/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''" $4 "'\'','\''" $7 "'\'']" }
-	/^LPAR,T/ { print ",['\''" $2 "'\''," $3 ","  $4 "," $7 "]" }
-	' <$INPUT
-	;;
-
-	PHYSICAL_CPU_LINUX) 
-	awk -F , '
-	/^LPAR,S/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''Entitlement'\'','\''VP'\'']" }
-	/^LPAR,T/ { print ",['\''" $2 "'\''," $3 ","  $10 "," $14 "]" }
-	' <$INPUT
-	;;
-
-	GPU_UTIL) 
-	awk -F , '
-	/^GPU_UTIL,N/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''"  $4 "'\'','\''"  $5 "'\'','\''" $6 "'\'']" }
-	/^GPU_UTIL,T/ { print ",['\''" $2 "'\''," $3 "," $4 "," $5 "," $6 "]" }
-	' <$GPUINPUT
-	;;
-
-	GPU_MEM) 
-	awk -F , '
-	/^GPU_MEM,N/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''"  $4 "'\'','\''"  $5 "'\'','\''" $6 "'\'']" }
-	/^GPU_MEM,T/ { print ",['\''" $2 "'\''," $3 "," $4 "," $5 "," $6 "]" }
-	' <$GPUINPUT
-	;;
-
-	GPU_TEMP) 
-	awk -F , '
-	/^GPU_TEMP,N/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''"  $4 "'\'','\''"  $5 "'\'','\''" $6 "'\'']" }
-	/^GPU_TEMP,T/ { print ",['\''" $2 "'\''," $3 "," $4 "," $5 "," $6 "]" }
-	' <$GPUINPUT
-	;;
-
-	GPU_WATTS) 
-	awk -F , '
-	/^GPU_WATTS,N/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''"  $4 "'\'','\''"  $5 "'\'','\''" $6 "'\'']" }
-	/^GPU_WATTS,T/ { print ",['\''" $2 "'\''," $3 "," $4 "," $5 "," $6 "]" }
-	' <$GPUINPUT
-	;;
-
-	GPU_MHZ) 
-	awk -F , '
-	/^GPU_MHZ,N/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''"  $4 "'\'','\''"  $5 "'\'','\''" $6 "'\'']" }
-	/^GPU_MHZ,T/ { print ",['\''" $2 "'\''," $3 "," $4 "," $5 "," $6 "]" }
-	' <$GPUINPUT
-	;;
-
-	CPU_UTIL) 
-if (( hasSTEAL ))
-then
-	awk -F , '
-	/^CPU_ALL,C/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''"  $4 "'\'','\''"  $5 "'\'','\''" $6 "'\'','\''" $7 "'\'']" }
-	/^CPU_ALL,T/ { print ",['\''" $2 "'\''," $3 "," $4 "," $5 "," $6 "," $7 "]" }
-	' <$INPUT
-else
-	awk -F , '
-	/^CPU_ALL,C/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''"  $4 "'\'','\''"  $5 "'\'','\''" $6 "'\'']" }
-	/^CPU_ALL,T/ { print ",['\''" $2 "'\''," $3 "," $4 "," $5 "," $6 "]" }
-	' <$INPUT
-fi
-	;;
-
-	POOLIDLE) 
-	awk -F , '
-	/^LPAR,L/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $6 "'\'','\''" $9 "'\'']" }
-	/^LPAR,T/ { print ",['\''" $2 "'\''," $6 "," $9 "]" }
-	' <$INPUT
-	;;
-
-	POOLIDLE_LINUX) 
-	awk -F , '
-	/^LPAR,S/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''PoolSize'\'','\''PoolFree'\'']" }
-	/^LPAR,T/ { print ",['\''" $2 "'\''," $8 "," $21 "]" }
-	' <$INPUT
-	;;
-
-	CPUMHZ) 
-	awk -F , '
-	/^CPUMHZ,C/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''" $4 "'\'']" }
-	/^CPUMHZ,T/ { print ",['\''" $2 "'\''," $3 "," $4 "]" }
-	' <$INPUT
-	;;
-
-	REALMEM) 
-	awk -F , '
-	/^MEM,M/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $5 "'\'','\''" $7 "'\'']" }
-	/^MEM,T/ { print ",['\''" $2 "'\''," $5 "," $7 "]" }
-	' <$INPUT
-	;;
-
-	MEM_LINUX) 
-	awk -F , '
-	/^MEM,M/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''" $7 "'\'','\''" $12 "'\'','\''" $13 "'\'','\''" $15 "'\'','\''" $17"'\'']" }
-	/^MEM,T/ { print ",['\''" $2 "'\''," $3 "," $7 "," $12 "," $13 "," $15 "," $17 "]" }
-	' <$INPUT
-	;;
-
-	VIRTMEM) 
-	awk -F , '
-	/^MEM,M/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $6 "'\'','\''" $8 "'\'']" }
-	/^MEM,T/ { print ",['\''" $2 "'\''," $6 "," $8 "]" }
-	' <$INPUT
-	;;
-
-	SWAP_LINUX) 
-	awk -F , '
-	/^MEM,M/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $6 "'\'','\''" $10 "'\'']" }
-	/^MEM,T/ { print ",['\''" $2 "'\''," $6 "," $10 "]" }
-	' <$INPUT
-	;;
-
-	FSCACHE) 
-	awk -F , '
-	/^MEMUSE,M/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''"  $4 "'\'','\''"  $5 "'\'']" }
-	/^MEMUSE,T/ { print ",['\''" $2 "'\''," $3 "," $4 "," $5 "]" }
-	' <$INPUT
-	;;
-
-#	MEMNEW) 
-#	awk -F , '
-#	/^MEMNEW,M/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''"  $4 "'\'','\''"  $5 "'\'','\''"  $6"'\'','\''"  $7"'\'','\''"  $8"'\'']" }
-#	/^MEMNEW,T/ { print ",['\''" $2 "'\''," $3 "," $4 "," $5 "," $6 "," $7 "," $8 "]" }
-#	' <$INPUT
-#	;;
-
-	MEMNEW) 
-	awk -F , '
-	/^MEMNEW,M/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''"  $4 "'\'','\''"  $5 "'\'','\''"  $6 "'\'']" }
-	/^MEMNEW,T/ { print ",['\''" $2 "'\''," $3 "," $4 "," $5 "," $6 "]" }
-	' <$INPUT
-	;;
-
-	RUNQ) 
-	awk -F , '
-	/^PROC,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'']" }
-	/^PROC,T/ { print ",['\''" $2 "'\''," $3 "]" }
-	' <$INPUT
-	;;
-
-	RUNQBLOCK) 
-	awk -F , '
-	/^PROC,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''" $4 "'\'']" }
-	/^PROC,T/ { print ",['\''" $2 "'\''," $3 "," $4 "]" }
-	' <$INPUT
-	;;
-
-	PSWITCH) 
-	awk -F , '
-	/^PROC,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $5 "'\'']" }
-	/^PROC,T/ { print ",['\''" $2 "'\''," $5 "]" }
-	' <$INPUT
-	;;
-
-	SYSCALL) 
-	awk -F , '
-	/^PROC,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $6 "'\'','\''" $7 "'\'','\''" $8 "'\'']" }
-	/^PROC,T/ { print ",['\''" $2 "'\''," $6 "," $7 "," $8 "]" }
-	' <$INPUT
-	;;
-
-	READWRITE) 
-	awk -F , '
-	/^PROC,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $7 "'\'','\''" $8 "'\'']" }
-	/^PROC,T/ { print ",['\''" $2 "'\''," $7 "," $8 "]" }
-	' <$INPUT
-	;;
-
-	FORKEXEC) 
-	awk -F , '
-	/^PROC,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $9 "'\'','\''" $10 "'\'']" }
-	/^PROC,T/ { print ",['\''" $2 "'\''," $9 "," $10 "]" }
-	' <$INPUT
-	;;
-
-	FILEIO) 
-	awk -F , '
-	/^FILE,F/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $6 "'\'','\''" $7 "'\'']" }
-	/^FILE,T/ { print ",['\''" $2 "'\''," $6 "," $7 "]" }
-	' <$INPUT
-	;;
-
-	PAGING) 
-	awk -F , '
-	/^PAGE,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $4 "'\'','\''"  $5 "'\'','\''"  $6 "'\'','\''" $7 "'\'']" }
-	/^PAGE,T/ { print ",['\''" $2 "'\''," $4 "," $5 "," $6 "," $7 "]" }
-	' <$INPUT
-	;;
-
-	SWAPIN) 
-	awk -F , '
-	/^PROC,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $4 "'\'']" }
-	/^PROC,T/ { print ",['\''" $2 "'\''," $4 "]" }
-	' <$INPUT
-	;;
-
-	PROCCOUNT) 
-	awk -F , '
-	/^PROCCOUNT,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'']" }
-	/^PROCCOUNT,T/ { print ",['\''" $2 "'\''," $3 "]" }
-	' <$INPUT
-	;;
-
-	MORE1) 
-	awk -F , '
-	/^MORE1,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'']" }
-	/^MORE1,T/ { print ",['\''" $2 "'\''," $3 "]" }
-	' <$INPUT
-	;;
-
-	MORE3) 
-	awk -F , '
-	/^MORE3,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $3 "'\'','\''" $4 "'\'','\''" $5 "'\'']" }
-	/^MORE3,T/ { print ",['\''" $2 "'\''," $3 "'\''," $4 "'\''," $5 "]" }
-	' <$INPUT
-	;;
-
-	IPC) 
-	awk -F , '
-	/^PROC,P/ { print "[{type: '\''datetime'\'', label: '\''Datetime'\'' },'\''" $11 "'\'','\''" $12 "'\'']" }
-	/^PROC,T/ { print ",['\''" $2 "'\''," $11 "," $12 "]" }
-	' <$INPUT
-	;;
-
-	*) $ECHO Oh dear, no code to handle chart name $1
-		;;
-	esac
-
-	chart_end $1 "$2" $3
-}
-
-# nmonchart command help
-hint()
-{
-	$ECHO 
-	$ECHO  Hint: nmonchart nmon_file html_file
-	$ECHO  Version $nmonchart_version
-	$ECHO 
-	$ECHO  Hint: nmonchart nmon_file html_file
-	$ECHO  "\tnmon_file \t1st parameter is the nmon captured data file like hostname_date_time.nmon"
-	$ECHO  "\thtml_file \t2nd parameter is the output file on your website directory like /webpages/docs/hostname_date_time.html"
-	$ECHO  "\t\t\tNow optional. If not given, the output file uses the 1st parameter but changes .nmon to .html"
-	$ECHO  "\t\t\tif the input file name does not end with .nmon then the .html is just added"
-	$ECHO  
-	$ECHO  "\tExample: nmonchart mynmonfile.nmon"
-	$ECHO  "\t\t the output file will be mynmonfile.html"
-	$ECHO  "\tExample: nmonchart nmon_file.csv"
-	$ECHO  "\t\t the output file will be nmon_file.csv.html"
-	$ECHO  "\tExample: nmonchart mynmonfile.nmon /webpages/docs/mycharts.html"
-	$ECHO  
-	exit
-}
-
-#------------------------------------ Don't change anything above here
-#------------------------------------ Add new graphs below
-
-# this function used to redirect output into the .html file
-generate()
-{
-html_start
-
-# Straightforward graphs where we have to pick the data out of lines with fixed numbers of columns
-if (( isAIX ))
-then
- if (( hasLPARstats ))
- then
- chart PHYSICAL_CPU "Physical CPU Use of Shared CPU(s) (Note: if Entitlement=VP then LPAR is capped)" 0
- chart POOLIDLE "Whole machine Shared Physical CPU Pool Use (If all PoolIdle=0 it means perf stats are not switched on at VM level)" 0
- fi
-else   # Linux
- if (( hasLPARstats ))
- then
- chart PHYSICAL_CPU_LINUX "Physical CPU Use of Shared CPU(s) (Note: if Entitlement=VP then LPAR is capped)" 0
- chart POOLIDLE_LINUX "Whole machine Shared Physical CPU Pool Use (If all PoolIdle=0 it means perf stats are not switched on at VM level)" 0
- fi
-fi
-chart CPU_UTIL "CPU Utilisation Percentages" 1
-if (( hasCPUMHZ ))
-then
- chart CPUMHZ "CPU Frequency in MHz" 0
-fi
-if (( isAIX ))
-then
- chart REALMEM "Real Memory - RAM in MB" 0
- chart VIRTMEM "Virtual Memory - Paging Space in MB" 0
- chart FSCACHE "Filesystem Cache Memory Use (numperm) Percentage" 0
- chart MEMNEW "Memory Use System, Process, Cache & Free Memory Percentage" 1
-else 
- if(( hasCPUUTIL_ALL ))
- then
- chart CPUUTIL_ALL "Linux CPU Utilisation FULL details" 1
- fi
- if(( hasGPU ))
- then
- # Work around the fact that some systems can have 1 GPU adapter/socket instead of the maximum of two
- # Each adapter/socket has two GPUs, so it is two or four GPUs
- grep ^GPU $INPUT | sed 's/$/,0,0/'  >$GPUINPUT
- chart GPU_UTIL  "NVidia GPU Utilisation Percent" 0
- chart GPU_MEM   "NVidia Memory Utilisation Percent" 0
- chart GPU_TEMP  "NVidia Temperature C" 0
- chart GPU_WATTS "NVidia Power Draw Watts" 0
- chart GPU_MHZ   "NVidia GPU MHz" 0
- rm $GPUINPUT
- fi
-
- if(( hasMHZ ))
- then
-	chart_start MHZ
-	$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-	grep ^MHZ,C $INPUT | cut -f 3- -d, | sed  -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-	grep ^MHZ,T $INPUT | sed -e "s/MHZ,/#\[\'/" -e  "s/,/\',/"   -e  "s/\$/\\]/" -e  "s/#/,/"
-	chart_end MHZ "CPU MHz (Stacked)" 1
- fi
-
- chart MEM_LINUX "Real Memory - RAM in MB" 0
- chart SWAP_LINUX "Virtual Memory - Paging Space in MB" 0
-fi
-
-if (( isAIX ))
-then
- chart RUNQ    "Run Queue - processes that are running or ready to run" 0
-else
- chart RUNQBLOCK    "Run Queue - processes that are running, ready to run, or blocked" 0
-fi
-chart PSWITCH "Process Switches per second - between processes" 0
-if (( isAIX ))
-then
- chart SYSCALL "System Calls per second from application to the kernel" 0
- chart READWRITE "Read and Write System Calls per second - for disk & network I/O" 0
-fi
-chart FORKEXEC "Fork() and Exec() System Calls per second - creating processes" 0
-if (( isAIX ))
-then
- chart FILEIO "File I/O through read() & write() System Calls (disk, pipe & network socket) in bytes per second" 0
-fi
-if (( isAIX ))
-then
- chart PAGING "All Paging (pgin & pgout) & Paging from Paging Space only (pgsin & pgsout) per second" 0
- chart SWAPIN    "Process Swap-In per second" 0
-fi
-
-if (( hasPROCCOUNT ))
-then
- chart PROCCOUNT  "Process Count"  0
-fi
-
-if (( hasMORE1 ))
-then
- chart MORE1  "Graph MORE1 Title"  0
-fi
-
-if (( hasMORE3 ))
-then
- chart MORE3  "Graph MORE3 Title"  0
-fi
-
-# CPU_USE
-chart_start CPU_USE
-$ECHO "['CPU','User%','System%']" 
-#$ECHO "['CPU','User%','System%','Max(usr+sys)%']" 
-grep "^CPU[0-9]*,T"  $INPUT | awk -F, '
-{
-	total = $3 + $4;
-	if(total > 0.05) {
-        usr[$1]   += $3;
-        sys[$1]   += $4;
-	}
-	count[$1] += 1;
-        #if(total > max[$1]) { max[$1] = total;}
-}
-END {
-        for (i in usr) {
-                printf "%s %.1f %.1f\n", i, usr[i]/count[i], sys[i]/count[i]
-                #printf "%s %.1f %.1f %.1f \n", i, usr[i]/count[i], sys[i]/count[i], max[i]
-        }
-}' | sed -e 's/CPU//' | sort -n | awk '{
-                printf ",['\''CPU%s'\'',%.1f,%.1f]\n", $1, $2, $3
-                #printf ",['\''CPU%s'\'',%.1f,%.1f]\n", $1, $2, $3, $4
-}'
-chart_end_column CPU_USE "Average Use of Logical CPU Core Threads - POWER=SMT or x86=Hyperthreads (ignoring values below 5%)" 1
-
-if (( hasDISKBUSY1 ))
-then
-# TOPDISK chart - Top 15 disks by Busy%
-chart_start TOPDISK 
-
-export DISKBUSY=/tmp/DISKBUSY.$$
-export DISKTMP1=/tmp/DISKTMP1.$$
-export DISKTMP2=/tmp/DISKTMP2.$$
-export DISKALL=/tmp/DISKALL.$$
-export DISK20=/tmp/DISK20.$$
-
-#extract all disks busy lines
-grep ^DISKBUSY $INPUT >$DISKBUSY
-
-#extract the zeroth disks busy lines as the base data
-grep ^DISKBUSY,  $DISKBUSY >$DISKALL
-
-for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
-do
-    grep ^DISKBUSY$i, $DISKBUSY | cut -d "," -f 3- >$DISKTMP2
-    # if the file is zero bytes in length, we have processed all the DISKBUSYn lines
-    if [ ! -s $DISKTMP2 ]
-    then
-        break
-    fi
-    cp $DISKALL $DISKTMP1
-    paste -d, $DISKTMP1 $DISKTMP2 >$DISKALL
-done
-
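-# Sum every column across all samples, rank the sums, and keep the field
-# numbers of the 15 busiest disks (fields 1-2 keep the label and timestamp)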
-columns=$(cat $DISKALL | awk -F , '{ for (i=1;i<=NF;i++) sum[i]+=$i;};
-   END { for(i in sum) print sum[i] " " i; }' | sort -nr | head -15 | cut -d " " -f 2 | awk '{ printf ","$1}')
-
-cut -d "," -f 1,2$columns <$DISKALL >$DISK20
-
-
-# Generate the JavaScript data array
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DISKBUSY,D $DISK20 | cut -f 3- -d, | sed  -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DISKBUSY,T $DISK20 | sed -e "s/DISKBUSY,/#\[\'/" -e  "s/,/\',/"   -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end TOPDISK "Top 15 disks by sum(Busy%)" 0
-fi
-
-if (( hasTOP ))
-then
-# TOPSUM Bubble chart of CPU, I/O and RAM use
- chart_start TOPSUM 
-# output the fixed header line
- $ECHO  "['Command', 'CPU seconds', 'CharIO', 'Type', 'Memory KB' ]" 
-
-# extract and filter out the rubbish, particularly for command names
- grep "^TOP,[0-9]" $INPUT | sed -e "s/[ =<>\[\]-+\/:]/_/g" >$TOP
-
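-# Fields used below (as charted): $4 -> CPU, $9+$10 -> memory size, $11 -> character I/O, $14 = command name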
-cat $TOP | awk -F, '
-{
-	cpu[$14] += $4;
-	io[$14] += $11;
-	size[$14] = (size[$14] < ($9 + $10) ? ($9 + $10) : size[$14] );
-}
-END {
-	for (i in cpu) {
-		printf ",['\''%s'\'',%8.1f,%8d,'\''%s'\'',%8d]\n", i, cpu[i], io[i] / 1024, i, size[i]
-	}
-}' | sort -rn $SORTARG | head -n 20 >>$TOP20
- cat $TOP20
- chart_end_top
-
-# TOP processes over time chart
-
-awk -F\' '
-	BEGIN { 
-		i=1 ; 
-		printf "BEGIN {\n" ;
-	}
-	{ 
-		printf "\tcmd[%d]=\"%s\";\n", i, $2 ;
-		i++  ;
-	}' <$TOP20 >$TOPAWKS
-
-$ECHO '
-}
-{
-        timestamp[$3]=$3;
-        cpu[$3 "_" $14] += $4;
-}
-END {
-        for (j in cmd) {
-                printf ",'\''%s'\''", cmd[j];
-        }
-        printf "]\\n" ;
-        for (i in timestamp) {
-                printf ",['\''%s'\''", i;
-                for (j in cmd) {
-                        printf ", %.1f", cpu[i "_" cmd[j]] ;
-                }
-                printf "]\\n" ;
-        }
-}' >>$TOPAWKS 
-
-# Now execute the dynamically created awk script
-chart_start TOPCMD
-$PRINTN "[{type: 'datetime', label: 'Datetime' }"
-cat $TOP | awk -F, -f $TOPAWKS  | sort -n
-chart_end TOPCMD "Top Process Commands by CPU (Percentage of a CPU core)" 0
-fi
-
-# These lines have different numbers of stats depending on the machine config
-# Assuming we want all the stats on a line then fortunately we just need to format them
-chart_start NET 
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^NET,N $INPUT | cut -f 3- -d, | sed -e "s/,$//"  -e "s/,/\',\'/g" -e "s/\$/\\']/" 
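-# The first half of the data fields are receive KB/s (plotted positive);
-# the second half are send KB/s, negated so they plot below the axis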
-grep ^NET,T $INPUT | \
-awk -F"," '{printf(",['\''%s'\''",$2); for(i=3; i<=NF; i++){if( i<=(((NF-2)/2+2)) ){printf(",%.1f",$i)} else {printf(",-%.1f",$i)}};printf("]\n")}'
-chart_end NET "Network Receive(read) & Send(write shown negatively) in KB per second" 0
-
-chart_start NETPACKET 
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^NETPACKET,N $INPUT | cut -f 3- -d, | sed -e "s/,$//"  -e "s/,/\',\'/g" -e "s/\$/\\']/" 
-grep ^NETPACKET,T $INPUT | sed -e "s/NETPACKET,/#\[\'/" -e "s/,/\',/"    -e "s/\$/\\]/" -e  "s/#/,/"
-chart_end NETPACKET "Network packet count per second" 0
-
-if (( isAIX ))
-then
- if (( hasNETSIZE ))
- then
-  chart_start NETSIZE 
-  $PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-  grep ^NETSIZE,N $INPUT | cut -f 3- -d, | sed -e "s/,$//"  -e "s/,/\',\'/g" -e "s/\$/\\']/" 
-  grep ^NETSIZE,T $INPUT | sed -e "s/NETSIZE,/#\[\'/" -e "s/,/\',/"    -e "s/\$/\\]/" -e  "s/#/,/"
-  chart_end NETSIZE "Network packet size (bytes)" 0
- fi
-fi
-
-if (( isAIX ))
-then
- if (( hasSEA ))
- then
-chart_start SEA 
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^SEA,S $INPUT | cut -f 3- -d, | sed -e "s/,$//"  -e "s/,/\',\'/g" -e "s/\$/\\']/" 
-grep ^SEA,T $INPUT | \
-awk -F"," '{printf(",['\''%s'\''",$2); for(i=3; i<=NF; i++){if( i<=(((NF-2)/2+2)) ){printf(",%.1f",$i)} else {printf(",-%.1f",$i)}};printf("]\n")}'
-chart_end SEA "Shared Ethernet Adapter Receive(read) & Send(write shown negatively) in KB per second" 0
- fi
-fi
-
-if (( isAIX ))
-then
- if (( hasSEAPACKET ))
- then
-  chart_start SEAPACKET
-  $PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-  grep ^SEAPACKET,S $INPUT | cut -f 3- -d, | sed -e "s/,$//"  -e "s/,/\',\'/g" -e "s/\$/\\']/" 
-  grep ^SEAPACKET,T $INPUT | \
-  awk -F"," '{printf(",['\''%s'\''",$2); for(i=3; i<=NF; i++){if( i<=(((NF-2)/2+2)) ){printf(",%.1f",$i)} else {printf(",-%.1f",$i)}};printf("]\n")}'
-  chart_end SEAPACKET "Shared Ethernet Adapter Receive(read) & Send(write shown negatively) in Packets per second" 0
- fi
-fi
-
-if (( isAIX ))
-then
- if (( hasSEACHPHY ))
- then
-  chart_start SEACHPHY
-  $PRINTN "[{type: 'datetime', label: 'Datetime' }"
-  grep ^SEACHPHY,P $INPUT | cut -f 3- -d, | awk -F"," '{for(i=3; i<=NF; i=i+9){j=i+1; printf(",'"'"%s"'"','"'"%s"'"'",$i,$j) };printf("]\n")}'
-  grep ^SEACHPHY,T $INPUT | awk -F"," '{printf(",['\''%s'\''",$2); for(i=3; i<=NF; i=i+9){j=i+1; printf(",%.1f,-%.1f",$i,$j) };printf("]\n")}'
-  chart_end SEACHPHY "SEA Physical Receive(read) & Send(write shown negatively) in KB per second" 0
- fi
-fi
-#--
-if (( isAIX ))
-then
-chart_start ADAPT_KBS
-grep ^IOADAPT,D $INPUT | \
-awk -F"," '{printf("[{type: '\''datetime'\'', label: '\''Datetime'\'' }"); for(i=3; i<=NF; i++){if( ((i+1)%3) || i<1 ){printf(",'\''%s'\''",$i)}}; printf("]\n")}'
-grep ^IOADAPT,T $INPUT | \
-awk -F"," 'NR!=1{printf(",['\''%s'\''",$2); for(i=3; i<=NF; i++){if( ((i+1)%3) || i<1 ){printf(",%.1f",$i)}};printf("]\n")}'
-chart_end ADAPT_KBS "I/O Adapter stats in KB per second (Stacked)" 1
-fi
-
-#---
-if (( isAIX ))
-then
-chart_start ADAPT_TPS
-grep ^IOADAPT,D $INPUT | \
-awk -F"," '{printf("[{type: '\''datetime'\'', label: '\''Datetime'\'' }"); for(i=5; i<=NF; i++){if( !((i+1)%3) || i<1 ){printf(",'\''%s'\''",$i)}}; printf("]\n")}'
-grep ^IOADAPT,T $INPUT | \
-awk -F"," 'NR!=1{printf(",['\''%s'\''",$2); for(i=5; i<=NF; i++){if( !((i+1)%3) || i<1  ){printf(",%.1f",$i)}};printf("]\n")}'
-chart_end ADAPT_TPS "I/O Adapter stats in Transfers per second (Stacked)" 1
-fi
-
-if (( isAIX ))
-then
- if (( hasFC ))
- then
-  chart_start FCREAD 
-  $PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-  grep ^FCREAD,F $INPUT | cut -f 3- -d, | sed -e "s/,$//"  -e "s/,/\',\'/g" -e "s/\$/\\']/" 
-  grep ^FCREAD,T $INPUT | sed -e "s/FCREAD,/#\[\'/" -e "s/,/\',/"    -e "s/\$/\\]/" -e  "s/#/,/"
-  chart_end FCREAD "Fibre Channel Read KB/s" 0
-
-  chart_start FCWRITE 
-  $PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-  grep ^FCWRITE,F $INPUT | cut -f 3- -d, | sed -e "s/,$//"  -e "s/,/\',\'/g" -e "s/\$/\\']/" 
-  grep ^FCWRITE,T $INPUT | sed -e "s/FCWRITE,/#\[\'/" -e "s/,/\',/"    -e "s/\$/\\]/" -e  "s/#/,/"
-  chart_end FCWRITE "Fibre Channel Write KB/s" 0
-
-  chart_start FCXFERIN 
-  $PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-  grep ^FCXFERIN,F $INPUT | cut -f 3- -d, | sed -e "s/,$//"  -e "s/,/\',\'/g" -e "s/\$/\\']/" 
-  grep ^FCXFERIN,T $INPUT | sed -e "s/FCXFERIN,/#\[\'/" -e "s/,/\',/"    -e "s/\$/\\]/" -e  "s/#/,/"
-  chart_end FCXFERIN "Fibre Channel transfers In/s" 0
-
-  chart_start FCXFEROUT 
-  $PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-  grep ^FCXFEROUT,F $INPUT | cut -f 3- -d, | sed -e "s/,$//"  -e "s/,/\',\'/g" -e "s/\$/\\']/" 
-  grep ^FCXFEROUT,T $INPUT | sed -e "s/FCXFEROUT,/#\[\'/" -e "s/,/\',/"    -e "s/\$/\\]/" -e  "s/#/,/"
-  chart_end FCXFEROUT "Fibre Channel transfers Out/s" 0
- fi
-fi
-
-#--- next the disks graphs that have Stacked and Unstacked versions
-# handles diskless servers
-if (( hasDISKs ))
-then
-
-chart_start DISKBUSY
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DISKBUSY,D $INPUT | cut -f 3- -d, | sed  -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DISKBUSY,T $INPUT | sed -e "s/DISKBUSY,/#\[\'/" -e  "s/,/\',/"   -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DISKBUSY "Disk Busy Percentage of the time (Stacked)" 1
-chart_add_unstacked DISKBUSY "Disk Busy Percentage of the time (UnStacked)"
-
-chart_start DISKREAD
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DISKREAD,D $INPUT | cut -f 3- -d, | sed -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DISKREAD,T $INPUT | sed -e "s/DISKREAD,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DISKREAD "Disk Read KB per second (Stacked)" 1
-chart_add_unstacked DISKREAD "Disk Read KB per second (UnStacked)" 
-
-chart_start DISKWRITE
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DISKWRITE,D $INPUT | cut -f 3- -d, | sed -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DISKWRITE,T $INPUT | sed -e "s/DISKWRITE,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DISKWRITE "Disk Write KB per second (Stacked)" 1
-chart_add_unstacked DISKWRITE "Disk Write KB per second (UnStacked)" 
-
-chart_start DISKBSIZE
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DISKBSIZE,D $INPUT | cut -f 3- -d, | sed  -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DISKBSIZE,T $INPUT | sed -e "s/DISKBSIZE,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DISKBSIZE "Disk Block Size KB" 0
-
-chart_start DISKXFER
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DISKXFER,D $INPUT | cut -f 3- -d, | sed -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DISKXFER,T $INPUT | sed -e "s/DISKXFER,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DISKXFER "Disk Transfers per second (Stacked)" 1
-chart_add_unstacked DISKXFER "Disk Transfers per second (UnStacked)"
-
-fi 
-# end of hasDISKs
-
-
-if(( hasSERVICETIME ))
-then
-chart_start DISKSERV
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DISKSERV,D $INPUT | cut -f 3- -d, | sed -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DISKSERV,T $INPUT | sed -e "s/DISKSERV,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DISKSERV "Disk Service Time in milli-seconds" 0
-
-chart_start DISKREADSERV
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DISKREADSERV,D $INPUT | cut -f 3- -d, | sed -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DISKREADSERV,T $INPUT | sed -e "s/DISKREADSERV,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DISKREADSERV "Disk Read Service Time in milli-seconds" 0
-
-chart_start DISKWRITESERV
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DISKWRITESERV,D $INPUT | cut -f 3- -d, | sed -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DISKWRITESERV,T $INPUT | sed -e "s/DISKWRITESERV,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DISKWRITESERV "Disk Write Service Time in milli-seconds" 0
-
-chart_start DISKWAIT
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DISKWAIT,D $INPUT | cut -f 3- -d, | sed -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DISKWAIT,T $INPUT | sed -e "s/DISKWAIT,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DISKWAIT "Disk Wait Time in milli-seconds" 0
-fi
-
-if (( hasDG ))
-then
-chart_start DGBUSY
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DGBUSY,D $INPUT | cut -f 3- -d, | sed  -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DGBUSY,T $INPUT | sed -e "s/DGBUSY,/#\[\'/" -e  "s/,/\',/"   -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DGBUSY "Disk Group Busy Percentage of the time (Stacked)" 1
-chart_add_unstacked DGBUSY "Disk Group Busy Percentage of the time (UnStacked)"
-
-chart_start DGREAD
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DGREAD,D $INPUT | cut -f 3- -d, | sed -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DGREAD,T $INPUT | sed -e "s/DGREAD,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DGREAD "Disk Group Read KB per second (Stacked)" 1
-chart_add_unstacked DGREAD "Disk Group Read KB per second (UnStacked)"
-
-chart_start DGWRITE
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DGWRITE,D $INPUT | cut -f 3- -d, | sed -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DGWRITE,T $INPUT | sed -e "s/DGWRITE,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DGWRITE "Disk Group Write KB per second (Stacked)" 1
-chart_add_unstacked DGWRITE "Disk Group Write KB per second (UnStacked)"
-
-chart_start DGSIZE
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DGSIZE,D $INPUT | cut -f 3- -d, | sed  -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DGSIZE,T $INPUT | sed -e "s/DGSIZE,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DGSIZE "Disk Group Block Size KB" 0
-
-chart_start DGXFER
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^DGXFER,D $INPUT | cut -f 3- -d, | sed -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^DGXFER,T $INPUT | sed -e "s/DGXFER,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/"
-chart_end DGXFER "Disk Group Transfers per second" 1
-chart_add_unstacked DGXFER "Disk Group Transfers per second (UnStacked)"
-fi
-
-if (( hasJFS ))
-then
-chart_start JFS
-$PRINTN "[{type: 'datetime', label: 'Datetime' },'"
-grep ^JFSFILE,J $INPUT | cut -f 3- -d, | sed -e  "s/,/\',\'/g" -e  "s/\$/\\']/"
-grep ^JFSFILE,T $INPUT | sed -e "s/JFSFILE,/#\[\'/" -e  "s/,/\',/"    -e  "s/\$/\\]/" -e  "s/#/,/" -e "s/-nan/-1.234/g"
-chart_end JFS "Journal File System Percent Full (Note: -1.234 = stats not available)" 0
-fi
-
-if (( isAIX ))
-then
-chart IPC    "InterProcess Comms - Semaphores per second & Message Queues send per seconds" 0
-fi
-
-
-
-html_mid
-
-if (( isAIX ))
-then
- if (( hasLPARstats ))
- then
-chart_button PHYSICAL_CPU "Physical CPU" black
-chart_button POOLIDLE  "Pool Idle" black
- fi
-chart_button CPU_UTIL  "CPU Util." red
-chart_button CPU_USE   "CPU Use"   red
-if (( hasCPUMHZ ))
-then
-chart_button CPUMHZ      "CPU MHz"      red
-fi
-chart_button RUNQ      "RunQ"      red
-if (( hasPROCCOUNT ))
-then
-chart_button PROCCOUNT      "Procs"      red
-fi
-chart_button PSWITCH   "pSwitch"   red
-chart_button SYSCALL   "SysCall"   red
-chart_button READWRITE "ReadWrite" red
-chart_button FORKEXEC  "ForkExec"  red
-chart_button FILEIO    "File I/O"  red
-chart_button REALMEM   "Real Mem"  blue
-chart_button VIRTMEM   "Virt Mem"  blue
-chart_button FSCACHE   "FS Cache"  blue
-chart_button MEMNEW    "MemUse"  blue
-chart_button PAGING    "Paging"    blue
-chart_button SWAPIN    "Swaping "  blue
-$ECHO '<br>' 
-# This is in the AIX section
-if (( hasMORE1 ))
-then
-chart_button MORE1      "MORE1"      red
-fi
-if (( hasMORE3 ))
-then
-chart_button MORE3      "MORE3"      red
-fi
-chart_button NET       "Network"     purple
-chart_button NETPACKET "Net Packets" purple
-if (( hasNETSIZE ))
-then
-chart_button NETSIZE   "Net Size" purple
-fi
-
-chart_button ADAPT_KBS  "Adapter KBs" green
-chart_button ADAPT_TPS  "Adapter Tps" green
-
-
-# ----- handle diskless servers
-if (( hasDISKs ))
-then
-chart_button DISKBUSY   "Disk Busy"  brown
-chart_button DISKBUSYu  "Unstacked"  brown
-chart_button DISKREAD   "Disk Read"  brown
-chart_button DISKREADu  "Unstacked"  brown
-chart_button DISKWRITE  "Disk Write" brown
-chart_button DISKWRITEu "Unstacked"  brown
-chart_button DISKBSIZE  "Disk BSize" brown
-chart_button DISKXFER   "DiskXfer"   brown
-chart_button DISKXFERu  "Unstacked"  brown
-fi
-
-if (( hasSERVICETIME ))
-then
-chart_button DISKSERV      "Disk Service"       brown
-chart_button DISKREADSERV  "Disk Read Service " brown
-chart_button DISKWRITESERV "Disk Write Service" brown
-chart_button DISKWAIT      "Disk Wait"          brown
-fi
-
-if (( hasDG ))
-then
-$ECHO '<br>' 
-chart_button DGBUSY   "Disk Grp Busy"  brown
-chart_button DGBUSYu  "Unstacked"      brown
-chart_button DGREAD   "Disk Grp Read"  brown
-chart_button DGREADu  "Unstacked"      brown
-chart_button DGWRITE  "Disk Grp Write" brown
-chart_button DGWRITEu "Unstacked"      brown
-chart_button DGSIZE   "Disk Grp BSize" brown
-chart_button DGXFER   "Disk Grp Xfer"  brown
-chart_button DGXFERu  "Unstacked"      brown
-fi
-
-if (( hasJFS ))
-then
-chart_button JFS "JFS" brown
-fi
-
-chart_button IPC "IPC" black
-
-if (( hasSEA || hasFC ))
-then
-$ECHO '<br>' 
-fi
-
-if (( hasSEA ))
-then
-chart_button SEA   "SEA" purple
-fi
-if (( hasSEAPACKET ))
-then
-chart_button SEAPACKET   "SEA Packets" purple
-fi
-if (( hasSEACHPHY ))
-then
-chart_button SEACHPHY   "SEA Physical" purple
-fi
-
-if (( hasFC ))
-then
-chart_button FCREAD      "FCread KBs"  green
-chart_button FCWRITE     "FCwrite KBs" green
-chart_button FCXFERIN    "FCxferIn"    green
-chart_button FCXFEROUT   "FCxferOut"   green
-fi
-
-else 
-# ------ Linux
- if (( hasLPARstats ))
- then
-chart_button PHYSICAL_CPU_LINUX "Physical CPU" black
-chart_button POOLIDLE_LINUX  "Pool Idle" black
- fi
-chart_button CPU_UTIL "CPU Util." red
-chart_button CPU_USE  "CPU Use"   red
-if ((hasCPUUTIL_ALL ))
- then
-	chart_button CPUUTIL_ALL "CPU All Util."     red
- fi
-if ((hasMHZ ))
- then
-	chart_button MHZ "MHz"      red
- fi
-if ((hasGPU ))
- then
-	# Work around the fact that some systems can have 1 GPU adapter/socket instead of the maximum of two
-	# Each adapter/socket has two GPUs, so it is two or four GPUs
-	grep ^GPU $INPUT | sed 's/$/,0,0/'  >$GPUINPUT
-	chart_button GPU_UTIL  "GPU Util." green
-	chart_button GPU_MEM   "GPU Mem."  green
-	chart_button GPU_TEMP  "GPU Temp." green
-	chart_button GPU_WATTS "GPU Watts" green
-	chart_button GPU_MHZ   "GPU MHz"   green
-	rm $GPUINPUT
- fi
-chart_button RUNQBLOCK       "RunQ Blocked"     red
-if (( hasPROCCOUNT ))
-then
-chart_button PROCCOUNT      "Procs"      red
-fi
-chart_button PSWITCH    "pSwitch"  red
-chart_button FORKEXEC   "ForkExec" red
-chart_button MEM_LINUX  "Memory"   blue
-chart_button SWAP_LINUX "Swap"     blue
-$ECHO '<br>' 
-# This is in the Linux section
-if (( hasMORE1 ))
-then
-chart_button MORE1      "MORE1"      red
-fi
-if (( hasMORE3 ))
-then
-chart_button MORE3      "MORE3"      red
-fi
-chart_button NET        "Network"    purple
-chart_button NETPACKET  "Net Packet" purple
-if (( hasDISKs ))
-then
-chart_button DISKBUSY   "Disk Busy"  brown
-chart_button DISKBUSYu  "Unstacked"  brown
-chart_button DISKREAD   "Disk Read"  brown
-chart_button DISKREADu  "Unstacked"  brown
-chart_button DISKWRITE  "Disk Write" brown
-chart_button DISKWRITEu "Unstacked"  brown
-chart_button DISKBSIZE  "Disk BSize" brown
-chart_button DISKXFER   "Disk Xfers" brown
-chart_button DISKXFERu  "Unstacked"  brown
-fi
-if (( hasDG ))
-then
-$ECHO '<br>' 
-chart_button DGBUSY   "Disk Grp Busy"  brown
-chart_button DGBUSYu  "Unstacked"      brown
-chart_button DGREAD   "Disk Grp Read"  brown
-chart_button DGREADu  "Unstacked"      brown
-chart_button DGWRITE  "Disk Grp write" brown
-chart_button DGWRITEu "Unstacked"      brown
-chart_button DGSIZE   "Disk Grp BSize" brown
-chart_button DGXFER   "Disk Grp Xfers" brown
-chart_button DGXFERu  "Unstacked"      brown
-fi
-if (( hasJFS ))
-then
-chart_button JFS      "JFS"            brown
-fi # JFS
-fi # Linux 
-
-chart_draw
-
-# Display the bottom text table with basic configuration data
-COUNT=`grep ZZZZ $INPUT | wc -l`
-grep "^AAA," $INPUT >$INFO
-$ECHO "<table><tr><td>"
-grep AAA,host, $INFO | sed 's/AAA,host,/<li>Host:/'
-grep AAA,date, $INFO | sed 's/AAA,date,/<li>Date:/'
-grep AAA,time, $INFO | sed 's/AAA,time,/<li>Time:/'
-grep AAA,interval, $INFO | sed 's/AAA,interval,/<li>Interval:/'
-$ECHO "<li>Snapshots:" $COUNT
-
-if (( isAIX))
-then
-$ECHO "<td>"
-grep ",lparstat -i," $INPUT >$INFO
-grep "Online Virtual CPU" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "Entitled Capacity" $INFO | grep -v "Pool" | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "Mode" $INFO | grep -v "Memory" | grep -v "Sub Proc" | grep -v Saving | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "Online Memory" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "Type" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-
-grep ",lsconf," $INPUT >$INFO
-$ECHO "<td>"
-grep "System Model" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "Serial Number" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "Processor Type" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "Implementation Mode" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "Processor Version" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-
-$ECHO "<td>"
-grep "Clock Speed" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "CPU Type" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "Kernel Type" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "LPAR Info" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-grep "Platform Firmware level" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-else
-$ECHO "<td>"
-grep AAA,cpus,  $INFO | sed 's/AAA,cpus,/<li>Number of CPUs:/'
-grep AAA,disks, $INFO | sed 's/AAA,disks,/<li>Number of Disks:/'
-grep "AAA,OS,Linux," $INFO | awk -F, '{ printf "<li>Linux Kernel %s\n", $4 }'
-grep "^BBBP," $INPUT >$INFO
-grep "/etc/release," $INFO | sed 's/PRETTY_NAME=//' | grep -v = | grep -v LSB_VERSION | awk -F, 'NR == 1{ printf "<li>Linux Version %s\n", $4 }'
-
-fi
-grep ",oslevel," $INPUT | tail -1 >$INFO
-$ECHO "<td valign=top>"
-cat $INFO | awk -F \" '{ printf "<li>AIX Level %s\n", $2 }'
-grep ",lparstat -i," $INPUT >$INFO
-grep "Power Saving Mode" $INFO | awk -F \" '{ printf "<li>%s\n", $2 }'
-$ECHO "<hr>"
-$ECHO "<li><a href=http://nmon.sourceforge.net/pmwiki.php?n=Site.Nmonchart>nmonchart</a> v" $nmonchart_version " by Nigel Griffiths @mr_nmon"
-if (( wantZOOM ))
-then
-$ECHO "<li>Zoom-In: Left-click-on-Graph, Drag-Right & Let-Go"
-$ECHO "<li>Zoom-Out: Right-click-on-Graph"
-fi
-$ECHO "</table>"
-
-html_end
-}
-
-################################### Main code starts here
-# -- Check the parameters
-if [[ "$1" == "" ]]
-then
-	$ECHO  $0 Error: No first parameter = nmon data file = filename.nmon
-	hint
-fi
-
-if [[ ! -r "$1" ]]
-then
-	$ECHO  $0 Error: Input file $1 is not readable
-	hint
-fi
-
-# --- save the parameters to avoid confusion
-ORIGINAL=$1
-
-if [[ "$2" == "" ]]
-then
-	# output file name missing, so use the input file name but replace .nmon with .html
-	# if it does not end with .nmon (really dumb idea) the .html is added anyway.
-	OUTPUT=${ORIGINAL%.nmon}.html
-else
-	OUTPUT=$2
-fi
-
-grep "^AAA,OS,Linux" $1>/dev/null
-if [[ $? == 0 ]]
-then
-        isLINUX=1
-        isAIX=0
-	grep "^LPAR,Shared" $1>/dev/null
-	if [[ $? == 0 ]]
-	then
-	    hasLPARstats=1
-	else
-	    hasLPARstats=0
-	fi
-else
-        isLINUX=0
-        isAIX=1
-	grep "^LPAR,Logical" $1>/dev/null
-	if [[ $? == 0 ]]
-	then
-	    hasLPARstats=1
-	else
-	    hasLPARstats=0
-	fi
-fi
-
-grep "^CPUMHZ," $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasCPUMHZ=1
-else
-        hasCPUMHZ=0
-fi
-
-grep "^PROCCOUNT," $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasPROCCOUNT=1
-else
-        hasPROCCOUNT=0
-fi
-
-grep "^MORE1," $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasMORE1=1
-else
-        hasMORE1=0
-fi
-
-grep "^MORE3," $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasMORE3=1
-else
-        hasMORE3=0
-fi
-
-grep "^AAA,steal,1" $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasSTEAL=1
-else
-        hasSTEAL=0
-fi
-
-grep "^NETSIZE,Network" $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasNETSIZE=1
-else
-        hasNETSIZE=0
-fi
-
-grep "^SEA,Shared" $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasSEA=1
-else
-        hasSEA=0
-fi
-
-grep "^SEACHPHY,Physical" $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasSEACHPHY=1
-else
-        hasSEACHPHY=0
-fi
-
-grep "^SEAPACKET,Shared" $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasSEAPACKET=1
-else
-        hasSEAPACKET=0
-fi
-
-grep "^JFSFILE" $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasJFS=1
-else
-        hasJFS=0
-fi
-
-grep "^DISKBUSY1," $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasDISKBUSY1=1
-else
-        hasDISKBUSY1=0
-        # Remove the line below if you want only the Top Disk graph for
-	# data collections with more than 150 disks, i.e. DISKBUSY1+ lines.
-        hasDISKBUSY1=1
-fi
-
-grep "^FCREAD," $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasFC=1
-else
-        hasFC=0
-fi
-
-
-grep "^TOP,+PID" $1>/dev/null
-if [[ $? == 0 ]]
-then
-        hasTOP=1
-else
-        hasTOP=0
-fi
-
-hasCPUUTIL_ALL=0
-hasMHZ=0
-hasGPU=0
-hasDISKs=1
-
-if ((isLINUX ))
-then
-	grep "^CPUUTIL_ALL" $1>/dev/null
-	if [[ $? == 0 ]]
-	then
-		hasCPUUTIL_ALL=1
-	fi
-
-	grep "^MHZ" $1>/dev/null
-	if [[ $? == 0 ]]
-	then
-		hasMHZ=1
-	fi
-
-	grep "^GPU" $1>/dev/null
-	if [[ $? == 0 ]]
-	then
-		hasGPU=1
-		GPUINPUT=/tmp/nmonchart.gpu.$$
-	fi
-
-	# Diskless machines are only found in the Linux environment
-	if [[ $(grep AAA,disks, $1 | awk -F "," '{ print $3 }') == 0 ]]
-	then
-		hasDISKs=0
-	fi
-fi
-
-# Disk Groups switched on
-hasDG=0
-grep "^DGBUSY" $1>/dev/null
-if [[ $? == 0 ]]
-then
-	hasDG=1
-fi
-
-# Disk Service Time and Wait Time
-hasSERVICETIME=0
-grep "^DISKSERV" $1>/dev/null
-if [[ $? == 0 ]]
-then
-	hasSERVICETIME=1
-fi
-
-# --- make temporary file names
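-# ($$ is the shell PID, so concurrent runs get unique temporary file names)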
-TMP=/tmp/nmonchart.tmp.$$
-SED=/tmp/nmonchart.sed.$$
-INFO=/tmp/nmonchart.info.$$
-TOP=/tmp/nmonchart.top.$$
-TOP20=/tmp/nmonchart.top20.$$
-TOPAWKS=/tmp/nmonchart.topawks.$$
-###$ECHO  tmp=$TMP sed=$SED
-
-# Strip Control-M from input file as some people don't know how to use FTP!!!
-# Strip out end-of-line commas and add zeros for missing data from topas output!!!
-# Strip out "-0.0" and "-1.0" from Linux files - normally used as warning of errors or missing data
-INPUT=/tmp/nmonchart.input.$$
-sed -e 's/
//' -e 's/,,$//' -e 's/,$//' -e 's/,,/,0,/g' -e 's/-0.0/0.0/g' -e 's/-1\.0/0.0/g' <$ORIGINAL >$INPUT
-
-# --- do the bulk of the work and save in tmp file
-generate >$TMP
-
-# --- Finally fix the time stamps 
-# Original Time and Date  in text format was too slow in graphing
-# grep ZZZZ $INPUT| sed -e 's?ZZZZ,?s/?' -e 's?,?/?' -e 's?,? on ?' -e 's?$?/?' >$SED
-
-# Changed to using the numeric Date() function 
-# Extract ZZZZ lines, transform with sed commands, but split them into files of fewer than 1000 lines, otherwise sed explodes
-# Change month names to numbers starting with zero: JAN to 0, FEB to 1 up to DEC to 11
-# Example ZZZZ,T0099,22:04:11,30-NOV-2015 to
-#         Date('2015', '10', '30', '22', '04', '11')
-if (( fastmode ))
-then
-awk -F, -v input="$INPUT" '
-BEGIN{OFS=","
-mno["JAN"]=0;mno["FEB"]=1;mno["MAR"]=2;mno["APR"]=3
-mno["MAY"]=4;mno["JUN"]=5;mno["JUL"]=6;mno["AUG"]=7
-mno["SEP"]=8;mno["OCT"]=9;mno["NOV"]=10;mno["DEC"]=11
-}
-/^ZZZZ,T/{
-  if(FILENAME==input){
-    h=substr($3,1,2);m=substr($3,4,2);s=substr($3,7,2)
-    d=substr($4,1,2);mm=mno[substr($4,4,3)];y=substr($4,8,4)
-    gtime[$2]=sprintf("Date(%4.4d, %2d, %2.2d, %2.2d, %2.2d, %2.2d)",y,mm,d,h,m,s)
-  }
-  next
-}
-{if(FILENAME==input){next}}
-/^,\[.*T[0-9][0-9][0-9][0-9]/{
-  tstamp=substr($0,4,5)
-  sub(tstamp,gtime[tstamp],$0)
-  print;next
-}
-{print}
-' $INPUT $TMP > $OUTPUT
-else
-grep ^ZZZZ,T $INPUT | sed -e 's/^M//' -e 's/-JAN-/- 0-/' -e 's/-FEB-/- 1-/' -e 's/-MAR-/- 2-/' -e 's/-APR-/- 3-/' -e 's/-MAY-/- 4-/' -e 's/-JUN-/- 5-/' -e 's/-JUL-/- 6-/' -e 's/-AUG-/- 7-/' -e 's/-SEP-/- 8-/' -e 's/-OCT-/- 9-/' -e 's/-NOV-/-10-/' -e 's/-DEC-/-11-/' | sed -e 's/^\(....\),\(T[0-9]*\),\(..\):\(..\):\(..\),\(..\)-\(..\)-\(....\)/s\/\2\/Date(\8, \7, \6, \3, \4, \5)\//' | split -l 999 - $SED 
-
-for i in $SED*
-do
-	sed -f $i $TMP >$OUTPUT
-	cp $OUTPUT $TMP  # ready for next loop
-done
-rm $SED*
-fi
-# --- make the output file user, group and other readable
-chmod ugo+r $OUTPUT
-
-# --- Cleanup
-rm $TMP $INFO $INPUT 
-
-if (( hasTOP ))
-then
-	rm $TOP $TOP20 $TOPAWKS 
-fi
-if (( hasDISKBUSY1 ))
-then
-	rm  -f $DISKBUSY $DISKTMP1 $DISKTMP2 $DISKALL $DISK20
-fi
diff --git a/tools/benchmark/live/lib/nmonmerge2 b/tools/benchmark/live/lib/nmonmerge2
deleted file mode 100755
index 559b1e286456d3f8fc920861fe59c8d928c4d9c6..0000000000000000000000000000000000000000
Binary files a/tools/benchmark/live/lib/nmonmerge2 and /dev/null differ
diff --git a/tools/benchmark/live/resources/get-stream-infos.sh b/tools/benchmark/live/resources/get-stream-infos.sh
deleted file mode 100755
index 5367924d48bb25a62c39a879dec71511be1cb1ed..0000000000000000000000000000000000000000
--- a/tools/benchmark/live/resources/get-stream-infos.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env bash
-
-USER="${1}"
-LIVES_USER="/home/${USER}/msinstance/conf/lives.json"
-LIVES_DEFAULT="/usr/lib/python3/dist-packages/mediaserver/services/configuration/lives.json"
-
-for live in ${LIVES_USER} ${LIVES_DEFAULT}; do
-    if grep -q '"STREAMS"' ${live} 2>/dev/null; then
-        sed -nE '/"STREAMS"/,/],/p' ${live}
-        break
-    fi
-done
-
-exit 0
diff --git a/tools/benchmark/live/resources/sysinfos.sh b/tools/benchmark/live/resources/sysinfos.sh
deleted file mode 100755
index e2faccf89af58e440ac56892aa9c2ff2fa47810c..0000000000000000000000000000000000000000
--- a/tools/benchmark/live/resources/sysinfos.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-
-DIR="$(dirname "$(readlink -e "${0}")")"
-CONFIG_FILE="${DIR}/../config"
-SYSINFO_FILE="${1}"
-LOG_FILE="${2}"
-
-cleanup() {
-    # Clean up in case SIGINT arrives during the network bench
-    ssh -l root "${MS_IP}"   -- "pkill -f dd"
-    ssh -l root "${LIVE_IP}" -- "pkill -f nc"
-    ssh -l root "${LIVE_IP}" -- "systemctl start nginx"
-
-    exit 1
-}
-
-trap cleanup SIGINT
-
-# Loading global functions
-source "${DIR}/../lib/functions.sh"
-
-# Configuration loading
-if [[ -e "${CONFIG_FILE}" ]]; then
-    source "${CONFIG_FILE}"
-else
-    echo "The configuration file \"${CONFIG_FILE}\" is missing"
-    exit 1
-fi
-
-get_stream_infos "${MS_IP}" "${MS_INSTANCE}"
-get_system_config "${MS_IP}" "Mediaserver"
-get_system_config "${LIVE_IP}" "Live server"
-get_net_bandwidth "${MS_IP}" "${LIVE_IP}"
-
-exit 0
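
The `trap cleanup SIGINT` pattern above guarantees that an interrupted benchmark does not leave dd or nc processes running on the remote hosts. A hedged Python sketch of the same idea (the host names are placeholders, not the real bench configuration):

#!/usr/bin/env python3
# Sketch of the script's trap-based cleanup: on SIGINT, undo in-flight
# benchmark state on the remote hosts, then exit non-zero.
import signal
import subprocess
import sys

MS_IP = 'ms.example.com'      # placeholder; the script reads it from config
LIVE_IP = 'live.example.com'  # placeholder; the script reads it from config

def cleanup(signum, frame):
    for host, cmd in ((MS_IP, 'pkill -f dd'),
                      (LIVE_IP, 'pkill -f nc'),
                      (LIVE_IP, 'systemctl start nginx')):
        subprocess.run(['ssh', '-l', 'root', host, '--', cmd], check=False)
    sys.exit(1)

signal.signal(signal.SIGINT, cleanup)
print('benchmarking... press Ctrl+C to trigger the cleanup')
signal.pause()  # stand-in for the actual benchmark work
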
diff --git a/tools/benchmark/live/resources/time_diff_test.sh b/tools/benchmark/live/resources/time_diff_test.sh
deleted file mode 100755
index 66e7a7c17146ec8d2edf239bab3eed423398133d..0000000000000000000000000000000000000000
--- a/tools/benchmark/live/resources/time_diff_test.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env bash
-
-# Global variables
-HLS_DIR="${1}"
-DURATION="${2}"              # Duration of the checks
-EXPECTED_PLAYLISTS_NB="${3}" # Number of playlists
-
-current_iteration=1
-failure=0
-
-# Loop until the targeted duration is reached
-while [[ ${current_iteration} -le ${DURATION} ]]; do
-    playlists_ls=$(ls -1 "${HLS_DIR}/"*.m3u8)
-    playlists_nb=$(echo "${playlists_ls}" | wc -l)
-    # For each m3u8 live playlist
-    for playlist in ${playlists_ls}; do
-        playlist_mtime=$(stat -c %Y ${playlist})
-        current_time=$(date +%s)
-        # If the mtime is more than 4s behind "now", the check is flagged as a failure and details are printed
-        # The mtime difference is computed directly in the "if" expression to save an assignment
-        if [[ $(( current_time - playlist_mtime )) -gt 4 ]]; then
-            cat << EOF
-$(basename ${playlist}) mtime older than 4s:
- => mtime:           $(date -d "@${playlist_mtime}")
- => comparison time: $(date -d "@${current_time}")
-EOF
-
-            failure=$(( failure + 1 ))
-            break
-        fi
-    done
-
-    # If we don't have the expected number of playlists, it is an anomaly
-    if [[ ${playlists_nb} -ne ${EXPECTED_PLAYLISTS_NB} ]]; then
-        echo "Wrong number of playlists, expected ${EXPECTED_PLAYLISTS_NB}, got ${playlists_nb}"
-        failure=$(( failure + 1 ))
-    fi
-
-    current_iteration=$(( current_iteration + 1 ))
-    sleep 1
-done
-
-# If any failure was recorded, stop the script with an error
-if [[ ${failure} -ge 1 ]]; then
-    echo "${failure} anomaly(ies) found during the test"
-
-    exit 1
-else
-    exit 0
-fi
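
The core of the deleted check is "a playlist is stale if its mtime lags now by more than 4 seconds". A minimal Python sketch of that test (the directory is a hypothetical example):

#!/usr/bin/env python3
# Sketch of the freshness check: list HLS playlists whose mtime is more
# than max_age seconds behind "now".
import glob
import os
import time

def stale_playlists(hls_dir, max_age=4):
    now = time.time()
    return [path for path in glob.glob(os.path.join(hls_dir, '*.m3u8'))
            if now - os.stat(path).st_mtime > max_age]

print(stale_playlists('/var/www/hls'))  # hypothetical HLS directory
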
diff --git a/tools/set_app_domain.py b/tools/set_app_domain.py
deleted file mode 100755
index e4c3e439eea8bb95cd6689227e2db94786b56f36..0000000000000000000000000000000000000000
--- a/tools/set_app_domain.py
+++ /dev/null
@@ -1,244 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-'''
-Script to set the domain of an existing MediaServer, Miris Manager or Monitor
-'''
-import os
-import re
-import subprocess
-import sys
-
-
-def log(text, error=False):
-    fo = sys.stderr if error else sys.stdout
-    print(text, file=fo)
-    fo.flush()
-
-
-def run_cmd(cmd, shell=False):
-    p = subprocess.run(cmd, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', check=False, shell=shell)
-    log(p.stdout)
-    if p.returncode != 0:
-        raise Exception('Command failed with code %s.' % p.returncode)
-    return p.returncode
-
-
-class SetAppDomain():
-    USAGE = '''USAGE: %s [-d] [-f] [-h] <app> <domain>
-    -d: Debug mode (can be started with non root users).
-    -f: Force mode (to force replacement of configuration even if there are warnings).
-    -h: Show this message.
-    app: The application for which the new domain should be set.
-         Possible values:
-           "ms" (MediaServer), "mm" (Miris Manager), "mon" (Monitor).
-         It is possible to specify which MediaServer instance should be targeted
-         by using this format: ms-<instance name> (for example ms-msuser).
-    domain: The new domain.''' % __file__
-    UNIX_USER_PATTERN = r'[a-z0-9\-]+'
-    DOMAIN_PATTERN = r'([a-z0-9\-]+\.)*[a-z0-9\-]+(\.[a-z]+){0,1}'
-
-    def __init__(self, *args):
-        args = list(args)
-        # Check if help is required
-        if '-h' in args:
-            log(self.USAGE)
-            sys.exit(0)
-        # Check if force mode is enabled
-        self.force = '-f' in args
-        if self.force:
-            args.remove('-f')
-        # Check that this script is run by root
-        self.debug = '-d' in args
-        if self.debug:
-            args.remove('-d')
-        whoami = subprocess.check_output(['whoami']).decode('utf-8').strip()
-        if whoami != 'root' and not self.debug:
-            log('This script should be run as root user.')
-            sys.exit(1)
-        # Parse args
-        if not args:
-            log('Not enough arguments.')
-            log(self.USAGE)
-            sys.exit(1)
-        new_domain = args.pop()
-        if not re.match(self.DOMAIN_PATTERN, new_domain):
-            log('The given domain "%s" does not match the expected pattern (%s).\n' % (new_domain, self.DOMAIN_PATTERN))
-            log(self.USAGE)
-            sys.exit(1)
-        # An empty string falls through to the invalid-app error below
-        app = args.pop() if args else ''
-        # Get Nginx conf path and instance user
-        key = None
-        instance = None
-        if app == 'mm':
-            nginx_conf = '/etc/nginx/sites-available/skyreach.conf'
-            key = 'CM_SERVER_NAME'
-        elif app == 'mon':
-            nginx_conf = '/etc/nginx/sites-available/msmonitor.conf'
-            key = 'MONITOR_SERVER_NAME'
-        elif app == 'ms' or app.startswith('ms-'):
-            if app.startswith('ms-'):
-                instance = app[3:].strip('. -\t\n')
-                if not re.match(self.UNIX_USER_PATTERN, instance):
-                    log('Invalid MediaServer instance requested. Validation regexp is: %s' % self.UNIX_USER_PATTERN)
-                    sys.exit(1)
-            else:
-                instance = 'msuser'
-            nginx_conf = '/etc/nginx/sites-available/mediaserver-%s.conf' % instance
-            if instance == 'msuser':
-                key = 'MS_SERVER_NAME'
-        else:
-            log('Invalid app name requested.')
-            log(self.USAGE)
-            sys.exit(1)
-        # Change domain
-        self.change_nginx_domain(nginx_conf, new_domain)
-        self.change_hosts_file(new_domain)
-        if key:
-            self.change_envsetup_conf(key, new_domain)
-        if app == 'mm':
-            warning = self.change_mm_domain(new_domain)
-        elif app == 'mon':
-            warning = None
-        else:
-            warning = self.change_ms_domain(new_domain, instance)
-        log('\033[92mDone\033[0m')
-        if warning:
-            log('\033[93mWarning:\033[0m')
-            log(warning)
-        sys.exit(0)
-
-    def change_nginx_domain(self, path, new_domain):
-        log('Checking Nginx configuration file "%s".' % path)
-        if not os.path.exists(path):
-            log('The configuration file does not exist.')
-            sys.exit(1)
-        with open(path, 'r') as fo:
-            vhost = fo.read()
-        new_vhost = ''
-        changed_lines = 0
-        for line in vhost.split('\n'):
-            if re.match(r'\s*server_name\s+([\w\-\_\.\ ]+);', line):
-                line = re.sub(r'server_name\s+([\w\-\_\.\ ]+);', 'server_name %s;' % new_domain, line)
-                changed_lines += 1
-            new_vhost += line + '\n'
-        if changed_lines != 2:
-            log('Warning: the number of server_name occurrences changed in the Nginx configuration is not the expected value (2) but %s.' % changed_lines)
-            if not self.force:
-                log('New configuration will be:')
-                log(new_vhost)
-                log('Use -f to force the replacement of the configuration.')
-                sys.exit(1)
-
-        new_vhost = new_vhost.strip() + '\n'
-        if new_vhost != vhost:
-            if self.debug:
-                log('New Nginx conf:')
-                log(new_vhost)
-            else:
-                with open(path, 'w') as fo:
-                    fo.write(new_vhost)
-                log('The configuration file "%s" has been updated.' % path)
-                run_cmd(['nginx', '-t'])
-                run_cmd(['systemctl', 'restart', 'nginx'])
-        else:
-            log('The configuration file "%s" is already up to date.' % path)
-
-    def change_hosts_file(self, new_domain):
-        with open('/etc/hosts', 'r') as fo:
-            hosts = fo.read()
-        ori_hosts = hosts
-        if ' ' + new_domain in hosts:
-            hosts = hosts.replace(' ' + new_domain, '')
-        elif '\t' + new_domain in hosts:
-            hosts = hosts.replace('\t' + new_domain, '')
-        hosts = hosts.strip()
-        new_lines = []
-        for line in hosts.split('\n'):
-            if line.startswith('127.0.0.1'):
-                line += ' ' + new_domain
-            new_lines.append(line)
-        # No spurious leading newline, so the comparison below is meaningful
-        new_hosts = '\n'.join(new_lines) + '\n'
-        if new_hosts != ori_hosts:
-            if self.debug:
-                log('New hosts:')
-                log(new_hosts)
-            else:
-                with open('/etc/hosts', 'w') as fo:
-                    fo.write(new_hosts)
-                log('The "/etc/hosts" file has been update.')
-                try:
-                    run_cmd(['systemctl', 'restart', 'nscd'])
-                except Exception as nscd_err:
-                    log(nscd_err)
-        else:
-            log('The "/etc/hosts" file is already up to date.')
-
-    def change_envsetup_conf(self, key, new_domain):
-        confs = (
-            'conf.sh',
-            'auto-generated-conf.sh',
-        )
-        replaced = False
-        for path in confs:
-            if os.path.exists(path):
-                with open(path, 'r') as fo:
-                    content = fo.read()
-                if key in content:
-                    content = re.sub(r'%s=.*' % key, '%s=\'%s\'' % (key, new_domain), content)
-                    with open(path, 'w') as fo:
-                        fo.write(content)
-                    replaced = True
-                    log('Envsetup configuration file "%s" updated.' % path)
-                    break
-        if not replaced:
-            with open(confs[0], 'a') as fo:
-                fo.write('\n%s=\'%s\'' % (key, new_domain))
-            log('Envsetup configuration file "%s" updated.' % confs[0])
-
-    def change_ms_domain(self, new_domain, instance):
-        try:
-            import mediaserver
-            ms_path = mediaserver.__path__[0]
-            new_url = 'https://%s' % new_domain
-            log('Assuming that the new url is using HTTPS: "%s"' % new_url)
-            cmds = [
-                # set site url in site settings
-                ['python3', os.path.join(ms_path, 'scripts', 'mssiteconfig.py'), instance, 'site_url="%s"' % new_url],
-                # reset all local resources managers
-                ['python3', os.path.join(ms_path, 'scripts', 'reset_service_resources.py'), instance, 'local'],
-                # change configuration of celerity in MS and in workers
-                ['python3', os.path.join(ms_path, 'scripts', 'celerity_config_updater.py'), 'update', instance],
-                # restart ms
-                ['mscontroller.py', 'restart', '-u', instance],
-            ]
-            for cmd in cmds:
-                run_cmd(cmd)
-        except Exception as e:
-            log('Unable to set domain in MediaServer database and Celerity config:\n%s' % e)
-            sys.exit(1)
-        else:
-            log('Domain changed in MediaServer database and Celerity config.')
-        msg = 'Some steps to change the domain should be done manually:'
-        msg += '\n  - Change the domain of MediaServer used in Miris Manager stations configuration.'
-        return msg
-
-    def change_mm_domain(self, new_domain):
-        try:
-            new_url = 'https://%s' % new_domain
-            log('Assuming that the new url is using HTTPS: "%s"' % new_url)
-            # set site url in settings
-            run_cmd(['sed', '-i', 's@^#*\\s*SITE_URL.*@SITE_URL = \'%s\'@' % new_url, '/home/skyreach/skyreach_data/private/settings_override.py'])
-            # restart service
-            run_cmd(['systemctl', 'restart', 'skyreach'])
-        except Exception as e:
-            log('Unable to set domain in Miris Manager database:\n%s' % e)
-            sys.exit(1)
-        else:
-            log('Domain changed in Miris Manager database.')
-        msg = 'Some steps to change the domain should be done manually:'
-        msg += '\n  - Change the url of Miris Manager in the related MediaServer.'
-        return msg
-
-
-if __name__ == '__main__':
-    SetAppDomain(*sys.argv[1:])
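
change_nginx_domain() above relies on replacing every server_name directive and then checking that exactly two lines changed (one per HTTP/HTTPS server block). A standalone sketch of that substitution and count check (the vhost text is a made-up sample):

#!/usr/bin/env python3
# Sketch of the server_name rewrite and the occurrence count used as a
# sanity check by change_nginx_domain() (expected value: 2).
import re

SAMPLE_VHOST = '''server { listen 80; server_name old.example.com; }
server { listen 443 ssl; server_name old.example.com; }'''

def set_server_name(vhost, new_domain):
    new_vhost, count = re.subn(r'server_name\s+[\w\-. ]+;',
                               'server_name %s;' % new_domain, vhost)
    return new_vhost, count

new_vhost, count = set_server_name(SAMPLE_VHOST, 'new.example.com')
assert count == 2, 'unexpected number of server_name directives'
print(new_vhost)
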