diff --git a/.flake8 b/.flake8
index 1059d326dce56f5fd173fc7522745891dba6188f..7b5099d7f590998c97e98bb3ffb9b72af3268153 100644
--- a/.flake8
+++ b/.flake8
@@ -7,6 +7,7 @@ ignore =
     W505
 
 per-file-ignores =
+    roles/elastic.elasticsearch/*:E713
     roles/manager/files/set_site_url.py:E402
     library/*:E402
     library/nmcli.py:E402,F401
diff --git a/.gitignore b/.gitignore
index 824159af08e7abae5abd61ea14fa16fe7462f365..6245fd6be87d36dba9f4572d4f037d11abfad1bf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,7 @@ inventories/local*/host_vars/localhost.yml
 inventories/offline*/host_vars/localhost.yml
 playbooks/_*
 roles/_*
+roles/elastic.elasticsearch
 ./logs/
 log/
 
diff --git a/.yamllint b/.yamllint
index faf11063e66bd8128c73774c63955632c99bd0e7..4ae768fda42381b2b6f9e33889b73dd2f4f9567c 100644
--- a/.yamllint
+++ b/.yamllint
@@ -4,6 +4,7 @@ extends: default
 
 ignore: |
   .venv/
+  roles/elastic.elasticsearch/
 
 rules:
   braces:
diff --git a/Makefile b/Makefile
index 1b769c6542f9f0f5de631b1f85e7638ed9680d05..b2c6d1a758a08187b30ad029f0f9a4f84c8fde99 100644
--- a/Makefile
+++ b/Makefile
@@ -5,6 +5,7 @@ PIP_BIN = $(shell command -v $(VENV)/bin/pip3 || command -v pip3 || echo pip3)
 PIP_COMPILE_BIN = $(shell command -v $(VENV)/bin/pip-compile || command -v pip-compile)
 ANSIBLE_BIN = $(shell command -v ansible || command -v $(VENV)/bin/ansible)
 ANSIBLE_PLAYBOOK_BIN = $(shell command -v ansible-playbook || command -v $(VENV)/bin/ansible-playbook)
+ANSIBLE_GALAXY_BIN = $(shell command -v ansible-galaxy || command -v $(VENV)/bin/ansible-galaxy)
 ANSIBLE_LINT_BIN = $(shell command -v ansible-lint || command -v $(VENV)/bin/ansible-lint)
 YAMLLINT_BIN = $(shell command -v yamllint || command -v $(VENV)/bin/yamllint)
 FLAKE8_BIN = $(shell command -v flake8 || command -v $(VENV)/bin/flake8)
@@ -38,6 +39,8 @@ venv:
 install: venv
 	$(PIP_BIN) install -U pip wheel
 	$(PIP_BIN) install -r requirements.txt
+	$(ANSIBLE_GALAXY_BIN) install -r requirements.yml
+
 
 .PHONY: install-dev
 ## install-dev: Install development requirements
diff --git a/playbooks/bench.yml b/playbooks/bench.yml
index 5034ad59d3f1e734ded259a18e5870d56cf3d436..6ccc264a01a06d36ebd11147f38e8bc7d5cfbcec 100755
--- a/playbooks/bench.yml
+++ b/playbooks/bench.yml
@@ -11,11 +11,49 @@
   tags: bench_server
   roles:
     - bench-server
+  tasks:
+    - name: restart bench-server
+      service:
+        name: bench-server
+        state: restarted
+      tags: [ 'never', 'prepare-bench' ]
 
 - name: DEPLOY BENCHMARK WORKERS
   hosts: bench_worker
   tags: bench_worker
   roles:
     - bench-worker
+  tasks:
+    - name: restart bench-worker
+      service:
+        name: bench-worker
+        state: restarted
+      tags: [ 'never', 'prepare-bench' ]
+
+- name: DEPLOY ELASTIC KIBANA SERVER
+  hosts: elastic
+  vars:
+    es_heap_size: 2g
+    es_config:
+      network.host: "{{ hostvars[groups['elastic'][0]]['ansible_default_ipv4']['address'] }}"
+      node.data: true
+      node.master: true
+      cluster.initial_master_nodes: "{{ hostvars[groups['elastic'][0]]['ansible_hostname'] }}"
+    kibana_server_host: "{{ hostvars[groups['elastic'][0]]['ansible_default_ipv4']['address'] }}"
+    elastic_host: "{{ es_config['network.host'] }}"
+    es_api_host: "{{ es_config['network.host'] }}"
+  tags: [ 'never', 'monbench' ]
+  roles:
+    - elastic
+
+- name: DEPLOY METRICBEAT WORKERS
+  hosts: mediaserver,postgres
+  tags: [ 'never', 'monbench' ]
+  vars:
+    kibana_server_host: "{{ hostvars[groups['elastic'][0]]['ansible_default_ipv4']['address'] }}"
+    elastic_host: "{{ hostvars[groups['elastic'][0]]['ansible_default_ipv4']['address'] }}"
+    es_api_host: "{{ hostvars[groups['elastic'][0]]['ansible_default_ipv4']['address'] }}"
+  roles:
+    - metricbeat
 
 ...
diff --git a/requirements.yml b/requirements.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5f5264002c53a0262f89a0867806a6139045f126
--- /dev/null
+++ b/requirements.yml
@@ -0,0 +1,5 @@
+---
+- src: elastic.elasticsearch
+  version: 7.8.1
+
+...
diff --git a/roles/elastic/defaults/main.yml b/roles/elastic/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..172ecc50a801a0dfbd5a28cc1457f3c4614eeb62
--- /dev/null
+++ b/roles/elastic/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+kibana_default_port: 5601
+kibana_server_host: localhost
+
+...
diff --git a/roles/elastic/handlers/main.yml b/roles/elastic/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..65d7d70557485bf967156b1feea618560fcca49f
--- /dev/null
+++ b/roles/elastic/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+- name: restart kibana
+  service:
+    name: kibana
+    state: restarted
+
+- name: restart apm-server
+  service:
+    name: apm-server
+    state: restarted
+
+...
diff --git a/roles/elastic/meta/main.yml b/roles/elastic/meta/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f4b7b68e04673547b39cfe665f6daaa2ec9fdf4b
--- /dev/null
+++ b/roles/elastic/meta/main.yml
@@ -0,0 +1,5 @@
+---
+dependencies:
+  - role: elastic.elasticsearch
+
+...
diff --git a/roles/elastic/tasks/main.yml b/roles/elastic/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a8727e84b95c03f66f1b65daa0a2dcb3d2fbf2e1
--- /dev/null
+++ b/roles/elastic/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+- name: install kibana package
+  apt:
+    pkg:
+      - kibana
+    state: latest
+
+- name: deploy kibana configuration
+  template:
+    src: kibana.yml.j2
+    dest: /etc/kibana/kibana.yml
+  notify: restart kibana
+
+- name: install apm-server package
+  apt:
+    pkg:
+      - apm-server
+    state: latest
+
+- name: deploy apm-server configuration
+  template:
+    src: apm-server.yml.j2
+    dest: /etc/apm-server/apm-server.yml
+  notify: restart apm-server
+
+...
diff --git a/roles/elastic/templates/apm-server.yml.j2 b/roles/elastic/templates/apm-server.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c5b844631aed83aa2484741997c7d6a651711695
--- /dev/null
+++ b/roles/elastic/templates/apm-server.yml.j2
@@ -0,0 +1,1204 @@
+######################### APM Server Configuration #########################
+
+################################ APM Server ################################
+
+apm-server:
+  # Defines the host and port the server is listening on. Use "unix:/path/to.sock" to listen on a unix domain socket.
+  host: "{{ kibana_server_host }}:8200"
+
+  # Maximum permitted size in bytes of a request's header accepted by the server to be processed.
+  #max_header_size: 1048576
+
+  # Maximum amount of time to wait for the next incoming request before the underlying connection is closed.
+  #idle_timeout: 45s
+
+  # Maximum permitted duration for reading an entire request.
+  #read_timeout: 30s
+
+  # Maximum permitted duration for writing a response.
+  #write_timeout: 30s
+
+  # Maximum duration before releasing resources when shutting down the server.
+  #shutdown_timeout: 5s
+
+  # Maximum permitted size in bytes of an event accepted by the server to be processed.
+  #max_event_size: 307200
+
+  # Maximum number of new connections to accept simultaneously (0 means unlimited).
+  #max_connections: 0
+
+  # If true (default), APM Server captures the IP of the instrumented service
+  # or the IP and User Agent of the real user (RUM requests).
+  #capture_personal_data: true
+
+  # Enable APM Server Golang expvar support (https://golang.org/pkg/expvar/).
+  #expvar:
+    #enabled: false
+
+    # Url to expose expvar.
+    #url: "/debug/vars"
+
+  # Instrumentation support for the server's HTTP endpoints and event publisher.
+  #instrumentation:
+    # Set to true to enable instrumentation of the APM Server itself.
+    #enabled: false
+
+    # Environment in which the APM Server is running (e.g. staging, production, etc.)
+    #environment: ""
+
+    # Remote hosts to report instrumentation results to.
+    #hosts:
+    #  - http://remote-apm-server:8200
+
+    # API Key for the remote APM Server(s).
+    # If api_key is set then secret_token will be ignored.
+    #api_key:
+
+    # Secret token for the remote APM Server(s).
+    #secret_token:
+
+    # Enable profiling of the server, recording profile samples as events.
+    #
+    # This feature is experimental.
+    #profiling:
+      #cpu:
+        # Set to true to enable CPU profiling.
+        #enabled: false
+        #interval: 60s
+        #duration: 10s
+      #heap:
+        # Set to true to enable heap profiling.
+        #enabled: false
+        #interval: 60s
+
+  # A pipeline is a definition of processors applied to documents when ingesting them to Elasticsearch.
+  # Using pipelines involves two steps:
+  # (1) registering a pipeline
+  # (2) applying a pipeline during data ingestion (see `output.elasticsearch.pipeline`)
+  #
+  # You can manually register a pipeline, or use this configuration option to ensure
+  # the pipeline is loaded and registered at the configured Elasticsearch instances.
+  # Find the default pipeline configuration at `ingest/pipeline/definition.json`.
+  # Automatic pipeline registration requires the `output.elasticsearch` to be enabled and configured.
+  #register.ingest.pipeline:
+    # Registers APM pipeline definition in Elasticsearch on APM Server startup. Defaults to true.
+    #enabled: true
+    # Overwrites existing APM pipeline definition in Elasticsearch. Defaults to false.
+    #overwrite: false
+
+
+  #---------------------------- APM Server - Secure Communication with Agents ----------------------------
+
+  # Enable secure communication between APM agents and the server. By default ssl is disabled.
+  #ssl:
+    #enabled: false
+
+    # Path to file containing the certificate for server authentication.
+    # Needs to be configured when ssl is enabled.
+    #certificate: ''
+
+    # Path to file containing server certificate key.
+    # Needs to be configured when ssl is enabled.
+    #key: ''
+
+    # Optional configuration options for ssl communication.
+
+    # Passphrase for decrypting the Certificate Key.
+    # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
+    #key_passphrase: ''
+
+    # List of supported/valid protocol versions. By default TLS versions 1.1 up to 1.3 are enabled.
+    #supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
+
+    # Configure cipher suites to be used for SSL connections.
+    # Note that cipher suites are not configurable for TLS 1.3.
+    #cipher_suites: []
+
+    # Configure curve types for ECDHE based cipher suites.
+    #curve_types: []
+
+    # Following options only concern requiring and verifying client certificates provided by the agents.
+    # Providing a client certificate is currently only supported by the RUM agent through
+    # browser configured certificates and Jaeger agents connecting via gRPC.
+    #
+    # Configure a list of root certificate authorities for verifying client certificates.
+    #certificate_authorities: []
+    #
+    # Configure which type of client authentication is supported.
+    # Options are `none`, `optional`, and `required`.
+    # Default is `none`. If `certificate_authorities` are configured,
+    # the value for `client_authentication` is automatically changed to `required`.
+    #client_authentication: "none"
+
+  # The APM Server endpoints can be secured by configuring a secret token or enabling the usage of API keys. Both
+  # options can be enabled in parallel, allowing Elastic APM agents to choose whichever mechanism they support.
+  # As soon as one of the options is enabled, requests without a valid token are denied by the server. An exception
+  # to this are requests to any enabled RUM endpoint. RUM endpoints are generally not secured by any token.
+  #
+  # Configure authorization via a common `secret_token`. By default it is disabled.
+  # Agents include the token in the following format: Authorization: Bearer <secret-token>.
+  # It is recommended to use an authorization token in combination with SSL enabled,
+  # and save the token in the apm-server keystore.
+  #secret_token:
+
+  # Enable API key authorization by setting enabled to true. By default API key support is disabled.
+  # Agents include a valid API key in the following format: Authorization: ApiKey <token>.
+  # The key must be the base64 encoded representation of the API key's "id:key".
+  # This is an experimental feature, use with care.
+  #api_key:
+    #enabled: false
+
+    # Restrict how many unique API keys are allowed per minute. Should be set to at least the amount of different
+    # API keys configured in your monitored services. Every unique API key triggers one request to Elasticsearch.
+    #limit: 100
+
+    # API keys need to be fetched from Elasticsearch. If nothing is configured, configuration settings from the
+    # output section will be reused.
+    # Note that configuration needs to point to a secured Elasticsearch cluster that is able to serve API key requests.
+    #elasticsearch:
+      #hosts: ["localhost:9200"]
+
+      #protocol: "http"
+
+      # Username and password are only needed for the apm-server apikey sub-command, and they are ignored otherwise
+      # See `apm-server apikey --help` for details.
+      #username: "elastic"
+      #password: "changeme"
+
+      # Optional HTTP Path.
+      #path: ""
+
+      # Proxy server url.
+      #proxy_url: ""
+      #proxy_disable: false
+
+      # Configure http request timeout before failing a request to Elasticsearch.
+      #timeout: 5s
+
+      # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
+      #ssl.enabled: true
+
+      # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
+      # Configure SSL verification mode. If `none` is configured, all server hosts
+      # and certificates will be accepted. In this mode, SSL based connections are
+      # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+      # `full`.
+      #ssl.verification_mode: full
+
+      # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+      # 1.2 are enabled.
+      #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+      # List of root certificates for HTTPS server verifications.
+      #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+      # Certificate for SSL client authentication.
+      #ssl.certificate: "/etc/pki/client/cert.pem"
+
+      # Client Certificate Key
+      #ssl.key: "/etc/pki/client/cert.key"
+
+      # Optional passphrase for decrypting the Certificate Key.
+      # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
+      #ssl.key_passphrase: ''
+
+      # Configure cipher suites to be used for SSL connections.
+      #ssl.cipher_suites: []
+
+      # Configure curve types for ECDHE based cipher suites.
+      #ssl.curve_types: []
+
+      # Configure what types of renegotiation are supported. Valid options are
+      # never, once, and freely. Default is never.
+      #ssl.renegotiation: never
+
+
+  #---------------------------- APM Server - RUM Real User Monitoring ----------------------------
+
+  # Enable Real User Monitoring (RUM) Support. By default RUM is disabled.
+  # RUM does not support token based authorization. Enabled RUM endpoints will not require any authorization
+  # token configured for other endpoints.
+  #rum:
+    #enabled: false
+
+    #event_rate:
+
+      # Defines the maximum number of events allowed to be sent to the APM Server RUM
+      # endpoint per IP per second. Defaults to 300.
+      #limit: 300
+
+      # An LRU cache is used to keep a rate limit per IP for the most recently seen IPs.
+      # This setting defines the number of unique IPs that can be tracked in the cache.
+      # Sites with many concurrent clients should consider increasing this limit. Defaults to 1000.
+      #lru_size: 1000
+
+    #-- General RUM settings
+
+    # A list of permitted origins for real user monitoring.
+    # User-agents will send an origin header that will be validated against this list.
+    # An origin is made of a protocol scheme, host and port, without the url path.
+    # Allowed origins in this setting can have * to match anything (eg.: http://*.example.com)
+    # If an item in the list is a single '*', everything will be allowed.
+    #allow_origins : ['*']
+
+    # A list of Access-Control-Allow-Headers to allow RUM requests, in addition to "Content-Type",
+    # "Content-Encoding", and "Accept"
+    #allow_headers : []
+
+    # Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes.
+    # If the regexp matches, the stacktrace frame is considered to be a library frame.
+    #library_pattern: "node_modules|bower_components|~"
+
+    # Regexp to be matched against a stacktrace frame's `file_name`.
+    # If the regexp matches, the stacktrace frame is not used for calculating error groups.
+    # The default pattern excludes stacktrace frames that have a filename starting with '/webpack'
+    #exclude_from_grouping: "^/webpack"
+
+    # If a source map has previously been uploaded, source mapping is automatically applied
+    # to all error and transaction documents sent to the RUM endpoint.
+    #source_mapping:
+
+      # Sourcemapping is enabled by default.
+      #enabled: true
+
+      # Source maps are always fetched from Elasticsearch, by default using the output.elasticsearch configuration.
+      # A different instance must be configured when using any other output.
+      # This setting only affects sourcemap reads - the output determines where sourcemaps are written.
+      #elasticsearch:
+        # Array of hosts to connect to.
+        # Scheme and port can be left out and will be set to the default (`http` and `9200`).
+        # In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
+        # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
+        # hosts: ["localhost:9200"]
+
+        # Protocol - either `http` (default) or `https`.
+        #protocol: "https"
+
+        # Authentication credentials - either API key or username/password.
+        #api_key: "id:api_key"
+        #username: "elastic"
+        #password: "changeme"
+
+      # The `cache.expiration` determines how long a source map should be cached before fetching it again from Elasticsearch.
+      # Note that values configured without a time unit will be interpreted as seconds.
+      #cache:
+        #expiration: 5m
+
+      # Source maps are stored in a separate index.
+      # If the default index pattern for source maps at 'outputs.elasticsearch.indices'
+      # is changed, a matching index pattern needs to be specified here.
+      #index_pattern: "apm-*-sourcemap*"
+
+  #---------------------------- APM Server - Agent Configuration ----------------------------
+
+  # When using APM agent configuration, information fetched from Kibana will be cached in memory for some time.
+  # Specify cache key expiration via this setting. Default is 30 seconds.
+  #agent.config.cache.expiration: 30s
+
+  #kibana:
+    # For APM Agent configuration in Kibana, enabled must be true.
+    #enabled: false
+
+    # Scheme and port can be left out and will be set to the default (`http` and `5601`).
+    # In case you specify an additional path, the scheme is required: `http://localhost:5601/path`.
+    # IPv6 addresses should always be defined as: `https://[2001:db8::1]:5601`.
+    #host: "localhost:5601"
+
+    # Optional protocol and basic auth credentials.
+    #protocol: "https"
+    #username: "elastic"
+    #password: "changeme"
+
+    # Optional HTTP path.
+    #path: ""
+
+    # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
+    #ssl.enabled: true
+
+    # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
+    # Configure SSL verification mode. If `none` is configured, all server hosts
+    # and certificates will be accepted. In this mode, SSL based connections are
+    # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+    # `full`.
+    #ssl.verification_mode: full
+
+    # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+    # 1.2 are enabled.
+    #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+    # List of root certificates for HTTPS server verifications.
+    #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+    # Certificate for SSL client authentication.
+    #ssl.certificate: "/etc/pki/client/cert.pem"
+
+    # Client Certificate Key
+    #ssl.key: "/etc/pki/client/cert.key"
+
+    # Optional passphrase for decrypting the Certificate Key.
+    # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
+    #ssl.key_passphrase: ''
+
+    # Configure cipher suites to be used for SSL connections.
+    #ssl.cipher_suites: []
+
+    # Configure curve types for ECDHE based cipher suites.
+    #ssl.curve_types: []
+
+  #---------------------------- APM Server - ILM Index Lifecycle Management ----------------------------
+
+  #ilm:
+    # Supported values are `auto`, `true` and `false`.
+    # `true`: Make use of Elasticsearch's Index Lifecycle Management (ILM) for APM indices. If no Elasticsearch output is
+    # configured or the configured instance does not support ILM, APM Server cannot apply ILM and must create
+    # unmanaged indices instead.
+    # `false`: APM Server does not make use of ILM in Elasticsearch.
+    # `auto`: If an Elasticsearch output is configured with default index and indices settings, and the configured
+    # Elasticsearch instance supports ILM, `auto` will resolve to `true`. Otherwise `auto` will resolve to `false`.
+    # Default value is `auto`.
+    #enabled: "auto"
+
+    #setup:
+      # Only disable setup if you want to set up everything related to ILM on your own.
+      # When setup is enabled, the APM Server creates:
+      # - aliases and ILM policies if `apm-server.ilm.enabled` resolves to `true`.
+      # - An ILM specific template per event type. This is required to map ILM aliases and policies to indices. In case
+      # ILM is disabled, the templates will be created without any ILM settings.
+      # Be aware that if you turn off setup, you need to manually manage event type specific templates on your own.
+      # If you simply want to disable ILM, use the above setting, `apm-server.ilm.enabled`, instead.
+      # Defaults to true.
+      #enabled: true
+
+      # Configure whether or not existing policies and ILM related templates should be updated. This needs to be
+      # set to true when customizing your policies.
+      # Defaults to false.
+      #overwrite: false
+
+      # Set `require_policy` to `false` when policies are set up outside of APM Server but referenced here.
+      # Default value is `true`.
+      #require_policy: true
+
+      # The configured event types and policies will be merged with the default setup. You only need to configure
+      # the mappings that you want to customize.
+      #mapping:
+        #- event_type: "error"
+        #  policy_name: "apm-rollover-30-days"
+        #- event_type: "span"
+        #  policy_name: "apm-rollover-30-days"
+        #- event_type: "transaction"
+        #  policy_name: "apm-rollover-30-days"
+        #- event_type: "metric"
+        #  policy_name: "apm-rollover-30-days"
+
+      # Configured policies are added to pre-defined default policies.
+      # If a policy with the same name as a default policy is configured, the configured policy overwrites the default policy.
+      #policies:
+        #- name: "apm-rollover-30-days"
+          #policy:
+            #phases:
+              #hot:
+                #actions:
+                  #rollover:
+                    #max_size: "50gb"
+                    #max_age: "30d"
+                  #set_priority:
+                    #priority: 100
+              #warm:
+                #min_age: "30d"
+                #actions:
+                  #set_priority:
+                    #priority: 50
+                  #readonly: {}
+
+
+
+  #---------------------------- APM Server - Experimental Jaeger integration ----------------------------
+
+  # When enabling Jaeger integration, APM Server acts as Jaeger collector. It supports jaeger.thrift over HTTP
+  # and gRPC. This is an experimental feature, use with care.
+  #jaeger:
+    #grpc:
+      # Set to true to enable the Jaeger gRPC collector service.
+      #enabled: false
+
+      # Defines the gRPC host and port the server is listening on.
+      # Defaults to the standard Jaeger gRPC collector port 14250.
+      #host: "localhost:14250"
+
+      # Set to the name of a process tag to use for authorizing
+      # Jaeger agents.
+      #
+      # The tag value should have the same format as an HTTP
+      # Authorization header, i.e. "Bearer <secret_token>" or
+      # "ApiKey <base64(id:key)>".
+      #
+      # By default (if the auth_tag value is empty), authorization
+      # does not apply to Jaeger agents.
+      #auth_tag: ""
+
+    #http:
+      # Set to true to enable the Jaeger HTTP collector endpoint.
+      #enabled: false
+
+      # Defines the HTTP host and port the server is listening on.
+      # Defaults to the standard Jaeger HTTP collector port 14268.
+      #host: "localhost:14268"
+
+#================================= General =================================
+
+# Data is buffered in a memory queue before it is published to the configured output.
+# The memory queue will present all available events (up to the outputs
+# bulk_max_size) to the output, the moment the output is ready to serve
+# another batch of events.
+#queue:
+  # Queue type by name (default 'mem').
+  #mem:
+    # Max number of events the queue can buffer.
+    #events: 4096
+
+    # Hints the minimum number of events stored in the queue,
+    # before providing a batch of events to the outputs.
+    # The default value is set to 2048.
+    # A value of 0 ensures events are immediately available
+    # to be sent to the outputs.
+    #flush.min_events: 2048
+
+    # Maximum duration after which events are available to the outputs,
+    # if the number of events stored in the queue is < `flush.min_events`.
+    #flush.timeout: 1s
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================= Template =================================
+
+# A template is used to set the mapping in Elasticsearch.
+# By default template loading is enabled and the template is loaded.
+# These settings can be adjusted to load your own template or overwrite existing ones.
+
+# Set to false to disable template loading.
+#setup.template.enabled: true
+
+# Template name. By default the template name is "apm-%{[observer.version]}"
+# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
+#setup.template.name: "apm-%{[observer.version]}"
+
+# Template pattern. By default the template pattern is "apm-%{[observer.version]}-*" to apply to the default index settings.
+# The first part is the version of apm-server and then -* is used to match all daily indices.
+# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
+#setup.template.pattern: "apm-%{[observer.version]}-*"
+
+# Path to fields.yml file to generate the template.
+#setup.template.fields: "${path.config}/fields.yml"
+
+# Overwrite existing template.
+#setup.template.overwrite: false
+
+# Elasticsearch template settings.
+#setup.template.settings:
+
+  # A dictionary of settings to place into the settings.index dictionary
+  # of the Elasticsearch template. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
+  #index:
+    #number_of_shards: 1
+    #codec: best_compression
+    #number_of_routing_shards: 30
+    #mapping.total_fields.limit: 2000
+
+#============================= Elastic Cloud =============================
+
+# These settings simplify using APM Server with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` option.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =================================
+
+# Configure the output to use when sending the data collected by apm-server.
+
+#-------------------------- Elasticsearch output --------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (`http` and `9200`).
+  # In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
+  # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
+  hosts: ["{{ elastic_host }}:9200"]
+
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Set gzip compression level.
+  #compression_level: 0
+
+  # Protocol - either `http` (default) or `https`.
+  #protocol: "https"
+
+  # Authentication credentials - either API key or username/password.
+  #api_key: "id:api_key"
+  #username: "elastic"
+  #password: "changeme"
+
+  # Dictionary of HTTP parameters to pass within the url with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Number of workers per Elasticsearch host.
+  #worker: 1
+
+  # By using the configuration below, APM documents are stored to separate indices,
+  # depending on their `processor.event`:
+  # - error
+  # - transaction
+  # - span
+  # - sourcemap
+  #
+  # The indices are all prefixed with `apm-%{[observer.version]}`.
+  # To allow managing indices based on their age, all indices (except for sourcemaps)
+  # end with the information of the day they got indexed.
+  # e.g. "apm-7.3.0-transaction-2019.07.20"
+  #
+  # Be aware that you can only specify one Elasticsearch template.
+  # If you modify the index patterns you must also update these configurations accordingly,
+  # as they need to be aligned:
+  # * `setup.template.name`
+  # * `setup.template.pattern`
+  #index: "apm-%{[observer.version]}-%{+yyyy.MM.dd}"
+  #indices:
+  #  - index: "apm-%{[observer.version]}-sourcemap"
+  #    when.contains:
+  #      processor.event: "sourcemap"
+  #
+  #  - index: "apm-%{[observer.version]}-error-%{+yyyy.MM.dd}"
+  #    when.contains:
+  #      processor.event: "error"
+  #
+  #  - index: "apm-%{[observer.version]}-transaction-%{+yyyy.MM.dd}"
+  #    when.contains:
+  #      processor.event: "transaction"
+  #
+  #  - index: "apm-%{[observer.version]}-span-%{+yyyy.MM.dd}"
+  #    when.contains:
+  #      processor.event: "span"
+  #
+  #  - index: "apm-%{[observer.version]}-metric-%{+yyyy.MM.dd}"
+  #    when.contains:
+  #      processor.event: "metric"
+  #
+  #  - index: "apm-%{[observer.version]}-onboarding-%{+yyyy.MM.dd}"
+  #    when.contains:
+  #      processor.event: "onboarding"
+
+  # A pipeline is a definition of processors applied to documents when ingesting them to Elasticsearch.
+  # APM Server comes with a default pipeline definition, located at `ingest/pipeline/definition.json`, which is
+  # loaded to Elasticsearch by default (see `apm-server.register.ingest.pipeline`).
+  # APM pipeline is enabled by default. To disable it, set `pipeline: _none`.
+  #pipeline: "apm"
+
+  # Optional HTTP Path.
+  #path: "/elasticsearch"
+
+  # Custom HTTP headers to add to each request.
+  #headers:
+  #  X-My-Header: Contents of the header
+
+  # Proxy server url.
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted. If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # The number of seconds to wait before trying to reconnect to Elasticsearch
+  # after a network error. After waiting backoff.init seconds, apm-server
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Elasticsearch after a network error. The default is 60s.
+  #backoff.max: 60s
+
+  # Configure http request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
+  #ssl.enabled: true
+
+  # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # List of root certificates for HTTPS server verifications.
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication.
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections.
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites.
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/elastic.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
+
+
+#----------------------------- Console output -----------------------------
+#output.console:
+  # Boolean flag to enable or disable the output module.
+  #enabled: false
+
+  # Configure JSON encoding.
+  #codec.json:
+    # Pretty-print JSON event.
+    #pretty: false
+
+    # Configure escaping HTML symbols in strings.
+    #escape_html: false
+
+#---------------------------- Logstash output -----------------------------
+#output.logstash:
+  # Boolean flag to enable or disable the output module.
+  #enabled: false
+
+  # The Logstash hosts.
+  #hosts: ["localhost:5044"]
+
+  # Number of workers per Logstash host.
+  #worker: 1
+
+  # Set gzip compression level.
+  #compression_level: 3
+
+  # Configure escaping html symbols in strings.
+  #escape_html: true
+
+  # Optional maximum time to live for a connection to Logstash, after which the
+  # connection will be re-established.  A value of `0s` (the default) will
+  # disable this feature.
+  #
+  # Not yet supported for async connections (i.e. with the "pipelining" option set).
+  #ttl: 30s
+
+  # Optionally load balance the events between the Logstash hosts. Default is false.
+  #loadbalance: false
+
+  # Number of batches to be sent asynchronously to Logstash while processing
+  # new batches.
+  #pipelining: 2
+
+  # If enabled only a subset of events in a batch of events is transferred per
+  # group.  The number of events to be sent increases up to `bulk_max_size`
+  # if no error is encountered.
+  #slow_start: false
+
+  # The number of seconds to wait before trying to reconnect to Logstash
+  # after a network error. After waiting backoff.init seconds, apm-server
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Logstash after a network error. The default is 60s.
+  #backoff.max: 60s
+
+  # Optional index name. The default index name is set to apm
+  # in all lowercase.
+  #index: 'apm'
+
+  # SOCKS5 proxy server URL
+  #proxy_url: socks5://user:password@socks5-server:2233
+
+  # Resolve names locally when using a proxy server. Defaults to false.
+  #proxy_use_local_resolver: false
+
+  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
+  #ssl.enabled: false
+
+  # Optional SSL configuration options. SSL is off by default.
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # List of root certificates for HTTPS server verifications.
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication.
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections.
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites.
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+#------------------------------ Kafka output ------------------------------
+#output.kafka:
+  # Boolean flag to enable or disable the output module.
+  #enabled: false
+
+  # The list of Kafka broker addresses from where to fetch the cluster metadata.
+  # The cluster metadata contain the actual Kafka brokers events are published
+  # to.
+  #hosts: ["localhost:9092"]
+
+  # The Kafka topic used for produced events. The setting can be a format string
+  # using any event field. To set the topic from document type use `%{[type]}`.
+  #topic: beats
+
+  # The Kafka event key setting. Use format string to create unique event key.
+  # By default no event key will be generated.
+  #key: ''
+
+  # The Kafka event partitioning strategy. Default hashing strategy is `hash`
+  # using the `output.kafka.key` setting or randomly distributes events if
+  # `output.kafka.key` is not configured.
+  #partition.hash:
+    # If enabled, events will only be published to partitions with reachable
+    # leaders. Default is false.
+    #reachable_only: false
+
+    # Configure alternative event field names used to compute the hash value.
+    # If empty `output.kafka.key` setting will be used.
+    # Default value is empty list.
+    #hash: []
+
+  # Authentication details. Password is required if username is set.
+  #username: ''
+  #password: ''
+
+  # Kafka version libbeat is assumed to run against. Defaults to the "1.0.0".
+  #version: '1.0.0'
+
+  # Configure JSON encoding.
+  #codec.json:
+    # Pretty print json event
+    #pretty: false
+
+    # Configure escaping html symbols in strings.
+    #escape_html: true
+
+  # Metadata update configuration. Metadata contain the leader information
+  # that decides which broker to use when publishing.
+  #metadata:
+    # Max metadata request retry attempts when cluster is in middle of leader
+    # election. Defaults to 3 retries.
+    #retry.max: 3
+
+    # Waiting time between retries during leader elections. Default is 250ms.
+    #retry.backoff: 250ms
+
+    # Refresh metadata interval. Defaults to every 10 minutes.
+    #refresh_frequency: 10m
+
+  # The number of concurrent load-balanced Kafka output workers.
+  #worker: 1
+
+  # The number of times to retry publishing an event after a publishing failure.
+  # After the specified number of retries, the events are typically dropped.
+  # Set max_retries to a value less than 0 to retry
+  # until all events are published. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Kafka request. The default
+  # is 2048.
+  #bulk_max_size: 2048
+
+  # The number of seconds to wait for responses from the Kafka brokers before
+  # timing out. The default is 30s.
+  #timeout: 30s
+
+  # The maximum duration a broker will wait for number of required ACKs. The
+  # default is 10s.
+  #broker_timeout: 10s
+
+  # The number of messages buffered for each Kafka broker. The default is 256.
+  #channel_buffer_size: 256
+
+  # The keep-alive period for an active network connection. If 0s, keep-alives
+  # are disabled. The default is 0 seconds.
+  #keep_alive: 0
+
+  # Sets the output compression codec. Must be one of none, snappy and gzip. The
+  # default is gzip.
+  #compression: gzip
+
+  # Set the compression level. Currently only gzip provides a compression level
+  # between 0 and 9. The default value is chosen by the compression algorithm.
+  #compression_level: 4
+
+  # The maximum permitted size of JSON-encoded messages. Bigger messages will be
+  # dropped. The default value is 1000000 (bytes). This value should be equal to
+  # or less than the broker's message.max.bytes.
+  #max_message_bytes: 1000000
+
+  # The ACK reliability level required from broker. 0=no response, 1=wait for
+  # local commit, -1=wait for all replicas to commit. The default is 1.  Note:
+  # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
+  # on error.
+  #required_acks: 1
+
+  # The configurable ClientID used for logging, debugging, and auditing
+  # purposes.  The default is "beats".
+  #client_id: beats
+
+  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
+  #ssl.enabled: false
+
+  # Optional SSL configuration options. SSL is off by default.
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # List of root certificates for HTTPS server verifications.
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication.
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections.
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites.
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/krb5kdc/kafka.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/path/config
+
+  # The service principal name.
+  #kerberos.service_name: HTTP/my-service@realm
+
+  # Name of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
+
+#================================= Paths ==================================
+
+# The home path for the apm-server installation. This is the default base path
+# for all other path settings and for miscellaneous files that come with the
+# distribution.
+# If not set by a CLI flag or in the configuration file, the default for the
+# home path is the location of the binary.
+#path.home:
+
+# The configuration path for the apm-server installation. This is the default
+# base path for configuration files, including the main YAML configuration file
+# and the Elasticsearch template file. If not set by a CLI flag or in the
+# configuration file, the default for the configuration path is the home path.
+#path.config: ${path.home}
+
+# The data path for the apm-server installation. This is the default base path
+# for all the files in which apm-server needs to store its data. If not set by a
+# CLI flag or in the configuration file, the default for the data path is a data
+# subdirectory inside the home path.
+#path.data: ${path.home}/data
+
+# The logs path for an apm-server installation. If not set by a CLI flag or in the
+# configuration file, the default is a logs subdirectory inside the home path.
+#path.logs: ${path.home}/logs
+
+#================================= Logging =================================
+
+# There are three options for the log output: syslog, file, and stderr.
+# Windows systems default to file output. All other systems default to syslog.
+
+# Sets the minimum log level. The default log level is info.
+# Available log levels are: error, warning, info, or debug.
+#logging.level: info
+
+# Enable debug output for selected components. To enable all selectors use ["*"].
+# Other available selectors are "beat", "publish", or "service".
+# Multiple selectors can be chained.
+#logging.selectors: [ ]
+
+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: true
+
+# If enabled, apm-server periodically logs its internal metrics that have changed
+# in the last period. For each metric that changed, the delta from the value at
+# the beginning of the period is logged. Also, the total values for
+# all non-zero internal metrics are logged on shutdown. The default is false.
+#logging.metrics.enabled: false
+
+# The period after which to log the internal metrics. The default is 30s.
+#logging.metrics.period: 30s
+
+# Logging to rotating files. When true, writes all logging output to files.
+# The log files are automatically rotated when the log file size limit is reached.
+#logging.to_files: true
+#logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
+  #path: /var/log/apm-server
+
+  # The name of the files where the logs are written to.
+  #name: apm-server
+
+  # Configure log file size limit. If limit is reached, log file will be
+  # automatically rotated.
+  #rotateeverybytes: 10485760 # = 10MB
+
+  # Number of rotated log files to keep. Oldest files will be deleted first.
+  #keepfiles: 7
+
+  # The permissions mask to apply when rotating log files. The default value is 0600.
+  # Must be a valid Unix-style file permissions mask expressed in octal notation.
+  #permissions: 0600
+
+  # Enable log file rotation on time intervals in addition to size-based rotation.
+  # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
+  # are boundary-aligned with minutes, hours, days, weeks, months, and years as
+  # reported by the local system clock. All other intervals are calculated from the
+  # Unix epoch. Defaults to disabled.
+  #interval: 0
+
+# Set to true to log messages in json format.
+#logging.json: false
+
+# Set to true to log with minimal Elastic Common Schema (ECS) fields set.
+# It is recommended to set `logging.json=true` when enabling ECS logging.
+# Defaults to false.
+#logging.ecs: false
+
+
+#=============================== HTTP Endpoint ===============================
+
+# apm-server can expose internal metrics through a HTTP endpoint. For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+#http.enabled: false
+
+# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+#http.host: localhost
+
+# Port on which the HTTP endpoint will bind. Default is 5066.
+#http.port: 5066
+
+#============================= X-pack Monitoring =============================
+
+# APM server can export internal metrics to a central Elasticsearch monitoring
+# cluster.  This requires x-pack monitoring to be enabled in Elasticsearch.  The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#monitoring.enabled: false
+
+# Most settings from the Elasticsearch output are accepted here as well.
+# Note that these settings should be configured to point to your Elasticsearch *monitoring* cluster.
+# Any setting that is not set is automatically inherited from the Elasticsearch
+# output configuration. This means that if you have the Elasticsearch output configured,
+# you can simply uncomment the following line.
+#monitoring.elasticsearch:
+
+  # Protocol - either `http` (default) or `https`.
+  #protocol: "https"
+
+  # Authentication credentials - either API key or username/password.
+  #api_key: "id:api_key"
+  #username: "elastic"
+  #password: "changeme"
+
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (`http` and `9200`).
+  # In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
+  # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
+  #hosts: ["localhost:9200"]
+
+  # Set gzip compression level.
+  #compression_level: 0
+
+  # Dictionary of HTTP parameters to pass within the URL with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Custom HTTP headers to add to each request.
+  #headers:
+  #  X-My-Header: Contents of the header
+
+  # Proxy server url.
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted. If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # The number of seconds to wait before trying to reconnect to Elasticsearch
+  # after a network error. After waiting backoff.init seconds, apm-server
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Elasticsearch after a network error. The default is 60s.
+  #backoff.max: 60s
+
+  # Configure HTTP request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
+  #ssl.enabled: true
+
+  # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # List of root certificates for HTTPS server verifications.
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication.
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections.
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites.
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/elastic.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
+
+  #metrics.period: 10s
+  #state.period: 1m
diff --git a/roles/elastic/templates/kibana.yml.j2 b/roles/elastic/templates/kibana.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..d22c9f6e8ff0d0392043f89badc35711e26c00e4
--- /dev/null
+++ b/roles/elastic/templates/kibana.yml.j2
@@ -0,0 +1,116 @@
+# Kibana is served by a back end server. This setting specifies the port to use.
+server.port: {{ kibana_default_port }}
+
+# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
+# The default is 'localhost', which usually means remote machines will not be able to connect.
+# To allow connections from remote users, set this parameter to a non-loopback address.
+#server.host: "localhost"
+server.host: {{ kibana_server_host }}
+
+# Enables you to specify a path to mount Kibana at if you are running behind a proxy.
+# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
+# from requests it receives, and to prevent a deprecation warning at startup.
+# This setting cannot end in a slash.
+# server.basePath: ""
+
+# Specifies whether Kibana should rewrite requests that are prefixed with
+# `server.basePath` or require that they are rewritten by your reverse proxy.
+# This setting was effectively always `false` before Kibana 6.3 and will
+# default to `true` starting in Kibana 7.0.
+#server.rewriteBasePath: false
+
+# The maximum payload size in bytes for incoming server requests.
+#server.maxPayloadBytes: 1048576
+
+# The Kibana server's name.  This is used for display purposes.
+#server.name: "your-hostname"
+
+# The URLs of the Elasticsearch instances to use for all your queries.
+elasticsearch.hosts: ["http://{{ elastic_host }}:9200"]
+
+# When this setting's value is true Kibana uses the hostname specified in the server.host
+# setting. When the value of this setting is false, Kibana uses the hostname of the host
+# that connects to this Kibana instance.
+#elasticsearch.preserveHost: true
+
+# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
+# dashboards. Kibana creates a new index if the index doesn't already exist.
+#kibana.index: ".kibana"
+
+# The default application to load.
+#kibana.defaultAppId: "home"
+
+# If your Elasticsearch is protected with basic authentication, these settings provide
+# the username and password that the Kibana server uses to perform maintenance on the Kibana
+# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
+# is proxied through the Kibana server.
+#elasticsearch.username: "kibana_system"
+#elasticsearch.password: "pass"
+
+# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
+# These settings enable SSL for outgoing requests from the Kibana server to the browser.
+#server.ssl.enabled: false
+#server.ssl.certificate: /path/to/your/server.crt
+#server.ssl.key: /path/to/your/server.key
+
+# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
+# These files are used to verify the identity of Kibana to Elasticsearch and are required when
+# xpack.security.http.ssl.client_authentication in Elasticsearch is set to required.
+#elasticsearch.ssl.certificate: /path/to/your/client.crt
+#elasticsearch.ssl.key: /path/to/your/client.key
+
+# Optional setting that enables you to specify a path to the PEM file for the certificate
+# authority for your Elasticsearch instance.
+#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
+
+# To disregard the validity of SSL certificates, change this setting's value to 'none'.
+#elasticsearch.ssl.verificationMode: full
+
+# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
+# the elasticsearch.requestTimeout setting.
+#elasticsearch.pingTimeout: 1500
+
+# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
+# must be a positive integer.
+#elasticsearch.requestTimeout: 30000
+
+# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
+# headers, set this value to [] (an empty list).
+#elasticsearch.requestHeadersWhitelist: [ authorization ]
+
+# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
+# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
+#elasticsearch.customHeaders: {}
+
+# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
+#elasticsearch.shardTimeout: 30000
+
+# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
+#elasticsearch.startupTimeout: 5000
+
+# Logs queries sent to Elasticsearch. Requires logging.verbose set to true.
+#elasticsearch.logQueries: false
+
+# Specifies the path where Kibana creates the process ID file.
+#pid.file: /var/run/kibana.pid
+
+# Enables you to specify a file where Kibana stores log output.
+#logging.dest: stdout
+
+# Set the value of this setting to true to suppress all logging output.
+#logging.silent: false
+
+# Set the value of this setting to true to suppress all logging output other than error messages.
+#logging.quiet: false
+
+# Set the value of this setting to true to log all events, including system usage information
+# and all requests.
+#logging.verbose: false
+
+# Set the interval in milliseconds to sample system and process performance
+# metrics. Minimum is 100ms. Defaults to 5000.
+#ops.interval: 5000
+
+# Specifies locale to be used for all localizable strings, dates and number formats.
+# Supported languages are the following: English - en (default), Chinese - zh-CN.
+#i18n.locale: "en"
diff --git a/roles/metricbeat/defaults/main.yml b/roles/metricbeat/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f3a01583e783f8e6d5b8617f9646486e79fb3713
--- /dev/null
+++ b/roles/metricbeat/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+elastic_host: localhost
+elastic_port: 9200
+kibana_server_host: localhost
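+# Assumption: templates/postgresql.yml.j2 also expects metricbeat_pgsql_username and
+# metricbeat_pgsql_pass; they are not defaulted here and are presumably supplied via
+# the inventory or group_vars.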
+
+...
diff --git a/roles/metricbeat/handlers/main.yml b/roles/metricbeat/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5d576b93bdc9bac8372926d851fff7227e5059b9
--- /dev/null
+++ b/roles/metricbeat/handlers/main.yml
@@ -0,0 +1,7 @@
+---
+- name: restart metricbeat
+  service:
+    name: metricbeat
+    state: restarted
+
+...
diff --git a/roles/metricbeat/tasks/main.yml b/roles/metricbeat/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4f02198b264a43181052fb7d2ad2561579b06d41
--- /dev/null
+++ b/roles/metricbeat/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+- name: install apt-transport-https
+  apt:
+    name: apt-transport-https
+    state: latest
+
+- name: install elastic GPG key
+  apt_key:
+    url: https://artifacts.elastic.co/GPG-KEY-elasticsearch
+    state: present
+
+- name: install elastic repository
+  apt_repository:
+    repo: deb https://artifacts.elastic.co/packages/7.x/apt stable main
+
+- name: install metricbeat
+  apt:
+    name: metricbeat
+    state: latest
+
+- name: install metricbeat configuration
+  template:
+    src: metricbeat.yml.j2
+    dest: /etc/metricbeat/metricbeat.yml
+  notify: restart metricbeat
+
+- name: enable metricbeat dashboard
+  command: metricbeat setup
+  when: inventory_hostname == groups['mediaserver'][0]
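+# Note: `metricbeat setup` loads the index template and the sample Kibana dashboards,
+# so it only needs to run once per deployment, hence the restriction to the first
+# mediaserver; as a plain command it is not idempotent and reports "changed" on every run.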
+
+- name: enable sql metricbeat configuration
+  template:
+    src: postgresql.yml.j2
+    dest: /etc/metricbeat/modules.d/postgresql.yml
+  when: "'postgres' in group_names"
+  notify: restart metricbeat
+
+...
diff --git a/roles/metricbeat/templates/metricbeat.yml.j2 b/roles/metricbeat/templates/metricbeat.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..26a6e18c6fd4971461c72864ac0805f8295d8bac
--- /dev/null
+++ b/roles/metricbeat/templates/metricbeat.yml.j2
@@ -0,0 +1,166 @@
+###################### Metricbeat Configuration Example #######################
+
+# This file is an example configuration file highlighting only the most common
+# options. The metricbeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/metricbeat/index.html
+
+# =========================== Modules configuration ============================
+
+metricbeat.config.modules:
+  # Glob pattern for configuration loading
+  path: ${path.config}/modules.d/*.yml
+
+  # Set to true to enable config reloading
+  reload.enabled: false
+
+  # Period on which files under path should be checked for changes
+  #reload.period: 10s
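+
+  # Note: module files under modules.d are loaded at startup; this role drops
+  # modules.d/postgresql.yml onto hosts in the postgres group and restarts the service,
+  # while the package's default module file (typically system) stays enabled as shipped.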
+
+# ======================= Elasticsearch template setting =======================
+
+setup.template.settings:
+  index.number_of_shards: 1
+  index.codec: best_compression
+  #_source.enabled: false
+
+
+# ================================== General ===================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+#  env: staging
+
+# ================================= Dashboards =================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here or by using the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
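+# In this role the dashboards are loaded by the "enable metricbeat dashboard" task
+# (which runs `metricbeat setup`) rather than by enabling setup.dashboards.enabled here.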
+
+# =================================== Kibana ===================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601)
+  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  host: "{{ kibana_server_host }}:5601"
+
+  # Kibana Space ID
+  # ID of the Kibana Space into which the dashboards should be loaded. By default,
+  # the Default Space will be used.
+  #space.id:
+
+# =============================== Elastic Cloud ================================
+
+# These settings simplify using Metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+# ================================== Outputs ===================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+# ---------------------------- Elasticsearch Output ----------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  hosts: ["{{ elastic_host }}:{{ elastic_port }}"]
+
+  # Protocol - either `http` (default) or `https`.
+  #protocol: "https"
+
+  # Authentication credentials - either API key or username/password.
+  #api_key: "id:api_key"
+  #username: "elastic"
+  #password: "changeme"
+
+# ------------------------------ Logstash Output -------------------------------
+#output.logstash:
+  # The Logstash hosts
+  #hosts: ["localhost:5044"]
+
+  # Optional SSL. By default is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+# ================================= Processors =================================
+
+# Configure processors to enhance or manipulate events generated by the beat.
+
+processors:
+  - add_host_metadata: ~
+  - add_cloud_metadata: ~
+  - add_docker_metadata: ~
+  - add_kubernetes_metadata: ~
+
+
+# ================================== Logging ===================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+# ============================= X-Pack Monitoring ==============================
+# Metricbeat can export internal metrics to a central Elasticsearch monitoring
+# cluster.  This requires xpack monitoring to be enabled in Elasticsearch.  The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#monitoring.enabled: false
+
+# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
+# Metricbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
+# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
+#monitoring.cluster_uuid:
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well.
+# Note that the settings should point to your Elasticsearch *monitoring* cluster.
+# Any setting that is not set is automatically inherited from the Elasticsearch
+# output configuration, so if you have the Elasticsearch output configured such
+# that it is pointing to your Elasticsearch monitoring cluster, you can simply
+# uncomment the following line.
+#monitoring.elasticsearch:
+
+# ================================= Migration ==================================
+
+# This allows enabling the 6.7 migration aliases
+#migration.6_to_7.enabled: true
\ No newline at end of file
diff --git a/roles/metricbeat/templates/postgresql.yml.j2 b/roles/metricbeat/templates/postgresql.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..ee3a33a53b20947f43ecfea5ea1c8007b3883361
--- /dev/null
+++ b/roles/metricbeat/templates/postgresql.yml.j2
@@ -0,0 +1,12 @@
+# Module: postgresql
+# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.8/metricbeat-module-postgresql.html
+
+- module: postgresql
+  #metricsets:
+  #  - database
+  #  - bgwriter
+  #  - activity
+  period: 10s
+  hosts: ["postgres://localhost:5432"]
+  username: "{{ metricbeat_pgsql_username }}"
+  password: "{{ metricbeat_pgsql_pass }}"
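+
+# With metricsets left commented out, the module's default metricsets are collected;
+# the DSN above targets the local PostgreSQL instance on each host in the postgres group.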