Commit 6d2184a9 authored by Stéphane Diemer

Merge branch 't32853-ovh_cloud_provisioning' into 'master'

See merge request mediaserver/envsetup!25
parents 2f5addd9 783a826d
Showing changed files with 902 additions and 49 deletions
@@ -8,5 +8,7 @@ exclude_paths:
 skip_list:
   - '701'
   - '403'
+  - '208'
+  - '106'
 ...
@@ -6,6 +6,9 @@ stages:
   - docker
   - lint
   - test
+  - test-pgsql-ha
+  - test-mediaserver-ha
+  - deploy

 before_script:
   - make install-dev

@@ -64,9 +67,45 @@ test:
   script:
     - make test
+.preprod-ovh:
+  image: registry.ubicast.net/mediaserver/envsetup:root
+  stage: deploy
+  tags:
+    - docker
+  before_script:
+    - eval $(ssh-agent -s)
+    - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
+    - mkdir -p ~/.ssh
+    - chmod 700 ~/.ssh
+    - make install-dev
+    - git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@git.ubicast.net/mediaserver/envsetup-extra
+
+start-ovh-preprod:
+  extends: .preprod-ovh
+  environment:
+    name: preproduction
+    url: https://test-ms.ubicast.video
+    on_stop: stop-ovh-preprod
+    auto_stop_in: 30 minutes
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "web"'
+    - if: '$CI_PIPELINE_SOURCE == "schedules"'
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+  script:
+    - make start-preprod-ovh
+
+stop-ovh-preprod:
+  extends: .preprod-ovh
+  environment:
+    name: preproduction
+    action: stop
+  when: manual
+  script:
+    - make stop-preprod-ovh
 test-ha-pgsql:
   image: registry.ubicast.net/mediaserver/envsetup:root
-  stage: test
+  stage: test-pgsql-ha
   tags:
     - docker
   rules:

@@ -78,4 +117,18 @@ test-ha-pgsql:
   script:
     - make test ha-pgsql=1
+test-mediaserver-ha:
+  image: registry.ubicast.net/mediaserver/envsetup:root
+  stage: test-mediaserver-ha
+  tags:
+    - docker
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "web"'
+    - if: '$CI_PIPELINE_SOURCE == "merge_requests"'
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      changes:
+        - ansible/**/*
+  script:
+    - make test mediaserver-ha=1
 ...
@@ -22,5 +22,8 @@ rules:
   truthy:
     level: warning
   line-length: disable
+  indentation:
+    spaces: consistent
+    indent-sequences: true
+    check-multi-line-strings: false
 ...
@@ -6,8 +6,8 @@ PIP_BIN = $(shell command -v $(VENV)/bin/pip3 || command -v pip3 || echo pip3)
 PIP_COMPILE_BIN = $(shell command -v $(VENV)/bin/pip-compile || command -v pip-compile)
 ANSIBLE_BIN = $(shell command -v ansible || command -v $(VENV)/bin/ansible)
 ANSIBLE_PLAYBOOK_BIN = $(shell command -v ansible-playbook || command -v $(VENV)/bin/ansible-playbook)
-ANSIBLE_GALAXY_BIN = $(shell command -v ansible-galaxy || command -v $(VENV)/bin/ansible-galaxy)
 ANSIBLE_LINT_BIN = $(shell command -v ansible-lint || command -v $(VENV)/bin/ansible-lint)
+ANSIBLE_GALAXY_BIN = $(shell command -v ansible-galaxy || command -v $(VENV)/bin/ansible-galaxy || echo ansible-galaxy)
 YAMLLINT_BIN = $(shell command -v yamllint || command -v $(VENV)/bin/yamllint)
 FLAKE8_BIN = $(shell command -v flake8 || command -v $(VENV)/bin/flake8)
@@ -21,6 +21,9 @@ endif
 ifdef ha-pgsql
 MOLECULE_TEST_FLAGS += --scenario-name ha-pgsql
 endif
+ifdef mediaserver-ha
+MOLECULE_TEST_FLAGS += --scenario-name mediaserver-ha
+endif
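Each flag maps one-to-one to a Molecule scenario, so the new scenario can be selected locally the same way the CI job does (the key value below is a placeholder):

SKYREACH_SYSTEM_KEY=<your-key> make test mediaserver-ha=1
# expands to: cd ansible; molecule test --scenario-name mediaserver-ha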
 .PHONY: all
 ## TARGET: DESCRIPTION: ARGS

@@ -43,9 +46,10 @@ venv:
 install: venv
 	$(PIP_BIN) install -U pip wheel
 	$(PIP_BIN) install -r ansible/requirements.txt
+## ANSIBLE_GALAXY_BIN := $(if $(ANSIBLE_GALAXY_BIN),$(ANSIBLE_GALAXY_BIN),$(shell command -v ansible-galaxy || command -v $(VENV)/bin/ansible-galaxy))
+## $(eval override ANSIBLE_GALAXY_BIN=$(shell command -v ansible-galaxy || command -v $(VENV)/bin/ansible-galaxy))
 	ANSIBLE_CONFIG=$(ANSIBLE_CONFIG) $(ANSIBLE_GALAXY_BIN) install -r ansible/requirements.yml

 .PHONY: install-dev
 ## install-dev: Install development requirements
 install-dev: install
@@ -63,11 +67,18 @@ lint:
 .PHONY: test
 ## test: Run development tests on the project : debug=1, keep=1, SKYREACH_SYSTEM_KEY=<xxx>, ha-pgsql=1
 test:
+ifndef SKYREACH_SYSTEM_KEY
+	$(error SKYREACH_SYSTEM_KEY is undefined)
+endif
 	cd ansible; molecule $(MOLECULE_FLAGS) test $(MOLECULE_TEST_FLAGS)
+.PHONY: start-preprod-ovh
+## start-preprod-ovh: launch ansible to deploy the OVH preprod environment
+start-preprod-ovh:
+	ANSIBLE_CONFIG=$(ANSIBLE_CONFIG) $(ANSIBLE_PLAYBOOK_BIN) -i envsetup-extra/inventories/preprod ansible/playbooks/site_ovh.yml
+
+.PHONY: stop-preprod-ovh
+## stop-preprod-ovh: launch ansible to destroy the OVH preprod environment
+stop-preprod-ovh:
+	ANSIBLE_CONFIG=$(ANSIBLE_CONFIG) $(ANSIBLE_PLAYBOOK_BIN) -i envsetup-extra/inventories/preprod ansible/playbooks/site_ovh.yml -t force-delete
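Both targets drive the same site_ovh.yml playbook; the stop target only adds the force-delete tag. A manual run therefore looks like this (assuming envsetup-extra has been cloned next to the Makefile, as the CI before_script does):

make start-preprod-ovh   # provision the OVH preprod environment
make stop-preprod-ovh    # tear it down via the force-delete tag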
 .PHONY: deploy
 ## deploy: Run deployment playbooks : i=<inventory-path>, l=<host-or-group>, t=<tag>
 deploy:
 ...
@@ -13,26 +13,5 @@ hap_config_listen:
       maxconn 500
       server pg1 192.168.122.1:5432 maxconn 500 check port 8543
       server pg2 192.168.122.2:5432 maxconn 500 check port 8543 backup
-  - name: pgsql-standby
-    content: |2
-      bind localhost:54322
-      default-server inter 2s fall 3 rise 2 on-marked-down shutdown-sessions
-      option tcp-check
-      tcp-check expect rstring (primary|standby)
-      maxconn 500
-      server pg1 192.168.122.1:5432 maxconn 500 check port 8543
-      server pg2 192.168.122.2:5432 maxconn 500 check port 8543
-  - name: wowza
-    content: |2
-      bind :1935
-      stick-table type ip size 1
-      stick on dst
-      default-server inter 2s fall 3 rise 2 on-marked-down shutdown-sessions
-      option httpchk
-      maxconn 500
-      server ms1 10.0.0.1:19350 maxconn 500 check port 19350
-      server ms2 10.0.0.2:19350 maxconn 500 check port 19350 backup
+wowza_rtmp_port: 19350
 ...
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import quote_plus
DOCUMENTATION = '''
---
module: ovh_loadbalancer
short_description: Manage OVH LoadBalancer
description:
- Manage an OVH (French European hosting provider) LoadBalancer.
author: "Emmanuel Cohen (@atmaniak)"
notes:
- Uses the OVH Python API U(https://github.com/ovh/python-ovh).
You have to create an application (a key and secret) with a consumer
key as described at U(https://eu.api.ovh.com/g934.first_step_with_api)
requirements:
- ovh >= 0.4.8
options:
loadbalancerid:
required: true
description:
- ID of the load balancer to manage
refresh:
required: false
default: true
description:
- Refresh the load balancer configuration
timeout:
required: false
default: 120
description:
- Time to wait for the refresh to complete
endpoint:
required: false
default: ovh-eu
description:
- The endpoint to use (for instance ovh-eu)
application_key:
required: true
description:
- The applicationKey to use
application_secret:
required: true
description:
- The application secret to use
consumer_key:
required: true
description:
- The consumer key to use
'''
EXAMPLES = '''
# Refresh LoadBalancer 1234
- ovh_loadbalancer:
loadbalancerid: 1234
refresh: true
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
'''
RETURN = '''
'''
import time # noqa: E402
try:
import ovh
import ovh.exceptions
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def getOvhClient(ansibleModule):
endpoint = ansibleModule.params.get('endpoint')
application_key = ansibleModule.params.get('application_key')
application_secret = ansibleModule.params.get('application_secret')
consumer_key = ansibleModule.params.get('consumer_key')
return ovh.Client(
endpoint=endpoint,
application_key=application_key,
application_secret=application_secret,
consumer_key=consumer_key
)
def waitForNoTask(client, loadbalancerid, timeout):
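# Poll once per second for pending 'refreshIplb' tasks still in 'todo'
# state; give up and return False once the caller-supplied timeout elapses.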
currentTimeout = timeout
while client.get('/ipLoadbalancing/{0}/task'.format(quote_plus(loadbalancerid)),
action='refreshIplb',
status='todo'):
time.sleep(1) # Delay for 1 sec
currentTimeout -= 1
if currentTimeout < 0:
return False
return True
def waitForTaskDone(client, loadbalancerid, taskId, timeout):
currentTimeout = timeout
while True:
task = client.get('/ipLoadbalancing/{0}/task/{1}'.format(quote_plus(loadbalancerid), taskId))
if task['status'] == 'done':
return True
time.sleep(5) # Delay for 5 sec because it's long to wait completion, do not harass the API
currentTimeout -= 5
if currentTimeout < 0:
return False
return True
def main():
module = AnsibleModule(
argument_spec=dict(
loadbalancerid=dict(required=True),
refresh=dict(type='bool', default=True),
timeout=dict(default=120, type='int'),
endpoint=dict(default='ovh-eu'),
wait_completion=dict(default=True, type='bool'),
wait_task_completion=dict(default=0, type='int'),
application_key=dict(required=True, no_log=True),
application_secret=dict(required=True, no_log=True),
consumer_key=dict(required=True, no_log=True),
),
supports_check_mode=True
)
result = dict(
changed=False
)
if not HAS_OVH:
module.fail_json(msg='The ovh python module is required to run this module')
# Get parameters
loadbalancerid = module.params.get('loadbalancerid')
refresh = module.params.get('refresh')
timeout = module.params.get('timeout')
wait_completion = module.params.get('wait_completion')
wait_task_completion = module.params.get('wait_task_completion')
# Connect to OVH API
client = getOvhClient(module)
# Check that the load balancer exists
try:
client.get('/ipLoadbalancing/{0}'.format(quote_plus(loadbalancerid)))
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the load balancer, '
'check application key, secret, consumerkey and parameters. '
'Error returned by OVH api was: {0}'.format(apiError))
# Check that no task is pending before going on
try:
if not waitForNoTask(client, loadbalancerid, timeout):
module.fail_json(
msg='Timeout of {0} seconds while waiting for no pending '
'tasks before executing the module '.format(timeout))
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the list of pending tasks '
'of the load balancer, check application key, secret, consumerkey '
'and parameters. Error returned by OVH api was: {0}'
.format(apiError))
if not module.check_mode and refresh:
try:
if wait_task_completion == 0:
task = client.post('/ipLoadbalancing/{}/refresh'.format(loadbalancerid))
taskId = task['id']
result['changed'] = True
else:
# Just wait for the given taskId to be completed
taskId = wait_task_completion
result['TaskId'] = taskId
if wait_completion or wait_task_completion != 0:
if not waitForTaskDone(client, loadbalancerid, taskId, timeout):
module.fail_json(
msg='Timeout of {0} seconds while waiting for completion '
'of move ip to service'.format(timeout))
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for refreshing the load balancer, '
'check application key, secret, consumerkey and parameters. '
'Error returned by OVH api was: {0}'.format(apiError))
module.exit_json(**result)
if __name__ == '__main__':
main()
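The wait_completion and wait_task_completion parameters accepted by the argument spec above are not listed in DOCUMENTATION; a minimal task sketch using them (the task id and timeout values are placeholders, not values from this repository):

- ovh_loadbalancer:
    loadbalancerid: 1234
    refresh: true
    wait_task_completion: 5678  # placeholder id of an already-created task
    timeout: 300
    application_key: yourkey
    application_secret: yoursecret
    consumer_key: yourconsumerkey

With a non-zero wait_task_completion the module skips creating a new refresh task and only polls the given task id until it is done or the timeout expires.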
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: ovh_loadbalancer_farm_server
short_description: Manage farm server for OVH LoadBalancer
description:
- Manage farm server for OVH (French European hosting provider) LoadBalancer.
author: "Emmanuel Cohen (@atmaniak)"
notes:
- Uses the OVH Python API U(https://github.com/ovh/python-ovh).
You have to create an application (a key and secret) with a consumer
key as described at U(https://eu.api.ovh.com/g934.first_step_with_api)
requirements:
- ovh >= 0.4.8
options:
name:
required: true
description:
- Name of the server to add
loadbalancerid:
required: true
description:
- ID of the OVH load balancer to manage
farmid:
required: true
description:
- ID of the farm to manage
address:
required: true
description:
- The destination IP address to manage
port:
required: true
description:
- Destination port
ssl:
required: false
description:
- Whether to enable SSL on the server
status:
required: false
default: active
choices: ['active', 'inactive']
description:
- Whether the server is active or inactive
state:
required: false
default: present
choices: ['present', 'absent']
description:
- Determines whether the record is to be created/modified or deleted
farmtype:
required: false
default: tcp
choices: ['tcp', 'http', 'udp']
description:
- Type of the farm
endpoint:
required: false
default: ovh-eu
description:
- The endpoint to use (for instance ovh-eu)
application_key:
required: true
description:
- The applicationKey to use
application_secret:
required: true
description:
- The application secret to use
consumer_key:
required: true
description:
- The consumer key to use
'''
EXAMPLES = '''
# Add 127.0.0.1 to farm server id 1234
- ovh_loadbalancer_farm_server:
name: display_name
loadbalancerid: 1234
farmid: 1234
address: 127.0.0.1
port: 443
status: active
farmtype: http
ssl: yes
state: present
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
'''
RETURN = '''
'''
try:
import ovh
import ovh.exceptions
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def getOvhClient(ansibleModule):
endpoint = ansibleModule.params.get('endpoint')
application_key = ansibleModule.params.get('application_key')
application_secret = ansibleModule.params.get('application_secret')
consumer_key = ansibleModule.params.get('consumer_key')
return ovh.Client(
endpoint=endpoint,
application_key=application_key,
application_secret=application_secret,
consumer_key=consumer_key
)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
loadbalancerid=dict(required=True),
farmid=dict(required=True, type='int'),
address=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
farmtype=dict(default='tcp', choices=['tcp', 'http', 'udp']),
probe=dict(default='false'),
weight=dict(default='0'),
backup=dict(default='false'),
port=dict(default='', type='int'),
status=dict(default='active', choices=['active', 'inactive']),
ssl=dict(type='bool', default=False),
endpoint=dict(default='ovh-eu'),
application_key=dict(required=True, no_log=True),
application_secret=dict(required=True, no_log=True),
consumer_key=dict(required=True, no_log=True),
),
supports_check_mode=True
)
result = dict(
changed=False
)
if not HAS_OVH:
module.fail_json(msg='The ovh python module is required to run this module')
# Get parameters
name = module.params.get('name')
loadbalancerid = module.params.get('loadbalancerid')
farmid = module.params.get('farmid')
farmtype = module.params.get('farmtype')
address = module.params.get('address')
state = module.params.get('state')
weight = module.params.get('weight')
ssl = module.params.get('ssl')
port = module.params.get('port')
status = module.params.get('status')
# Connect to OVH API
client = getOvhClient(module)
# Check that the LoadBalancer & farm exists
try:
client.get('/ipLoadbalancing/{}/{}/farm/{}'.format(loadbalancerid, farmtype, farmid))
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the object '
'check application key, secret, consumerkey and parameters. '
'Error returned by OVH api was : {0}'.format(apiError))
farm_servers = client.get('/ipLoadbalancing/{}/{}/farm/{}/server'.format(loadbalancerid, farmtype, farmid))
defined_server = dict({})
for server in farm_servers:
myserver = client.get('/ipLoadbalancing/{}/{}/farm/{}/server/{}'.format(loadbalancerid, farmtype, farmid, server))
defined_server[myserver['address']] = myserver
need_update = False
already_exists = False
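# Only the display name is compared here: the address is the lookup key,
# so an address change results in a new farm entry rather than an update.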
if address in defined_server:
already_exists = True
if defined_server[address]['displayName'] != name:
need_update = True
if not module.check_mode and already_exists and state == "absent":
try:
server_to_delete = defined_server[address]["serverId"]
client.delete('/ipLoadbalancing/{}/{}/farm/{}/server/{}'.format(loadbalancerid, farmtype, farmid, server_to_delete))
result['changed'] = True
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for removing the object '
'check application key, secret, consumerkey and parameters. '
'Error returned by OVH api was: {0}'.format(apiError))
if not module.check_mode and need_update and state == "present":
try:
client.put('/ipLoadbalancing/{}/{}/farm/{}/server/{}'.format(loadbalancerid, farmtype, farmid, defined_server[address]["serverId"]), address=address, port=port, weight=weight, ssl=ssl, status=status, displayName=name)
result['changed'] = True
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for updating the object '
'check application key, secret, consumerkey and parameters. '
'Error returned by OVH api was: {0}'.format(apiError))
if not module.check_mode and not need_update and not already_exists and state == "present":
try:
client.post('/ipLoadbalancing/{}/{}/farm/{}/server'.format(loadbalancerid, farmtype, farmid), address=address, port=port, weight=weight, ssl=ssl, status=status, displayName=name)
result['changed'] = True
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for creating the object '
'check application key, secret, consumerkey and parameters. '
'Error returned by OVH api was: {0}'.format(apiError))
module.exit_json(**result)
if __name__ == '__main__':
main()
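The EXAMPLES block above only covers adding a server; a removal sketch with the same placeholder values (with state: absent the module looks the address up in the farm and deletes the matching serverId):

- ovh_loadbalancer_farm_server:
    name: display_name
    loadbalancerid: 1234
    farmid: 1234
    address: 127.0.0.1
    port: 443
    farmtype: http
    state: absent
    application_key: yourkey
    application_secret: yoursecret
    consumer_key: yourconsumerkey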
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: ovh_nasha
short_description: Manage OVH NAS-HA ACL
description:
- Manage OVH (French European hosting provider) NAS-HA ACL.
author: "Emmanuel Cohen (@atmaniak)"
notes:
- Uses the OVH Python API U(https://github.com/ovh/python-ovh).
You have to create an application (a key and secret) with a consumer
key as described at U(https://eu.api.ovh.com/g934.first_step_with_api)
requirements:
- ovh >= 0.4.8
options:
name:
required: true
description:
- Name of the zpool to manage
partition:
required: true
description:
- The name of partition inside zpool
ip:
required: true
description:
- The IP address to register in ACL
state:
required: true
default: present
choices: ['present', 'absent']
description:
- Determines whether the record is to be created/modified or deleted
endpoint:
required: false
default: ovh-eu
description:
- The endpoint to use (for instance ovh-eu)
application_key:
required: true
description:
- The applicationKey to use
application_secret:
required: true
description:
- The application secret to use
consumer_key:
required: true
description:
- The consumer key to use
'''
EXAMPLES = '''
# Allow 127.0.0.1 on partition mypartition on zpool-test
- ovh_nasha:
name: zpool-test
partition: mypartition
ip: 127.0.0.1
state: present
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
# Remove 127.0.0.1 on partition mypartition on zpool-test
- ovh_nasha:
name: zpool-test
partition: mypartition
ip: 127.0.0.1
state: absent
endpoint: ovh-eu
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
'''
RETURN = '''
'''
try:
import ovh
import ovh.exceptions
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def getOvhClient(ansibleModule):
endpoint = ansibleModule.params.get('endpoint')
application_key = ansibleModule.params.get('application_key')
application_secret = ansibleModule.params.get('application_secret')
consumer_key = ansibleModule.params.get('consumer_key')
return ovh.Client(
endpoint=endpoint,
application_key=application_key,
application_secret=application_secret,
consumer_key=consumer_key
)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
partition=dict(required=True),
ip=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
endpoint=dict(default='ovh-eu'),
application_key=dict(required=True, no_log=True),
application_secret=dict(required=True, no_log=True),
consumer_key=dict(required=True, no_log=True),
),
supports_check_mode=True
)
result = dict(
changed=False
)
if not HAS_OVH:
module.fail_json(msg='The ovh python module is required to run this module')
# Get parameters
name = module.params.get('name')
partition = module.params.get('partition')
ip = module.params.get('ip')
state = module.params.get('state')
# Connect to OVH API
client = getOvhClient(module)
# Check that the NAS-HA and the partition exist
try:
nasha = client.get('/dedicated/nasha/{}/partition'.format(name))
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the partition list, '
'check application key, secret, consumerkey and parameters. '
'Error returned by OVH api was : {0}'.format(apiError))
if partition not in nasha:
module.fail_json(msg='partition {0} does not exist on {1}'.format(partition, name))
# Check that IP is not already allowed.
try:
allowed_ips = client.get('/dedicated/nasha/{}/partition/{}/access'.format(name, partition))
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the properties '
'of the ip, check application key, secret, consumerkey '
'and parameters. Error returned by OVH api was : {0}'
.format(apiError))
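# The API returns ACL entries in CIDR notation, hence the '/32' suffix
# used when comparing a single host address below.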
# Allow IP if state present.
if '{0}/32'.format(ip) not in allowed_ips and state == "present":
if not module.check_mode:
try:
client.post('/dedicated/nasha/{}/partition/{}/access'.format(name, partition), ip=ip)
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for allowing the ip, '
'check application key, secret, consumerkey '
'and parameters. Error returned by OVH api was : {0}'
.format(apiError))
result['changed'] = True
# Remove IP if state absent.
if '{0}/32'.format(ip) in allowed_ips and state == "absent":
if not module.check_mode:
try:
client.delete('/dedicated/nasha/{}/partition/{}/access/{}'.format(name, partition, ip))
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for removing the ip, '
'check application key, secret, consumerkey '
'and parameters. Error returned by OVH api was : {0}'
.format(apiError))
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
@@ -20,7 +20,7 @@ def test_celerity_config(host):
     assert f.exists
     assert f.contains("SIGNING_KEY =")
     assert f.contains("SERVER_URL =")
-    assert f.contains("WORKERS_COUNT =")
+    assert f.contains("QUEUES_PER_WORKER =")

 def test_celerity_service(host):
 ...
@@ -32,7 +32,7 @@ def test_postgres_service(host):
     assert s.is_running

-def test_celerity_socket(host):
+def test_postgres_socket(host):
     s = host.socket("tcp://127.0.0.1:5432")
     assert s.is_listening
@@ -2,7 +2,7 @@
 driver:
   name: docker
 platforms:
-  - name: db0-default
+  - name: db0-${CI_PIPELINE_ID:-default}
     image: registry.ubicast.net/docker/debian-systemd:buster
     command: /lib/systemd/systemd
     privileged: true

@@ -13,7 +13,7 @@ platforms:
       - /run
     groups:
       - postgres
-  - name: db1-default
+  - name: db1-${CI_PIPELINE_ID:-default}
     image: registry.ubicast.net/docker/debian-systemd:buster
     command: /lib/systemd/systemd
     privileged: true

@@ -24,7 +24,7 @@ platforms:
       - /run
     groups:
       - postgres
-  - name: db2-default
+  - name: db2-${CI_PIPELINE_ID:-default}
     image: registry.ubicast.net/docker/debian-systemd:buster
     command: /lib/systemd/systemd
     privileged: true

@@ -49,8 +49,8 @@ provisioner:
     ANSIBLE_ACTION_PLUGINS: ../../plugins/action
     ANSIBLE_PYTHON_INTERPRETER: /usr/bin/python3
     SKYREACH_SYSTEM_KEY: s1121eb6e7593525bf3e0302586c82d2
 verifier:
   name: testinfra
-  options:
-    verbose: true
+  # options:
+  #   verbose: true
+  #   s: true
@@ -11,12 +11,12 @@ testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLEC

 def test_postgresql_check_repmgr_status(host):
     ''' check if repmgr is working correctly on each node '''
-    if host.ansible.get_variables()["inventory_hostname"].startswith("db0-default"):
+    if host.ansible.get_variables()["inventory_hostname"].startswith("db0"):
         data = commons.get_status(host)
         assert data == "primary"
-    if host.ansible.get_variables()["inventory_hostname"].startswith("db1-default"):
+    if host.ansible.get_variables()["inventory_hostname"].startswith("db1"):
         data = commons.get_status(host)
         assert data == "standby"
-    if host.ansible.get_variables()["inventory_hostname"].startswith("db2-default"):
+    if host.ansible.get_variables()["inventory_hostname"].startswith("db2"):
         data = commons.get_status(host)
         assert data == "witness"
@@ -3,7 +3,8 @@ import os
 import testinfra.utils.ansible_runner

 # This test runs across all servers
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("db0-default")
+hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("postgres")
+testinfra_hosts = [i for i in hosts if i.startswith('db0')]

 def test_postgresql_create_db(host):
 ...
@@ -3,7 +3,8 @@ import os
 import testinfra.utils.ansible_runner

 # This test runs across all servers
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("db1-default")
+hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("postgres")
+testinfra_hosts = [i for i in hosts if i.startswith('db1')]

 def test_postgresql_create_db(host):
 ...
@@ -5,7 +5,8 @@ import testinfra.utils.ansible_runner
 import time

-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("db0-default")
+hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("postgres")
+testinfra_hosts = [i for i in hosts if i.startswith('db0')]

 def test_postgresql_check_shutdown_primary(host):
 ...
@@ -3,7 +3,8 @@ import os
 import testinfra.utils.ansible_runner

-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("db1-default")
+hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("postgres")
+testinfra_hosts = [i for i in hosts if i.startswith('db1')]

 def test_postgresql_insert_new_master(host):
 ...
@@ -3,7 +3,10 @@ import os
 import testinfra.utils.ansible_runner
 import time

-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("db0-default")
+hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_hosts("postgres")
+testinfra_hosts = [i for i in hosts if i.startswith('db0')]

 def test_postgresql_delete_data(host):
 ...

@@ -16,9 +19,13 @@ def test_postgresql_delete_data(host):
 def test_postgresql_launch_repmgr_sync(host):
     ''' sync data with the primary server using repmgr '''
-    current_master = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_host("db1-default")
+    current_m = [i for i in hosts if i.startswith('db1')]
+    current_master = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLECULE_INVENTORY_FILE"]).get_host(current_m[0])
     current_master_ip = current_master.interface('eth0').addresses[0]
-    rep_mgr_command = "repmgr -f /etc/postgresql/11/main/repmgr.conf --force --verbose standby clone -h " + current_master_ip + " -d repmgr -U repmgr -c"
+    rep_mgr_command = "repmgr -f /etc/postgresql/11/main/repmgr.conf --force --verbose standby clone -h " + str(current_master_ip) + " -d repmgr -U repmgr -c"
     s = host.ansible("command", rep_mgr_command, become=True, become_user='postgres', check=False)
     assert s['changed']
 ...
@@ -11,12 +11,12 @@ testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ["MOLEC

 def test_postgresql_check_status_after_shutdown(host):
     ''' check repmgr status across servers after primary change and server reintegration '''
-    if host.ansible.get_variables()["inventory_hostname"].startswith("db0-default"):
+    if host.ansible.get_variables()["inventory_hostname"].startswith("db0"):
         data = commons.get_status(host)
         assert data == "standby"
-    if host.ansible.get_variables()["inventory_hostname"].startswith("db1-default"):
+    if host.ansible.get_variables()["inventory_hostname"].startswith("db1"):
         data = commons.get_status(host)
         assert data == "primary"
-    if host.ansible.get_variables()["inventory_hostname"].startswith("db2-default"):
+    if host.ansible.get_variables()["inventory_hostname"].startswith("db2"):
         data = commons.get_status(host)
         assert data == "witness"
#!/usr/bin/env ansible-playbook
---
- name: PYTHON
  hosts: all
  gather_facts: false
  tasks:
    - name: ensure python3 is installed
      register: python_install
      changed_when: "'es_pyinstall' in python_install.stdout_lines"
      raw: command -v python3 || echo es_pyinstall && apt update && apt install -y python3-minimal python3-apt

- name: Converge
  hosts: all
  environment:
    HOSTALIASES: /etc/hosts.aliases
  pre_tasks:
    - name: check for the hosts aliases file
      stat:
        path: /etc/hosts.aliases
      register: etc_hosts_debian
    - name: show the hosts aliases stat result
      debug:
        msg: "{{ etc_hosts_debian }}"
    - name: add all hosts to hostaliases
      loop: "{{ query('inventory_hostnames', 'all:!localhost') }}"
      lineinfile:
        path: /etc/hosts.aliases
        line: "{{ hostvars[item]['ansible_default_ipv4']['address'] }} {{ item }}"
        backup: true
        create: yes
      tags: always
      when: not etc_hosts_debian.stat.exists
    - name: add all hosts to hostfile
      shell: "/bin/cat /etc/hosts.aliases >> /etc/hosts"
      when: not etc_hosts_debian.stat.exists
      tags: always
    - name: check running in a docker container
      register: check_if_docker
      stat:
        path: /.dockerenv
    - name: set docker flag variable
      set_fact:
        in_docker: "{{ check_if_docker.stat.exists | d(false) }}"
  post_tasks:
    - name: deploy letsencrypt certificate
      when: letsencrypt_enabled | d(false)
      include_role:
        name: letsencrypt
    - name: configure network
      when: network_apply | d(false)
      include_role:
        name: network
    - name: configure proxy
      when: proxy_apply | d(false)
      include_role:
        name: proxy

- import_playbook: ../../playbooks/site.yml
...
---
driver:
  name: docker
platforms:
  - name: ms0-${CI_PIPELINE_ID:-default}
    environment:
      HOSTALIASES: /etc/hosts.aliases
    image: registry.ubicast.net/docker/debian-systemd:buster
    command: /lib/systemd/systemd
    privileged: true
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
      - data:/data:rw
    tmpfs:
      - /tmp
      - /run
    groups:
      - mediaserver
      - wowza
      - celerity
  - name: ms1-${CI_PIPELINE_ID:-default}
    environment:
      HOSTALIASES: /etc/hosts.aliases
    image: registry.ubicast.net/docker/debian-systemd:buster
    command: /lib/systemd/systemd
    privileged: true
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
      - data:/data:rw
    tmpfs:
      - /tmp
      - /run
    groups:
      - mediaserver
      - wowza
  - name: ms2-${CI_PIPELINE_ID:-default}
    image: registry.ubicast.net/docker/debian-systemd:buster
    environment:
      HOSTALIASES: /etc/hosts.aliases
    command: /lib/systemd/systemd
    privileged: true
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
      - data:/data:rw
    tmpfs:
      - /tmp
      - /run
    groups:
      - mediaserver
      - wowza
  - name: db0-${CI_PIPELINE_ID:-default}
    image: registry.ubicast.net/docker/debian-systemd:buster
    command: /lib/systemd/systemd
    privileged: true
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    tmpfs:
      - /tmp
      - /run
    groups:
      - postgres
  - name: db1-${CI_PIPELINE_ID:-default}
    image: registry.ubicast.net/docker/debian-systemd:buster
    command: /lib/systemd/systemd
    privileged: true
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    tmpfs:
      - /tmp
      - /run
    groups:
      - postgres
  - name: db2-${CI_PIPELINE_ID:-default}
    image: registry.ubicast.net/docker/debian-systemd:buster
    command: /lib/systemd/systemd
    privileged: true
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    tmpfs:
      - /tmp
      - /run
    groups:
      - postgres
provisioner:
  name: ansible
  options:
    D: true
  inventory:
    group_vars:
      wowza:
        server_wowza_url: localhost
      postgres:
        repmgr_password: "testrepmgr"
  env:
    ANSIBLE_ROLES_PATH: ../../roles
    ANSIBLE_LIBRARY: ../../library
    ANSIBLE_ACTION_PLUGINS: ../../plugins/action
    ANSIBLE_PYTHON_INTERPRETER: /usr/bin/python3
    SKYREACH_SYSTEM_KEY: s1121eb6e7593525bf3e0302586c82d2
verifier:
  name: testinfra
  options:
    verbose: true