Diffstat (limited to 'roles')
34 files changed, 1573 insertions, 33 deletions
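This changeset groups into three themes: new preflight health checks (disk_availability.py, memory_availability.py, plus an is_active gate for package_version.py), GlusterFS-backed storage for the hosted registry (the new openshift_storage_glusterfs role), and swap disabling in the node install/upgrade paths. The new checks below all follow one pattern: a class-level is_active() keyed off the host's inventory groups, and a run() that returns a dict with failed/msg on error. A minimal, self-contained sketch of that pattern — the get_var helper and the recommendation table here are simplified stand-ins for the real ones in the openshift_checks package:

```python
# Illustrative sketch only: stand-ins for openshift_checks.OpenShiftCheck
# and openshift_checks.get_var, showing the is_active()/run() control flow
# used by the new disk and memory checks in this changeset.

RECOMMENDED_MEMORY_BYTES = {"masters": 16 * 10**9, "nodes": 8 * 10**9, "etcd": 20 * 10**9}


def get_var(task_vars, *keys, **kwargs):
    """Walk nested dicts, honoring an optional default= keyword."""
    value = task_vars
    for key in keys:
        try:
            value = value[key]
        except (KeyError, TypeError):
            if "default" in kwargs:
                return kwargs["default"]
            raise
    return value


class MemoryAvailabilitySketch(object):
    @classmethod
    def is_active(cls, task_vars):
        # Only run on hosts that belong to a group with a recommendation.
        group_names = get_var(task_vars, "group_names", default=[])
        return bool(set(group_names).intersection(RECOMMENDED_MEMORY_BYTES))

    def run(self, task_vars):
        group_names = get_var(task_vars, "group_names")
        total_bytes = get_var(task_vars, "ansible_memtotal_mb") * 10**6
        # A host in several groups must satisfy the largest recommendation.
        min_bytes = max(RECOMMENDED_MEMORY_BYTES.get(name, 0) for name in group_names)
        if total_bytes < min_bytes:
            return {"failed": True, "msg": "below recommended memory"}
        return {}


if __name__ == "__main__":
    host = dict(group_names=["masters", "nodes"], ansible_memtotal_mb=8192)
    print(MemoryAvailabilitySketch.is_active(host))   # True
    print(MemoryAvailabilitySketch().run(host))       # fails: masters need 16 GB
```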
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 index 9151dd0bd..1b5598f46 100644 --- a/roles/etcd/templates/etcd.conf.j2 +++ b/roles/etcd/templates/etcd.conf.j2 @@ -62,7 +62,7 @@ ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }} {% endif -%} #[logging] -ETCD_DEBUG="{{ etcd_debug | default(false) | string }}" +ETCD_DEBUG="{{ etcd_debug | default(false) | bool | string }}" {% if etcd_log_package_levels is defined %} ETCD_LOG_PACKAGE_LEVELS="{{ etcd_log_package_levels }}" {% endif %} diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service index ae059b549..e4d1b57e6 100644 --- a/roles/etcd/templates/etcd.docker.service +++ b/roles/etcd/templates/etcd.docker.service @@ -7,7 +7,7 @@ PartOf=docker.service [Service] EnvironmentFile=/etc/etcd/etcd.conf ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }} -ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }} +ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --env-file=/etc/sysconfig/etcd --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }} ExecStop=/usr/bin/docker stop {{ etcd_service }} SyslogIdentifier=etcd_container Restart=always diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 7edf141e5..adeb85c3f 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -2155,6 +2155,10 @@ class OpenShiftFacts(object): nfs=dict( directory='/exports', options='*(rw,root_squash)'), + glusterfs=dict( + endpoints='glusterfs-registry-endpoints', + path='glusterfs-registry-volume', + readOnly=False), host=None, access=dict( modes=['ReadWriteMany'] diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py new file mode 100644 index 000000000..c2792a0fe --- /dev/null +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -0,0 +1,65 @@ +# pylint: disable=missing-docstring +from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var +from openshift_checks.mixins import NotContainerizedMixin + + +class DiskAvailability(NotContainerizedMixin, OpenShiftCheck): + """Check that recommended disk space is available before a first-time install.""" + + name = "disk_availability" + tags = ["preflight"] + + # Values taken from the official installation documentation: + # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements + recommended_disk_space_bytes = { + "masters": 40 * 10**9, + "nodes": 15 * 10**9, + "etcd": 20 * 10**9, + } + + @classmethod + def is_active(cls, task_vars): + """Skip hosts that do not have recommended disk space requirements.""" + group_names = get_var(task_vars, "group_names", default=[]) + has_disk_space_recommendation = bool(set(group_names).intersection(cls.recommended_disk_space_bytes)) + return super(DiskAvailability, cls).is_active(task_vars) and has_disk_space_recommendation + + def run(self, tmp, task_vars): + group_names = get_var(task_vars, "group_names") + ansible_mounts = get_var(task_vars, "ansible_mounts") + + min_free_bytes = max(self.recommended_disk_space_bytes.get(name, 0) for name 
in group_names) + free_bytes = self.openshift_available_disk(ansible_mounts) + + if free_bytes < min_free_bytes: + return { + 'failed': True, + 'msg': ( + 'Available disk space ({:.1f} GB) for the volume containing ' + '"/var" is below minimum recommended space ({:.1f} GB)' + ).format(float(free_bytes) / 10**9, float(min_free_bytes) / 10**9) + } + + return {} + + @staticmethod + def openshift_available_disk(ansible_mounts): + """Determine the available disk space for an OpenShift installation. + + ansible_mounts should be a list of dicts like the 'setup' Ansible module + returns. + """ + # priority list in descending order + supported_mnt_paths = ["/var", "/"] + available_mnts = {mnt.get("mount"): mnt for mnt in ansible_mounts} + + try: + for path in supported_mnt_paths: + if path in available_mnts: + return available_mnts[path]["size_available"] + except KeyError: + pass + + paths = ''.join(sorted(available_mnts)) or 'none' + msg = "Unable to determine available disk space. Paths mounted: {}.".format(paths) + raise OpenShiftCheckException(msg) diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py new file mode 100644 index 000000000..28805dc37 --- /dev/null +++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py @@ -0,0 +1,44 @@ +# pylint: disable=missing-docstring +from openshift_checks import OpenShiftCheck, get_var + + +class MemoryAvailability(OpenShiftCheck): + """Check that recommended memory is available.""" + + name = "memory_availability" + tags = ["preflight"] + + # Values taken from the official installation documentation: + # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements + recommended_memory_bytes = { + "masters": 16 * 10**9, + "nodes": 8 * 10**9, + "etcd": 20 * 10**9, + } + + @classmethod + def is_active(cls, task_vars): + """Skip hosts that do not have recommended memory requirements.""" + group_names = get_var(task_vars, "group_names", default=[]) + has_memory_recommendation = bool(set(group_names).intersection(cls.recommended_memory_bytes)) + return super(MemoryAvailability, cls).is_active(task_vars) and has_memory_recommendation + + def run(self, tmp, task_vars): + group_names = get_var(task_vars, "group_names") + total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * 10**6 + + min_memory_bytes = max(self.recommended_memory_bytes.get(name, 0) for name in group_names) + + if total_memory_bytes < min_memory_bytes: + return { + 'failed': True, + 'msg': ( + 'Available memory ({available:.1f} GB) ' + 'below recommended value ({recommended:.1f} GB)' + ).format( + available=float(total_memory_bytes) / 10**9, + recommended=float(min_memory_bytes) / 10**9, + ), + } + + return {} diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py index 657e15160..20d160eaf 100644 --- a/roles/openshift_health_checker/openshift_checks/mixins.py +++ b/roles/openshift_health_checker/openshift_checks/mixins.py @@ -1,4 +1,8 @@ -# pylint: disable=missing-docstring +# pylint: disable=missing-docstring,too-few-public-methods +""" +Mixin classes meant to be used with subclasses of OpenShiftCheck. +""" + from openshift_checks import get_var @@ -7,12 +11,5 @@ class NotContainerizedMixin(object): @classmethod def is_active(cls, task_vars): - return ( - # This mixin is meant to be used with subclasses of OpenShiftCheck. 
- super(NotContainerizedMixin, cls).is_active(task_vars) and - not cls.is_containerized(task_vars) - ) - - @staticmethod - def is_containerized(task_vars): - return get_var(task_vars, "openshift", "common", "is_containerized") + is_containerized = get_var(task_vars, "openshift", "common", "is_containerized") + return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py index cca2d8b75..682f6bd40 100644 --- a/roles/openshift_health_checker/openshift_checks/package_version.py +++ b/roles/openshift_health_checker/openshift_checks/package_version.py @@ -9,6 +9,13 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): name = "package_version" tags = ["preflight"] + @classmethod + def is_active(cls, task_vars): + """Skip hosts that do not have package requirements.""" + group_names = get_var(task_vars, "group_names", default=[]) + master_or_node = 'masters' in group_names or 'nodes' in group_names + return super(PackageVersion, cls).is_active(task_vars) and master_or_node + def run(self, tmp, task_vars): args = { "requested_openshift_release": get_var(task_vars, "openshift_release", default=''), diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py new file mode 100644 index 000000000..970b474d7 --- /dev/null +++ b/roles/openshift_health_checker/test/disk_availability_test.py @@ -0,0 +1,155 @@ +import pytest + +from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckException + + +@pytest.mark.parametrize('group_names,is_containerized,is_active', [ + (['masters'], False, True), + # ensure check is skipped on containerized installs + (['masters'], True, False), + (['nodes'], False, True), + (['etcd'], False, True), + (['masters', 'nodes'], False, True), + (['masters', 'etcd'], False, True), + ([], False, False), + (['lb'], False, False), + (['nfs'], False, False), +]) +def test_is_active(group_names, is_containerized, is_active): + task_vars = dict( + group_names=group_names, + openshift=dict(common=dict(is_containerized=is_containerized)), + ) + assert DiskAvailability.is_active(task_vars=task_vars) == is_active + + +@pytest.mark.parametrize('ansible_mounts,extra_words', [ + ([], ['none']), # empty ansible_mounts + ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths + ([{'mount': '/var'}], ['/var']), # missing size_available +]) +def test_cannot_determine_available_disk(ansible_mounts, extra_words): + task_vars = dict( + group_names=['masters'], + ansible_mounts=ansible_mounts, + ) + check = DiskAvailability(execute_module=fake_execute_module) + + with pytest.raises(OpenShiftCheckException) as excinfo: + check.run(tmp=None, task_vars=task_vars) + + for word in 'determine available disk'.split() + extra_words: + assert word in str(excinfo.value) + + +@pytest.mark.parametrize('group_names,ansible_mounts', [ + ( + ['masters'], + [{ + 'mount': '/', + 'size_available': 40 * 10**9 + 1, + }], + ), + ( + ['nodes'], + [{ + 'mount': '/', + 'size_available': 15 * 10**9 + 1, + }], + ), + ( + ['etcd'], + [{ + 'mount': '/', + 'size_available': 20 * 10**9 + 1, + }], + ), + ( + ['etcd'], + [{ + # not enough space on / ... + 'mount': '/', + 'size_available': 0, + }, { + # ... 
but enough on /var + 'mount': '/var', + 'size_available': 20 * 10**9 + 1, + }], + ), +]) +def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts): + task_vars = dict( + group_names=group_names, + ansible_mounts=ansible_mounts, + ) + + check = DiskAvailability(execute_module=fake_execute_module) + result = check.run(tmp=None, task_vars=task_vars) + + assert not result.get('failed', False) + + +@pytest.mark.parametrize('group_names,ansible_mounts,extra_words', [ + ( + ['masters'], + [{ + 'mount': '/', + 'size_available': 1, + }], + ['0.0 GB'], + ), + ( + ['nodes'], + [{ + 'mount': '/', + 'size_available': 1 * 10**9, + }], + ['1.0 GB'], + ), + ( + ['etcd'], + [{ + 'mount': '/', + 'size_available': 1, + }], + ['0.0 GB'], + ), + ( + ['nodes', 'masters'], + [{ + 'mount': '/', + # enough space for a node, not enough for a master + 'size_available': 15 * 10**9 + 1, + }], + ['15.0 GB'], + ), + ( + ['etcd'], + [{ + # enough space on / ... + 'mount': '/', + 'size_available': 20 * 10**9 + 1, + }, { + # .. but not enough on /var + 'mount': '/var', + 'size_available': 0, + }], + ['0.0 GB'], + ), +]) +def test_fails_with_insufficient_disk_space(group_names, ansible_mounts, extra_words): + task_vars = dict( + group_names=group_names, + ansible_mounts=ansible_mounts, + ) + + check = DiskAvailability(execute_module=fake_execute_module) + result = check.run(tmp=None, task_vars=task_vars) + + assert result['failed'] + for word in 'below recommended'.split() + extra_words: + assert word in result['msg'] + + +def fake_execute_module(*args): + raise AssertionError('this function should not be called') diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py new file mode 100644 index 000000000..e161a5b9e --- /dev/null +++ b/roles/openshift_health_checker/test/memory_availability_test.py @@ -0,0 +1,91 @@ +import pytest + +from openshift_checks.memory_availability import MemoryAvailability + + +@pytest.mark.parametrize('group_names,is_active', [ + (['masters'], True), + (['nodes'], True), + (['etcd'], True), + (['masters', 'nodes'], True), + (['masters', 'etcd'], True), + ([], False), + (['lb'], False), + (['nfs'], False), +]) +def test_is_active(group_names, is_active): + task_vars = dict( + group_names=group_names, + ) + assert MemoryAvailability.is_active(task_vars=task_vars) == is_active + + +@pytest.mark.parametrize('group_names,ansible_memtotal_mb', [ + ( + ['masters'], + 17200, + ), + ( + ['nodes'], + 8200, + ), + ( + ['etcd'], + 22200, + ), + ( + ['masters', 'nodes'], + 17000, + ), +]) +def test_succeeds_with_recommended_memory(group_names, ansible_memtotal_mb): + task_vars = dict( + group_names=group_names, + ansible_memtotal_mb=ansible_memtotal_mb, + ) + + check = MemoryAvailability(execute_module=fake_execute_module) + result = check.run(tmp=None, task_vars=task_vars) + + assert not result.get('failed', False) + + +@pytest.mark.parametrize('group_names,ansible_memtotal_mb,extra_words', [ + ( + ['masters'], + 0, + ['0.0 GB'], + ), + ( + ['nodes'], + 100, + ['0.1 GB'], + ), + ( + ['etcd'], + -1, + ['0.0 GB'], + ), + ( + ['nodes', 'masters'], + # enough memory for a node, not enough for a master + 11000, + ['11.0 GB'], + ), +]) +def test_fails_with_insufficient_memory(group_names, ansible_memtotal_mb, extra_words): + task_vars = dict( + group_names=group_names, + ansible_memtotal_mb=ansible_memtotal_mb, + ) + + check = MemoryAvailability(execute_module=fake_execute_module) + result = check.run(tmp=None, 
task_vars=task_vars) + + assert result['failed'] + for word in 'below recommended'.split() + extra_words: + assert word in result['msg'] + + +def fake_execute_module(*args): + raise AssertionError('this function should not be called') diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index c6889ee9b..196d9816a 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -1,3 +1,5 @@ +import pytest + from openshift_checks.package_version import PackageVersion @@ -22,3 +24,23 @@ def test_package_version(): check = PackageVersion(execute_module=execute_module) result = check.run(tmp=None, task_vars=task_vars) assert result is return_value + + +@pytest.mark.parametrize('group_names,is_containerized,is_active', [ + (['masters'], False, True), + # ensure check is skipped on containerized installs + (['masters'], True, False), + (['nodes'], False, True), + (['masters', 'nodes'], False, True), + (['masters', 'etcd'], False, True), + ([], False, False), + (['etcd'], False, False), + (['lb'], False, False), + (['nfs'], False, False), +]) +def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active): + task_vars = dict( + group_names=group_names, + openshift=dict(common=dict(is_containerized=is_containerized)), + ) + assert PackageVersion.is_active(task_vars=task_vars) == is_active diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index 0b8042473..6e691c26f 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -109,7 +109,7 @@ type: persistentVolumeClaim claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim" when: - - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack'] + - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs'] - name: Create OpenShift registry oc_adm_registry: @@ -123,3 +123,7 @@ volume_mounts: "{{ openshift_hosted_registry_volumes }}" edits: "{{ openshift_hosted_registry_edits }}" force: "{{ True|bool in openshift_hosted_registry_force }}" + +- include: storage/glusterfs.yml + when: + - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml new file mode 100644 index 000000000..b18b24266 --- /dev/null +++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml @@ -0,0 +1,51 @@ +--- +- name: Wait for registry pods + oc_obj: + namespace: "{{ openshift_hosted_registry_namespace }}" + state: list + kind: pod + selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}" + register: registry_pods + until: + - "registry_pods.results.results[0]['items'] | count > 0" + # There must be as many matching pods with 'Ready' status True as there are expected replicas + - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int" + delay: 10 + retries: "{{ (600 / 10) | int }}" + +- name: Determine registry fsGroup + set_fact: + openshift_hosted_registry_fsgroup: "{{ 
registry_pods.results.results[0]['items'][0].spec.securityContext.fsGroup }}" + +- name: Create temp mount directory + command: mktemp -d /tmp/openshift-glusterfs-registry-XXXXXX + register: mktemp + changed_when: False + check_mode: no + +- name: Mount registry volume + mount: + state: mounted + fstype: glusterfs + src: "{{ groups.oo_glusterfs_to_config[0] }}:/{{ openshift.hosted.registry.storage.glusterfs.path }}" + name: "{{ mktemp.stdout }}" + +- name: Set registry volume permissions + file: + dest: "{{ mktemp.stdout }}" + state: directory + group: "{{ openshift_hosted_registry_fsgroup }}" + mode: "2775" + recurse: True + +- name: Unmount registry volume + mount: + state: unmounted + name: "{{ mktemp.stdout }}" + +- name: Delete temp mount directory + file: + dest: "{{ mktemp.stdout }}" + state: absent + changed_when: False + check_mode: no diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2 index 16185fc1d..f89855bf5 100644 --- a/roles/openshift_logging/templates/es.j2 +++ b/roles/openshift_logging/templates/es.j2 @@ -95,6 +95,13 @@ spec: readOnly: true - name: elasticsearch-storage mountPath: /elasticsearch/persistent + readinessProbe: + exec: + command: + - "/usr/share/elasticsearch/probe/readiness.sh" + initialDelaySeconds: 5 + timeoutSeconds: 4 + periodSeconds: 5 volumes: - name: elasticsearch secret: diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml index 9679d209a..92e68a0a3 100644 --- a/roles/openshift_logging/vars/openshift-enterprise.yml +++ b/roles/openshift_logging/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default(openshift_release | default ('3.5.0') ) }}" +__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('3.6.0') }}" diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml index f28c3ce48..b20957550 100644 --- a/roles/openshift_metrics/vars/openshift-enterprise.yml +++ b/roles/openshift_metrics/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default(openshift_release | default ('3.5.0') ) }}" +__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('3.6.0') }}" diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 626248306..59003bbf9 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -34,6 +34,33 @@ dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}" env_vars: "{{ openshift_node_env_vars | default(None) }}" +# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory +- name: Check for swap usage + command: grep "^[^#].*swap" /etc/fstab + # grep: match any lines which don't begin with '#' and contain 'swap' + # command: swapon --summary + # Alternate option, however if swap entries are in fstab, swap will be + # enabled at boot. 
Grepping fstab should catch a condition when swap was + # disabled, but the fstab entries were not removed. + changed_when: false + failed_when: false + register: swap_result + + # Disable Swap Block +- block: + + - name: Disable swap + command: swapoff --all + + - name: Remove swap entries from /etc/fstab + lineinfile: + dest: /etc/fstab + regexp: 'swap' + state: absent + + when: swap_result.stdout_lines | length > 0 + # End Disable Swap Block + # We have to add tuned-profiles in the same transaction otherwise we run into depsolving # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging. - name: Install Node package diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml index cd2f362aa..2a36d8945 100644 --- a/roles/openshift_node_upgrade/meta/main.yml +++ b/roles/openshift_node_upgrade/meta/main.yml @@ -10,4 +10,5 @@ galaxy_info: versions: - 7 dependencies: +- role: lib_utils - role: openshift_common diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index 6ae8dbc12..01bd3bf38 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -84,6 +84,33 @@ value: "{{ oreg_url }}" when: oreg_url is defined +# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory +- name: Check for swap usage + command: grep "^[^#].*swap" /etc/fstab + # grep: match any lines which don't begin with '#' and contain 'swap' + # command: swapon --summary + # Alternate option, however if swap entries are in fstab, swap will be + # enabled at boot. Grepping fstab should catch a condition when swap was + # disabled, but the fstab entries were not removed. 
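The fstab grep above (duplicated for the upgrade path in openshift_node_upgrade just below) encodes a deliberate choice: detect swap from /etc/fstab rather than from `swapon --summary`, so a host where swap was already switched off but the fstab entry remains still gets cleaned up — that entry would re-enable swap at boot. A rough Python rendering of the detection rule, using a made-up fstab snippet:

```python
# Sketch of the rule behind `grep "^[^#].*swap" /etc/fstab`: flag any
# non-comment fstab line mentioning swap, even if swap is currently off.
import re

SWAP_LINE = re.compile(r"^[^#].*swap")  # same pattern the task passes to grep


def fstab_swap_entries(fstab_text):
    return [line for line in fstab_text.splitlines() if SWAP_LINE.match(line)]


if __name__ == "__main__":
    sample = (  # hypothetical /etc/fstab contents
        "/dev/mapper/rhel-root /     xfs  defaults 0 0\n"
        "#/dev/mapper/rhel-swap swap swap defaults 0 0\n"  # commented out: ignored
        "/dev/mapper/rhel-swap swap  swap defaults 0 0\n"  # live entry: flagged
    )
    print(fstab_swap_entries(sample))  # one entry -> the play would disable swap
```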
+ changed_when: false + failed_when: false + register: swap_result + + # Disable Swap Block +- block: + + - name: Disable swap + command: swapoff --all + + - name: Remove swap entries from /etc/fstab + lineinfile: + dest: /etc/fstab + regexp: 'swap' + state: absent + + when: swap_result.stdout_lines | length > 0 + # End Disable Swap Block + - name: Restart rpm node service service: name: "{{ openshift.common.service_type }}-node" diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md new file mode 100644 index 000000000..cf0fb94c9 --- /dev/null +++ b/roles/openshift_storage_glusterfs/README.md @@ -0,0 +1,60 @@ +OpenShift GlusterFS Cluster +=========================== + +OpenShift GlusterFS Cluster Installation + +Requirements +------------ + +* Ansible 2.2 + +Role Variables +-------------- + +From this role: + +| Name | Default value | | +|--------------------------------------------------|-------------------------|-----------------------------------------| +| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready +| openshift_storage_glusterfs_namespace | 'default' | Namespace in which to create GlusterFS resources +| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized +| openshift_storage_glusterfs_nodeselector | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode +| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' +| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods +| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.** +| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized +| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7' +| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods +| openshift_storage_glusterfs_heketi_admin_key | '' | String to use as secret key for performing heketi commands as admin +| openshift_storage_glusterfs_heketi_user_key | '' | String to use as secret key for performing heketi commands as user that can only view or modify volumes +| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi +| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode +| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` + +Dependencies +------------ + +* os_firewall +* openshift_hosted_facts +* openshift_repos +* lib_openshift + +Example Playbook +---------------- + +``` +- name: Configure GlusterFS hosts + hosts: oo_first_master + roles: + - role: openshift_storage_glusterfs +``` + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Jose A. 
Rivera (jarrpa@redhat.com) diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml new file mode 100644 index 000000000..ade850747 --- /dev/null +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -0,0 +1,17 @@ +--- +openshift_storage_glusterfs_timeout: 300 +openshift_storage_glusterfs_namespace: 'default' +openshift_storage_glusterfs_is_native: True +openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}" +openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" +openshift_storage_glusterfs_version: 'latest' +openshift_storage_glusterfs_wipe: False +openshift_storage_glusterfs_heketi_is_native: True +openshift_storage_glusterfs_heketi_is_missing: True +openshift_storage_glusterfs_heketi_deploy_is_missing: True +openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}" +openshift_storage_glusterfs_heketi_version: 'latest' +openshift_storage_glusterfs_heketi_admin_key: '' +openshift_storage_glusterfs_heketi_user_key: '' +openshift_storage_glusterfs_heketi_topology_load: True +openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}" diff --git a/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml new file mode 100644 index 000000000..c9945be13 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml @@ -0,0 +1,115 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: deploy-heketi + labels: + glusterfs: heketi-template + deploy-heketi: support + annotations: + description: Bootstrap Heketi installation + tags: glusterfs,heketi,installation +labels: + template: deploy-heketi +objects: +- kind: Service + apiVersion: v1 + metadata: + name: deploy-heketi + labels: + glusterfs: deploy-heketi-service + deploy-heketi: support + annotations: + description: Exposes Heketi service + spec: + ports: + - name: deploy-heketi + port: 8080 + targetPort: 8080 + selector: + name: deploy-heketi +- kind: Route + apiVersion: v1 + metadata: + name: deploy-heketi + labels: + glusterfs: deploy-heketi-route + deploy-heketi: support + spec: + to: + kind: Service + name: deploy-heketi +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: deploy-heketi + labels: + glusterfs: deploy-heketi-dc + deploy-heketi: support + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + name: deploy-heketi + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: deploy-heketi + labels: + name: deploy-heketi + glusterfs: deploy-heketi-pod + deploy-heketi: support + spec: + serviceAccountName: heketi-service-account + containers: + - name: deploy-heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: kubernetes + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + readinessProbe: + timeoutSeconds: 3 + 
initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: IMAGE_NAME + displayName: heketi container name + required: True +- name: IMAGE_VERSION + displayName: heketi container version + required: True
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml new file mode 100644 index 000000000..3f8d8f507 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: glusterfs-registry-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml new file mode 100644 index 000000000..c66705752 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml @@ -0,0 +1,128 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: glusterfs + labels: + glusterfs: template + annotations: + description: GlusterFS DaemonSet template + tags: glusterfs +objects: +- kind: DaemonSet + apiVersion: extensions/v1beta1 + metadata: + name: glusterfs + labels: + glusterfs: daemonset + annotations: + description: GlusterFS DaemonSet + tags: glusterfs + spec: + selector: + matchLabels: + glusterfs-node: pod + template: + metadata: + name: glusterfs + labels: + glusterfs-node: pod + spec: + nodeSelector: + storagenode: glusterfs + hostNetwork: true + containers: + - name: glusterfs + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + volumeMounts: + - name: glusterfs-heketi + mountPath: "/var/lib/heketi" + - name: glusterfs-run + mountPath: "/run" + - name: glusterfs-lvm + mountPath: "/run/lvm" + - name: glusterfs-etc + mountPath: "/etc/glusterfs" + - name: glusterfs-logs + mountPath: "/var/log/glusterfs" + - name: glusterfs-config + mountPath: "/var/lib/glusterd" + - name: glusterfs-dev + mountPath: "/dev" + - name: glusterfs-misc + mountPath: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + mountPath: "/sys/fs/cgroup" + readOnly: true + - name: glusterfs-ssl + mountPath: "/etc/ssl" + readOnly: true + securityContext: + capabilities: {} + privileged: true + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 100 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 100 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + resources: {} + terminationMessagePath: "/dev/termination-log" + volumes: + - name: glusterfs-heketi + hostPath: + path: "/var/lib/heketi" + - name: glusterfs-run + emptyDir: {} + - name: glusterfs-lvm + hostPath: + path: "/run/lvm" + - name: glusterfs-etc + hostPath: + path: "/etc/glusterfs" + - name: glusterfs-logs + hostPath: + path: "/var/log/glusterfs" + - name: glusterfs-config + hostPath:
+ path: "/var/lib/glusterd" + - name: glusterfs-dev + hostPath: + path: "/dev" + - name: glusterfs-misc + hostPath: + path: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + hostPath: + path: "/sys/fs/cgroup" + - name: glusterfs-ssl + hostPath: + path: "/etc/ssl" + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} +parameters: +- name: IMAGE_NAME + displayName: GlusterFS container name + required: True +- name: IMAGE_VERSION + displayName: GlusterFS container version + required: True
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml new file mode 100644 index 000000000..df045c170 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml @@ -0,0 +1,113 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: heketi + labels: + glusterfs: heketi-template + annotations: + description: Heketi service deployment template + tags: glusterfs,heketi +labels: + template: heketi +objects: +- kind: Service + apiVersion: v1 + metadata: + name: heketi + labels: + glusterfs: heketi-service + annotations: + description: Exposes Heketi service + spec: + ports: + - name: heketi + port: 8080 + targetPort: 8080 + selector: + glusterfs: heketi-pod +- kind: Route + apiVersion: v1 + metadata: + name: heketi + labels: + glusterfs: heketi-route + spec: + to: + kind: Service + name: heketi +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: heketi + labels: + glusterfs: heketi-dc + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: heketi-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: heketi + labels: + glusterfs: heketi-pod + spec: + serviceAccountName: heketi-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: kubernetes + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + glusterfs: + endpoints: heketi-storage-endpoints + path: heketidbstorage +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: IMAGE_NAME + displayName: heketi container name + required: True +- name: IMAGE_VERSION + displayName: heketi container version + required: True
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py new file mode 100644 index 000000000..88801e487 --- /dev/null +++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py @@ -0,0 +1,23 @@ +''' + OpenShift Storage GlusterFS class that provides useful
filters used in GlusterFS +''' + + +def map_from_pairs(source, delim="="): + ''' Return a dict from a comma-separated string of delim-separated pairs, e.g. "a=b,c=d" -> {'a': 'b', 'c': 'd'} ''' + if source == '': + return dict() + + return dict(item.split(delim) for item in source.split(",")) + + +# pylint: disable=too-few-public-methods +class FilterModule(object): + ''' OpenShift Storage GlusterFS Filters ''' + + # pylint: disable=no-self-use, too-few-public-methods + def filters(self): + ''' Returns the names of the filters provided by this class ''' + return { + 'map_from_pairs': map_from_pairs + }
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml new file mode 100644 index 000000000..aab9851f9 --- /dev/null +++ b/roles/openshift_storage_glusterfs/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Jose A. Rivera + description: OpenShift GlusterFS Cluster + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.2 + platforms: + - name: EL + versions: + - 7 +dependencies: +- role: openshift_hosted_facts +- role: openshift_repos +- role: lib_openshift
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml new file mode 100644 index 000000000..26ca5eebf --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -0,0 +1,107 @@ +--- +- assert: + that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1" + msg: Only one GlusterFS nodeselector key pair should be provided + +- assert: + that: "groups.oo_glusterfs_to_config | count >= 3" + msg: There must be at least three GlusterFS nodes specified + +- name: Delete pre-existing GlusterFS resources + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: "template,daemonset" + name: glusterfs + state: absent + when: openshift_storage_glusterfs_wipe + +- name: Unlabel any existing GlusterFS nodes + oc_label: + name: "{{ item }}" + kind: node + state: absent + labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" + with_items: "{{ groups.all }}" + when: openshift_storage_glusterfs_wipe + +- name: Delete pre-existing GlusterFS config + file: + path: /var/lib/glusterd + state: absent + delegate_to: "{{ item }}" + with_items: "{{ groups.oo_glusterfs_to_config }}" + when: openshift_storage_glusterfs_wipe + +- name: Get GlusterFS storage devices state + command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}" + register: devices_info + delegate_to: "{{ item }}" + with_items: "{{ groups.oo_glusterfs_to_config }}" + failed_when: False + when: openshift_storage_glusterfs_wipe
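When openshift_storage_glusterfs_wipe is set, the pvdisplay output registered above is turned into a chain of vgremove/pvremove commands by the "Clear GlusterFS storage device contents" task that follows. A rough Python rendering of that inline Jinja loop, with hypothetical pvdisplay -C output:

```python
# Sketch of the command generation in the wipe task below: remove the
# volume group first (when the PV belongs to one), then the PV itself.
def wipe_commands(pvdisplay_lines):
    commands = []
    for line in pvdisplay_lines:
        fields = line.split()
        if not fields:
            continue
        if len(fields) > 1:        # second column is the owning VG
            commands.append("vgremove -fy {}".format(fields[1]))
        commands.append("pvremove -fy {}".format(fields[0]))
    return commands


if __name__ == "__main__":
    sample = ["  /dev/sdb docker-vg", "  /dev/sdc"]  # hypothetical devices
    print("; ".join(wipe_commands(sample)))
    # vgremove -fy docker-vg; pvremove -fy /dev/sdb; pvremove -fy /dev/sdc
```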
+ + # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume. +- name: Clear GlusterFS storage device contents + shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}" + delegate_to: "{{ item.item }}" + with_items: "{{ devices_info.results }}" + when: + - openshift_storage_glusterfs_wipe + - item.stdout_lines | count > 0 + +- name: Add service accounts to privileged SCC + oc_adm_policy_user: + user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}" + resource_kind: scc + resource_name: privileged + state: present + with_items: + - 'default' + - 'router' + +- name: Label GlusterFS nodes + oc_label: + name: "{{ glusterfs_host }}" + kind: node + state: add + labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" + with_items: "{{ groups.oo_glusterfs_to_config }}" + loop_control: + loop_var: glusterfs_host + +- name: Copy GlusterFS DaemonSet template + copy: + src: "{{ openshift.common.examples_content_version }}/glusterfs-template.yml" + dest: "{{ mktemp.stdout }}/glusterfs-template.yml" + +- name: Create GlusterFS template + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: template + name: glusterfs + state: present + files: + - "{{ mktemp.stdout }}/glusterfs-template.yml" + +- name: Deploy GlusterFS pods + oc_process: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + template_name: "glusterfs" + create: True + params: + IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}" + IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}" + +- name: Wait for GlusterFS pods + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs-node=pod" + register: glusterfs_pods + until: + - "glusterfs_pods.results.results[0]['items'] | count > 0" + # There must be as many pods with 'Ready' status True as there are nodes expecting those pods + - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count" + delay: 10 + retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml new file mode 100644 index 000000000..9f092d5d5 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -0,0 +1,48 @@ +--- +- name: Delete pre-existing GlusterFS registry resources + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: "{{ item.kind }}" + name: "{{ item.name | default(omit) }}" + selector: "{{ item.selector | default(omit) }}" + state: absent + with_items: + - kind: "svc,ep" + name: "glusterfs-registry-endpoints" + failed_when: False + +- name: Generate GlusterFS registry endpoints + template: + src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2" + dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml" + +- name: Copy GlusterFS registry service + copy: + src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml" + dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml" + +- name: Create GlusterFS registry endpoints + oc_obj: + namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" + state: present + kind: endpoints + name:
glusterfs-registry-endpoints + files: + - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml" + +- name: Create GlusterFS registry service + oc_obj: + namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" + state: present + kind: service + name: glusterfs-registry-endpoints + files: + - "{{ mktemp.stdout }}/glusterfs-registry-service.yml" + +- name: Check if GlusterFS registry volume exists + command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list" + register: registry_volume + +- name: Create GlusterFS registry volume + command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" + when: "'{{ openshift.hosted.registry.storage.glusterfs.path }}' not in registry_volume.stdout" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml new file mode 100644 index 000000000..76ae1db75 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml @@ -0,0 +1,41 @@ +--- +- name: Copy initial heketi resource files + copy: + src: "{{ openshift.common.examples_content_version }}/{{ item }}" + dest: "{{ mktemp.stdout }}/{{ item }}" + with_items: + - "deploy-heketi-template.yml" + +- name: Create deploy-heketi resources + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: template + name: deploy-heketi + state: present + files: + - "{{ mktemp.stdout }}/deploy-heketi-template.yml" + +- name: Deploy deploy-heketi pod + oc_process: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + template_name: "deploy-heketi" + create: True + params: + IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" + IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" + HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" + HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + +- name: Wait for deploy-heketi pod + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" + register: heketi_pod + until: + - "heketi_pod.results.results[0]['items'] | count > 0" + # Pod's 'Ready' status must be True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" + delay: 10 + retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml new file mode 100644 index 000000000..84b85e95d --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -0,0 +1,109 @@ +--- +- name: Create heketi DB volume + command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json" + register: setup_storage + failed_when: False + +# This is used in the subsequent task +- name: Copy the admin client config + command: > + cp {{ 
openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + changed_when: False + check_mode: no + +# Need `command` here because heketi-storage.json contains multiple objects. +- name: Copy heketi DB to GlusterFS volume + command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}" + when: "setup_storage.rc == 0" + +- name: Wait for copy job to finish + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: job + state: list + name: "heketi-storage-copy-job" + register: heketi_job + until: + - "'results' in heketi_job.results and heketi_job.results.results | count > 0" + # Pod's 'Complete' status must be True + - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1" + delay: 10 + retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + failed_when: + - "'results' in heketi_job.results" + - "heketi_job.results.results | count > 0" + # Fail when pod's 'Failed' status is True + - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1" + when: "setup_storage.rc == 0" + +- name: Delete deploy resources + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: "{{ item.kind }}" + name: "{{ item.name | default(omit) }}" + selector: "{{ item.selector | default(omit) }}" + state: absent + with_items: + - kind: "template,route,service,jobs,dc,secret" + selector: "deploy-heketi" + failed_when: False + +- name: Copy heketi template + copy: + src: "{{ openshift.common.examples_content_version }}/heketi-template.yml" + dest: "{{ mktemp.stdout }}/heketi-template.yml" + +- name: Create heketi resources + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: template + name: heketi + state: present + files: + - "{{ mktemp.stdout }}/heketi-template.yml" + +- name: Deploy heketi pod + oc_process: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + template_name: "heketi" + create: True + params: + IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" + IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" + HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" + HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + +- name: Wait for heketi pod + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=heketi-pod" + register: heketi_pod + until: + - "heketi_pod.results.results[0]['items'] | count > 0" + # Pod's 'Ready' status must be True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" + delay: 10 + retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + +- name: Determine heketi URL + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + state: list + kind: ep + selector: "glusterfs=heketi-service" + register: heketi_url + until: + - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" + - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" + delay: 10 + retries: "{{ 
(openshift_storage_glusterfs_timeout / 10) | int }}" + +- name: Set heketi URL + set_fact: + openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + +- name: Verify heketi service + command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" + changed_when: False diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml new file mode 100644 index 000000000..265a3cc6e --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/main.yml @@ -0,0 +1,182 @@ +--- +- name: Create temp directory for doing work in + command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX + register: mktemp + changed_when: False + check_mode: no + +- name: Verify target namespace exists + oc_project: + state: present + name: "{{ openshift_storage_glusterfs_namespace }}" + when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native + +- include: glusterfs_deploy.yml + when: openshift_storage_glusterfs_is_native + +- name: Make sure heketi-client is installed + package: name=heketi-client state=present + +- name: Delete pre-existing heketi resources + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: "{{ item.kind }}" + name: "{{ item.name | default(omit) }}" + selector: "{{ item.selector | default(omit) }}" + state: absent + with_items: + - kind: "template,route,service,jobs,dc,secret" + selector: "deploy-heketi" + - kind: "template,route,dc,service" + name: "heketi" + - kind: "svc,ep" + name: "heketi-storage-endpoints" + - kind: "sa" + name: "heketi-service-account" + failed_when: False + when: openshift_storage_glusterfs_heketi_wipe + +- name: Wait for deploy-heketi pods to terminate + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=deploy-heketi-pod" + register: heketi_pod + until: "heketi_pod.results.results[0]['items'] | count == 0" + delay: 10 + retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + when: openshift_storage_glusterfs_heketi_wipe + +- name: Wait for heketi pods to terminate + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=heketi-pod" + register: heketi_pod + until: "heketi_pod.results.results[0]['items'] | count == 0" + delay: 10 + retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + when: openshift_storage_glusterfs_heketi_wipe + +- name: Create heketi service account + oc_serviceaccount: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + name: heketi-service-account + state: present + when: openshift_storage_glusterfs_heketi_is_native + +- name: Add heketi service account to privileged SCC + oc_adm_policy_user: + user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account" + resource_kind: scc + resource_name: privileged + state: present + when: openshift_storage_glusterfs_heketi_is_native + +- name: Allow heketi service account to view/edit pods + oc_adm_policy_user: + user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account" + resource_kind: role + resource_name: edit + state: present + when: openshift_storage_glusterfs_heketi_is_native + +- name: Check for existing deploy-heketi pod + oc_obj: + 
namespace: "{{ openshift_storage_glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" + register: heketi_pod + when: openshift_storage_glusterfs_heketi_is_native + +- name: Check if need to deploy deploy-heketi + set_fact: + openshift_storage_glusterfs_heketi_deploy_is_missing: False + when: + - "openshift_storage_glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- name: Check for existing heketi pod + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=heketi-pod" + register: heketi_pod + when: openshift_storage_glusterfs_heketi_is_native + +- name: Check if need to deploy heketi + set_fact: + openshift_storage_glusterfs_heketi_is_missing: False + when: + - "openshift_storage_glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- include: heketi_deploy_part1.yml + when: + - openshift_storage_glusterfs_heketi_is_native + - openshift_storage_glusterfs_heketi_deploy_is_missing + - openshift_storage_glusterfs_heketi_is_missing + +- name: Determine heketi URL + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + state: list + kind: ep + selector: "glusterfs in (deploy-heketi-service, heketi-service)" + register: heketi_url + until: + - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" + - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" + delay: 10 + retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + when: + - openshift_storage_glusterfs_heketi_is_native + - openshift_storage_glusterfs_heketi_url is undefined + +- name: Set heketi URL + set_fact: + openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + when: + - openshift_storage_glusterfs_heketi_is_native + - openshift_storage_glusterfs_heketi_url is undefined + +- name: Verify heketi service + command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" + changed_when: False + +- name: Generate topology file + template: + src: "{{ openshift.common.examples_content_version }}/topology.json.j2" + dest: "{{ mktemp.stdout }}/topology.json" + when: + - openshift_storage_glusterfs_is_native + - openshift_storage_glusterfs_heketi_topology_load + +- name: Load heketi topology + command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" + register: topology_load + failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout" + when: + - openshift_storage_glusterfs_is_native + - 
diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..d72d085c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: glusterfs-registry-endpoints
+subsets:
+- addresses:
+{% for node in groups.oo_glusterfs_to_config %}
+  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+  ports:
+  - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
new file mode 100644
index 000000000..eb5b4544f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
@@ -0,0 +1,39 @@
+{
+    "clusters": [
+{%- set clusters = {} -%}
+{%- for node in groups.oo_glusterfs_to_config -%}
+    {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+    {%- if cluster in clusters -%}
+        {%- set _dummy = clusters[cluster].append(node) -%}
+    {%- else -%}
+        {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+    {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+        {
+            "nodes": [
+{%- for node in clusters[cluster] -%}
+                {
+                    "node": {
+                        "hostnames": {
+                            "manage": [
+                                "{{ hostvars[node].glusterfs_hostname | default(hostvars[node].openshift.common.hostname) }}"
+                            ],
+                            "storage": [
+                                "{{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}"
+                            ]
+                        },
+                        "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+                    },
+                    "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+                        "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+                    ]
+                }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+            ]
+        }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+    ]
+}
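To make the Jinja2 above more concrete: for a hypothetical inventory with two hosts in the oo_glusterfs_to_config group, both falling into the default cluster '1' and zone 1 and each defining glusterfs_devices, the template would render roughly the following (hostnames, IPs, and device paths are illustrative only):

    {
        "clusters": [
            {
                "nodes": [
                    { "node": { "hostnames": { "manage": [ "node1.example.com" ],
                                               "storage": [ "192.168.10.11" ] },
                                "zone": 1 },
                      "devices": [ "/dev/sdb", "/dev/sdc" ] },
                    { "node": { "hostnames": { "manage": [ "node2.example.com" ],
                                               "storage": [ "192.168.10.12" ] },
                                "zone": 1 },
                      "devices": [ "/dev/sdb" ] }
                ]
            }
        ]
    }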
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c3d001bb4..fa9b20e92 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -7,8 +7,13 @@
 # Block attempts to install origin without specifying some kind of version information.
 # This is because the latest tags for origin are usually alpha builds, which should not
 # be used by default. Users must indicate what they want.
-- fail:
-    msg: "Must specify openshift_release or openshift_image_tag in inventory to install origin. (suggestion: add openshift_release=\"1.2\" to inventory)"
+- name: Abort when we cannot safely guess what Origin image version the user wanted
+  fail:
+    msg: |-
+      To install a containerized Origin release, you must set openshift_release or
+      openshift_image_tag in your inventory to specify which version of the OpenShift
+      component images to use. You may want the latest (usually alpha) releases or
+      a more stable release. (Suggestion: add openshift_release="x.y" to inventory.)
   when:
   - is_containerized | bool
   - openshift.common.deployment_type == 'origin'
@@ -27,7 +32,10 @@
   when: openshift_release is defined
 
 # Verify that the image tag is in a valid format
-- block:
+- when:
+  - openshift_image_tag is defined
+  - openshift_image_tag != "latest"
+  block:
 
   # Verifies that when the deployment type is origin the version:
   # - starts with a v
@@ -35,12 +43,14 @@
   # It also allows for optional trailing data which:
   # - must start with a dash
   # - may contain numbers, letters, dashes and dots.
-  - name: Verify Origin openshift_image_tag is valid
+  - name: (Origin) Verify openshift_image_tag is valid
+    when: openshift.common.deployment_type == 'origin'
     assert:
       that:
       - "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
-      msg: "openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1"
-    when: openshift.common.deployment_type == 'origin'
+      msg: |-
+        openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
+        You specified openshift_image_tag={{ openshift_image_tag }}
 
   # Verifies that when the deployment type is openshift-enterprise the version:
   # - starts with a v
@@ -48,16 +58,14 @@
   # It also allows for optional trailing data which:
   # - must start with a dash
   # - may contain numbers
-  - name: Verify Enterprise openshift_image_tag is valid
+  - name: (Enterprise) Verify openshift_image_tag is valid
+    when: openshift.common.deployment_type == 'openshift-enterprise'
     assert:
      that:
       - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
-      msg: "openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4"
-    when: openshift.common.deployment_type == 'openshift-enterprise'
-
-  when:
-  - openshift_image_tag is defined
-  - openshift_image_tag != "latest"
+      msg: |-
+        openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4
+        You specified openshift_image_tag={{ openshift_image_tag }}
 
 # Make sure we copy this to a fact if given a var:
 - set_fact:
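For quick reference, a few sample tags and how the two patterns above classify them (illustrative examples, not part of the change):

    # Origin:     (^v?\d+\.\d+\.\d+(-[\w\-\.]*)?$)
    #   v1.2.3            valid
    #   1.5.0             valid   (the leading v is optional for Origin)
    #   v3.5.1-alpha.1    valid   (optional suffix after a dash)
    #   v1.2              invalid (exactly three numeric components are required)
    # Enterprise: (^v\d+\.\d+[\.\d+]*(-\d+)?$)
    #   v3.4              valid
    #   v3.5.1.3          valid   (extra .# components are allowed)
    #   v1.2-1            valid   (optional numeric release after a dash)
    #   3.4               invalid (the leading v is required)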
@@ -119,30 +127,42 @@
 
 - fail:
     msg: openshift_version role was unable to set openshift_version
+  name: Abort if openshift_version was not set
   when: openshift_version is not defined
 
 - fail:
     msg: openshift_version role was unable to set openshift_image_tag
+  name: Abort if openshift_image_tag was not set
   when: openshift_image_tag is not defined
 
 - fail:
     msg: openshift_version role was unable to set openshift_pkg_version
+  name: Abort if openshift_pkg_version was not set
   when: openshift_pkg_version is not defined
 
 - fail:
-    msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories."
+    msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+  name: Abort if no OpenShift version was found in the available yum repositories
   when:
   - not is_containerized | bool
  - openshift_version == '0.0'
 
 # We can't map an openshift_release to full rpm version like we can with containers; make sure
 # the rpm version we looked up matches the release requested and error out if not.
-- fail:
-    msg: "Detected OpenShift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories, inventory, or run the appropriate OpenShift upgrade playbook."
+- name: For an RPM install, abort when the release requested does not match the available version.
   when:
   - not is_containerized | bool
   - openshift_release is defined
-  - not openshift_version.startswith(openshift_release) | bool
+  assert:
+    that:
+    - openshift_version.startswith(openshift_release) | bool
+    msg: |-
+      You requested openshift_release {{ openshift_release }}, which is not matched by
+      the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+      on host {{ inventory_hostname }}.
+      We will only install the latest RPMs, so please ensure you are getting the release
+      you expect. You may need to adjust your Ansible inventory, modify the repositories
+      available on the host, or run the appropriate OpenShift upgrade playbook.
 
 # The end result of these three variables is quite important so make sure they are displayed and logged:
 - debug: var=openshift_release
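The assert above relies on a simple string-prefix test, so openshift_release "1.5" accepts a detected openshift_version of "1.5.1" but rejects "1.4.1". A minimal, runnable sketch of the same check, with hypothetical values in place of the role's detected facts:

    - hosts: localhost
      gather_facts: false
      vars:
        openshift_release: "1.5"
        openshift_version: "1.5.1"   # hypothetical detected RPM version
      tasks:
      - assert:
          that:
          # True here because "1.5.1" starts with "1.5"; a detected "1.4.1" would abort the play.
          - openshift_version.startswith(openshift_release) | bool
          msg: "Detected {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}"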