Diffstat (limited to 'roles')
64 files changed, 931 insertions, 229 deletions
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index ed97d539c..7e206ded1 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1 +1,6 @@
 ---
+docker_cli_auth_config_path: '/root/.docker'
+
+oreg_url: ''
+oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+oreg_auth_credentials_replace: False
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index bc52ab60c..145b552a6 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -117,6 +117,18 @@
   notify:
     - restart docker
 
+- name: Check for credentials file for registry auth
+  stat:
+    path: "{{ docker_cli_auth_config_path }}/config.json"
+  when: oreg_auth_user is defined
+  register: docker_cli_auth_credentials_stat
+
+- name: Create credentials for docker cli registry auth
+  command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+  when:
+    - oreg_auth_user is defined
+    - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+
 - name: Start the Docker service
   systemd:
     name: docker
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 24ca0d9f8..0bab0899c 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -95,7 +95,7 @@
     - name: Set to default prepend
      set_fact:
        l_crio_image_prepend: "docker.io/gscrivano"
-       l_crio_image_name: "crio-o-fedora"
+       l_crio_image_name: "cri-o-fedora"
 
     - name: Use Centos based image when distribution is CentOS
      set_fact:
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 57a84bc2c..146e5f430 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -100,18 +100,22 @@
         l_docker_image_prepend: "registry.fedoraproject.org/f25"
       when: ansible_distribution == 'Fedora'
 
-    # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
-    - name: Use a testing registry if requested
-      set_fact:
-        l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}"
-      when:
-        - openshift_docker_systemcontainer_image_registry_override is defined
-        - openshift_docker_systemcontainer_image_registry_override != ""
-
     - name: Set the full image name
       set_fact:
         l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
 
+    # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
+    - name: Use a specific image if requested
+      set_fact:
+        l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
+      when:
+        - openshift_docker_systemcontainer_image_override is defined
+        - openshift_docker_systemcontainer_image_override != ""
+
+    # Be nice and let the user see the variable result
+    - debug:
+        var: l_docker_image
+
     # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
     - name: Pre-pull Container Engine System Container image
       command: "atomic pull --storage ostree {{ l_docker_image }}"
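A note on the oreg_host default introduced above: the Jinja expression keeps only the first path component of oreg_url, and only when it looks like a hostname (contains a dot); otherwise the URL is assumed to start with a plain namespace. A minimal Python sketch of the same rule (the example URLs are invented):

    def oreg_host(oreg_url):
        # Keep the first path component only if it contains a dot,
        # i.e. looks like "registry.example.com" rather than a namespace.
        first = oreg_url.split('/')[0]
        return first if '.' in first else ''

    assert oreg_host('registry.example.com/openshift3/ose-${component}:${version}') == 'registry.example.com'
    assert oreg_host('openshift/origin-${component}:${version}') == ''

The docker login task above then targets that host, so a private registry named in oreg_url gets its credentials written under docker_cli_auth_config_path.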
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 89993f7ea..b67411f40 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -56,7 +56,7 @@ etcd_is_containerized: False
 etcd_is_thirdparty: False
 
 # etcd dir vars
-etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if openshift.common.etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
 
 # etcd ports and protocols
 etcd_client_port: 2379
diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
index 2bc486d3f..c1580640f 100644
--- a/roles/etcd_common/tasks/backup.yml
+++ b/roles/etcd_common/tasks/backup.yml
@@ -29,7 +29,6 @@
 - name: Check current etcd disk usage
   shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1
   register: l_etcd_disk_usage
-  when: r_etcd_common_embedded_etcd | bool
   # AUDIT:changed_when: `false` because we are only inspecting
   # state, not manipulating anything
   changed_when: false
@@ -39,7 +38,7 @@
     msg: >
       {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
       {{ l_avail_disk.stdout }} Kb available.
-  when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int)
+  when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int
 
 # For non containerized and non embedded we should have the correct version of
 # etcd installed already. So don't do anything.
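The reworked condition above fails the backup early unless free space is at least double the current etcd data size, and it now applies to all deployments rather than only embedded etcd. A quick sketch of the arithmetic (the numbers are made up):

    etcd_disk_usage_kb = 1500000   # hypothetical `du -k` output, ~1.5 GB
    avail_disk_kb = 2000000        # hypothetical free space, ~2 GB

    # Mirrors `when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int`
    backup_blocked = etcd_disk_usage_kb * 2 > avail_disk_kb
    assert backup_blocked          # 3000000 > 2000000, so the assert task fires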
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index ebfa6bb8f..517e0231d 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1602,11 +1602,13 @@ def set_builddefaults_facts(facts):
             builddefaults['git_no_proxy'] = builddefaults['no_proxy']
     # If we're actually defining a builddefaults config then create admission_plugin_config
     # then merge builddefaults[config] structure into admission_plugin_config
+
+    # 'config' is the 'openshift_builddefaults_json' inventory variable
     if 'config' in builddefaults:
         if 'admission_plugin_config' not in facts['master']:
-            facts['master']['admission_plugin_config'] = dict()
+            # Scaffold out the full expected datastructure
+            facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
         facts['master']['admission_plugin_config'].update(builddefaults['config'])
-        # if the user didn't actually provide proxy values, delete the proxy env variable defaults.
         delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
 
     return facts
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 8d35db6b5..d02a43655 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -187,7 +187,7 @@ def normalize(checks):
 
 def run_check(name, check, user_disabled_checks):
     """Run a single check if enabled and return a result dict."""
-    if name in user_disabled_checks:
+    if name in user_disabled_checks or '*' in user_disabled_checks:
         return dict(skipped=True, skipped_reason="Disabled by user request")
 
     # pylint: disable=broad-except; capturing exceptions broadly is intentional,
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 349655966..dcaf87eca 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -10,6 +10,7 @@ import traceback
 from ansible.plugins.callback import CallbackBase
 from ansible import constants as C
 from ansible.utils.color import stringc
+from ansible.module_utils.six import string_types
 
 FAILED_NO_MSG = u'Failed without returning a message.'
@@ -140,11 +141,19 @@ def deduplicate_failures(failures):
     Returns a new list of failures such that identical failures from different
     hosts are grouped together in a single entry. The relative order of failures
     is preserved.
+
+    If a failure contains an unhashable value, the original list of failures is
+    returned.
     """
     groups = defaultdict(list)
     for failure in failures:
         group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
-        groups[group_key].append(failure)
+        try:
+            groups[group_key].append(failure)
+        except TypeError:
+            # abort and return the original list of failures when a failure
+            # contains an unhashable value.
+            return failures
+
     result = []
     for failure in failures:
         group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
@@ -159,7 +168,10 @@ def format_failure(failure):
     """Return a list of pretty-formatted text entries describing a failure, including
     relevant information about it. Expect that the list of text entries will be joined
     by a newline separator when output to the user."""
-    host = u', '.join(failure['host'])
+    if isinstance(failure['host'], string_types):
+        host = failure['host']
+    else:
+        host = u', '.join(failure['host'])
     play = failure['play']
     task = failure['task']
     msg = failure['msg']
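The try/except added to deduplicate_failures is needed because the group key is a tuple of (key, value) pairs, and a tuple only hashes if all of its elements do. A small illustration (the failure dict is invented):

    failure = {'host': 'master1', 'msg': {'unhashable': 'value'}}
    group_key = tuple(sorted((k, v) for k, v in failure.items() if k != 'host'))
    try:
        hash(group_key)    # tuples hash their elements; the dict value raises
    except TypeError as err:
        print(err)         # unhashable type: 'dict'

Using such a key with the defaultdict raises the same TypeError, which is why the function falls back to returning the list unchanged.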
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 02ee1d0f9..987c955b6 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -4,6 +4,7 @@ Health checks for OpenShift clusters.
 
 import operator
 import os
+import time
 
 from abc import ABCMeta, abstractmethod, abstractproperty
 from importlib import import_module
@@ -57,6 +58,9 @@ class OpenShiftCheck(object):
         self._execute_module = execute_module
         self.task_vars = task_vars or {}
         self.tmp = tmp
+        # mainly for testing purposes; see execute_module_with_retries
+        self._module_retries = 3
+        self._module_retry_interval = 5  # seconds
 
         # set to True when the check changes the host, for accurate total "changed" count
         self.changed = False
@@ -115,6 +119,19 @@
         )
         return self._execute_module(module_name, module_args, self.tmp, self.task_vars)
 
+    def execute_module_with_retries(self, module_name, module_args):
+        """Run execute_module and retry on failure."""
+        result = {}
+        tries = 0
+        while True:
+            res = self.execute_module(module_name, module_args)
+            if tries > self._module_retries or not res.get("failed"):
+                result.update(res)
+                return result
+            result["last_failed"] = res
+            tries += 1
+            time.sleep(self._module_retry_interval)
+
     def get_var(self, *keys, **kwargs):
         """Get deeply nested values from task_vars.
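Control flow of the new retry helper, in isolation: the exit test is tries > retries after each call, so with the default _module_retries = 3 a persistently failing module is invoked five times before the last failure is returned. A condensed sketch with a stub standing in for execute_module (names invented):

    import time

    def with_retries(call_module, retries=3, interval=0):
        result, tries = {}, 0
        while True:
            res = call_module()
            if tries > retries or not res.get("failed"):
                result.update(res)
                return result            # success, or retry budget exhausted
            result["last_failed"] = res  # remember the most recent failure
            tries += 1
            time.sleep(interval)

    attempts = []

    def flaky():
        attempts.append(1)
        return {"failed": True} if len(attempts) < 3 else {"rc": 0}

    assert with_retries(flaky)["rc"] == 0 and len(attempts) == 3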
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 866c74d7c..9c35f0f92 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -32,7 +32,12 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
     # we use python-docker-py to check local docker for images, and skopeo
     # to look for images available remotely without waiting to pull them.
     dependencies = ["python-docker-py", "skopeo"]
-    skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false"
+    skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"
+
+    def __init__(self, *args, **kwargs):
+        super(DockerImageAvailability, self).__init__(*args, **kwargs)
+        # record whether we could reach a registry or not (and remember results)
+        self.reachable_registries = {}
 
     def is_active(self):
         """Skip hosts with unsupported deployment types."""
@@ -64,15 +69,21 @@
         unavailable_images = set(missing_images) - set(available_images)
 
         if unavailable_images:
-            return {
-                "failed": True,
-                "msg": (
-                    "One or more required Docker images are not available:\n    {}\n"
-                    "Configured registries: {}\n"
-                    "Checked by: {}"
-                ).format(",\n    ".join(sorted(unavailable_images)), ", ".join(registries),
-                         self.skopeo_img_check_command),
-            }
+            registries = [
+                reg if self.reachable_registries.get(reg, True) else reg + " (unreachable)"
+                for reg in registries
+            ]
+            msg = (
+                "One or more required Docker images are not available:\n    {}\n"
+                "Configured registries: {}\n"
+                "Checked by: {}"
+            ).format(
+                ",\n    ".join(sorted(unavailable_images)),
+                ", ".join(registries),
+                self.skopeo_img_check_command
+            )
+
+            return dict(failed=True, msg=msg)
 
         return {}
@@ -128,31 +139,31 @@ def local_images(self, images):
     def local_images(self, images):
         """Filter a list of images and return those available locally."""
-        return [
-            image for image in images
-            if self.is_image_local(image)
-        ]
+        registries = self.known_docker_registries()
+        found_images = []
+        for image in images:
+            # docker could have the image name as-is or prefixed with any registry
+            imglist = [image] + [reg + "/" + image for reg in registries]
+            if self.is_image_local(imglist):
+                found_images.append(image)
+        return found_images
 
     def is_image_local(self, image):
         """Check if image is already in local docker index."""
         result = self.execute_module("docker_image_facts", {"name": image})
-        if result.get("failed", False):
-            return False
-
-        return bool(result.get("images", []))
+        return bool(result.get("images")) and not result.get("failed")
 
     def known_docker_registries(self):
         """Build a list of docker registries available according to inventory vars."""
-        docker_facts = self.get_var("openshift", "docker")
-        regs = set(docker_facts["additional_registries"])
+        regs = list(self.get_var("openshift.docker.additional_registries", default=[]))
 
         deployment_type = self.get_var("openshift_deployment_type")
-        if deployment_type == "origin":
-            regs.update(["docker.io"])
-        elif "enterprise" in deployment_type:
-            regs.update(["registry.access.redhat.com"])
+        if deployment_type == "origin" and "docker.io" not in regs:
+            regs.append("docker.io")
+        elif "enterprise" in deployment_type and "registry.access.redhat.com" not in regs:
+            regs.append("registry.access.redhat.com")
 
-        return list(regs)
+        return regs
 
     def available_images(self, images, default_registries):
         """Search remotely for images. Returns: list of images found."""
@@ -165,17 +176,35 @@
         """Use Skopeo to determine if required image exists in known registry(s)."""
         registries = default_registries
 
-        # if image already includes a registry, only use that
+        # If image already includes a registry, only use that.
+        # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.
+        # registry.access.redhat.com/rhel7 as if the registry were a namespace.
+        # It's not clear that there's any way to distinguish them, but fortunately
+        # the current set of images all look like [registry/]namespace/name[:version].
        if image.count("/") > 1:
             registry, image = image.split("/", 1)
             registries = [registry]
 
         for registry in registries:
-            args = {
-                "_raw_params": self.skopeo_img_check_command + " docker://{}/{}".format(registry, image)
-            }
-            result = self.execute_module("command", args)
+            if registry not in self.reachable_registries:
+                self.reachable_registries[registry] = self.connect_to_registry(registry)
+            if not self.reachable_registries[registry]:
+                continue
+
+            args = {"_raw_params": self.skopeo_img_check_command.format(registry=registry, image=image)}
+            result = self.execute_module_with_retries("command", args)
             if result.get("rc", 0) == 0 and not result.get("failed"):
                 return True
+            if result.get("rc") == 124:  # RC 124 == timed out; mark unreachable
+                self.reachable_registries[registry] = False
 
         return False
+
+    def connect_to_registry(self, registry):
+        """Use ansible wait_for module to test connectivity from host to registry. Returns bool."""
+        # test a simple TCP connection
+        host, _, port = registry.partition(":")
+        port = port or 443
+        args = dict(host=host, port=port, state="started", timeout=30)
+        result = self.execute_module("wait_for", args)
+        return result.get("rc", 0) == 0 and not result.get("failed")
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index e9bae60a3..24f1d938a 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -36,7 +36,7 @@ class DockerHostMixin(object):
 
         # NOTE: we would use the "package" module but it's actually an action plugin
         # and it's not clear how to invoke one of those. This is about the same anyway:
-        result = self.execute_module(
+        result = self.execute_module_with_retries(
             self.get_var("ansible_pkg_mgr", default="yum"),
             {"name": self.dependencies, "state": "present"},
         )
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index a86180b00..21355c2f0 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -26,7 +26,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
             packages.update(self.node_packages(rpm_prefix))
 
         args = {"packages": sorted(set(packages))}
-        return self.execute_module("check_yum_update", args)
+        return self.execute_module_with_retries("check_yum_update", args)
 
     @staticmethod
     def master_packages(rpm_prefix):
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index 1e9aecbe0..8464e8a5e 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
 
     def run(self):
         args = {"packages": []}
-        return self.execute_module("check_yum_update", args)
+        return self.execute_module_with_retries("check_yum_update", args)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 0b795b6c4..d4aec3ed8 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -76,7 +76,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
             ],
         }
 
-        return self.execute_module("aos_version", args)
+        return self.execute_module_with_retries("aos_version", args)
 
     def get_required_ovs_version(self):
         """Return the correct Open vSwitch version(s) for the current OpenShift version."""
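The templated skopeo command and the timeout handling above are easiest to see in isolation. A sketch of how the check assembles the command and reads the exit code; run() here is a placeholder for invoking Ansible's command module, and the registry/image names are examples:

    SKOPEO_CMD = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"

    def image_available(run, registry, image):
        result = run(SKOPEO_CMD.format(registry=registry, image=image))
        if result.get("rc") == 124:   # GNU `timeout` exits 124 when the command hangs
            print("%s marked unreachable" % registry)
        return result.get("rc", 0) == 0 and not result.get("failed")

    # e.g. image_available(run, "registry.access.redhat.com", "openshift3/ose-pod:v3.6")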
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index c109ebd24..58864da21 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -110,11 +110,16 @@
     assert not skipped(result)
 
-def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('to_disable', [
+    'fake_check',
+    ['fake_check', 'spam'],
+    '*,spam,eggs',
+])
+def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monkeypatch):
     checks = [fake_check('fake_check', is_active=True)]
     monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
 
-    task_vars['openshift_disable_check'] = 'fake_check'
+    task_vars['openshift_disable_check'] = to_disable
     result = plugin.run(tmp=None, task_vars=task_vars)
 
     assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
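Tying the new test cases back to the action-plugin change: judging by the parametrized values, openshift_disable_check is normalized from a comma-separated string (or a list), so a single '*' entry now disables every check. The matching rule, reduced to plain Python:

    user_disabled_checks = '*,spam,eggs'.split(',')   # e.g. openshift_disable_check='*,spam,eggs'

    def is_disabled(name):
        return name in user_disabled_checks or '*' in user_disabled_checks

    assert is_disabled('fake_check')   # skipped even though not listed by name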
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 8d0a53df9..6a7c16c7e 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,6 +3,23 @@ import pytest
 from openshift_checks.docker_image_availability import DockerImageAvailability
 
+@pytest.fixture()
+def task_vars():
+    return dict(
+        openshift=dict(
+            common=dict(
+                service_type='origin',
+                is_containerized=False,
+                is_atomic=False,
+            ),
+            docker=dict(),
+        ),
+        openshift_deployment_type='origin',
+        openshift_image_tag='',
+        group_names=['nodes', 'masters'],
+    )
+
+
 @pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
     ("origin", True, [], True),
     ("openshift-enterprise", True, [], True),
@@ -15,12 +32,10 @@ from openshift_checks.docker_image_availability import DockerImageAvailability
     ("origin", False, ["nodes", "masters"], True),
     ("openshift-enterprise", False, ["etcd"], False),
 ])
-def test_is_active(deployment_type, is_containerized, group_names, expect_active):
-    task_vars = dict(
-        openshift=dict(common=dict(is_containerized=is_containerized)),
-        openshift_deployment_type=deployment_type,
-        group_names=group_names,
-    )
+def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+    task_vars['openshift_deployment_type'] = deployment_type
+    task_vars['openshift']['common']['is_containerized'] = is_containerized
+    task_vars['group_names'] = group_names
     assert DockerImageAvailability(None, task_vars).is_active() == expect_active
 
@@ -30,10 +45,10 @@
     (True, False),
     (False, True),
 ])
-def test_all_images_available_locally(is_containerized, is_atomic):
+def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
     def execute_module(module_name, module_args, *_):
         if module_name == "yum":
-            return {"changed": True}
+            return {}
 
         assert module_name == "docker_image_facts"
         assert 'name' in module_args
@@ -42,19 +57,9 @@
             'images': [module_args['name']],
         }
 
-    result = DockerImageAvailability(execute_module, task_vars=dict(
-        openshift=dict(
-            common=dict(
-                service_type='origin',
-                is_containerized=is_containerized,
-                is_atomic=is_atomic,
-            ),
-            docker=dict(additional_registries=["docker.io"]),
-        ),
-        openshift_deployment_type='origin',
-        openshift_image_tag='3.4',
-        group_names=['nodes', 'masters'],
-    )).run()
+    task_vars['openshift']['common']['is_containerized'] = is_containerized
+    task_vars['openshift']['common']['is_atomic'] = is_atomic
+    result = DockerImageAvailability(execute_module, task_vars).run()
 
     assert not result.get('failed', False)
 
@@ -63,53 +68,36 @@
     False,
     True,
 ])
-def test_all_images_available_remotely(available_locally):
+def test_all_images_available_remotely(task_vars, available_locally):
     def execute_module(module_name, *_):
         if module_name == 'docker_image_facts':
             return {'images': [], 'failed': available_locally}
-        return {'changed': False}
+        return {}
 
-    result = DockerImageAvailability(execute_module, task_vars=dict(
-        openshift=dict(
-            common=dict(
-                service_type='origin',
-                is_containerized=False,
-                is_atomic=False,
-            ),
-            docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
-        ),
-        openshift_deployment_type='origin',
-        openshift_image_tag='v3.4',
-        group_names=['nodes', 'masters'],
-    )).run()
+    task_vars['openshift']['docker']['additional_registries'] = ["docker.io", "registry.access.redhat.com"]
+    task_vars['openshift_image_tag'] = 'v3.4'
+    check = DockerImageAvailability(execute_module, task_vars)
+    check._module_retry_interval = 0
+    result = check.run()
 
     assert not result.get('failed', False)
 
-def test_all_images_unavailable():
-    def execute_module(module_name=None, *_):
-        if module_name == "command":
-            return {
-                'failed': True,
-            }
+def test_all_images_unavailable(task_vars):
+    def execute_module(module_name=None, *args):
+        if module_name == "wait_for":
+            return {}
+        elif module_name == "command":
+            return {'failed': True}
 
-        return {
-            'changed': False,
-        }
+        return {}  # docker_image_facts failure
 
-    actual = DockerImageAvailability(execute_module, task_vars=dict(
-        openshift=dict(
-            common=dict(
-                service_type='origin',
-                is_containerized=False,
-                is_atomic=False,
-            ),
-            docker=dict(additional_registries=["docker.io"]),
-        ),
-        openshift_deployment_type="openshift-enterprise",
-        openshift_image_tag='latest',
-        group_names=['nodes', 'masters'],
-    )).run()
+    task_vars['openshift']['docker']['additional_registries'] = ["docker.io"]
+    task_vars['openshift_deployment_type'] = "openshift-enterprise"
+    task_vars['openshift_image_tag'] = 'latest'
+    check = DockerImageAvailability(execute_module, task_vars)
+    check._module_retry_interval = 0
+    actual = check.run()
 
     assert actual['failed']
     assert "required Docker images are not available" in actual['msg']
@@ -125,62 +113,63 @@
         ["dependencies can be installed via `yum`"]
     ),
 ])
-def test_skopeo_update_failure(message, extra_words):
+def test_skopeo_update_failure(task_vars, message, extra_words):
     def execute_module(module_name=None, *_):
         if module_name == "yum":
             return {
                 "failed": True,
                 "msg": message,
-                "changed": False,
             }
 
-        return {'changed': False}
+        return {}
 
-    actual = DockerImageAvailability(execute_module, task_vars=dict(
-        openshift=dict(
-            common=dict(
-                service_type='origin',
-                is_containerized=False,
-                is_atomic=False,
-            ),
-            docker=dict(additional_registries=["unknown.io"]),
-        ),
-        openshift_deployment_type="openshift-enterprise",
-        openshift_image_tag='',
-        group_names=['nodes', 'masters'],
-    )).run()
+    task_vars['openshift']['docker']['additional_registries'] = ["unknown.io"]
+    task_vars['openshift_deployment_type'] = "openshift-enterprise"
+    check = DockerImageAvailability(execute_module, task_vars)
+    check._module_retry_interval = 0
+    actual = check.run()
 
     assert actual["failed"]
     for word in extra_words:
         assert word in actual["msg"]
 
-@pytest.mark.parametrize("deployment_type,registries", [
-    ("origin", ["unknown.io"]),
-    ("openshift-enterprise", ["registry.access.redhat.com"]),
-    ("openshift-enterprise", []),
-])
-def test_registry_availability(deployment_type, registries):
+@pytest.mark.parametrize(
+    "image, registries, connection_test_failed, skopeo_failed, "
+    "expect_success, expect_registries_reached", [
+        (
+            "spam/eggs:v1", ["test.reg"],
+            True, True,
+            False,
+            {"test.reg": False},
+        ),
+        (
+            "spam/eggs:v1", ["test.reg"],
+            False, True,
+            False,
+            {"test.reg": True},
+        ),
+        (
+            "eggs.reg/spam/eggs:v1", ["test.reg"],
+            False, False,
+            True,
+            {"eggs.reg": True},
+        ),
+    ])
+def test_registry_availability(image, registries, connection_test_failed, skopeo_failed,
+                               expect_success, expect_registries_reached):
     def execute_module(module_name=None, *_):
-        return {
-            'changed': False,
-        }
+        if module_name == "wait_for":
+            return dict(msg="msg", failed=connection_test_failed)
+        elif module_name == "command":
+            return dict(msg="msg", failed=skopeo_failed)
 
-    actual = DockerImageAvailability(execute_module, task_vars=dict(
-        openshift=dict(
-            common=dict(
-                service_type='origin',
-                is_containerized=False,
-                is_atomic=False,
-            ),
-            docker=dict(additional_registries=registries),
-        ),
-        openshift_deployment_type=deployment_type,
-        openshift_image_tag='',
-        group_names=['nodes', 'masters'],
-    )).run()
+    check = DockerImageAvailability(execute_module, task_vars())
+    check._module_retry_interval = 0
 
-    assert not actual.get("failed", False)
+    available = check.is_available_skopeo_image(image, registries)
+    assert available == expect_success
+    assert expect_registries_reached == check.reachable_registries
 
@@ -257,7 +246,7 @@
         openshift_image_tag='vtest',
     )
 
-    assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+    assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
 
 
 def test_containerized_etcd():
@@ -271,4 +260,4 @@ def test_containerized_etcd():
         group_names=['etcd'],
     )
     expected = set(['registry.access.redhat.com/rhel7/etcd'])
-    assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+    assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 1fe648b75..8aa87ca59 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -56,7 +56,7 @@ def test_package_availability(task_vars, must_have_packages, must_not_have_packa
         assert 'packages' in module_args
         assert set(module_args['packages']).issuperset(must_have_packages)
         assert not set(module_args['packages']).intersection(must_not_have_packages)
-        return return_value
+        return {'foo': return_value}
 
     result = PackageAvailability(execute_module, task_vars).run()
-    assert result is return_value
+    assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
index 06489b0d7..7d9035a36 100644
--- a/roles/openshift_health_checker/test/package_update_test.py
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -9,7 +9,7 @@ def test_package_update():
         assert 'packages' in module_args
         # empty list of packages means "generic check if 'yum update' will work"
         assert module_args['packages'] == []
-        return return_value
+        return {'foo': return_value}
 
     result = PackageUpdate(execute_module).run()
-    assert result is return_value
+    assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index e871f39f0..8564cd4db 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -52,7 +52,7 @@ def test_invalid_openshift_release_format():
 ])
 def test_package_version(openshift_release):
-    return_value = object()
+    return_value = {"foo": object()}
 
     def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_):
         assert module_name == 'aos_version'
@@ -66,7 +66,7 @@ def test_package_version(openshift_release):
 
     check = PackageVersion(execute_module, task_vars_for(openshift_release, 'origin'))
     result = check.run()
-    assert result is return_value
+    assert result == return_value
 
@@ -79,7 +79,7 @@ def test_package_version(openshift_release):
 ])
 def test_docker_package_version(deployment_type, openshift_release, expected_docker_version):
-    return_value = object()
+    return_value = {"foo": object()}
 
     def execute_module(module_name=None, module_args=None, *_):
         assert module_name == 'aos_version'
@@ -93,7 +93,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
 
     check = PackageVersion(execute_module, task_vars_for(openshift_release, deployment_type))
     result = check.run()
-    assert result is return_value
+    assert result == return_value
 
 @pytest.mark.parametrize('group_names,is_containerized,is_active', [
diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py
index 0fc258133..69f27653c 100644
--- a/roles/openshift_health_checker/test/zz_failure_summary_test.py
+++ b/roles/openshift_health_checker/test/zz_failure_summary_test.py
@@ -65,6 +65,21 @@ import pytest
             },
         ],
     ),
+    # if a failure contains an unhashable value, it will not be deduplicated
+    (
+        [
+            {
+                'host': 'master1',
+                'msg': {'unhashable': 'value'},
+            },
+        ],
+        [
+            {
+                'host': 'master1',
+                'msg': {'unhashable': 'value'},
+            },
+        ],
+    ),
 ])
 def test_deduplicate_failures(failures, deduplicated):
     assert deduplicate_failures(failures) == deduplicated
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 08c1d849e..712a2a591 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -5,8 +5,8 @@ r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default
 r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
 r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 
-openshift_hosted_router_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
-openshift_hosted_registry_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
+openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
+openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
 
 registry_volume_claim: 'registry-claim'
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index d73c290ff..48f53aef8 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -137,7 +137,7 @@
     edits: "{{ openshift_hosted_registry_edits }}"
     force: "{{ True|bool in openshift_hosted_registry_force }}"
 
-- when: openshift_hosted_registry_wait
+- when: openshift_hosted_registry_wait | bool
   block:
   - name: Ensure OpenShift registry correctly rolls out (best-effort today)
     command: |
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index 68ec7233e..2a42b5a7c 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -94,7 +94,7 @@
     stats_port: "{{ item.stats_port }}"
   with_items: "{{ openshift_hosted_routers }}"
 
-- when: openshift_hosted_router_wait
+- when: openshift_hosted_router_wait | bool
   block:
   - name: Ensure OpenShift router correctly rolls out (best-effort today)
    command: |
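The | bool casts added to these when: clauses matter because the wait flags are defined as Jinja string templates: without the filter, any non-empty string, including "False", is truthy. In plain Python terms:

    rendered = "False"     # what a templated var like openshift_hosted_router_wait can yield
    assert bool(rendered)  # non-empty string, so a bare `when:` would still run the block

Ansible's | bool filter parses the string instead, mapping "False" to an actual boolean False.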
diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
index 11478263c..72754df2e 100644
--- a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
     - annotations: null
       from:
         kind: DockerImage
-        name: ${IMAGE_PREFIX}registry-console
+        name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
       name: ${IMAGE_VERSION}
 - kind: OAuthClient
   apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
     - annotations: null
       from:
         kind: DockerImage
-        name: ${IMAGE_NAME}
+        name: ${IMAGE_NAME}:${IMAGE_VERSION}
       name: ${IMAGE_VERSION}
 - kind: OAuthClient
   apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
index 0e3d006a7..298f8039e 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
     - annotations: null
       from:
         kind: DockerImage
-        name: ${IMAGE_PREFIX}registry-console
+        name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
       name: ${IMAGE_VERSION}
 - kind: OAuthClient
   apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
     - annotations: null
       from:
         kind: DockerImage
-        name: ${IMAGE_NAME}
+        name: ${IMAGE_NAME}:${IMAGE_VERSION}
       name: ${IMAGE_VERSION}
 - kind: OAuthClient
   apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
index 28feac4e6..dace26793 100644
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
     - annotations: null
       from:
         kind: DockerImage
-        name: ${IMAGE_PREFIX}registry-console
+        name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
       name: ${IMAGE_VERSION}
 - kind: OAuthClient
   apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
     - annotations: null
       from:
         kind: DockerImage
-        name: ${IMAGE_NAME}
+        name: ${IMAGE_NAME}:${IMAGE_VERSION}
       name: ${IMAGE_VERSION}
 - kind: OAuthClient
   apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index 8bf98ba41..f821efd6b 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
     - annotations: null
       from:
         kind: DockerImage
-        name: ${IMAGE_PREFIX}registry-console
+        name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
       name: ${IMAGE_VERSION}
 - kind: OAuthClient
   apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
     - annotations: null
       from:
         kind: DockerImage
-        name: ${IMAGE_NAME}
+        name: ${IMAGE_NAME}:${IMAGE_VERSION}
       name: ${IMAGE_VERSION}
 - kind: OAuthClient
   apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
index bbaf76c17..019d836fe 100644
--- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
     - annotations: null
       from:
         kind: DockerImage
-        name: ${IMAGE_PREFIX}registry-console
+        name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
       name: ${IMAGE_VERSION}
 - kind: OAuthClient
   apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
     - annotations: null
       from:
         kind: DockerImage
-        name: ${IMAGE_NAME}
+        name: ${IMAGE_NAME}:${IMAGE_VERSION}
       name: ${IMAGE_VERSION}
 - kind: OAuthClient
   apiVersion: v1
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 7ccc2fc3b..f142f89f0 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -3,6 +3,9 @@ manage_iq_tasks:
 - resource_kind: role
   resource_name: admin
   user: management-admin
+- resource_kind: role
+  resource_name: admin
+  user: system:serviceaccount:management-infra:management-admin
 - resource_kind: cluster-role
   resource_name: management-infra-admin
   user: system:serviceaccount:management-infra:management-admin
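The registry-console template fix repeated across all of these versions is worth spelling out: a DockerImage reference without an explicit tag is pulled as :latest, so appending :${IMAGE_VERSION} pins the console image to the release being installed. A sketch with example parameter values (not taken from the templates):

    image_prefix = "registry.access.redhat.com/openshift3/"   # example IMAGE_PREFIX
    image_version = "v3.6"                                     # example IMAGE_VERSION

    untagged = image_prefix + "registry-console"               # implicitly :latest when pulled
    pinned = "{}registry-console:{}".format(image_prefix, image_version)
    assert pinned == "registry.access.redhat.com/openshift3/registry-console:v3.6"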
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 71bb09a76..4c8d6fdad 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -24,7 +24,7 @@ oreg_url: ''
 oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
 oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
 oreg_auth_credentials_replace: False
-
+l_bind_docker_reg_auth: False
 
 # NOTE
 # r_openshift_master_*_default may be defined external to this role.
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 121261e94..894fe8e2b 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -229,21 +229,7 @@
     - restart master controllers
   when: openshift_master_bootstrap_enabled | default(False)
 
-- name: Check for credentials file for registry auth
-  stat:
-    path: "{{oreg_auth_credentials_path }}"
-  when:
-    - oreg_auth_user is defined
-  register: master_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
-  command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
-  when:
-    - oreg_auth_user is defined
-    - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
-  notify:
-    - restart master api
-    - restart master controllers
+- include: registry_auth.yml
 
 - include: set_loopback_context.yml
   when:
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
new file mode 100644
index 000000000..96b6c614e
--- /dev/null
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -0,0 +1,27 @@
+---
+- name: Check for credentials file for registry auth
+  stat:
+    path: "{{ oreg_auth_credentials_path }}"
+  when: oreg_auth_user is defined
+  register: master_oreg_auth_credentials_stat
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+  set_fact:
+    l_bind_docker_reg_auth: True
+  when:
+    - openshift.common.is_containerized | bool
+    - oreg_auth_user is defined
+    - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+  notify:
+    - restart master api
+    - restart master controllers
+
+- name: Create credentials for registry auth
+  command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+  when:
+    - oreg_auth_user is defined
+    - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+  notify:
+    - restart master api
+    - restart master controllers
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index f06448d71..a184a59f6 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,7 +12,17 @@ Requires={{ openshift.docker.service_name }}.service
 EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
 Environment=GOTRACEBACK=crash
 ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host \
+  --name {{ openshift.common.service_type }}-master-api \
+  --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \
+  -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
+  -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \
+  -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
+  {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+  -v /etc/pki:/etc/pki:ro \
+  {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+  {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
+  --config=${CONFIG_FILE} $OPTIONS
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
 LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index b7f36491b..2ded05f53 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,7 +11,17 @@ PartOf={{ openshift.docker.service_name }}.service
 EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
 Environment=GOTRACEBACK=crash
 ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host \
+  --name {{ openshift.common.service_type }}-master-controllers \
+  --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \
+  -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
+  -v /var/run/docker.sock:/var/run/docker.sock \
+  -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
+  {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+  -v /etc/pki:/etc/pki:ro \
+  {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+  {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
+  --config=${CONFIG_FILE} $OPTIONS
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
 LimitNOFILE=131072
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index e767772ce..5558f55cb 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -383,7 +383,7 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
         if 'extraAuthorizeParameters' in self._idp:
             if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
                 val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
-                self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
+                self._idp['extraAuthorizeParameters']['include_granted_scopes'] = '"true"' if val else '"false"'
 
     def validate(self):
         ''' validate this idp instance '''
diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml
index 2e2013d40..d6756f9b9 100644
--- a/roles/openshift_metrics/tasks/pre_install.yaml
+++ b/roles/openshift_metrics/tasks/pre_install.yaml
@@ -10,7 +10,7 @@
       is invalid, must be one of: emptydir, pv, dynamic
   when:
   - openshift_metrics_cassandra_storage_type not in openshift_metrics_cassandra_storage_types
-  - "not {{ openshift_metrics_heapster_standalone | bool }}"
+  - not (openshift_metrics_heapster_standalone | bool)
 
 - name: list existing secrets
   command: >
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index f1e64f3aa..5424a64d2 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -84,7 +84,7 @@ oreg_url: ''
 oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
 oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
 oreg_auth_credentials_replace: False
-
+l_bind_docker_reg_auth: False
 
 # NOTE
 # r_openshift_node_*_default may be defined external to this role.
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 7af3f54b5..1504d01af 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -2,17 +2,9 @@
 - name: Install the systemd units
   include: systemd_units.yml
 
-- name: Check for tuned package
-  command: rpm -q tuned
-  args:
-    warn: no
-  register: tuned_installed
-  changed_when: false
-  failed_when: false
-
-- name: Set atomic-guest tuned profile
-  command: "tuned-adm profile atomic-guest"
-  when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool
+- name: Setup tuned
+  include: tuned.yml
+  static: yes
 
 - name: Start and enable openvswitch service
   systemd:
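The l_bind_docker_reg_auth flag set in the new registry_auth.yml feeds the Jinja conditionals in the unit templates above: on containerized hosts the credentials directory (oreg_auth_credentials_path, typically /var/lib/origin/.docker for the master) is bind-mounted read-only at /root/.docker inside the container. A Python sketch of the flag-to-mount logic, with the path as an assumed example:

    def docker_volume_args(bind_reg_auth, creds_path="/var/lib/origin/.docker"):
        # Mirrors `{% if l_bind_docker_reg_auth %} -v ...:/root/.docker:ro{% endif %}`
        args = ["-v", "/etc/pki:/etc/pki:ro"]
        if bind_reg_auth:
            args += ["-v", "{}:/root/.docker:ro".format(creds_path)]
        return args

    assert "/var/lib/origin/.docker:/root/.docker:ro" in docker_volume_args(True)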
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 02b8ee67c..265bf2c46 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -1,11 +1,9 @@
 ---
-# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
-# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
 - when: not openshift.common.is_containerized | bool
   block:
   - name: Install Node package
     package:
-      name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+      name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
       state: present
 
   - name: Install sdn-ovs package
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 22ff6dfd2..ff8d1942c 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -76,20 +76,7 @@
   include: config.yml
   when: not openshift_node_bootstrap
 
-- name: Check for credentials file for registry auth
-  stat:
-    path: "{{oreg_auth_credentials_path }}"
-  when:
-    - oreg_auth_user is defined
-  register: node_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
-  command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
-  when:
-    - oreg_auth_user is defined
-    - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
-  notify:
-    - restart node
+- include: registry_auth.yml
 
 - name: Configure AWS Cloud Provider Settings
   lineinfile:
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
new file mode 100644
index 000000000..f370bb260
--- /dev/null
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -0,0 +1,25 @@
+---
+- name: Check for credentials file for registry auth
+  stat:
+    path: "{{ oreg_auth_credentials_path }}"
+  when: oreg_auth_user is defined
+  register: node_oreg_auth_credentials_stat
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+  set_fact:
+    l_bind_docker_reg_auth: True
+  when:
+    - openshift.common.is_containerized | bool
+    - oreg_auth_user is defined
+    - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+  notify:
+    - restart node
+
+- name: Create credentials for registry auth
+  command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+  when:
+    - oreg_auth_user is defined
+    - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+  notify:
+    - restart node
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 57094f28e..4ab10b95f 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -21,7 +21,22 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
 ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
 ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
 ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
+  --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+  -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
+  -e HOST=/rootfs -e HOST_ETC=/host-etc \
+  -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
+  -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
+  {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+  -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
+  -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
+  -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \
+  -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
+  -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
+  -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
+  -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
+  {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+  {{ openshift.node.node_image }}:${IMAGE_VERSION}
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
 ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
index 1f25cc39f..16a307c06 100644
--- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
+++ b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
@@ -1,2 +1 @@
 window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true;
-window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.pod_presets = true;
|--------------------------------------------------|-------------------------|-----------------------------------------| | openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready -| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace in which to create GlusterFS resources +| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources | openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized | openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names | openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name +| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels. | openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster | openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' | openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods @@ -91,7 +92,7 @@ GlusterFS cluster into a new or existing OpenShift cluster: | openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin | openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes | openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi -| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service. +| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address of the heketi service.
| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode | openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes | openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index 0b3d3aef1..148549887 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -3,6 +3,7 @@ openshift_storage_glusterfs_timeout: 300 openshift_storage_glusterfs_is_native: True openshift_storage_glusterfs_name: 'storage' openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host" +openshift_storage_glusterfs_use_default_selector: False openshift_storage_glusterfs_storageclass: True openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" openshift_storage_glusterfs_version: 'latest' @@ -31,6 +32,7 @@ openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.na openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}" openshift_storage_glusterfs_registry_name: 'registry' openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host" +openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}" openshift_storage_glusterfs_registry_storageclass: False openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}" openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}" @@ -58,9 +60,9 @@ r_openshift_storage_glusterfs_os_firewall_deny: [] r_openshift_storage_glusterfs_os_firewall_allow: - service: glusterfs_sshd port: "2222/tcp" -- service: glusterfs_daemon - port: "24007/tcp" - service: glusterfs_management + port: "24007/tcp" +- service: glusterfs_rdma port: "24008/tcp" - service: glusterfs_bricks port: "49152-49251/tcp" diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml new file mode 100644 index 000000000..9ebb0d5ec --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml @@ -0,0 +1,143 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: deploy-heketi + labels: + glusterfs: heketi-template + deploy-heketi: support + annotations: + description: Bootstrap Heketi installation + tags: glusterfs,heketi,installation +objects: +- kind: Service + apiVersion: v1 + metadata: + name: deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-service + deploy-heketi: support + annotations: + description: Exposes Heketi service + spec: + ports: + - name: deploy-heketi-${CLUSTER_NAME} + port: 8080 + targetPort: 8080 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-route + deploy-heketi: support + spec: + to: + kind: Service + name: deploy-heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: 
deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-dc + deploy-heketi: support + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: deploy-heketi + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + deploy-heketi: support + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: topology + mountPath: ${TOPOLOGY_PATH} + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + - name: topology + secret: + secretName: heketi-${CLUSTER_NAME}-topology-secret + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs +- name: TOPOLOGY_PATH + displayName: heketi topology file location + required: True diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml new file mode 100644 index 000000000..8c5e1ded3 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml @@ -0,0 +1,136 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: glusterfs + labels: + glusterfs: template + annotations: + description: GlusterFS DaemonSet template + tags: glusterfs +objects: +- kind: DaemonSet + apiVersion: extensions/v1beta1 + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-daemonset + annotations: + description: GlusterFS DaemonSet + tags: glusterfs + spec: + selector: + matchLabels: + glusterfs: ${CLUSTER_NAME}-pod + template: + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-pod 
+ glusterfs-node: pod + spec: + nodeSelector: "${{NODE_LABELS}}" + hostNetwork: true + containers: + - name: glusterfs + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + volumeMounts: + - name: glusterfs-heketi + mountPath: "/var/lib/heketi" + - name: glusterfs-run + mountPath: "/run" + - name: glusterfs-lvm + mountPath: "/run/lvm" + - name: glusterfs-etc + mountPath: "/etc/glusterfs" + - name: glusterfs-logs + mountPath: "/var/log/glusterfs" + - name: glusterfs-config + mountPath: "/var/lib/glusterd" + - name: glusterfs-dev + mountPath: "/dev" + - name: glusterfs-misc + mountPath: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + mountPath: "/sys/fs/cgroup" + readOnly: true + - name: glusterfs-ssl + mountPath: "/etc/ssl" + readOnly: true + securityContext: + capabilities: {} + privileged: true + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + resources: {} + terminationMessagePath: "/dev/termination-log" + volumes: + - name: glusterfs-heketi + hostPath: + path: "/var/lib/heketi" + - name: glusterfs-run + emptyDir: {} + - name: glusterfs-lvm + hostPath: + path: "/run/lvm" + - name: glusterfs-etc + hostPath: + path: "/etc/glusterfs" + - name: glusterfs-logs + hostPath: + path: "/var/log/glusterfs" + - name: glusterfs-config + hostPath: + path: "/var/lib/glusterd" + - name: glusterfs-dev + hostPath: + path: "/dev" + - name: glusterfs-misc + hostPath: + path: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + hostPath: + path: "/sys/fs/cgroup" + - name: glusterfs-ssl + hostPath: + path: "/etc/ssl" + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} +parameters: +- name: NODE_LABELS + displayName: Daemonset Node Labels + description: Labels which define the daemonset node selector. 
Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\' + value: '{ "glusterfs": "storage-host" }' +- name: IMAGE_NAME + displayName: GlusterFS container image name + required: True +- name: IMAGE_VERSION + displayName: GlusterFS container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage diff --git a/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml new file mode 100644 index 000000000..61b6a8c13 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml @@ -0,0 +1,134 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: heketi + labels: + glusterfs: heketi-template + annotations: + description: Heketi service deployment template + tags: glusterfs,heketi +objects: +- kind: Service + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-service + annotations: + description: Exposes Heketi service + spec: + ports: + - name: heketi + port: 8080 + targetPort: 8080 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: heketi-${CLUSTER_NAME}-route + spec: + to: + kind: Service + name: heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-dc + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-pod + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + glusterfs: + endpoints: heketi-db-${CLUSTER_NAME}-endpoints + path: heketidbstorage + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods 
reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index a31c5bd5e..bc0dde17d 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -15,6 +15,7 @@ oc_project: state: present name: "{{ glusterfs_namespace }}" + node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}" when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass - name: Delete pre-existing heketi resources diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml index 7a2987883..012c722ff 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -5,6 +5,7 @@ glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_name }}" glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}" + glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_version }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 17f87578d..1bcab8e49 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -5,6 +5,7 @@ glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}" glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}" + glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}" diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 new file mode 100644 index 000000000..11c9195bb --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: glusterfs-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} + - ip: {{ hostvars[node].glusterfs_ip | 
default(hostvars[node].openshift.common.ip) }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 new file mode 100644 index 000000000..3f869d2b7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: glusterfs-{{ glusterfs_name }}-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 new file mode 100644 index 000000000..095fb780f --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }} +provisioner: kubernetes.io/glusterfs +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" +{% if glusterfs_heketi_admin_key is defined %} + secretNamespace: "{{ glusterfs_namespace }}" + secretName: "heketi-{{ glusterfs_name }}-admin-secret" +{%- endif -%} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 new file mode 100644 index 000000000..99cbdf748 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} + - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 new file mode 100644 index 000000000..dcb896441 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 new file mode 100644 index 000000000..579b11bb7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 @@ -0,0 +1,36 @@ +{ + "_port_comment": "Heketi Server Port Number", + "port" : "8080", + + "_use_auth": "Enable JWT authorization. Please enable for deployment", + "use_auth" : false, + + "_jwt" : "Private keys for access", + "jwt" : { + "_admin" : "Admin has access to all APIs", + "admin" : { + "key" : "My Secret" + }, + "_user" : "User only has access to /volumes endpoint", + "user" : { + "key" : "My Secret" + } + }, + + "_glusterfs_comment": "GlusterFS Configuration", + "glusterfs" : { + + "_executor_comment": "Execute plugin. 
Possible choices: mock, kubernetes, ssh", + "executor" : "{{ glusterfs_heketi_executor }}", + + "_db_comment": "Database file name", + "db" : "/var/lib/heketi/heketi.db", + + "sshexec" : { + "keyfile" : "/etc/heketi/private_key", + "port" : "{{ glusterfs_heketi_ssh_port }}", + "user" : "{{ glusterfs_heketi_ssh_user }}", + "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }} + } + } +} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 new file mode 100644 index 000000000..d6c28f6dd --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 @@ -0,0 +1,49 @@ +{ + "clusters": [ +{%- set clusters = {} -%} +{%- for node in glusterfs_nodes -%} + {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%} + {%- if cluster in clusters -%} + {%- set _dummy = clusters[cluster].append(node) -%} + {%- else -%} + {%- set _dummy = clusters.update({cluster: [ node, ]}) -%} + {%- endif -%} +{%- endfor -%} +{%- for cluster in clusters -%} + { + "nodes": [ +{%- for node in clusters[cluster] -%} + { + "node": { + "hostnames": { + "manage": [ +{%- if 'glusterfs_hostname' in hostvars[node] -%} + "{{ hostvars[node].glusterfs_hostname }}" +{%- elif 'openshift' in hostvars[node] -%} + "{{ hostvars[node].openshift.node.nodename }}" +{%- else -%} + "{{ node }}" +{%- endif -%} + ], + "storage": [ +{%- if 'glusterfs_ip' in hostvars[node] -%} + "{{ hostvars[node].glusterfs_ip }}" +{%- else -%} + "{{ hostvars[node].openshift.common.ip }}" +{%- endif -%} + ] + }, + "zone": {{ hostvars[node].glusterfs_zone | default(1) }} + }, + "devices": [ +{%- for device in hostvars[node].glusterfs_devices -%} + "{{ device }}"{% if not loop.last %},{% endif %} +{%- endfor -%} + ] + }{% if not loop.last %},{% endif %} +{%- endfor -%} + ] + }{% if not loop.last %},{% endif %} +{%- endfor -%} + ] +}
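
Notes on selected hunks above.

The node registry-auth refactor (roles/openshift_node/tasks/registry_auth.yml) is driven entirely by inventory variables. A minimal sketch of the relevant settings; the host and credential values here are hypothetical, not part of the patch:

    # group_vars sketch -- illustrative values only
    oreg_host: registry.example.com             # registry passed to `docker login`
    oreg_auth_user: pull-user                   # defining this enables the auth tasks
    oreg_auth_password: pull-password
    oreg_auth_credentials_replace: False        # set True to force a fresh login
    oreg_auth_credentials_path: /root/.docker   # illustrative; the role supplies its own default

When oreg_auth_user is defined on a containerized host, the new l_bind_docker_reg_auth fact makes openshift.docker.node.service bind-mount the credentials directory read-only at /root/.docker inside the node container.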
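
For the openshift_storage_glusterfs variables documented in the README table, a hypothetical inventory fragment (values are examples, not defaults):

    openshift_storage_glusterfs_namespace: glusterfs
    openshift_storage_glusterfs_name: storage
    openshift_storage_glusterfs_use_default_selector: True   # keep pre-existing/default node selectors
    # External heketi: FQDN or IP only; the port is supplied separately
    openshift_storage_glusterfs_heketi_url: heketi.example.com
    openshift_storage_glusterfs_heketi_port: 8080

Note the heketi_url change above: in external mode the variable now carries just the FQDN or IP, with openshift_storage_glusterfs_heketi_port contributing the port.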
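
The new files under files/v3.7/ are standard OpenShift Templates, so outside the role they can also be instantiated by hand. A hypothetical invocation (parameter values illustrative; heketi-template.yml marks IMAGE_NAME and IMAGE_VERSION as required):

    oc process -f roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml \
      -p IMAGE_NAME=heketi/heketi -p IMAGE_VERSION=latest \
      -p CLUSTER_NAME=storage -p HEKETI_KUBE_NAMESPACE=glusterfs \
      | oc create -f -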
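
glusterfs-storageclass.yml.j2 builds resturl from the route hostname when heketi is native, or from heketi_url plus heketi_port when it is external. With the hypothetical external values above and glusterfs_heketi_admin_key defined, it would render roughly as:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: glusterfs-storage
    provisioner: kubernetes.io/glusterfs
    parameters:
      resturl: "http://heketi.example.com:8080"
      restuser: "admin"
      secretNamespace: "glusterfs"
      secretName: "heketi-storage-admin-secret"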
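
topology.json.j2 groups glusterfs_nodes by their optional glusterfs_cluster value and emits one cluster entry per group. For two hypothetical hosts that set glusterfs_ip, glusterfs_zone, and glusterfs_devices, the rendered output would be approximately the following (pretty-printed; the template's whitespace trimming makes the real output denser):

    {
      "clusters": [
        {
          "nodes": [
            { "node": { "hostnames": { "manage": [ "node1.example.com" ],
                                       "storage": [ "192.0.2.11" ] },
                        "zone": 1 },
              "devices": [ "/dev/sdb", "/dev/sdc" ] },
            { "node": { "hostnames": { "manage": [ "node2.example.com" ],
                                       "storage": [ "192.0.2.12" ] },
                        "zone": 2 },
              "devices": [ "/dev/sdd" ] }
          ]
        }
      ]
    }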