10 files changed, 67 insertions, 16 deletions
diff --git a/ansible.cfg b/ansible.cfg
index 0c74d63da..14b77ba0f 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -29,3 +29,7 @@ nocows = True
 # Additional ssh options for OpenShift Ansible
 [ssh_connection]
 pipelining = True
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r
+
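
An aside, not part of the diff: a rough Python sketch of why the shortened ControlPath matters. Ssh multiplexing sockets are Unix domain sockets, whose paths are capped at roughly 104-108 bytes on common platforms, so a long pattern can silently disable connection reuse, which is what the comment above refers to. The directory below matches Ansible's usual control path directory, but the hostname, port, and user are made up for this illustration.

import os

# Illustrative values only; the host and user are hypothetical.
directory = os.path.expanduser("~/.ansible/cp")
host, port, user = "master01.mycluster.internal.example.com", "22", "openshift-deploy"

# Something like Ansible's historical default pattern vs. the shortened %h-%r pattern.
default_style = os.path.join(directory, "ansible-ssh-%s-%s-%s" % (host, port, user))
shortened = os.path.join(directory, "%s-%s" % (host, user))

for label, path in (("default-style", default_style), ("shortened", shortened)):
    # Paths beyond the socket limit make ssh skip connection reuse with no visible error.
    print("%-14s %3d bytes  %s" % (label, len(path), path))
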
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 9d7eeec24..f861d4bcf 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -2,16 +2,20 @@ FROM openshift3/playbook2image
 
 MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
 
+# override env vars from base image
+ENV SUMMARY="OpenShift's installation and configuration tool" \
+    DESCRIPTION="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster"
+
 LABEL name="openshift3/ose-ansible" \
-      summary="OpenShift's installation and configuration tool" \
-      description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+      summary="$SUMMARY" \
+      description="$DESCRIPTION" \
       url="https://github.com/openshift/openshift-ansible" \
       io.k8s.display-name="openshift-ansible" \
-      io.k8s.description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+      io.k8s.description="$DESCRIPTION" \
       io.openshift.expose-services="" \
       io.openshift.tags="openshift,install,upgrade,ansible" \
       com.redhat.component="aos3-installation-docker" \
-      version="v3.4.1" \
+      version="v3.6.0" \
       release="1" \
       architecture="x86_64"
 
@@ -20,13 +24,18 @@ LABEL name="openshift3/ose-ansible" \
 # because all content and dependencies (like 'oc') is already
 # installed via yum.
 USER root
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto" && \
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto skopeo openssl java-1.8.0-openjdk-headless httpd-tools" && \
     yum repolist > /dev/null && \
-    yum-config-manager --enable rhel-7-server-ose-3.4-rpms && \
+    yum-config-manager --enable rhel-7-server-ose-3.6-rpms && \
     yum-config-manager --enable rhel-7-server-rh-common-rpms && \
     yum install -y $INSTALL_PKGS && \
     yum clean all
 
+# The symlinks below are a (hopefully temporary) hack to work around the fact that this
+# image is based on python s2i which uses the python27 SCL instead of system python,
+# and so the system python modules we need would otherwise not be in the path.
+RUN ln -s /usr/lib/python2.7/site-packages/{boto,passlib} /opt/app-root/lib64/python2.7/
+
 USER ${USER_UID}
 
 # The playbook to be run is specified via the PLAYBOOK_FILE env var.
@@ -36,6 +45,7 @@ USER ${USER_UID}
 # $APP_HOME by the 'assemble' script, we set the WORK_DIR env var to the
 # location of openshift-ansible.
 ENV PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+    ANSIBLE_CONFIG=/usr/share/atomic-openshift-utils/ansible.cfg \
     WORK_DIR=/usr/share/ansible/openshift-ansible \
     OPTS="-v"
 
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 64c29a8d9..443b76ea1 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -39,7 +39,8 @@ class CallbackModule(CallbackBase):
 
     def v2_runner_on_failed(self, result, ignore_errors=False):
         super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
-        self.__failures.append(dict(result=result, ignore_errors=ignore_errors))
+        if not ignore_errors:
+            self.__failures.append(dict(result=result, ignore_errors=ignore_errors))
 
     def v2_playbook_on_stats(self, stats):
         super(CallbackModule, self).v2_playbook_on_stats(stats)
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
index 8d0fbcc9c..e80691ef3 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_storage.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -17,7 +17,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
     tags = ["pre-install", "health", "preflight"]
 
     dependencies = ["python-docker-py"]
-    storage_drivers = ["devicemapper", "overlay2"]
+    storage_drivers = ["devicemapper", "overlay", "overlay2"]
     max_thinpool_data_usage_percent = 90.0
     max_thinpool_meta_usage_percent = 90.0
 
diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
index 442f407b1..551e8dfa0 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/kibana.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
@@ -62,7 +62,7 @@ class Kibana(LoggingCheck):
             # TODO(lmeyer): give users option to validate certs
             status_code=302,
         )
-        result = self.execute_module('uri', args, task_vars)
+        result = self.execute_module('uri', args, None, task_vars)
         if result.get('failed'):
             return result['msg']
         return None
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index 05b4d300c..6e951e82c 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -54,12 +54,12 @@ class LoggingCheck(OpenShiftCheck):
         """Returns: list of pods not in a ready and running state"""
         return [
             pod for pod in pods
-            if any(
+            if not pod.get("status", {}).get("containerStatuses") or any(
                 container['ready'] is False
                 for container in pod['status']['containerStatuses']
             ) or not any(
                 condition['type'] == 'Ready' and condition['status'] == 'True'
-                for condition in pod['status']['conditions']
+                for condition in pod['status'].get('conditions', [])
             )
         ]
 
@@ -78,7 +78,7 @@ class LoggingCheck(OpenShiftCheck):
             "extra_args": list(extra_args) if extra_args else [],
         }
 
-        result = execute_module("ocutil", args, task_vars)
+        result = execute_module("ocutil", args, None, task_vars)
         if result.get("failed"):
             msg = (
                 'Unexpected error using `oc` to validate the logging stack components.\n'
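
An aside, not part of the diff: the updated not_running_pods logic from logging.py, pulled out as a standalone sketch to show the new guard in isolation. The pod dict at the bottom is a minimal hypothetical example, not one of the repository's test fixtures. A pod with no containerStatuses yet is now reported as not running instead of raising a KeyError.

def not_running_pods(pods):
    """Return pods that are not both ready and running (mirrors the patched logic)."""
    return [
        pod for pod in pods
        # Missing containerStatuses (e.g. the pod is not scheduled yet) counts as not running.
        if not pod.get("status", {}).get("containerStatuses") or any(
            container['ready'] is False
            for container in pod['status']['containerStatuses']
        ) or not any(
            condition['type'] == 'Ready' and condition['status'] == 'True'
            for condition in pod['status'].get('conditions', [])
        )
    ]

pod_without_statuses = {"status": {"conditions": [{"type": "Ready", "status": "True"}]}}
print(not_running_pods([pod_without_statuses]))  # the pod is listed as not running
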
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index 876614b1d..bb25e3f66 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -65,8 +65,8 @@ non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}}
         dict(info={
             "Driver": "overlay",
         }),
-        True,
-        ["unsupported Docker storage driver"],
+        False,
+        [],
     ),
     (
         dict(info={
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
index 19140a1b6..40a5d19d8 100644
--- a/roles/openshift_health_checker/test/kibana_test.py
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -169,7 +169,7 @@ def test_get_kibana_url(route, expect_url, expect_error):
     ),
 ])
 def test_verify_url_internal_failure(exec_result, expect):
-    check = Kibana(execute_module=lambda module_name, args, task_vars: dict(failed=True, msg=exec_result))
+    check = Kibana(execute_module=lambda module_name, args, tmp, task_vars: dict(failed=True, msg=exec_result))
     check._get_kibana_url = lambda task_vars: ('url', None)
 
     error = check._check_kibana_route({})
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
index b6db34fe3..128b76b12 100644
--- a/roles/openshift_health_checker/test/logging_check_test.py
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -50,6 +50,16 @@ plain_kibana_pod = {
     }
 }
 
+plain_kibana_pod_no_containerstatus = {
+    "metadata": {
+        "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+        "name": "logging-kibana-1",
+    },
+    "status": {
+        "conditions": [{"status": "True", "type": "Ready"}],
+    }
+}
+
 fluentd_pod_node1 = {
     "metadata": {
         "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
@@ -80,7 +90,7 @@ plain_curator_pod = {
     ("Permission denied", "Unexpected error using `oc`"),
 ])
 def test_oc_failure(problem, expect):
-    def execute_module(module_name, args, task_vars):
+    def execute_module(module_name, args, tmp, task_vars):
         if module_name == "ocutil":
             return dict(failed=True, result=problem)
         return dict(changed=False)
@@ -135,3 +145,23 @@ def test_get_pods_for_component(pod_output, expect_pods, expect_error):
         {}
     )
     assert_error(error, expect_error)
+
+
+@pytest.mark.parametrize('name, pods, expected_pods', [
+    (
+        'test single pod found, scheduled, but no containerStatuses field',
+        [plain_kibana_pod_no_containerstatus],
+        [plain_kibana_pod_no_containerstatus],
+    ),
+    (
+        'set of pods has at least one pod with containerStatuses (scheduled); should still fail',
+        [plain_kibana_pod_no_containerstatus, plain_kibana_pod],
+        [plain_kibana_pod_no_containerstatus],
+    ),
+
+], ids=lambda argvals: argvals[0])
+def test_get_not_running_pods_no_container_status(name, pods, expected_pods):
+    check = canned_loggingcheck(lambda exec_module, namespace, cmd, args, task_vars: '')
+    result = check.not_running_pods(pods)
+
+    assert result == expected_pods
diff --git a/utils/etc/ansible.cfg b/utils/etc/ansible.cfg
index 3425e7e62..f7e6fe2ff 100644
--- a/utils/etc/ansible.cfg
+++ b/utils/etc/ansible.cfg
@@ -28,3 +28,9 @@ deprecation_warnings = False
 # remote_tmp - set if provided by user (cli)
 # ssh_args - set if provided by user (cli)
 # control_path
+
+# Additional ssh options for OpenShift Ansible
+[ssh_connection]
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r
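
An aside, not part of the diff: the kibana.py and logging.py changes and the test updates above all track the same signature change, where execute_module takes an extra positional argument (tmp) before task_vars and the checks pass None for it. Below is a minimal hypothetical stub, not code from the repository, showing how a test double lines up with the new four-argument signature.

def fake_execute_module(module_name, args, tmp, task_vars):
    """Test double matching the new four-argument execute_module signature."""
    # The health checks pass None for tmp, so a stand-in can simply ignore it.
    return dict(changed=False, module=module_name, args=args, task_vars=task_vars)

# Hypothetical usage, mirroring how the updated tests construct a check:
#     check = Kibana(execute_module=fake_execute_module)
print(fake_execute_module('uri', {'url': 'https://kibana.example.com'}, None, {}))
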