| author | juanvallejo <jvallejo@redhat.com> | 2017-07-10 15:38:22 -0400 |
| --- | --- | --- |
| committer | Luke Meyer <lmeyer@redhat.com> | 2017-07-11 15:38:43 -0400 |
| commit | 1c213b51468fa698acb08d18ce5d6b2a5796d93d (patch) | |
| tree | fba4a1496cd7c9028deaa53a846a5796ba1b21f5 | |
| parent | c630e6dbd29e80ad57cb230244fe1cb830a891aa (diff) | |
add scheduled pods check
| -rw-r--r-- | roles/openshift_health_checker/openshift_checks/logging/logging.py | 4 |
| -rw-r--r-- | roles/openshift_health_checker/test/logging_check_test.py | 30 |
2 files changed, 32 insertions, 2 deletions
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index e8821f0ba..6e951e82c 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -54,12 +54,12 @@ class LoggingCheck(OpenShiftCheck):
         """Returns: list of pods not in a ready and running state"""
         return [
             pod for pod in pods
-            if any(
+            if not pod.get("status", {}).get("containerStatuses") or any(
                 container['ready'] is False
                 for container in pod['status']['containerStatuses']
             ) or not any(
                 condition['type'] == 'Ready' and
                 condition['status'] == 'True'
-                for condition in pod['status']['conditions']
+                for condition in pod['status'].get('conditions', [])
             )
         ]
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
index 3b9e3fa8d..128b76b12 100644
--- a/roles/openshift_health_checker/test/logging_check_test.py
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -50,6 +50,16 @@ plain_kibana_pod = {
     }
 }
 
+plain_kibana_pod_no_containerstatus = {
+    "metadata": {
+        "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+        "name": "logging-kibana-1",
+    },
+    "status": {
+        "conditions": [{"status": "True", "type": "Ready"}],
+    }
+}
+
 fluentd_pod_node1 = {
     "metadata": {
         "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
@@ -135,3 +145,23 @@ def test_get_pods_for_component(pod_output, expect_pods, expect_error):
         {}
     )
     assert_error(error, expect_error)
+
+
+@pytest.mark.parametrize('name, pods, expected_pods', [
+    (
+        'test single pod found, scheduled, but no containerStatuses field',
+        [plain_kibana_pod_no_containerstatus],
+        [plain_kibana_pod_no_containerstatus],
+    ),
+    (
+        'set of pods has at least one pod with containerStatuses (scheduled); should still fail',
+        [plain_kibana_pod_no_containerstatus, plain_kibana_pod],
+        [plain_kibana_pod_no_containerstatus],
+    ),
+
+], ids=lambda argvals: argvals[0])
+def test_get_not_running_pods_no_container_status(name, pods, expected_pods):
+    check = canned_loggingcheck(lambda exec_module, namespace, cmd, args, task_vars: '')
+    result = check.not_running_pods(pods)
+
+    assert result == expected_pods
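
For readers skimming the diff, the sketch below (not part of the commit) shows how the updated `not_running_pods` filter treats a pod that has been scheduled but has no `containerStatuses` field yet. The pod dicts are simplified stand-ins rather than the real fixtures from `logging_check_test.py`, and the function is lifted out of `LoggingCheck` so it can run on its own:

```python
# Standalone sketch of the updated filter logic (assumption: simplified pod
# dicts; in the repo this is a method on LoggingCheck in logging.py).

def not_running_pods(pods):
    """Return pods that are not in a ready and running state."""
    return [
        pod for pod in pods
        # A scheduled pod whose containerStatuses are missing or empty is
        # treated as not running instead of raising a KeyError.
        if not pod.get("status", {}).get("containerStatuses") or any(
            container["ready"] is False
            for container in pod["status"]["containerStatuses"]
        ) or not any(
            condition["type"] == "Ready" and condition["status"] == "True"
            for condition in pod["status"].get("conditions", [])
        )
    ]


scheduled_pod = {  # scheduled, but the kubelet has not reported containers yet
    "status": {"conditions": [{"type": "Ready", "status": "True"}]},
}
running_pod = {  # fully running and ready
    "status": {
        "containerStatuses": [{"ready": True}],
        "conditions": [{"type": "Ready", "status": "True"}],
    },
}

print(not_running_pods([scheduled_pod, running_pod]))  # -> [scheduled_pod]
```

The key change is that a missing or empty `containerStatuses` list now short-circuits the comprehension, so such a pod is reported as not running; before this commit, indexing `pod['status']['containerStatuses']` directly would fail for pods in that state.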