summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRodolfo Carvalho <rhcarvalho@gmail.com>2017-04-11 17:09:33 +0200
committerjuanvallejo <jvallejo@redhat.com>2017-04-17 13:29:24 -0400
commit2aca8a4b833897ace556df407040e9eaeff1ebc4 (patch)
tree3c17a7c40f735da77a784eb53564a19d85934746
parent59e781baaa33d1ffb0d57529c23fd9a1c01a377a (diff)
downloadopenshift-2aca8a4b833897ace556df407040e9eaeff1ebc4.tar.gz
openshift-2aca8a4b833897ace556df407040e9eaeff1ebc4.tar.bz2
openshift-2aca8a4b833897ace556df407040e9eaeff1ebc4.tar.xz
openshift-2aca8a4b833897ace556df407040e9eaeff1ebc4.zip
Simplify memory availability check, review tests
- Fix required memory for etcd hosts (10 -> 20 GB), as per documentation.
- Some changes to make the code more similar to the similar DiskAvailability check.
- Do not raise exception for hosts that do not have a recommended memory value (those are ignored anyway through `is_active`, so that was essentially dead code).
- Test that the required memory is the max of the recommended memories for all groups assigned to a host. E.g. if a host is master and node, we should check that it has enough memory to be a master, because the memory requirement for a master is higher than for a node.
-rw-r--r--  roles/openshift_health_checker/openshift_checks/memory_availability.py | 50
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/memory_availability_test.py | 105
3 files changed, 87 insertions, 74 deletions
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
index b1629aa89..28805dc37 100644
--- a/roles/openshift_health_checker/openshift_checks/memory_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -1,5 +1,5 @@
# pylint: disable=missing-docstring
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, get_var
class MemoryAvailability(OpenShiftCheck):
@@ -8,31 +8,37 @@ class MemoryAvailability(OpenShiftCheck):
name = "memory_availability"
tags = ["preflight"]
- recommended_memory_mb = {
- "nodes": 8 * 1000,
- "masters": 16 * 1000,
- "etcd": 10 * 1000,
+ # Values taken from the official installation documentation:
+ # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
+ recommended_memory_bytes = {
+ "masters": 16 * 10**9,
+ "nodes": 8 * 10**9,
+ "etcd": 20 * 10**9,
}
@classmethod
def is_active(cls, task_vars):
- """Skip hosts that do not have recommended memory space requirements."""
+ """Skip hosts that do not have recommended memory requirements."""
group_names = get_var(task_vars, "group_names", default=[])
- has_mem_space_recommendation = bool(set(group_names).intersection(cls.recommended_memory_mb))
- return super(MemoryAvailability, cls).is_active(task_vars) and has_mem_space_recommendation
+ has_memory_recommendation = bool(set(group_names).intersection(cls.recommended_memory_bytes))
+ return super(MemoryAvailability, cls).is_active(task_vars) and has_memory_recommendation
def run(self, tmp, task_vars):
- group_names = get_var(task_vars, "group_names", default=[])
- total_memory = get_var(task_vars, "ansible_memtotal_mb")
-
- recommended_memory_mb = max(self.recommended_memory_mb.get(group, 0) for group in group_names)
- if not recommended_memory_mb:
- msg = "Unable to determine recommended memory size for group_name {group_name}"
- raise OpenShiftCheckException(msg.format(group_name=group_names))
-
- if total_memory < recommended_memory_mb:
- msg = ("Available memory ({available} MB) below recommended storage. "
- "Minimum required memory: {recommended} MB")
- return {"failed": True, "msg": msg.format(available=total_memory, recommended=recommended_memory_mb)}
-
- return {"changed": False}
+ group_names = get_var(task_vars, "group_names")
+ total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * 10**6
+
+ min_memory_bytes = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
+
+ if total_memory_bytes < min_memory_bytes:
+ return {
+ 'failed': True,
+ 'msg': (
+ 'Available memory ({available:.1f} GB) '
+ 'below recommended value ({recommended:.1f} GB)'
+ ).format(
+ available=float(total_memory_bytes) / 10**9,
+ recommended=float(min_memory_bytes) / 10**9,
+ ),
+ }
+
+ return {}
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index 8054ad25c..970b474d7 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -24,9 +24,9 @@ def test_is_active(group_names, is_containerized, is_active):
@pytest.mark.parametrize('ansible_mounts,extra_words', [
- ([], ['none']), # empty ansible_mounts
- ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
- ([{'mount': '/var'}], ['/var']), # missing size_available
+ ([], ['none']), # empty ansible_mounts
+ ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
+ ([{'mount': '/var'}], ['/var']), # missing size_available
])
def test_cannot_determine_available_disk(ansible_mounts, extra_words):
task_vars = dict(
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
index a8ba032cc..e161a5b9e 100644
--- a/roles/openshift_health_checker/test/memory_availability_test.py
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -1,84 +1,91 @@
import pytest
-from openshift_checks.memory_availability import MemoryAvailability, OpenShiftCheckException
+from openshift_checks.memory_availability import MemoryAvailability
-@pytest.mark.parametrize('group_names,is_containerized,is_active', [
- (['masters'], False, True),
- # ensure check is skipped on containerized installs
- (['masters'], True, True),
- (['nodes'], True, True),
- (['etcd'], False, True),
- (['masters', 'nodes'], False, True),
- (['masters', 'etcd'], False, True),
- ([], False, False),
- (['lb'], False, False),
- (['nfs'], False, False),
+@pytest.mark.parametrize('group_names,is_active', [
+ (['masters'], True),
+ (['nodes'], True),
+ (['etcd'], True),
+ (['masters', 'nodes'], True),
+ (['masters', 'etcd'], True),
+ ([], False),
+ (['lb'], False),
+ (['nfs'], False),
])
-def test_is_active(group_names, is_containerized, is_active):
+def test_is_active(group_names, is_active):
task_vars = dict(
group_names=group_names,
- openshift=dict(common=dict(is_containerized=is_containerized)),
)
assert MemoryAvailability.is_active(task_vars=task_vars) == is_active
-@pytest.mark.parametrize("group_name,size_available", [
+@pytest.mark.parametrize('group_names,ansible_memtotal_mb', [
(
- "masters",
+ ['masters'],
17200,
),
(
- "nodes",
+ ['nodes'],
8200,
),
(
- "etcd",
- 12200,
+ ['etcd'],
+ 22200,
+ ),
+ (
+ ['masters', 'nodes'],
+ 17000,
),
])
-def test_mem_check_with_recommended_memtotal(group_name, size_available):
- result = MemoryAvailability(execute_module=NotImplementedError).run(tmp=None, task_vars=dict(
- group_names=[group_name],
- ansible_memtotal_mb=size_available,
- ))
+def test_succeeds_with_recommended_memory(group_names, ansible_memtotal_mb):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_memtotal_mb=ansible_memtotal_mb,
+ )
+
+ check = MemoryAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
assert not result.get('failed', False)
-@pytest.mark.parametrize("group_name,size_available", [
+@pytest.mark.parametrize('group_names,ansible_memtotal_mb,extra_words', [
(
- "masters",
- 1,
+ ['masters'],
+ 0,
+ ['0.0 GB'],
),
(
- "nodes",
- 2,
+ ['nodes'],
+ 100,
+ ['0.1 GB'],
),
(
- "etcd",
- 3,
+ ['etcd'],
+ -1,
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes', 'masters'],
+ # enough memory for a node, not enough for a master
+ 11000,
+ ['11.0 GB'],
),
])
-def test_mem_check_with_insufficient_memtotal(group_name, size_available):
- result = MemoryAvailability(execute_module=NotImplementedError).run(tmp=None, task_vars=dict(
- group_names=[group_name],
- ansible_memtotal_mb=size_available,
- ))
+def test_fails_with_insufficient_memory(group_names, ansible_memtotal_mb, extra_words):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_memtotal_mb=ansible_memtotal_mb,
+ )
- assert result['failed']
- assert "below recommended storage" in result['msg']
+ check = MemoryAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+ assert result['failed']
+ for word in 'below recommended'.split() + extra_words:
+ assert word in result['msg']
-def test_mem_check_with_invalid_groupname():
- with pytest.raises(OpenShiftCheckException) as excinfo:
- result = MemoryAvailability(execute_module=NotImplementedError).run(tmp=None, task_vars=dict(
- openshift=dict(common=dict(
- service_type='origin',
- is_containerized=False,
- )),
- group_names=["invalid"],
- ansible_memtotal_mb=1234567,
- ))
- assert "'invalid'" in str(excinfo.value)
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')