Diffstat (limited to 'roles')
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 21
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 7
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 7
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_policy_user.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_clusterrole.py | 6
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 7
-rw-r--r--  roles/lib_openshift/src/lib/rule.py | 8
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_registry.py | 10
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_router.py | 7
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py | 28
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 65
-rw-r--r--  roles/openshift_health_checker/openshift_checks/memory_availability.py | 44
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 17
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 4
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 155
-rw-r--r--  roles/openshift_health_checker/test/memory_availability_test.py | 91
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml | 4
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml | 345
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml | 168
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml | 342
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml | 168
-rw-r--r--  roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml | 345
-rw-r--r--  roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml | 168
-rw-r--r--  roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml | 342
-rw-r--r--  roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml | 168
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 11
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 23
-rw-r--r--  roles/openshift_logging/tasks/generate_configmaps.yaml | 40
-rw-r--r--  roles/openshift_logging/tasks/generate_secrets.yaml | 32
-rw-r--r--  roles/openshift_logging/tasks/generate_services.yaml | 32
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml | 178
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 4
-rw-r--r--  roles/openshift_logging/tasks/install_mux.yaml | 67
-rw-r--r--  roles/openshift_logging/tasks/oc_apply.yaml | 94
-rw-r--r--  roles/openshift_logging/tasks/procure_shared_key.yaml | 25
-rw-r--r--  roles/openshift_logging/tasks/set_es_storage.yaml | 82
-rw-r--r--  roles/openshift_logging/tasks/start_cluster.yaml | 20
-rw-r--r--  roles/openshift_logging/tasks/stop_cluster.yaml | 20
-rw-r--r--  roles/openshift_logging/templates/curator.j2 | 5
-rw-r--r--  roles/openshift_logging/templates/es.j2 | 7
-rw-r--r--  roles/openshift_logging/templates/fluentd.j2 | 12
-rw-r--r--  roles/openshift_logging/templates/mux.j2 | 121
-rw-r--r--  roles/openshift_logging/templates/service.j2 | 6
-rw-r--r--  roles/openshift_manageiq/tasks/main.yaml | 88
-rw-r--r--  roles/openshift_manageiq/vars/main.yml | 64
-rw-r--r--  roles/openshift_metrics/tasks/generate_heapster_certificates.yaml | 40
-rw-r--r--  roles/openshift_metrics/tasks/generate_heapster_secrets.yaml | 14
-rw-r--r--  roles/openshift_metrics/tasks/install_heapster.yaml | 4
-rw-r--r--  roles/openshift_metrics/templates/heapster.j2 | 11
-rw-r--r--  roles/openshift_metrics/templates/service.j2 | 6
-rw-r--r--  roles/openshift_node/tasks/main.yml | 23
-rw-r--r--  roles/openshift_node_upgrade/tasks/docker/upgrade.yml | 17
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 59
-rw-r--r--  roles/openshift_node_upgrade/tasks/restart.yml (renamed from roles/openshift_node_upgrade/tasks/docker/restart.yml) | 3
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 6
-rw-r--r--  roles/openshift_version/tasks/main.yml | 56
82 files changed, 1426 insertions(+), 2447 deletions(-)
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 4d083c4d5..8a311cd0f 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -900,6 +900,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 48e80a7cd..0930faadb 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -886,6 +886,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 35168d1a3..6a7be65d0 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -872,6 +872,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 5f7e4b8fa..44923ecd2 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -872,6 +872,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1960,7 +1967,7 @@ class PolicyUser(OpenShiftCLI):
@property
def policybindings(self):
if self._policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ results = self._get('policybindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve policybindings')
self._policy_bindings = results['results'][0]['items'][0]
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index a6718d921..0604f48bb 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -990,6 +990,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 0e4b336fb..bdcf94a58 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1015,6 +1015,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index a34ce351e..af48ce636 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -864,6 +864,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1531,10 +1538,10 @@ class Rule(object):
results = []
for rule in inc_rules:
- results.append(Rule(rule['apiGroups'],
- rule['attributeRestrictions'],
- rule['resources'],
- rule['verbs']))
+ results.append(Rule(rule.get('apiGroups', ['']),
+ rule.get('attributeRestrictions', None),
+ rule.get('resources', []),
+ rule.get('verbs', [])))
return results
@@ -1633,7 +1640,7 @@ class OCClusterRole(OpenShiftCLI):
@property
def clusterrole(self):
''' property for clusterrole'''
- if not self._clusterrole:
+ if self._clusterrole is None:
self.get()
return self._clusterrole
@@ -1669,6 +1676,7 @@ class OCClusterRole(OpenShiftCLI):
elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
result['returncode'] = 0
+ self.clusterrole = None
return result
@@ -1738,6 +1746,9 @@ class OCClusterRole(OpenShiftCLI):
# Create it here
api_rval = oc_clusterrole.create()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
# return the created object
api_rval = oc_clusterrole.get()
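
Two changes in this module are easy to miss. First, `if self._clusterrole is None` replaces a truthiness test, so a cached-but-falsy result no longer forces a refetch on every property access, and the delete path can reset the cache explicitly. Second, the create path now surfaces a failed `oc create` instead of blindly calling get() afterwards. A self-contained sketch of the caching pattern, with hypothetical class names:

class CachedResource(object):
    def __init__(self):
        self._resource = None
        self.fetch_count = 0

    @property
    def resource(self):
        # 'if not self._resource' would re-run fetch() whenever the cached
        # result is falsy ({} or []); 'is None' refetches only when nothing
        # has been fetched yet.
        if self._resource is None:
            self._resource = self.fetch()
        return self._resource

    def fetch(self):
        self.fetch_count += 1
        return {}  # an empty-but-valid result should stay cached

r = CachedResource()
r.resource
r.resource
assert r.fetch_count == 1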
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 69dd23a0e..385ed888b 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -870,6 +870,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 70329ccfe..649de547e 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -914,6 +914,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index bda5eebc5..74bf63353 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -881,6 +881,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 462e14868..2dd3d28ec 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -854,6 +854,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index 8aed060bb..bb7f97689 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -873,6 +873,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 0d18a7afe..ec9abcda7 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -890,6 +890,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 0b01670c6..706972de2 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -893,6 +893,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 9b321b47c..bc5245216 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -825,6 +825,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 34f80ce13..de5426c51 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -882,6 +882,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 331f31e41..02cd810ce 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -879,6 +879,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 3e4601cc3..a9103ebf6 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -874,6 +874,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 755ab3b02..f005adffc 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -924,6 +924,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 0c83338b0..9dcb38216 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -868,6 +868,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 26e52a926..2ac0abcec 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -914,6 +914,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 440cda1b3..0af695e08 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -920,6 +920,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 5eb36ee32..ba8a1fdac 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -866,6 +866,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 1bc788e87..5bff7621c 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -866,6 +866,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 3009e661a..450a30f57 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -926,6 +926,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 88f295a74..0937df5a1 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -838,6 +838,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 5f936fb49..d0e7e77e1 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -903,6 +903,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py
index 88fcc1ddc..37a685ebb 100644
--- a/roles/lib_openshift/src/class/oc_adm_policy_user.py
+++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py
@@ -46,7 +46,7 @@ class PolicyUser(OpenShiftCLI):
@property
def policybindings(self):
if self._policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ results = self._get('policybindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve policybindings')
self._policy_bindings = results['results'][0]['items'][0]
diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py
index 1d3d977db..ae6795446 100644
--- a/roles/lib_openshift/src/class/oc_clusterrole.py
+++ b/roles/lib_openshift/src/class/oc_clusterrole.py
@@ -22,7 +22,7 @@ class OCClusterRole(OpenShiftCLI):
@property
def clusterrole(self):
''' property for clusterrole'''
- if not self._clusterrole:
+ if self._clusterrole is None:
self.get()
return self._clusterrole
@@ -58,6 +58,7 @@ class OCClusterRole(OpenShiftCLI):
elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
result['returncode'] = 0
+ self.clusterrole = None
return result
@@ -127,6 +128,9 @@ class OCClusterRole(OpenShiftCLI):
# Create it here
api_rval = oc_clusterrole.create()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
# return the created object
api_rval = oc_clusterrole.get()
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 1868b1420..fc1b6f1ec 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -76,6 +76,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/src/lib/rule.py b/roles/lib_openshift/src/lib/rule.py
index 4590dcf90..fe5ed9723 100644
--- a/roles/lib_openshift/src/lib/rule.py
+++ b/roles/lib_openshift/src/lib/rule.py
@@ -136,9 +136,9 @@ class Rule(object):
results = []
for rule in inc_rules:
- results.append(Rule(rule['apiGroups'],
- rule['attributeRestrictions'],
- rule['resources'],
- rule['verbs']))
+ results.append(Rule(rule.get('apiGroups', ['']),
+ rule.get('attributeRestrictions', None),
+ rule.get('resources', []),
+ rule.get('verbs', [])))
return results
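
The switch from subscript access to dict.get matters because rules returned by the API may omit optional fields: `rule['apiGroups']` raises KeyError on such rules, while the defaults above mirror what the API treats as unset. A short illustration (the sample rule is made up):

rule_from_api = {'resources': ['pods'], 'verbs': ['get']}  # no 'apiGroups' key

# rule_from_api['apiGroups'] would raise KeyError here; .get() supplies
# the same defaults the Rule constructor expects.
api_groups = rule_from_api.get('apiGroups', [''])
restrictions = rule_from_api.get('attributeRestrictions', None)
print(api_groups, restrictions)  # [''] None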
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
index bab36fddc..30e13ce4b 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
@@ -205,10 +205,11 @@ class RegistryTest(unittest.TestCase):
}
]}'''
+ @mock.patch('oc_adm_registry.locate_oc_binary')
@mock.patch('oc_adm_registry.Utils._write')
@mock.patch('oc_adm_registry.Utils.create_tmpfile_copy')
@mock.patch('oc_adm_registry.Registry._run')
- def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):
''' Testing state present '''
params = {'state': 'present',
'debug': False,
@@ -240,10 +241,9 @@ class RegistryTest(unittest.TestCase):
(0, '', ''),
]
- mock_tmpfile_copy.side_effect = [
- '/tmp/mocked_kubeconfig',
- '/tmp/mocked_kubeconfig',
- ]
+ mock_tmpfile_copy.return_value = '/tmp/mocked_kubeconfig'
+
+ mock_oc_binary.return_value = 'oc'
results = Registry.run_ansible(params, False)
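
A note on the test changes here and below: stacked @mock.patch decorators are applied bottom-up, so the mocks arrive as positional arguments in reverse order, and the new locate_oc_binary patch at the top of each stack lands in the last parameter. This test also swaps mock_tmpfile_copy's two-element side_effect list for a single return_value, so the mock no longer raises StopIteration if the kubeconfig is copied a different number of times. A standalone sketch of the decorator ordering rule (using stdlib unittest.mock; the repo's tests import the external mock package):

import os
import unittest
from unittest import mock

class OrderDemo(unittest.TestCase):
    @mock.patch('os.getcwd')   # outermost decorator -> last mock argument
    @mock.patch('os.listdir')  # innermost decorator -> first mock argument
    def test_order(self, mock_listdir, mock_getcwd):
        mock_listdir.return_value = ['a']
        mock_getcwd.return_value = '/tmp'
        self.assertEqual(os.listdir('.'), ['a'])
        self.assertEqual(os.getcwd(), '/tmp')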
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
index 51393dbaf..5481ac623 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
@@ -286,10 +286,11 @@ class RouterTest(unittest.TestCase):
]
}'''
+ @mock.patch('oc_adm_router.locate_oc_binary')
@mock.patch('oc_adm_router.Utils._write')
@mock.patch('oc_adm_router.Utils.create_tmpfile_copy')
@mock.patch('oc_adm_router.Router._run')
- def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):
''' Testing a create '''
params = {'state': 'present',
'debug': False,
@@ -345,6 +346,10 @@ class RouterTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc',
+ ]
+
results = Router.run_ansible(params, False)
self.assertTrue(results['changed'])
diff --git a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
index da326742f..b19a5a880 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
@@ -25,9 +25,10 @@ class OCObjectValidatorTest(unittest.TestCase):
maxDiff = None
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_no_data(self, mock_cmd, mock_tmpfile_copy):
+ def test_no_data(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
        ''' Testing when all objects are empty '''
# Arrange
@@ -62,6 +63,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc',
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
@@ -76,9 +81,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_error_code(self, mock_cmd, mock_tmpfile_copy):
+ def test_error_code(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when we fail to get objects '''
# Arrange
@@ -98,6 +104,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
error_results = {
'returncode': 1,
'stderr': 'Error.',
@@ -120,9 +130,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_valid_both(self, mock_cmd, mock_tmpfile_copy):
+ def test_valid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
        ''' Testing when all objects are valid '''
# Arrange
@@ -427,6 +438,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
@@ -441,9 +456,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_invalid_both(self, mock_cmd, mock_tmpfile_copy):
+ def test_invalid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when all objects are invalid '''
# Arrange
@@ -886,6 +902,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
new file mode 100644
index 000000000..c2792a0fe
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -0,0 +1,65 @@
+# pylint: disable=missing-docstring
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks.mixins import NotContainerizedMixin
+
+
+class DiskAvailability(NotContainerizedMixin, OpenShiftCheck):
+ """Check that recommended disk space is available before a first-time install."""
+
+ name = "disk_availability"
+ tags = ["preflight"]
+
+ # Values taken from the official installation documentation:
+ # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
+ recommended_disk_space_bytes = {
+ "masters": 40 * 10**9,
+ "nodes": 15 * 10**9,
+ "etcd": 20 * 10**9,
+ }
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have recommended disk space requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ has_disk_space_recommendation = bool(set(group_names).intersection(cls.recommended_disk_space_bytes))
+ return super(DiskAvailability, cls).is_active(task_vars) and has_disk_space_recommendation
+
+ def run(self, tmp, task_vars):
+ group_names = get_var(task_vars, "group_names")
+ ansible_mounts = get_var(task_vars, "ansible_mounts")
+
+ min_free_bytes = max(self.recommended_disk_space_bytes.get(name, 0) for name in group_names)
+ free_bytes = self.openshift_available_disk(ansible_mounts)
+
+ if free_bytes < min_free_bytes:
+ return {
+ 'failed': True,
+ 'msg': (
+ 'Available disk space ({:.1f} GB) for the volume containing '
+ '"/var" is below minimum recommended space ({:.1f} GB)'
+ ).format(float(free_bytes) / 10**9, float(min_free_bytes) / 10**9)
+ }
+
+ return {}
+
+ @staticmethod
+ def openshift_available_disk(ansible_mounts):
+ """Determine the available disk space for an OpenShift installation.
+
+ ansible_mounts should be a list of dicts like the 'setup' Ansible module
+ returns.
+ """
+ # priority list in descending order
+ supported_mnt_paths = ["/var", "/"]
+ available_mnts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
+
+ try:
+ for path in supported_mnt_paths:
+ if path in available_mnts:
+ return available_mnts[path]["size_available"]
+ except KeyError:
+ pass
+
+        paths = ', '.join(sorted(available_mnts)) or 'none'
+ msg = "Unable to determine available disk space. Paths mounted: {}.".format(paths)
+ raise OpenShiftCheckException(msg)
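
Because the new check consumes only Ansible facts, it can be exercised without a cluster. A usage sketch with made-up fact values, mirroring the shape the setup module reports:

from openshift_checks.disk_availability import DiskAvailability

task_vars = dict(
    group_names=['masters'],
    openshift=dict(common=dict(is_containerized=False)),
    ansible_mounts=[
        dict(mount='/', size_available=10 * 10**9),
        dict(mount='/var', size_available=50 * 10**9),  # '/var' outranks '/'
    ],
)

if DiskAvailability.is_active(task_vars):
    result = DiskAvailability(execute_module=None).run(tmp=None, task_vars=task_vars)
    print(result)  # {} -- /var clears the 40 GB recommended for masters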
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
new file mode 100644
index 000000000..28805dc37
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -0,0 +1,44 @@
+# pylint: disable=missing-docstring
+from openshift_checks import OpenShiftCheck, get_var
+
+
+class MemoryAvailability(OpenShiftCheck):
+ """Check that recommended memory is available."""
+
+ name = "memory_availability"
+ tags = ["preflight"]
+
+ # Values taken from the official installation documentation:
+ # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
+ recommended_memory_bytes = {
+ "masters": 16 * 10**9,
+ "nodes": 8 * 10**9,
+ "etcd": 20 * 10**9,
+ }
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have recommended memory requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ has_memory_recommendation = bool(set(group_names).intersection(cls.recommended_memory_bytes))
+ return super(MemoryAvailability, cls).is_active(task_vars) and has_memory_recommendation
+
+ def run(self, tmp, task_vars):
+ group_names = get_var(task_vars, "group_names")
+ total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * 10**6
+
+ min_memory_bytes = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
+
+ if total_memory_bytes < min_memory_bytes:
+ return {
+ 'failed': True,
+ 'msg': (
+ 'Available memory ({available:.1f} GB) '
+ 'below recommended value ({recommended:.1f} GB)'
+ ).format(
+ available=float(total_memory_bytes) / 10**9,
+ recommended=float(min_memory_bytes) / 10**9,
+ ),
+ }
+
+ return {}
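
Note the unit convention: ansible_memtotal_mb is scaled by 10**6, so both sides of the comparison are in decimal bytes; that is why the tests below use values like 17200 MB to clear the 16 GB master threshold. A quick arithmetic check:

ansible_memtotal_mb = 17200                       # a bit over 17 GB of RAM
total_memory_bytes = ansible_memtotal_mb * 10**6  # 1.72e10 bytes
recommended_master_bytes = 16 * 10**9             # masters threshold above
print(total_memory_bytes >= recommended_master_bytes)  # True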
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 657e15160..20d160eaf 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -1,4 +1,8 @@
-# pylint: disable=missing-docstring
+# pylint: disable=missing-docstring,too-few-public-methods
+"""
+Mixin classes meant to be used with subclasses of OpenShiftCheck.
+"""
+
from openshift_checks import get_var
@@ -7,12 +11,5 @@ class NotContainerizedMixin(object):
@classmethod
def is_active(cls, task_vars):
- return (
- # This mixin is meant to be used with subclasses of OpenShiftCheck.
- super(NotContainerizedMixin, cls).is_active(task_vars) and
- not cls.is_containerized(task_vars)
- )
-
- @staticmethod
- def is_containerized(task_vars):
- return get_var(task_vars, "openshift", "common", "is_containerized")
+ is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+ return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized
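
The slimmed-down mixin relies on cooperative multiple inheritance: super() walks the MRO of the concrete check class, so the base class's is_active() criteria still apply before the containerization test. A simplified stand-in, not the repo's actual classes:

class OpenShiftCheck(object):
    @classmethod
    def is_active(cls, task_vars):
        return True  # the real base class applies its own criteria

class NotContainerizedMixin(object):
    @classmethod
    def is_active(cls, task_vars):
        containerized = task_vars['openshift']['common']['is_containerized']
        return super(NotContainerizedMixin, cls).is_active(task_vars) and not containerized

class SomeCheck(NotContainerizedMixin, OpenShiftCheck):
    pass

print(SomeCheck.is_active({'openshift': {'common': {'is_containerized': True}}}))   # False
print(SomeCheck.is_active({'openshift': {'common': {'is_containerized': False}}}))  # True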
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index a877246f4..2693ae37b 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -1,5 +1,7 @@
import pytest
+from ansible.playbook.play_context import PlayContext
+
from openshift_health_check import ActionModule, resolve_checks
from openshift_checks import OpenShiftCheckException
@@ -34,7 +36,7 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
@pytest.fixture
def plugin():
task = FakeTask('openshift_health_check', {'checks': ['fake_check']})
- plugin = ActionModule(task, None, None, None, None, None)
+ plugin = ActionModule(task, None, PlayContext(), None, None, None)
return plugin
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
new file mode 100644
index 000000000..970b474d7
--- /dev/null
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -0,0 +1,155 @@
+import pytest
+
+from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckException
+
+
+@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+ (['masters'], False, True),
+ # ensure check is skipped on containerized installs
+ (['masters'], True, False),
+ (['nodes'], False, True),
+ (['etcd'], False, True),
+ (['masters', 'nodes'], False, True),
+ (['masters', 'etcd'], False, True),
+ ([], False, False),
+ (['lb'], False, False),
+ (['nfs'], False, False),
+])
+def test_is_active(group_names, is_containerized, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert DiskAvailability.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize('ansible_mounts,extra_words', [
+ ([], ['none']), # empty ansible_mounts
+ ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
+ ([{'mount': '/var'}], ['/var']), # missing size_available
+])
+def test_cannot_determine_available_disk(ansible_mounts, extra_words):
+ task_vars = dict(
+ group_names=['masters'],
+ ansible_mounts=ansible_mounts,
+ )
+ check = DiskAvailability(execute_module=fake_execute_module)
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+
+ for word in 'determine available disk'.split() + extra_words:
+ assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('group_names,ansible_mounts', [
+ (
+ ['masters'],
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['nodes'],
+ [{
+ 'mount': '/',
+ 'size_available': 15 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['etcd'],
+ [{
+ 'mount': '/',
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['etcd'],
+ [{
+ # not enough space on / ...
+ 'mount': '/',
+ 'size_available': 0,
+ }, {
+ # ... but enough on /var
+ 'mount': '/var',
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ ),
+])
+def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_mounts=ansible_mounts,
+ )
+
+ check = DiskAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('group_names,ansible_mounts,extra_words', [
+ (
+ ['masters'],
+ [{
+ 'mount': '/',
+ 'size_available': 1,
+ }],
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes'],
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**9,
+ }],
+ ['1.0 GB'],
+ ),
+ (
+ ['etcd'],
+ [{
+ 'mount': '/',
+ 'size_available': 1,
+ }],
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes', 'masters'],
+ [{
+ 'mount': '/',
+ # enough space for a node, not enough for a master
+ 'size_available': 15 * 10**9 + 1,
+ }],
+ ['15.0 GB'],
+ ),
+ (
+ ['etcd'],
+ [{
+ # enough space on / ...
+ 'mount': '/',
+ 'size_available': 20 * 10**9 + 1,
+ }, {
+        # ... but not enough on /var
+ 'mount': '/var',
+ 'size_available': 0,
+ }],
+ ['0.0 GB'],
+ ),
+])
+def test_fails_with_insufficient_disk_space(group_names, ansible_mounts, extra_words):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_mounts=ansible_mounts,
+ )
+
+ check = DiskAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert result['failed']
+ for word in 'below recommended'.split() + extra_words:
+ assert word in result['msg']
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
new file mode 100644
index 000000000..e161a5b9e
--- /dev/null
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -0,0 +1,91 @@
+import pytest
+
+from openshift_checks.memory_availability import MemoryAvailability
+
+
+@pytest.mark.parametrize('group_names,is_active', [
+ (['masters'], True),
+ (['nodes'], True),
+ (['etcd'], True),
+ (['masters', 'nodes'], True),
+ (['masters', 'etcd'], True),
+ ([], False),
+ (['lb'], False),
+ (['nfs'], False),
+])
+def test_is_active(group_names, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ )
+ assert MemoryAvailability.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize('group_names,ansible_memtotal_mb', [
+ (
+ ['masters'],
+ 17200,
+ ),
+ (
+ ['nodes'],
+ 8200,
+ ),
+ (
+ ['etcd'],
+ 22200,
+ ),
+ (
+ ['masters', 'nodes'],
+ 17000,
+ ),
+])
+def test_succeeds_with_recommended_memory(group_names, ansible_memtotal_mb):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_memtotal_mb=ansible_memtotal_mb,
+ )
+
+ check = MemoryAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('group_names,ansible_memtotal_mb,extra_words', [
+ (
+ ['masters'],
+ 0,
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes'],
+ 100,
+ ['0.1 GB'],
+ ),
+ (
+ ['etcd'],
+ -1,
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes', 'masters'],
+ # enough memory for a node, not enough for a master
+ 11000,
+ ['11.0 GB'],
+ ),
+])
+def test_fails_with_insufficient_memory(group_names, ansible_memtotal_mb, extra_words):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_memtotal_mb=ansible_memtotal_mb,
+ )
+
+ check = MemoryAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert result['failed']
+ for word in 'below recommended'.split() + extra_words:
+ assert word in result['msg']
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
index c67058696..5abb2ef83 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
@@ -223,7 +223,7 @@ items:
-
description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"'
name: IMAGE_VERSION
- value: "3.4.0"
+ value: "v3.4"
-
description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
name: IMAGE_PULL_SECRET
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
index 6ead122c5..1d319eab8 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
@@ -105,7 +105,7 @@ parameters:
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
name: IMAGE_VERSION
- value: "3.4.0"
+ value: "v3.4"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
@@ -118,7 +118,7 @@ parameters:
description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
name: MODE
value: "deploy"
--
+-
description: "Set to true to continue even if the deployer runs into an error."
name: CONTINUE_ON_ERROR
value: "false"
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
deleted file mode 100644
index fdfc285ca..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
+++ /dev/null
@@ -1,345 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- kind: ClusterRole
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- kind: ClusterRole
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- kind: ClusterRole
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
- -
- description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"'
- name: IMAGE_VERSION
- value: "3.4.0"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
deleted file mode 100644
index c4ab794ae..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "v3.5"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
deleted file mode 100644
index 5b5503500..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
+++ /dev/null
@@ -1,342 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
- -
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "latest"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
-   description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC.'
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
-   description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC.'
-   name: ES_OPS_PVC_DYNAMIC
-  -
-   description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_OPS_CLUSTER_SIZE / 2 + 1."
-   name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml
deleted file mode 100644
index d191c0439..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "openshift/origin-"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "latest"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml
deleted file mode 100644
index fdfc285ca..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml
+++ /dev/null
@@ -1,345 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- kind: ClusterRole
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- kind: ClusterRole
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- kind: ClusterRole
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
- -
- description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"'
- name: IMAGE_VERSION
- value: "3.4.0"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
-   description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC.'
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
-   description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC.'
-   name: ES_OPS_PVC_DYNAMIC
-  -
-   description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_OPS_CLUSTER_SIZE / 2 + 1."
-   name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml
deleted file mode 100644
index c4ab794ae..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "v3.5"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml
deleted file mode 100644
index 5b5503500..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml
+++ /dev/null
@@ -1,342 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
- -
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "latest"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
-   description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC.'
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
-   description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC.'
-   name: ES_OPS_PVC_DYNAMIC
-  -
-   description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_OPS_CLUSTER_SIZE / 2 + 1."
-   name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml
deleted file mode 100644
index d191c0439..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "openshift/origin-"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "latest"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 96ed44011..5ee8d1e2a 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -119,6 +119,15 @@ openshift_logging_es_ops_number_of_replicas: 0
# storage related defaults
openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
+# mux - secure_forward listener service
+openshift_logging_mux_allow_external: False
+openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
+# this tells the fluentd node agent to use mux instead of sending directly to Elasticsearch
+openshift_logging_use_mux_client: False
+openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_port: 24284
+openshift_logging_mux_cpu_limit: 100m
+openshift_logging_mux_memory_limit: 512Mi
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#es_logging_contents:
@@ -127,3 +136,5 @@ openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_acc
#fluentd_config_contents:
#fluentd_throttle_contents:
#fluentd_secureforward_contents:
+#fluentd_mux_config_contents:
+#fluentd_mux_secureforward_contents:
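A minimal sketch of how these new mux defaults might be overridden from an
inventory, with a hypothetical hostname and limit:

    # group_vars/OSEv3.yml -- illustrative values only
    openshift_logging_mux_allow_external: True       # expose the secure_forward listener externally
    openshift_logging_use_mux_client: True           # node fluentd forwards to mux instead of Elasticsearch
    openshift_logging_mux_hostname: mux.apps.example.com
    openshift_logging_mux_port: 24284
    openshift_logging_mux_memory_limit: 1Gi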
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 188ea246c..2f5b68b4d 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -44,6 +44,7 @@
- logging-kibana
- logging-kibana-proxy
- logging-curator
+ - logging-mux
ignore_errors: yes
register: delete_result
changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
@@ -109,5 +110,6 @@
- logging-curator
- logging-elasticsearch
- logging-fluentd
+ - logging-mux
register: delete_result
changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 740e490e1..b34df018d 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -45,6 +45,21 @@
- procure_component: kibana-internal
hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: mux
+ hostnames: "logging-mux, {{openshift_logging_mux_hostname}}"
+ when: openshift_logging_use_mux
+
+- include: procure_shared_key.yaml
+ loop_control:
+ loop_var: shared_key_info
+ with_items:
+ - procure_component: mux
+ when: openshift_logging_use_mux
+
- name: Copy proxy TLS configuration file
copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json
when: server_tls_json is undefined
@@ -85,6 +100,14 @@
loop_control:
loop_var: node_name
+- name: Generate PEM cert for mux
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.mux
+ loop_control:
+ loop_var: node_name
+ when: openshift_logging_use_mux
+
- name: Creating necessary JKS certs
include: generate_jks.yaml
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
index 253543f54..44bd0058a 100644
--- a/roles/openshift_logging/tasks/generate_configmaps.yaml
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -134,3 +134,43 @@
when: fluentd_configmap.stdout is defined
changed_when: no
check_mode: no
+
+- block:
+ - copy:
+ src: fluent.conf
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ src: secure-forward.conf
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_mux_config_contents}}"
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_mux_secureforward_contents}}"
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-mux
+ --from-file=fluent.conf={{mktemp.stdout}}/fluent-mux.conf
+ --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward-mux.conf -o yaml --dry-run
+ register: mux_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{mux_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-mux-configmap.yaml"
+ when: mux_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
+ when: openshift_logging_use_mux
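Because oc create configmap --from-file uses the name before each "=" as the
data key, the dry-run above should emit a manifest roughly like the following
(file contents elided):

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: logging-mux
    data:
      fluent.conf: |
        ...                  # contents of fluent-mux.conf
      secure-forward.conf: |
        ...                  # contents of secure-forward-mux.conf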
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
index f396bcc6d..c1da49fd8 100644
--- a/roles/openshift_logging/tasks/generate_secrets.yaml
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -34,6 +34,36 @@
check_mode: no
changed_when: no
+- name: Retrieving the certs to use when generating secrets for mux
+ slurp: src="{{generated_certs_dir}}/{{item.file}}"
+ register: mux_key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "mux_key", file: "system.logging.mux.key"}
+ - { name: "mux_cert", file: "system.logging.mux.crt"}
+ - { name: "mux_shared_key", file: "mux_shared_key"}
+ when: openshift_logging_use_mux
+
+- name: Generating secrets for mux
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: "logging-{{component}}"
+ secret_key_file: "{{component}}_key"
+ secret_cert_file: "{{component}}_cert"
+ secrets:
+ - {key: ca, value: "{{mux_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
+ - {key: key, value: "{{mux_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
+ - {key: cert, value: "{{mux_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
+ - {key: shared_key, value: "{{mux_key_pairs | entry_from_named_pair('mux_shared_key')| b64decode }}"}
+ secret_keys: ["ca", "cert", "key", "shared_key"]
+ with_items:
+ - mux
+ loop_control:
+ loop_var: component
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_use_mux
+
- name: Generating secrets for kibana proxy
template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
vars:
@@ -43,7 +73,7 @@
- {key: session-secret, value: "{{session_secret}}"}
- {key: server-key, value: "{{kibana_key_file}}"}
- {key: server-cert, value: "{{kibana_cert_file}}"}
- - {key: server-tls, value: "{{server_tls_file}}"}
+ - {key: server-tls.json, value: "{{server_tls_file}}"}
secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
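Assuming secret.j2 renders a standard v1 Secret, the generated logging-mux
secret would carry the four keys listed in secret_keys, roughly:

    apiVersion: v1
    kind: Secret
    metadata:
      name: logging-mux
    data:
      ca: <base64 of ca.crt>
      cert: <base64 of system.logging.mux.crt>
      key: <base64 of system.logging.mux.key>
      shared_key: <base64 of mux_shared_key>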
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
index 5091c1209..e3a5c5eb3 100644
--- a/roles/openshift_logging/tasks/generate_services.yaml
+++ b/roles/openshift_logging/tasks/generate_services.yaml
@@ -85,3 +85,35 @@
when: openshift_logging_use_ops | bool
check_mode: no
changed_when: no
+
+- name: Generating logging-mux service for external connections
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
+ vars:
+ obj_name: logging-mux
+ ports:
+ - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: mux
+ externalIPs:
+ - "{{ ansible_eth0.ipv4.address }}"
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_mux_allow_external
+
+- name: Generating logging-mux service for intra-cluster connections
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
+ vars:
+ obj_name: logging-mux
+ ports:
+ - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: mux
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_use_mux and not openshift_logging_mux_allow_external
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index 28fad420b..b80f37892 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -5,60 +5,47 @@
- set_fact: openshift_logging_es_pvc_prefix="logging-es"
when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''"
-- set_fact: es_pvc_pool={{[]}}
-
-- set_fact: openshift_logging_es_pvc_prefix="{{ openshift_logging_es_pvc_prefix | default('logging-es') }}"
-
-- name: Generate PersistentVolumeClaims
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+### evaluate if the PVC attached to the dc currently matches the provided vars
+## if it does then we reuse that pvc in the DC
+- include: set_es_storage.yaml
vars:
- es_pv_selector: "{{openshift_logging_es_pv_selector}}"
- es_pvc_dynamic: "{{openshift_logging_es_pvc_dynamic | bool}}"
- es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
- es_pvc_prefix: "{{openshift_logging_es_pvc_prefix}}"
- es_pvc_size: "{{openshift_logging_es_pvc_size}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
- es_cluster_size: "{{openshift_logging_es_cluster_size}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
-
-# we should initialize the es_dc_pool with the current keys
-- name: Init pool of DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{ es_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ es_component: es
+ es_name: "{{ deployment.0 }}"
+ es_spec: "{{ deployment.1 }}"
+ es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
loop_control:
- loop_var: deploy_name
+ loop_var: deployment
+## if it does not, then we should create one that does and attach it
-# This should be used to generate new DC names if necessary
-- name: Create new DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{es_dc_pool|default([]) + [deploy_name]}}
+## create a new dc/pvc if needed
+- include: set_es_storage.yaml
vars:
- component: es
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_current_es_size | int }}
- check_mode: no
-
-- name: Generate Elasticsearch DeploymentConfig
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
- vars:
- component: es
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_memory_limit}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}"
+ es_component: es
+ es_name: "logging-es-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ es_spec: "{}"
+ es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_dc_pool }}"
- check_mode: no
- changed_when: no
+ with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }}
# --------- Tasks for Operation clusters ---------
@@ -73,74 +60,53 @@
es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
when:
- - openshift_logging_use_ops | bool
- - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
+ - openshift_logging_use_ops | bool
+ - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
check_mode: no
- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''"
-- set_fact: es_pvc_pool={{[]}}
-
-- name: Generate PersistentVolumeClaims for Ops
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+- include: set_es_storage.yaml
vars:
- es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
- es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
- es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
- es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic | bool}}"
- es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
- when:
- - openshift_logging_use_ops | bool
- check_mode: no
-
-- name: Init pool of DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{ es_ops_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
+ es_component: es-ops
+ es_name: "{{ deployment.0 }}"
+ es_spec: "{{ deployment.1 }}"
+ es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
+ es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
loop_control:
- loop_var: deploy_name
- when:
- - openshift_logging_use_ops | bool
-
-- name: Create new DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{es_ops_dc_pool | default([]) + [deploy_name]}}
- vars:
- component: es-ops
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }}
+ loop_var: deployment
when:
- - openshift_logging_use_ops | bool
- check_mode: no
+ - openshift_logging_use_ops | bool
+## if it does not, then we should create one that does and attach it
-- name: Generate Elasticsearch DeploymentConfig for Ops
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+## create a new dc/pvc if needed
+- include: set_es_storage.yaml
vars:
- component: es-ops
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}"
- es_node_quorum: "{{es_ops_node_quorum}}"
- es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
- es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
- openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
- es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}"
+ es_component: es-ops
+ es_name: "logging-es-ops-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ es_spec: "{}"
+ es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_ops_dc_pool | default([]) }}"
+ with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }}
when:
- - openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
+ - openshift_logging_use_ops | bool
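with_together zips the two lists element-wise, so each iteration of
set_es_storage.yaml sees one (name, spec) pair; a minimal illustration, where
dc_names and dc_specs are placeholder lists:

    # keys():   [logging-es-abc123ef, logging-es-de456789]
    # values(): [<spec of abc123ef>,  <spec of de456789>]
    - debug:
        msg: "{{ deployment.0 }} -> {{ deployment.1.volumes | default({}) }}"
      with_together:
        - "{{ dc_names }}"
        - "{{ dc_specs }}"
      loop_control:
        loop_var: deployment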
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 83b68fa77..aec455c22 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -27,6 +27,10 @@
loop_control:
loop_var: install_component
+- name: Install logging mux
+ include: "{{ role_path }}/tasks/install_mux.yaml"
+ when: openshift_logging_use_mux
+
- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
register: object_def_files
changed_when: no
diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml
new file mode 100644
index 000000000..296da626f
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_mux.yaml
@@ -0,0 +1,67 @@
+---
+- set_fact: mux_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
+ check_mode: no
+
+- set_fact: mux_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
+ check_mode: no
+
+- name: Check mux current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-mux
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: mux_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Generating mux deploymentconfig
+ template: src=mux.j2 dest={{mktemp.stdout}}/templates/logging-mux-dc.yaml
+ vars:
+ component: mux
+ logging_component: mux
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-fluentd:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ ops_host: "{{ mux_ops_host }}"
+ ops_port: "{{ mux_ops_port }}"
+ mux_cpu_limit: "{{openshift_logging_mux_cpu_limit}}"
+ mux_memory_limit: "{{openshift_logging_mux_memory_limit}}"
+ replicas: "{{mux_replica_count.stdout | default (0)}}"
+ mux_node_selector: "{{openshift_logging_mux_nodeselector | default({})}}"
+ check_mode: no
+ changed_when: no
+
+- name: "Check mux hostmount-anyuid permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get scc/hostmount-anyuid -o jsonpath='{.users}'
+ register: mux_hostmount_anyuid
+ check_mode: no
+ changed_when: no
+
+- name: "Set hostmount-anyuid permissions for mux"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: mux_output
+ failed_when: "mux_output.rc == 1 and 'exists' not in mux_output.stderr"
+ check_mode: no
+ when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
+
+- name: "Check mux cluster-reader permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
+ register: mux_cluster_reader
+ check_mode: no
+ changed_when: no
+
+- name: "Set cluster-reader permissions for mux"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: mux2_output
+ failed_when: "mux2_output.rc == 1 and 'exists' not in mux2_output.stderr"
+ check_mode: no
+ when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
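The two check-then-grant pairs above could equally be written with the
oc_adm_policy_user module used elsewhere in this series; a sketch, assuming
the same service account:

    - oc_adm_policy_user:
        namespace: "{{ openshift_logging_namespace }}"
        resource_kind: scc
        resource_name: hostmount-anyuid
        user: "system:serviceaccount:{{ openshift_logging_namespace }}:aggregated-logging-fluentd"

    - oc_adm_policy_user:
        namespace: "{{ openshift_logging_namespace }}"
        resource_kind: cluster-role
        resource_name: cluster-reader
        user: "system:serviceaccount:{{ openshift_logging_namespace }}:aggregated-logging-fluentd"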
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml
index cb9509de1..c4db7d033 100644
--- a/roles/openshift_logging/tasks/oc_apply.yaml
+++ b/roles/openshift_logging/tasks/oc_apply.yaml
@@ -1,52 +1,52 @@
---
-- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }}
- --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_init
- failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
- changed_when: no
+- oc_obj:
+ kind: "{{ file_content.kind }}"
+ name: "{{ file_content.metadata.name }}"
+ state: present
+ namespace: "{{ namespace }}"
+ files:
+ - "{{ file_name }}"
+ when: file_content.kind != "Service"
-- name: Applying {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: no
+## still need to do this for services until the template logic is replaced by oc_*
+- block:
+ - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}}
+ register: generation_init
+ failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
+ changed_when: no
-- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_changed
- failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''"
- changed_when: generation_changed.stdout | default (0) | int > generation_init.stdout | default(0) | int
- when:
- - "'field is immutable' not in generation_apply.stderr"
+ - name: Applying {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: no
-- name: Removing previous {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- delete -f {{ file_name }}
- -n {{ namespace }}
- register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
- changed_when: generation_delete.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ - name: Removing previous {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ delete -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_delete
+ failed_when: "'error' in generation_delete.stderr"
+ changed_when: generation_delete.rc == 0
+ when: "'field is immutable' in generation_apply.stderr"
-- name: Recreating {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: generation_apply.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ - name: Recreating {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: generation_apply.rc == 0
+ when: "'field is immutable' in generation_apply.stderr"
+ when: file_content.kind == "Service"
diff --git a/roles/openshift_logging/tasks/procure_shared_key.yaml b/roles/openshift_logging/tasks/procure_shared_key.yaml
new file mode 100644
index 000000000..056ff6b98
--- /dev/null
+++ b/roles/openshift_logging/tasks/procure_shared_key.yaml
@@ -0,0 +1,25 @@
+---
+- name: Checking for {{ shared_key_info.procure_component }}_shared_key
+ stat: path="{{generated_certs_dir}}/{{ shared_key_info.procure_component }}_shared_key"
+ register: component_shared_key_file
+ check_mode: no
+
+- name: Trying to discover shared key variable name for {{ shared_key_info.procure_component }}
+ set_fact: procure_component_shared_key={{ lookup('env', '{{shared_key_info.procure_component}}' + '_shared_key') }}
+ when:
+ - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined
+ check_mode: no
+
+- name: Creating shared_key for {{ shared_key_info.procure_component }}
+ copy: content="{{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}"
+ dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key"
+ check_mode: no
+ when:
+ - not component_shared_key_file.stat.exists
+
+- name: Copying shared key for {{ shared_key_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_shared_key}}" dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key"
+ check_mode: no
+ when:
+ - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined
+ - not component_shared_key_file.stat.exists
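random_word is a filter plugin local to this repository; for illustration
only, a rough equivalent of the key-generation task using a stock Ansible
lookup would be:

    - copy:
        content: "{{ lookup('password', '/dev/null length=64 chars=ascii_letters,digits') }}"
        dest: "{{ generated_certs_dir }}/mux_shared_key"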
diff --git a/roles/openshift_logging/tasks/set_es_storage.yaml b/roles/openshift_logging/tasks/set_es_storage.yaml
new file mode 100644
index 000000000..198b1d04d
--- /dev/null
+++ b/roles/openshift_logging/tasks/set_es_storage.yaml
@@ -0,0 +1,82 @@
+---
+- set_fact: es_storage_type="{{ es_spec.volumes['elasticsearch-storage'] }}"
+ when: es_spec.volumes is defined
+
+- set_fact: es_storage_claim="{{ es_spec.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName }}"
+ when:
+ - es_spec.volumes is defined
+ - es_storage_type.persistentVolumeClaim is defined
+
+- set_fact: es_storage_claim=""
+ when:
+ - not es_spec.volumes is defined or not es_storage_type.persistentVolumeClaim is defined
+
+## take an ES dc and evaluate its storage option
+# if it is a hostmount or emptydir we don't do anything with it
+# if it's a pvc we see if the corresponding pvc matches the provided specs (if they exist)
+- oc_obj:
+ state: list
+ kind: pvc
+ name: "{{ es_storage_claim }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ register: pvc_spec
+ failed_when: pvc_spec.results.stderr is defined
+ when:
+ - es_spec.volumes is defined
+ - es_storage_type.persistentVolumeClaim is defined
+
+- set_fact: pvc_size="{{ pvc_spec.results.results[0].spec.resources.requests.storage }}"
+ when:
+ - pvc_spec.results is defined
+ - pvc_spec.results.results[0].spec is defined
+
+# if not, create the pvc and use it
+- block:
+
+ - name: Generating PersistentVolumeClaims
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+ size: "{{ es_pvc_size }}"
+ access_modes: "{{ openshift_logging_storage_access_modes }}"
+ pv_selector: "{{ es_pv_selector }}"
+ when: not es_pvc_dynamic | bool
+ check_mode: no
+ changed_when: no
+
+ - name: Generating PersistentVolumeClaims - Dynamic
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ size: "{{ es_pvc_size }}"
+ access_modes: "{{ openshift_logging_storage_access_modes }}"
+ pv_selector: "{{ es_pv_selector }}"
+ when: es_pvc_dynamic | bool
+ check_mode: no
+ changed_when: no
+
+ - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+
+ when:
+ - es_pvc_size | search('^\d.*')
+ - not es_spec.volumes is defined or not es_storage_claim | search( es_pvc_prefix ) or ( not pvc_size | search( es_pvc_size ) and not es_pvc_size | search( pvc_size ) )
+
+- name: Generate Elasticsearch DeploymentConfig
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: "{{ es_component }}"
+ deploy_name: "{{ es_name }}"
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{ es_component }}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{ es_cpu_limit }}"
+ es_memory_limit: "{{ es_memory_limit }}"
+ es_node_selector: "{{ es_node_selector }}"
+ es_storage: "{{ openshift_logging_facts | es_storage( es_name, es_storage_claim ) }}"
+ es_number_of_shards: "{{ es_number_of_shards }}"
+ es_number_of_replicas: "{{ es_number_of_replicas }}"
+ check_mode: no
+ changed_when: no
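A worked example of the reuse-or-create decision above, with hypothetical
values:

    # inputs:
    #   es_pvc_prefix:    logging-es
    #   es_pvc_size:      10Gi
    #   es_storage_claim: logging-es-0   # claim attached to the existing dc
    #   pvc_size:         10Gi           # size read back from the live PVC
    # result: the claim name matches the prefix and the sizes match, so the
    # PVC-creation block is skipped and the existing claim is reused; raising
    # es_pvc_size to 20Gi would fail the size check and generate a new
    # logging-es-<n> claim instead.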
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index edbb62c3e..1042b3daa 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -21,6 +21,26 @@
loop_control:
loop_var: fluentd_host
+- name: Retrieve mux
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=mux"
+ namespace: "{{openshift_logging_namespace}}"
+ register: mux_dc
+ when: openshift_logging_use_mux
+
+- name: start mux
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_logging_namespace}}"
+ replicas: "{{ openshift_logging_mux_replica_count | default (1) }}"
+ with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_mux
+
- name: Retrieve elasticsearch
oc_obj:
state: list
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index 4b3722e29..d20c57cc1 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -21,6 +21,26 @@
loop_control:
loop_var: fluentd_host
+- name: Retrieve mux
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=mux"
+ namespace: "{{openshift_logging_namespace}}"
+ register: mux_dc
+ when: openshift_logging_use_mux
+
+- name: stop mux
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_logging_namespace}}"
+ replicas: 0
+ with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_mux
+
- name: Retrieve elasticsearch
oc_obj:
state: list
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
index a0fefd882..c6284166b 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging/templates/curator.j2
@@ -89,9 +89,6 @@ spec:
- name: config
mountPath: /etc/curator/settings
readOnly: true
- - name: elasticsearch-storage
- mountPath: /elasticsearch/persistent
- readOnly: true
volumes:
- name: certs
secret:
@@ -99,5 +96,3 @@ spec:
- name: config
configMap:
name: logging-curator
- - name: elasticsearch-storage
- emptyDir: {}
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
index 16185fc1d..f89855bf5 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging/templates/es.j2
@@ -95,6 +95,13 @@ spec:
readOnly: true
- name: elasticsearch-storage
mountPath: /elasticsearch/persistent
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/elasticsearch/probe/readiness.sh"
+ initialDelaySeconds: 5
+ timeoutSeconds: 4
+ periodSeconds: 5
volumes:
- name: elasticsearch
secret:
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
index 0bf1686ad..d13691259 100644
--- a/roles/openshift_logging/templates/fluentd.j2
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -59,6 +59,11 @@ spec:
- name: dockercfg
mountPath: /etc/sysconfig/docker
readOnly: true
+{% if openshift_logging_use_mux_client | bool %}
+ - name: muxcerts
+ mountPath: /etc/fluent/muxkeys
+ readOnly: true
+{% endif %}
env:
- name: "K8S_HOST_URL"
value: "{{openshift_logging_master_url}}"
@@ -122,6 +127,8 @@ spec:
value: "{{openshift_logging_fluentd_journal_source | default('')}}"
- name: "JOURNAL_READ_FROM_HEAD"
value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ - name: "USE_MUX_CLIENT"
+ value: "{{openshift_logging_use_mux_client| default('false')}}"
volumes:
- name: runlogjournal
hostPath:
@@ -147,3 +154,8 @@ spec:
- name: dockercfg
hostPath:
path: /etc/sysconfig/docker
+{% if openshift_logging_use_mux_client | bool %}
+ - name: muxcerts
+ secret:
+ secretName: logging-mux
+{% endif %}
diff --git a/roles/openshift_logging/templates/mux.j2 b/roles/openshift_logging/templates/mux.j2
new file mode 100644
index 000000000..41e6abd52
--- /dev/null
+++ b/roles/openshift_logging/templates/mux.j2
@@ -0,0 +1,121 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{replicas|default(0)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ spec:
+ serviceAccountName: aggregated-logging-fluentd
+{% if mux_node_selector is iterable and mux_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in mux_node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - name: "mux"
+ image: {{image}}
+ imagePullPolicy: Always
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
+ resources:
+ limits:
+{% if mux_cpu_limit is not none %}
+ cpu: "{{mux_cpu_limit}}"
+{% endif %}
+{% if mux_memory_limit is not none %}
+ memory: "{{mux_memory_limit}}"
+{% endif %}
+{% endif %}
+ ports:
+ - containerPort: "{{ openshift_logging_mux_port }}"
+ name: mux-forward
+ volumeMounts:
+ - name: config
+ mountPath: /etc/fluent/configs.d/user
+ readOnly: true
+ - name: certs
+ mountPath: /etc/fluent/keys
+ readOnly: true
+ - name: dockerhostname
+ mountPath: /etc/docker-hostname
+ readOnly: true
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: muxcerts
+ mountPath: /etc/fluent/muxkeys
+ readOnly: true
+ env:
+ - name: "K8S_HOST_URL"
+ value: "{{openshift_logging_master_url}}"
+ - name: "ES_HOST"
+ value: "{{openshift_logging_es_host}}"
+ - name: "ES_PORT"
+ value: "{{openshift_logging_es_port}}"
+ - name: "ES_CLIENT_CERT"
+ value: "{{openshift_logging_es_client_cert}}"
+ - name: "ES_CLIENT_KEY"
+ value: "{{openshift_logging_es_client_key}}"
+ - name: "ES_CA"
+ value: "{{openshift_logging_es_ca}}"
+ - name: "OPS_HOST"
+ value: "{{ops_host}}"
+ - name: "OPS_PORT"
+ value: "{{ops_port}}"
+ - name: "OPS_CLIENT_CERT"
+ value: "{{openshift_logging_es_ops_client_cert}}"
+ - name: "OPS_CLIENT_KEY"
+ value: "{{openshift_logging_es_ops_client_key}}"
+ - name: "OPS_CA"
+ value: "{{openshift_logging_es_ops_ca}}"
+ - name: "USE_JOURNAL"
+ value: "false"
+ - name: "JOURNAL_SOURCE"
+ value: "{{openshift_logging_fluentd_journal_source | default('')}}"
+ - name: "JOURNAL_READ_FROM_HEAD"
+ value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ - name: FORWARD_LISTEN_HOST
+ value: "{{ openshift_logging_mux_hostname }}"
+ - name: FORWARD_LISTEN_PORT
+ value: "{{ openshift_logging_mux_port }}"
+ - name: USE_MUX
+ value: "true"
+ - name: MUX_ALLOW_EXTERNAL
+ value: "{{ openshift_logging_mux_allow_external| default('false') }}"
+ volumes:
+ - name: config
+ configMap:
+ name: logging-mux
+ - name: certs
+ secret:
+ secretName: logging-fluentd
+ - name: dockerhostname
+ hostPath:
+ path: /etc/hostname
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: muxcerts
+ secret:
+ secretName: logging-mux
diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2
index 6c4ec0c76..70644a39c 100644
--- a/roles/openshift_logging/templates/service.j2
+++ b/roles/openshift_logging/templates/service.j2
@@ -26,3 +26,9 @@ spec:
{% for key, value in selector.iteritems() %}
{{key}}: {{value}}
{% endfor %}
+{% if externalIPs is defined -%}
+ externalIPs:
+{% for ip in externalIPs %}
+ - {{ ip }}
+{% endfor %}
+{% endif %}
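Rendered with the mux vars from generate_services.yaml, the new externalIPs
branch would yield something like the following (the IP is hypothetical and
service.j2's surrounding fields are abbreviated):

    apiVersion: v1
    kind: Service
    metadata:
      name: logging-mux
      labels:
        logging-infra: support
    spec:
      ports:
      - name: mux-forward
        port: 24284
        targetPort: mux-forward
      selector:
        provider: openshift
        component: mux
      externalIPs:
      - 192.0.2.10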
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index f202486a5..cfc4e2722 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -3,24 +3,13 @@
msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1."
when: not openshift.common.version_gte_3_1_or_1_1 | bool
-- name: Copy Configuration to temporary conf
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}}
- changed_when: false
-
- name: Add Management Infrastructure project
- command: >
- {{ openshift.common.client_binary }} adm new-project
- management-infra
- --description="Management Infrastructure"
- --config={{manage_iq_tmp_conf}}
- register: osmiq_create_mi_project
- failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0"
- changed_when: osmiq_create_mi_project.rc == 0
+ oc_project:
+ name: management-infra
+ description: Management Infrastructure
- name: Create Admin and Image Inspector Service Account
oc_serviceaccount:
- kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
name: "{{ item }}"
namespace: management-infra
state: present
@@ -28,51 +17,42 @@
- management-admin
- inspector-admin
-- name: Create Cluster Role
- shell: >
- echo {{ manageiq_cluster_role | to_json | quote }} |
- {{ openshift.common.client_binary }} create
- --config={{manage_iq_tmp_conf}}
- -f -
- register: osmiq_create_cluster_role
- failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0"
- changed_when: osmiq_create_cluster_role.rc == 0
+- name: Create manageiq cluster role
+ oc_clusterrole:
+ name: management-infra-admin
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods/proxy
+ verbs:
+ - "*"
- name: Create Hawkular Metrics Admin Cluster Role
- shell: >
- echo {{ manageiq_metrics_admin_clusterrole | to_json | quote }} |
- {{ openshift.common.client_binary }}
- --config={{manage_iq_tmp_conf}}
- create -f -
- register: oshawkular_create_cluster_role
- failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0"
- changed_when: oshawkular_create_cluster_role.rc == 0
- # AUDIT:changed_when_note: Checking the return code is insufficient
- # here. We really need to verify the if the role even exists before
- # we run this task.
+ oc_clusterrole:
+ name: hawkular-metrics-admin
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - hawkular-alerts
+ - hawkular-metrics
+ verbs:
+ - "*"
- name: Configure role/user permissions
- command: >
- {{ openshift.common.client_binary }} adm {{item}}
- --config={{manage_iq_tmp_conf}}
- with_items: "{{manage_iq_tasks}}"
- register: osmiq_perm_task
- failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0"
- changed_when: osmiq_perm_task.rc == 0
- # AUDIT:changed_when_note: Checking the return code is insufficient
- # here. We really need to compare the current role/user permissions
- # with their expected state. I think we may have a module for this?
-
+ oc_adm_policy_user:
+ namespace: management-infra
+ resource_name: "{{ item.resource_name }}"
+ resource_kind: "{{ item.resource_kind }}"
+ user: "{{ item.user }}"
+ with_items: "{{ manage_iq_tasks }}"
- name: Configure 3_2 role/user permissions
- command: >
- {{ openshift.common.client_binary }} adm {{item}}
- --config={{manage_iq_tmp_conf}}
+ oc_adm_policy_user:
+ namespace: management-infra
+ resource_name: "{{ item.resource_name }}"
+ resource_kind: "{{ item.resource_kind }}"
+ user: "{{ item.user }}"
with_items: "{{manage_iq_openshift_3_2_tasks}}"
- register: osmiq_perm_3_2_task
- failed_when: osmiq_perm_3_2_task.rc != 0
- changed_when: osmiq_perm_3_2_task.rc == 0
when: openshift.common.version_gte_3_2_or_1_2 | bool
-
-- name: Clean temporary configuration file
- file: path={{manage_iq_tmp_conf}} state=absent
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 9936bb126..15d667628 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -1,41 +1,31 @@
---
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-manageiq_cluster_role:
- apiVersion: v1
- kind: ClusterRole
- metadata:
- name: management-infra-admin
- rules:
- - resources:
- - pods/proxy
- verbs:
- - '*'
-
-manageiq_metrics_admin_clusterrole:
- apiVersion: v1
- kind: ClusterRole
- metadata:
- name: hawkular-metrics-admin
- rules:
- - apiGroups:
- - ""
- resources:
- - hawkular-metrics
- - hawkular-alerts
- verbs:
- - '*'
-
-manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
-
manage_iq_tasks:
-- policy add-role-to-user -n management-infra admin -z management-admin
-- policy add-role-to-user -n management-infra management-infra-admin -z management-admin
-- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
-- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
-- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
-- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
-- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
-- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
+- resource_kind: role
+ resource_name: admin
+ user: management-admin
+- resource_kind: role
+ resource_name: management-infra-admin
+ user: management-admin
+- resource_kind: cluster-role
+ resource_name: cluster-reader
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: scc
+ resource_name: privileged
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: system:image-puller
+ user: system:serviceaccount:management-infra:inspector-admin
+- resource_kind: scc
+ resource_name: privileged
+ user: system:serviceaccount:management-infra:inspector-admin
+- resource_kind: cluster-role
+ resource_name: self-provisioner
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: hawkular-metrics-admin
+ user: system:serviceaccount:management-infra:management-admin
manage_iq_openshift_3_2_tasks:
-- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: system:image-auditor
+ user: system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml b/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml
deleted file mode 100644
index ced2df1d0..000000000
--- a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: generate heapster key/cert
- command: >
- {{ openshift.common.admin_binary }} ca create-server-cert
- --config={{ mktemp.stdout }}/admin.kubeconfig
- --key='{{ mktemp.stdout }}/heapster.key'
- --cert='{{ mktemp.stdout }}/heapster.cert'
- --hostnames=heapster
- --signer-cert='{{ mktemp.stdout }}/ca.crt'
- --signer-key='{{ mktemp.stdout }}/ca.key'
- --signer-serial='{{ mktemp.stdout }}/ca.serial.txt'
-
-- when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines"
- block:
- - name: read files for the heapster secret
- slurp: src={{ item }}
- register: heapster_secret
- with_items:
- - "{{ mktemp.stdout }}/heapster.cert"
- - "{{ mktemp.stdout }}/heapster.key"
- - "{{ client_ca }}"
- vars:
- custom_ca: "{{ mktemp.stdout }}/heapster_client_ca.crt"
- default_ca: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- client_ca: "{{ custom_ca|exists|ternary(custom_ca, default_ca) }}"
- - name: generate heapster secret template
- template:
- src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml"
- force: no
- vars:
- name: heapster-secrets
- labels:
- metrics-infra: heapster
- data:
- heapster.cert: "{{ heapster_secret.results[0].content }}"
- heapster.key: "{{ heapster_secret.results[1].content }}"
- heapster.client-ca: "{{ heapster_secret.results[2].content }}"
- heapster.allowed-users: >
- {{ openshift_metrics_heapster_allowed_users|b64encode }}
diff --git a/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml
new file mode 100644
index 000000000..e81d90ae7
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml
@@ -0,0 +1,14 @@
+---
+- name: generate heapster secret template
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml"
+ force: no
+ vars:
+ name: heapster-secrets
+ labels:
+ metrics-infra: heapster
+ data:
+ heapster.allowed-users: >
+ {{ openshift_metrics_heapster_allowed_users|b64encode }}
+ when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines"
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index 8d27c4930..d13b96be1 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -41,6 +41,8 @@
- {port: 80, targetPort: http-endpoint}
selector:
name: "{{obj_name}}"
+ annotations:
+ service.alpha.openshift.io/serving-cert-secret-name: heapster-certs
labels:
metrics-infra: "{{obj_name}}"
name: "{{obj_name}}"
@@ -64,4 +66,4 @@
namespace: "{{ openshift_metrics_project }}"
changed_when: no
-- include: generate_heapster_certificates.yaml
+- include: generate_heapster_secrets.yaml
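With the service.alpha.openshift.io/serving-cert-secret-name annotation added
above, OpenShift's service-serving-cert signer generates a TLS keypair for the
service and keeps it in the named secret, which is what lets heapster.j2 below
mount heapster-certs and point --tls_cert/--tls_key at tls.crt and tls.key.
Schematically:

    apiVersion: v1
    kind: Service
    metadata:
      name: heapster
      annotations:
        service.alpha.openshift.io/serving-cert-secret-name: heapster-certs
    # the signer controller then maintains secret/heapster-certs containing
    # the keys tls.crt and tls.key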
diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2
index f01ccfd58..ab998c2fb 100644
--- a/roles/openshift_metrics/templates/heapster.j2
+++ b/roles/openshift_metrics/templates/heapster.j2
@@ -34,9 +34,9 @@ spec:
- "heapster-wrapper.sh"
- "--wrapper.allowed_users_file=/secrets/heapster.allowed-users"
- "--source=kubernetes.summary_api:${MASTER_URL}?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250"
- - "--tls_cert=/secrets/heapster.cert"
- - "--tls_key=/secrets/heapster.key"
- - "--tls_client_ca=/secrets/heapster.client-ca"
+ - "--tls_cert=/heapster-certs/tls.crt"
+ - "--tls_key=/heapster-certs/tls.key"
+ - "--tls_client_ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- "--allowed_users=%allowed_users%"
- "--metric_resolution={{openshift_metrics_resolution}}"
{% if not openshift_metrics_heapster_standalone %}
@@ -80,6 +80,8 @@ spec:
volumeMounts:
- name: heapster-secrets
mountPath: "/secrets"
+ - name: heapster-certs
+ mountPath: "/heapster-certs"
{% if not openshift_metrics_heapster_standalone %}
- name: hawkular-metrics-certs
mountPath: "/hawkular-metrics-certs"
@@ -94,6 +96,9 @@ spec:
- name: heapster-secrets
secret:
secretName: heapster-secrets
+ - name: heapster-certs
+ secret:
+ secretName: heapster-certs
{% if not openshift_metrics_heapster_standalone %}
- name: hawkular-metrics-certs
secret:
diff --git a/roles/openshift_metrics/templates/service.j2 b/roles/openshift_metrics/templates/service.j2
index 8df89127b..ce0bc2eec 100644
--- a/roles/openshift_metrics/templates/service.j2
+++ b/roles/openshift_metrics/templates/service.j2
@@ -2,6 +2,12 @@ apiVersion: "v1"
kind: "Service"
metadata:
name: "{{obj_name}}"
+{% if annotations is defined%}
+ annotations:
+{% for key, value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
{% if labels is defined%}
labels:
{% for key, value in labels.iteritems() %}
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 59003bbf9..98139cac2 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -38,28 +38,33 @@
- name: Check for swap usage
command: grep "^[^#].*swap" /etc/fstab
# grep: match any lines which don't begin with '#' and contain 'swap'
- # command: swapon --summary
- # Alternate option, however if swap entries are in fstab, swap will be
- # enabled at boot. Grepping fstab should catch a condition when swap was
- # disabled, but the fstab entries were not removed.
changed_when: false
failed_when: false
register: swap_result
- # Disable Swap Block
+# Disable Swap Block
- block:
- name: Disable swap
command: swapoff --all
- name: Remove swap entries from /etc/fstab
+ replace:
+ dest: /etc/fstab
+ regexp: '(^[^#].*swap.*)'
+ replace: '# \1'
+ backup: yes
+
+ - name: Add notice about disabling swap
lineinfile:
dest: /etc/fstab
- regexp: 'swap'
- state: absent
+ line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+ state: present
- when: swap_result.stdout_lines | length > 0
- # End Disable Swap Block
+ when:
+ - swap_result.stdout_lines | length > 0
+ - openshift_disable_swap | default(true)
+# End Disable Swap Block
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
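To make the replace concrete, a hypothetical fstab entry before and after the
task runs:

    # before:  /dev/mapper/vg0-swap  swap  swap  defaults  0 0
    # after:   # /dev/mapper/vg0-swap  swap  swap  defaults  0 0
    # commenting the line out (rather than deleting it, as the old lineinfile
    # with regexp 'swap' did) keeps the entry recoverable while still
    # preventing swap from being enabled at boot.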
diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
index e91891ca9..416cf605a 100644
--- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
+++ b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
@@ -6,20 +6,6 @@
# - docker_version
# - skip_docker_restart
-# We need docker service up to remove all the images, but these services will keep
-# trying to re-start and thus re-pull the images we're trying to delete.
-- name: Stop containerized services
- service: name={{ item }} state=stopped
- with_items:
- - "{{ openshift.common.service_type }}-master"
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- - etcd_container
- - openvswitch
- failed_when: false
- when: openshift.common.is_containerized | bool
-
- name: Check Docker image count
shell: "docker images -aq | wc -l"
register: docker_image_count
@@ -45,5 +31,4 @@
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
-- include: restart.yml
- when: not skip_docker_restart | default(False) | bool
+# starting docker happens back in ../main.yml where it calls ../restart.yml
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index 01bd3bf38..e725f4a5d 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -9,6 +9,28 @@
# - openshift_release
# tasks file for openshift_node_upgrade
+
+- name: Stop node and openvswitch services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-node"
+ - openvswitch
+ failed_when: false
+
+- name: Stop additional containerized services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-master-api"
+ - etcd_container
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
- include: docker/upgrade.yml
vars:
# We will restart Docker ourselves after everything is ready:
@@ -16,7 +38,6 @@
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
- - not openshift.common.is_containerized | bool
- include: "{{ node_config_hook }}"
when: node_config_hook is defined
@@ -67,16 +88,6 @@
state: latest
when: not openshift.common.is_containerized | bool
-- name: Restart openvswitch
- systemd:
- name: openvswitch
- state: started
- when:
- - not openshift.common.is_containerized | bool
-
-# Mandatory Docker restart, ensure all containerized services are running:
-- include: docker/restart.yml
-
- name: Update oreg value
yedit:
src: "{{ openshift.common.config_base }}/node/node-config.yaml"
@@ -88,10 +99,6 @@
- name: Check for swap usage
command: grep "^[^#].*swap" /etc/fstab
# grep: match any lines which don't begin with '#' and contain 'swap'
- # command: swapon --summary
- # Alternate option, however if swap entries are in fstab, swap will be
- # enabled at boot. Grepping fstab should catch a condition when swap was
- # disabled, but the fstab entries were not removed.
changed_when: false
failed_when: false
register: swap_result
@@ -103,19 +110,25 @@
command: swapoff --all
- name: Remove swap entries from /etc/fstab
+ replace:
+ dest: /etc/fstab
+ regexp: '(^[^#].*swap.*)'
+ replace: '# \1'
+ backup: yes
+
+ - name: Add notice about disabling swap
lineinfile:
dest: /etc/fstab
- regexp: 'swap'
- state: absent
+ line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+ state: present
- when: swap_result.stdout_lines | length > 0
+ when:
+ - swap_result.stdout_lines | length > 0
+ - openshift_disable_swap | default(true)
# End Disable Swap Block
-- name: Restart rpm node service
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: restarted
- when: not openshift.common.is_containerized | bool
+# Restart all services
+- include: restart.yml
- name: Wait for node to be ready
oc_obj:
diff --git a/roles/openshift_node_upgrade/tasks/docker/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index 176fc3c0b..a9fab74e1 100644
--- a/roles/openshift_node_upgrade/tasks/docker/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -12,7 +12,7 @@
openshift_facts:
role: docker
-- name: Restart containerized services
+- name: Start services
service: name={{ item }} state=started
with_items:
- etcd_container
@@ -22,7 +22,6 @@
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
- name: Wait for master API to come back online
wait_for:
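
The hunk above is truncated at the master API wait. As a rough sketch of what such a
wait_for task looks like (the parameters are real wait_for options, but the host and
port expressions here are assumptions, not taken from this diff):

    - name: Wait for master API to come back online
      wait_for:
        host: "{{ openshift.common.hostname }}"
        port: "{{ openshift.master.api_port | default(8443) }}"
        delay: 10
        timeout: 600
        state: started  # i.e. the port is accepting connections
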
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 26ca5eebf..2b35e5137 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -29,14 +29,14 @@
path: /var/lib/glusterd
state: absent
delegate_to: "{{ item }}"
- with_items: "{{ groups.oo_glusterfs_to_config }}"
+ with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
when: openshift_storage_glusterfs_wipe
- name: Get GlusterFS storage devices state
command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
register: devices_info
delegate_to: "{{ item }}"
- with_items: "{{ groups.oo_glusterfs_to_config }}"
+ with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
failed_when: False
when: openshift_storage_glusterfs_wipe
@@ -65,7 +65,7 @@
kind: node
state: add
labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
- with_items: "{{ groups.oo_glusterfs_to_config }}"
+ with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
loop_control:
loop_var: glusterfs_host
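
The added "| default([])" guards matter because groups.oo_glusterfs_to_config is
undefined when the inventory contains no GlusterFS hosts, and with_items over an
undefined variable fails the play. A minimal sketch of the pattern (the debug task
is illustrative only):

    - debug:
        msg: "would wipe {{ item }}"
      # Iterates nothing, and is effectively skipped, when the group is absent:
      with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
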
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c3d001bb4..fa9b20e92 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -7,8 +7,13 @@
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
# be used by default. Users must indicate what they want.
-- fail:
- msg: "Must specify openshift_release or openshift_image_tag in inventory to install origin. (suggestion: add openshift_release=\"1.2\" to inventory)"
+- name: Abort when we cannot safely guess what Origin image version the user wanted
+ fail:
+ msg: |-
+ To install a containerized Origin release, you must set openshift_release or
+ openshift_image_tag in your inventory to specify which version of the OpenShift
+      component images to use; the latest Origin tags are usually alpha builds, so
+      we will not guess a version for you. (Suggestion: add openshift_release="x.y" to inventory.)
when:
- is_containerized | bool
- openshift.common.deployment_type == 'origin'
@@ -27,7 +32,10 @@
when: openshift_release is defined
# Verify that the image tag is in a valid format
-- block:
+- when:
+ - openshift_image_tag is defined
+ - openshift_image_tag != "latest"
+ block:
# Verifies that when the deployment type is origin the version:
# - starts with a v
@@ -35,12 +43,14 @@
# It also allows for optional trailing data which:
# - must start with a dash
# - may contain numbers, letters, dashes and dots.
- - name: Verify Origin openshift_image_tag is valid
+ - name: (Origin) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'origin'
assert:
that:
- "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
- msg: "openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1"
- when: openshift.common.deployment_type == 'origin'
+ msg: |-
+ openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
+ You specified openshift_image_tag={{ openshift_image_tag }}
# Verifies that when the deployment type is openshift-enterprise the version:
# - starts with a v
@@ -48,16 +58,14 @@
# It also allows for optional trailing data which:
# - must start with a dash
# - may contain numbers
- - name: Verify Enterprise openshift_image_tag is valid
+ - name: (Enterprise) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'openshift-enterprise'
assert:
that:
- "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
- msg: "openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4"
- when: openshift.common.deployment_type == 'openshift-enterprise'
-
- when:
- - openshift_image_tag is defined
- - openshift_image_tag != "latest"
+ msg: |-
+ openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4
+ You specified openshift_image_tag={{ openshift_image_tag }}
# Make sure we copy this to a fact if given a var:
- set_fact:
@@ -119,30 +127,42 @@
- fail:
msg: openshift_version role was unable to set openshift_version
+ name: Abort if openshift_version was not set
when: openshift_version is not defined
- fail:
msg: openshift_version role was unable to set openshift_image_tag
+ name: Abort if openshift_image_tag was not set
when: openshift_image_tag is not defined
- fail:
msg: openshift_version role was unable to set openshift_pkg_version
+ name: Abort if openshift_pkg_version was not set
when: openshift_pkg_version is not defined
- fail:
- msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories."
+ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+  name: Abort if no OpenShift version is available
when:
- not is_containerized | bool
- openshift_version == '0.0'
-# We can't map an openshift_release to full rpm version like we can with containers, make sure
+# We can't map an openshift_release to full rpm version like we can with containers; make sure
# the rpm version we looked up matches the release requested and error out if not.
-- fail:
- msg: "Detected OpenShift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories, inventory, or run the appropriate OpenShift upgrade playbook."
+- name: For an RPM install, abort when the requested release does not match the available version
when:
- not is_containerized | bool
- openshift_release is defined
- - not openshift_version.startswith(openshift_release) | bool
+ assert:
+ that:
+ - openshift_version.startswith(openshift_release) | bool
+ msg: |-
+      You requested openshift_release {{ openshift_release }}, but the latest OpenShift
+      RPM detected on host {{ inventory_hostname }} is
+      {{ openshift.common.service_type }}-{{ openshift_version }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
# The end result of these three variables is quite important so make sure they are displayed and logged:
- debug: var=openshift_release
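
For quick reference, inventory values that satisfy the two image tag regexes above
(examples drawn from the assert messages themselves):

    # origin: v#.#.#[-optional.#]
    openshift_image_tag=v1.2.3
    openshift_image_tag=v3.5.1-alpha.1

    # openshift-enterprise: v#.#[.#[.#]][-#]
    openshift_image_tag=v3.4.1
    openshift_image_tag=v3.5.1.3
    openshift_image_tag=v1.2.3-4
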